| text (string, 6–947k chars) | repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) |
|---|---|---|---|---|---|---|
pass
<caret><selection>n = 0
while n:
print("spam")</selection>
pass
| siosio/intellij-community | python/testData/mover/multiLineSelectionDifferentIndentLevelsMoveToEmptyLine_afterDown.py | Python | apache-2.0 | 74 | 0.081081 |
"""
Creates an Azure serverless function.
"""
import json
import os
from django.conf import settings  # required for settings.MEDIA_URL / settings.MEDIA_ROOT below
from common.methods import generate_string_from_template, set_progress
from infrastructure.models import CustomField
def create_custom_fields_as_needed():
CustomField.objects.get_or_create(
name='azure_function_name', type='STR',
defaults={'label': 'Azure function name', 'description': 'Name of a deployed azure function', 'show_as_attribute': True}
)
CustomField.objects.get_or_create(
name='resource_group_name', type='STR',
defaults={'label': 'Azure Resource Group', 'description': 'Used by the Azure blueprints',
'show_as_attribute': True}
)
def run(job, **kwargs):
resource = kwargs.get('resource')
function_name = '{{ function_name }}'
storage_account_name = function_name + "storageaccount"
file_location = "{{ file_location }}"
if file_location.startswith(settings.MEDIA_URL):
set_progress("Converting relative URL to filesystem path")
file_location = file_location.replace(settings.MEDIA_URL, settings.MEDIA_ROOT)
create_custom_fields_as_needed()
# Check if the function name is already in use
function_name_check = "az functionapp list"
val = os.popen(function_name_check).read()
function_name_check_response = json.loads(val)
used_names = []
for function in function_name_check_response:
used_names.append(function['name'])
if function_name in used_names:
response = "{0} function name is already in use. Please use a different one.".format(function_name)
return "failure", response, ""
# Create a resource group for the function
resource_group_name = function_name + "-resource-group"
resource_group_create = 'az group create --name ' + resource_group_name + ' --location westeurope'
os.system(resource_group_create)
# Check whether the storage account name is already in use, then create storage for the function
name_check = "az storage account check-name --name {0}".format(storage_account_name)
name_check_response = json.loads(os.popen(name_check).read())
if name_check_response['nameAvailable']:
create_storage_command = "az storage account create --name {0} --location westeurope --resource-group {1} --sku Standard_LRS".format(storage_account_name, resource_group_name)
os.system(create_storage_command)
else:
return "failure", '{0}'.format(name_check_response['reason']), ""
# Create the Azure function
create_function_command = "az functionapp create --name " + function_name + " --storage-account " + storage_account_name + " --consumption-plan-location westeurope --resource-group " + resource_group_name
try:
create_function_check = json.loads(os.popen(create_function_command).read())
except Exception as e:
return 'failure', 'the function app could not be created', '{0}'.format(e)
if create_function_check['name'] == function_name:
set_progress('The function app has been successfully created')
else:
return 'failure', 'The app could not be created', ''
resource.name = function_name
resource.resource_group_name = resource_group_name
resource.save()
fxn = "az functionapp deployment source config-zip -g {0} -n {1} --src {2}".format(resource_group_name, function_name, file_location)
json.loads(os.popen(fxn).read())
return 'success', 'The function has successfully been created.', ''
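# Illustration only (hypothetical values, not part of the original blueprint):
# with function_name "myfn", the CLI commands assembled above expand roughly to:
#   az group create --name myfn-resource-group --location westeurope
#   az storage account create --name myfnstorageaccount --location westeurope \
#       --resource-group myfn-resource-group --sku Standard_LRS
#   az functionapp create --name myfn --storage-account myfnstorageaccount \
#       --consumption-plan-location westeurope --resource-group myfn-resource-group
#   az functionapp deployment source config-zip -g myfn-resource-group -n myfn --src <file_location>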
| CloudBoltSoftware/cloudbolt-forge | blueprints/azure_functions/create.py | Python | apache-2.0 | 3,475 | 0.007194 |
#
# Copyright © 2012–2022 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.apps import AppConfig
class AddonsConfig(AppConfig):
name = "weblate.addons"
label = "addons"
verbose_name = "Add-ons"
| nijel/weblate | weblate/addons/apps.py | Python | gpl-3.0 | 905 | 0 |
# Copyright 2016-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
Unit tests for the arpcache module
"""
import os
import subprocess
import sys
import unittest
import mock
import moduletests.src.arpcache
try:
# Python 2.x
from cStringIO import StringIO
except ImportError:
# Python 3.x
from io import StringIO
if sys.hexversion >= 0x3040000:
# contextlib.redirect_stdout was introduced in Python 3.4
import contextlib
else:
# contextlib2 is a backport of contextlib from Python 3.5 and is compatible with Python2/3
import contextlib2 as contextlib
class TestArpcache(unittest.TestCase):
config_file_path = "/etc/sysctl.d/55-arp-gc_thresh1.conf"
def setUp(self):
self.output = StringIO()
def tearDown(self):
self.output.close()
@mock.patch("subprocess.check_output")
def test_detect_noproblem(self, check_output_mock):
check_output_mock.return_value = "net.ipv4.neigh.default.gc_thresh1 = 0"
self.assertFalse(moduletests.src.arpcache.detect())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output")
def test_detect_problem(self, check_output_mock):
check_output_mock.return_value = "net.ipv4.neigh.default.gc_thresh1 = 1"
self.assertTrue(moduletests.src.arpcache.detect())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=subprocess.CalledProcessError(
"1", "test", "/etc/sysctl.d/55-arp-gc_thresh1.conf: no such file or directory"))
def test_fix_cpe(self, check_output_mock):
with contextlib.redirect_stdout(self.output):
self.assertRaises(subprocess.CalledProcessError, moduletests.src.arpcache.fix, self.config_file_path)
self.assertTrue(self.output.getvalue().endswith(
"[UNFIXED] 'sysctl -w net.ipv4.neigh.default.gc_thresh1=0' failed for running system\n"))
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[False])
@mock.patch("moduletests.src.arpcache.open", mock.mock_open(read_data="stuff"))
def test_fix_exists_sudo_true(self, check_output_mock, exists_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith(
"[FIXED] set net.ipv4.neigh.default.gc_thresh1=0 for running system\n"
"[FIXED] net.ipv4.neigh.default.gc_thresh1=0 in /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[True])
@mock.patch("moduletests.src.arpcache.open", mock.mock_open(read_data="net.ipv4.neigh.default.gc_thresh1 = 0\n"
"something else\n"))
def test_fix_sudo_true(self, check_output_mock, exists_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith(
"[FIXED] set net.ipv4.neigh.default.gc_thresh1=0 for running system\n"
"[FIXED] net.ipv4.neigh.default.gc_thresh1=0 in /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[True])
@mock.patch("moduletests.src.arpcache.open", mock.mock_open(read_data="net.ipv4.neigh.default.gc_thresh1 = 0\n"
"net.ipv4.neigh.default.gc_thresh1 = 0\n"))
def test_fix_sudo_true_found_twice(self, check_output_mock, exists_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith(
"[FIXED] set net.ipv4.neigh.default.gc_thresh1=0 for running system\n"
"[FIXED] net.ipv4.neigh.default.gc_thresh1=0 in /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[False])
@mock.patch("moduletests.src.arpcache.open", side_effect=IOError)
def test_fix_writefail(self, open_mock, exists_mock, check_output_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertRaises(IOError, moduletests.src.arpcache.fix, self.config_file_path)
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
self.assertTrue(open_mock.called)
self.assertTrue(self.output.getvalue().endswith(
"[UNFIXED] Failed to write config to /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
@mock.patch("moduletests.src.arpcache.detect", return_value=False)
def test_run_success(self, detect_mock):
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.run())
self.assertTrue(self.output.getvalue().endswith("Determining if aggressive ARP caching is enabled\n"
"[SUCCESS] Aggressive arp caching is disabled.\n"))
self.assertTrue(detect_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", return_value=True)
def test_run_no_remediate(self, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": False,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
moduletests.src.arpcache.run()
self.assertTrue("[UNFIXED] Remediation impossible without sudo and --remediate.\n"
"-- Running as root/sudo: True\n"
"-- Required --remediate flag specified: False\n"
"[FAILURE] Aggressive arp caching is enabled."
in self.output.getvalue())
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", return_value=True)
@mock.patch("moduletests.src.arpcache.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.arpcache.backup", return_value=True)
@mock.patch("moduletests.src.arpcache.fix", return_value=True)
@mock.patch("moduletests.src.arpcache.restore", return_value=True)
def test_run_failure_isfile(self, restore_mock, fix_mock, backup_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": {self.config_file_path: "/some/path"},
"REMEDIATE": True,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.arpcache.run())
self.assertTrue("Determining if aggressive ARP caching is enabled\n"
"[FAILURE] Aggressive arp caching is enabled. "
"This can cause issues communicating with instances in the same subnet"
in self.output.getvalue())
self.assertTrue(restore_mock.called)
self.assertTrue(fix_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", return_value=True)
@mock.patch("moduletests.src.arpcache.os.path.isfile", return_value=False)
@mock.patch("moduletests.src.arpcache.fix", return_value=True)
def test_run_failure(self, fix_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": True,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.arpcache.run())
self.assertTrue("Determining if aggressive ARP caching is enabled\n"
"[FAILURE] Aggressive arp caching is enabled. "
"This can cause issues communicating with instances in the same subnet"
in self.output.getvalue())
self.assertTrue(fix_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", side_effect=(True, False))
@mock.patch("moduletests.src.arpcache.os.path.isfile", return_value=False)
@mock.patch("moduletests.src.arpcache.fix", return_value=True)
def test_run_fix(self, fix_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": True,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.run())
self.assertTrue(self.output.getvalue().endswith("Determining if aggressive ARP caching is enabled\n"
"[SUCCESS] Aggressive arp caching is disabled after "
"remediation. Please see the logs for further details\n"))
self.assertTrue(fix_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", side_effect=Exception)
@mock.patch("moduletests.src.arpcache.restore", return_value=True)
def test_run_detect_exception(self, restore_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": {self.config_file_path: "/some/path"},
"REMEDIATE": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.arpcache.run())
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
self.assertTrue(restore_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict", side_effect=Exception)
def test_run_config_exception(self, config_mock):
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.arpcache.run())
self.assertTrue(config_mock.called)
| gregbdunn/aws-ec2rescue-linux | tools/moduletests/unit/test_arpcache.py | Python | apache-2.0 | 12,661 | 0.003001 |
from .. import scalar_measures
import numpy
from numpy.testing import assert_array_almost_equal
def test_fractional_anisotropy(N=10, random=numpy.random.RandomState(0)):
tensors = random.randn(N, 3, 3)
fa = numpy.empty(N)
for i, t in enumerate(tensors):
tt = numpy.dot(t, t.T)
tensors[i] = tt
ev = numpy.linalg.eigvalsh(tt)
mn = ev.mean()
fa[i] = numpy.sqrt(1.5 * ((ev - mn) ** 2).sum() / (ev ** 2).sum())
assert_array_almost_equal(fa, scalar_measures.fractional_anisotropy(tensors))
def test_volume_fraction(N=10, random=numpy.random.RandomState(0)):
tensors = random.randn(N, 3, 3)
vf = numpy.empty(N)
for i, t in enumerate(tensors):
tt = numpy.dot(t, t.T)
tensors[i] = tt
ev = numpy.linalg.eigvalsh(tt)
mn = ev.mean()
vf[i] = 1 - ev.prod() / (mn ** 3)
assert_array_almost_equal(vf, scalar_measures.volume_fraction(tensors))
def test_tensor_determinant(N=10, random=numpy.random.RandomState(0)):
tensors = random.randn(N, 3, 3)
dt = numpy.empty(N)
for i, t in enumerate(tensors):
tt = numpy.dot(t, t.T)
tensors[i] = tt
dt[i] = numpy.linalg.det(tt)
assert_array_almost_equal(dt, scalar_measures.tensor_det(tensors))
def test_tensor_traces(N=10, random=numpy.random.RandomState(0)):
tensors = random.randn(N, 3, 3)
res = numpy.empty(N)
for i, t in enumerate(tensors):
tt = numpy.dot(t, t.T)
tensors[i] = tt
res[i] = numpy.trace(tt)
assert_array_almost_equal(res, scalar_measures.tensor_trace(tensors))
def test_tensor_contraction(N=10, random=numpy.random.RandomState(0)):
tensors1 = random.randn(N, 3, 3)
tensors2 = random.randn(N, 3, 3)
res = numpy.empty(N)
for i in range(N):
t1 = tensors1[i]
t2 = tensors2[i]
tt1 = numpy.dot(t1, t1.T)
tt2 = numpy.dot(t2, t2.T)
tensors1[i] = tt1
tensors2[i] = tt2
res[i] = numpy.trace(numpy.dot(tt1, tt2.T))
assert_array_almost_equal(res, scalar_measures.tensor_contraction(tensors1, tensors2))
| demianw/tract_querier | tract_querier/tensor/tests/test_scalar_measures.py | Python | bsd-3-clause | 2,117 | 0.000945 |
# oracle/base.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the Oracle database.
Oracle versions 8 through current (11g at the time of this writing) are supported.
For information on connecting via specific drivers, see the documentation
for that driver.
Connect Arguments
-----------------
The dialect supports several :func:`~sqlalchemy.create_engine()` arguments which
affect the behavior of the dialect regardless of driver in use.
* *use_ansi* - Use ANSI JOIN constructs (see the section on Oracle 8). Defaults
to ``True``. If ``False``, Oracle-8 compatible constructs are used for joins.
* *optimize_limits* - defaults to ``False``. See the section on LIMIT/OFFSET.
* *use_binds_for_limits* - defaults to ``True``. See the section on LIMIT/OFFSET and the example below.
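A hypothetical usage example (the connect string is a placeholder)::
    engine = create_engine('oracle://scott:tiger@dsn', use_ansi=False)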
Auto Increment Behavior
-----------------------
SQLAlchemy Table objects which include integer primary keys are usually assumed to have
"autoincrementing" behavior, meaning they can generate their own primary key values upon
INSERT. Since Oracle has no "autoincrement" feature, SQLAlchemy relies upon sequences
to produce these values. With the Oracle dialect, *a sequence must always be explicitly
specified to enable autoincrement*. This diverges from the majority of documentation
examples, which assume the usage of an autoincrement-capable database. To specify sequences,
use the sqlalchemy.schema.Sequence object which is passed to a Column construct::
t = Table('mytable', metadata,
Column('id', Integer, Sequence('id_seq'), primary_key=True),
Column(...), ...
)
This step is also required when using table reflection, i.e. autoload=True::
t = Table('mytable', metadata,
Column('id', Integer, Sequence('id_seq'), primary_key=True),
autoload=True
)
Identifier Casing
-----------------
In Oracle, the data dictionary represents all case insensitive identifier names
using UPPERCASE text. SQLAlchemy on the other hand considers an all-lower case identifier
name to be case insensitive. The Oracle dialect converts all case insensitive identifiers
to and from those two formats during schema level communication, such as reflection of
tables and indexes. Using an UPPERCASE name on the SQLAlchemy side indicates a
case sensitive identifier, and SQLAlchemy will quote the name - this will cause mismatches
against data dictionary data received from Oracle, so unless identifier names have been
truly created as case sensitive (i.e. using quoted names), all lowercase names should be
used on the SQLAlchemy side.
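For example (illustrative table names)::
    # matched case insensitively against the data dictionary's MYTABLE
    t1 = Table('mytable', metadata, autoload=True)
    # quoted and treated as case sensitive; only matches a table that was
    # actually created with a quoted "MYTABLE" name
    t2 = Table('MYTABLE', metadata, autoload=True)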
Unicode
-------
SQLAlchemy 0.6 uses the "native unicode" mode provided as of cx_oracle 5. cx_oracle 5.0.2
or greater is recommended for support of NCLOB. If not using cx_oracle 5, the NLS_LANG
environment variable needs to be set in order for the oracle client library to use
proper encoding, such as "AMERICAN_AMERICA.UTF8".
Also note that Oracle supports unicode data through the NVARCHAR and NCLOB data types.
When using the SQLAlchemy Unicode and UnicodeText types, these DDL types will be used
within CREATE TABLE statements. Usage of VARCHAR2 and CLOB with unicode text still
requires NLS_LANG to be set.
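As a sketch, NLS_LANG can be set in the process environment before the Oracle
client library is initialized (the exact placement depends on how the process
is launched)::
    import os
    os.environ['NLS_LANG'] = 'AMERICAN_AMERICA.UTF8'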
LIMIT/OFFSET Support
--------------------
Oracle has no support for the LIMIT or OFFSET keywords. SQLAlchemy uses
a wrapped subquery approach in conjunction with ROWNUM. The exact methodology
is taken from
http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html .
There are two options which affect its behavior:
* the "FIRST ROWS()" optimization keyword is not used by default. To enable the usage of this
optimization directive, specify ``optimize_limits=True`` to :func:`.create_engine`.
* the values passed for the limit/offset are sent as bound parameters. Some users have observed
that Oracle produces a poor query plan when the values are sent as binds and not
rendered literally. To render the limit/offset values literally within the SQL
statement, specify ``use_binds_for_limits=False`` to :func:`.create_engine`.
Some users have reported better performance when the entirely different approach of a
window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to provide LIMIT/OFFSET (note
that the majority of users don't observe this). To suit this case the
method used for LIMIT/OFFSET can be replaced entirely. See the recipe at
http://www.sqlalchemy.org/trac/wiki/UsageRecipes/WindowFunctionsByDefault
which installs a select compiler that overrides the generation of limit/offset with
a window function.
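A hypothetical example combining the two flags described above (the connect
string is a placeholder)::
    engine = create_engine('oracle://scott:tiger@dsn',
                           optimize_limits=True,
                           use_binds_for_limits=False)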
ON UPDATE CASCADE
-----------------
Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger based solution
is available at http://asktom.oracle.com/tkyte/update_cascade/index.html .
When using the SQLAlchemy ORM, the ORM has limited ability to manually issue
cascading updates - specify ForeignKey objects using the
"deferrable=True, initially='deferred'" keyword arguments,
and specify "passive_updates=False" on each relationship().
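A minimal sketch of that configuration (table, class and relationship names
are hypothetical)::
    child = Table('child', metadata,
        Column('id', Integer, primary_key=True),
        Column('parent_id', Integer,
               ForeignKey('parent.id', deferrable=True, initially='deferred'))
    )
    mapper(Child, child, properties={
        'parent': relationship(Parent, passive_updates=False)
    })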
Oracle 8 Compatibility
----------------------
When Oracle 8 is detected, the dialect internally configures itself to the following
behaviors:
* the use_ansi flag is set to False. This has the effect of converting all
JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN
makes use of Oracle's (+) operator.
* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when
the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are issued
instead. This is because these types don't seem to work correctly on Oracle 8
even though they are available. The :class:`~sqlalchemy.types.NVARCHAR`
and :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate NVARCHAR2 and NCLOB.
* the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy
encodes all Python unicode objects to "string" before passing in as bind parameters.
Synonym/DBLINK Reflection
-------------------------
When using reflection with Table objects, the dialect can optionally search for tables
indicated by synonyms that reference DBLINK-ed tables by passing the flag
oracle_resolve_synonyms=True as a keyword argument to the Table construct. If DBLINK
is not in use this flag should be left off.
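For example (``mytable`` here being a local synonym for a DBLINK-ed table)::
    t = Table('mytable', metadata, autoload=True, oracle_resolve_synonyms=True)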
"""
import random, re
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, log
from sqlalchemy.engine import default, base, reflection
from sqlalchemy.sql import compiler, visitors, expression
from sqlalchemy.sql import operators as sql_operators, functions as sql_functions
from sqlalchemy import types as sqltypes
from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, DATE, DATETIME, \
BLOB, CLOB, TIMESTAMP, FLOAT
RESERVED_WORDS = set('SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN '
'DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED '
'ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE ANY '
'TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE BY ASC '
'REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES AS IN VIEW '
'EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS NOT TRIGGER '
'ELSE CREATE INTERSECT PCTFREE DISTINCT USER CONNECT SET MODE '
'OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR DECIMAL UNION PUBLIC '
'AND START UID COMMENT'.split())
class RAW(sqltypes.LargeBinary):
pass
OracleRaw = RAW
class NCLOB(sqltypes.Text):
__visit_name__ = 'NCLOB'
VARCHAR2 = VARCHAR
NVARCHAR2 = NVARCHAR
class NUMBER(sqltypes.Numeric, sqltypes.Integer):
__visit_name__ = 'NUMBER'
def __init__(self, precision=None, scale=None, asdecimal=None):
if asdecimal is None:
asdecimal = bool(scale and scale > 0)
super(NUMBER, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal)
def adapt(self, impltype):
ret = super(NUMBER, self).adapt(impltype)
# leave a hint for the DBAPI handler
ret._is_oracle_number = True
return ret
@property
def _type_affinity(self):
if bool(self.scale and self.scale > 0):
return sqltypes.Numeric
else:
return sqltypes.Integer
class DOUBLE_PRECISION(sqltypes.Numeric):
__visit_name__ = 'DOUBLE_PRECISION'
def __init__(self, precision=None, scale=None, asdecimal=None):
if asdecimal is None:
asdecimal = False
super(DOUBLE_PRECISION, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal)
class BFILE(sqltypes.LargeBinary):
__visit_name__ = 'BFILE'
class LONG(sqltypes.Text):
__visit_name__ = 'LONG'
class INTERVAL(sqltypes.TypeEngine):
__visit_name__ = 'INTERVAL'
def __init__(self,
day_precision=None,
second_precision=None):
"""Construct an INTERVAL.
Note that only DAY TO SECOND intervals are currently supported.
This is due to a lack of support for YEAR TO MONTH intervals
within available DBAPIs (cx_oracle and zxjdbc).
:param day_precision: the day precision value. this is the number of digits
to store for the day field. Defaults to "2"
:param second_precision: the second precision value. this is the number of digits
to store for the fractional seconds field. Defaults to "6".
"""
self.day_precision = day_precision
self.second_precision = second_precision
@classmethod
def _adapt_from_generic_interval(cls, interval):
return INTERVAL(day_precision=interval.day_precision,
second_precision=interval.second_precision)
def adapt(self, impltype):
return impltype(day_precision=self.day_precision,
second_precision=self.second_precision)
@property
def _type_affinity(self):
return sqltypes.Interval
class ROWID(sqltypes.TypeEngine):
"""Oracle ROWID type.
When used in a cast() or similar, generates ROWID.
"""
__visit_name__ = 'ROWID'
class _OracleBoolean(sqltypes.Boolean):
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
colspecs = {
sqltypes.Boolean : _OracleBoolean,
sqltypes.Interval : INTERVAL,
}
ischema_names = {
'VARCHAR2' : VARCHAR,
'NVARCHAR2' : NVARCHAR,
'CHAR' : CHAR,
'DATE' : DATE,
'NUMBER' : NUMBER,
'BLOB' : BLOB,
'BFILE' : BFILE,
'CLOB' : CLOB,
'NCLOB' : NCLOB,
'TIMESTAMP' : TIMESTAMP,
'TIMESTAMP WITH TIME ZONE' : TIMESTAMP,
'INTERVAL DAY TO SECOND' : INTERVAL,
'RAW' : RAW,
'FLOAT' : FLOAT,
'DOUBLE PRECISION' : DOUBLE_PRECISION,
'LONG' : LONG,
}
class OracleTypeCompiler(compiler.GenericTypeCompiler):
# Note:
# Oracle DATE == DATETIME
# Oracle does not allow milliseconds in DATE
# Oracle does not support TIME columns
def visit_datetime(self, type_):
return self.visit_DATE(type_)
def visit_float(self, type_):
return self.visit_FLOAT(type_)
def visit_unicode(self, type_):
if self.dialect._supports_nchar:
return self.visit_NVARCHAR(type_)
else:
return self.visit_VARCHAR(type_)
def visit_INTERVAL(self, type_):
return "INTERVAL DAY%s TO SECOND%s" % (
type_.day_precision is not None and
"(%d)" % type_.day_precision or
"",
type_.second_precision is not None and
"(%d)" % type_.second_precision or
"",
)
def visit_TIMESTAMP(self, type_):
if type_.timezone:
return "TIMESTAMP WITH TIME ZONE"
else:
return "TIMESTAMP"
def visit_DOUBLE_PRECISION(self, type_):
return self._generate_numeric(type_, "DOUBLE PRECISION")
def visit_NUMBER(self, type_, **kw):
return self._generate_numeric(type_, "NUMBER", **kw)
def _generate_numeric(self, type_, name, precision=None, scale=None):
if precision is None:
precision = type_.precision
if scale is None:
scale = getattr(type_, 'scale', None)
if precision is None:
return name
elif scale is None:
return "%(name)s(%(precision)s)" % {'name':name,'precision': precision}
else:
return "%(name)s(%(precision)s, %(scale)s)" % {'name':name,'precision': precision, 'scale' : scale}
def visit_VARCHAR(self, type_):
if self.dialect._supports_char_length:
return "VARCHAR(%(length)s CHAR)" % {'length' : type_.length}
else:
return "VARCHAR(%(length)s)" % {'length' : type_.length}
def visit_NVARCHAR(self, type_):
return "NVARCHAR2(%(length)s)" % {'length' : type_.length}
def visit_text(self, type_):
return self.visit_CLOB(type_)
def visit_unicode_text(self, type_):
if self.dialect._supports_nchar:
return self.visit_NCLOB(type_)
else:
return self.visit_CLOB(type_)
def visit_large_binary(self, type_):
return self.visit_BLOB(type_)
def visit_big_integer(self, type_):
return self.visit_NUMBER(type_, precision=19)
def visit_boolean(self, type_):
return self.visit_SMALLINT(type_)
def visit_RAW(self, type_):
return "RAW(%(length)s)" % {'length' : type_.length}
def visit_ROWID(self, type_):
return "ROWID"
class OracleCompiler(compiler.SQLCompiler):
"""Oracle compiler modifies the lexical structure of Select
statements to work under non-ANSI configured Oracle databases, if
the use_ansi flag is False.
"""
compound_keywords = util.update_copy(
compiler.SQLCompiler.compound_keywords,
{
expression.CompoundSelect.EXCEPT : 'MINUS'
}
)
def __init__(self, *args, **kwargs):
super(OracleCompiler, self).__init__(*args, **kwargs)
self.__wheres = {}
self._quoted_bind_names = {}
def visit_mod(self, binary, **kw):
return "mod(%s, %s)" % (self.process(binary.left), self.process(binary.right))
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_char_length_func(self, fn, **kw):
return "LENGTH" + self.function_argspec(fn, **kw)
def visit_match_op(self, binary, **kw):
return "CONTAINS (%s, %s)" % (self.process(binary.left), self.process(binary.right))
def get_select_hint_text(self, byfroms):
return " ".join(
"/*+ %s */" % text for table, text in byfroms.items()
)
def function_argspec(self, fn, **kw):
if len(fn.clauses) > 0:
return compiler.SQLCompiler.function_argspec(self, fn, **kw)
else:
return ""
def default_from(self):
"""Called when a ``SELECT`` statement has no froms, and no ``FROM`` clause is to be appended.
The Oracle compiler tacks a "FROM DUAL" to the statement.
"""
return " FROM DUAL"
def visit_join(self, join, **kwargs):
if self.dialect.use_ansi:
return compiler.SQLCompiler.visit_join(self, join, **kwargs)
else:
kwargs['asfrom'] = True
return self.process(join.left, **kwargs) + \
", " + self.process(join.right, **kwargs)
def _get_nonansi_join_whereclause(self, froms):
clauses = []
def visit_join(join):
if join.isouter:
def visit_binary(binary):
if binary.operator == sql_operators.eq:
if binary.left.table is join.right:
binary.left = _OuterJoinColumn(binary.left)
elif binary.right.table is join.right:
binary.right = _OuterJoinColumn(binary.right)
clauses.append(visitors.cloned_traverse(join.onclause, {},
{'binary':visit_binary}))
else:
clauses.append(join.onclause)
for j in join.left, join.right:
if isinstance(j, expression.Join):
visit_join(j)
for f in froms:
if isinstance(f, expression.Join):
visit_join(f)
if not clauses:
return None
else:
return sql.and_(*clauses)
def visit_outer_join_column(self, vc):
return self.process(vc.column) + "(+)"
def visit_sequence(self, seq):
return self.dialect.identifier_preparer.format_sequence(seq) + ".nextval"
def visit_alias(self, alias, asfrom=False, ashint=False, **kwargs):
"""Oracle doesn't like ``FROM table AS alias``. Is the AS standard SQL??"""
if asfrom or ashint:
alias_name = isinstance(alias.name, expression._generated_label) and \
self._truncated_identifier("alias", alias.name) or alias.name
if ashint:
return alias_name
elif asfrom:
return self.process(alias.original, asfrom=asfrom, **kwargs) + \
" " + self.preparer.format_alias(alias, alias_name)
else:
return self.process(alias.original, **kwargs)
def returning_clause(self, stmt, returning_cols):
def create_out_param(col, i):
bindparam = sql.outparam("ret_%d" % i, type_=col.type)
self.binds[bindparam.key] = bindparam
return self.bindparam_string(self._truncate_bindparam(bindparam))
columnlist = list(expression._select_iterables(returning_cols))
# within_columns_clause =False so that labels (foo AS bar) don't render
columns = [self.process(c, within_columns_clause=False, result_map=self.result_map) for c in columnlist]
binds = [create_out_param(c, i) for i, c in enumerate(columnlist)]
return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
def _TODO_visit_compound_select(self, select):
"""Need to determine how to get ``LIMIT``/``OFFSET`` into a ``UNION`` for Oracle."""
pass
def visit_select(self, select, **kwargs):
"""Look for ``LIMIT`` and OFFSET in a select statement, and if
so tries to wrap it in a subquery with ``rownum`` criterion.
"""
if not getattr(select, '_oracle_visit', None):
if not self.dialect.use_ansi:
if self.stack and 'from' in self.stack[-1]:
existingfroms = self.stack[-1]['from']
else:
existingfroms = None
froms = select._get_display_froms(existingfroms)
whereclause = self._get_nonansi_join_whereclause(froms)
if whereclause is not None:
select = select.where(whereclause)
select._oracle_visit = True
if select._limit is not None or select._offset is not None:
# See http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html
#
# Generalized form of an Oracle pagination query:
# select ... from (
# select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from (
# select distinct ... where ... order by ...
# ) where ROWNUM <= :limit+:offset
# ) where ora_rn > :offset
# Outer select and "ROWNUM as ora_rn" can be dropped if limit=0
# TODO: use annotations instead of clone + attr set ?
select = select._generate()
select._oracle_visit = True
# Wrap the middle select and add the hint
limitselect = sql.select([c for c in select.c])
if select._limit and self.dialect.optimize_limits:
limitselect = limitselect.prefix_with("/*+ FIRST_ROWS(%d) */" % select._limit)
limitselect._oracle_visit = True
limitselect._is_wrapper = True
# If needed, add the limiting clause
if select._limit is not None:
max_row = select._limit
if select._offset is not None:
max_row += select._offset
if not self.dialect.use_binds_for_limits:
max_row = sql.literal_column("%d" % max_row)
limitselect.append_whereclause(
sql.literal_column("ROWNUM")<=max_row)
# If needed, add the ora_rn, and wrap again with offset.
if select._offset is None:
limitselect.for_update = select.for_update
select = limitselect
else:
limitselect = limitselect.column(
sql.literal_column("ROWNUM").label("ora_rn"))
limitselect._oracle_visit = True
limitselect._is_wrapper = True
offsetselect = sql.select(
[c for c in limitselect.c if c.key!='ora_rn'])
offsetselect._oracle_visit = True
offsetselect._is_wrapper = True
offset_value = select._offset
if not self.dialect.use_binds_for_limits:
offset_value = sql.literal_column("%d" % offset_value)
offsetselect.append_whereclause(
sql.literal_column("ora_rn")>offset_value)
offsetselect.for_update = select.for_update
select = offsetselect
kwargs['iswrapper'] = getattr(select, '_is_wrapper', False)
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
def limit_clause(self, select):
return ""
def for_update_clause(self, select):
if self.is_subquery():
return ""
elif select.for_update == "nowait":
return " FOR UPDATE NOWAIT"
else:
return super(OracleCompiler, self).for_update_clause(select)
class OracleDDLCompiler(compiler.DDLCompiler):
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % constraint.ondelete
# oracle has no ON UPDATE CASCADE -
# its only available via triggers http://asktom.oracle.com/tkyte/update_cascade/index.html
if constraint.onupdate is not None:
util.warn(
"Oracle does not contain native UPDATE CASCADE "
"functionality - onupdates will not be rendered for foreign keys. "
"Consider using deferrable=True, initially='deferred' or triggers.")
return text
class OracleIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = set([x.lower() for x in RESERVED_WORDS])
illegal_initial_characters = set(xrange(0, 10)).union(["_", "$"])
def _bindparam_requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(unicode(value))
)
def format_savepoint(self, savepoint):
name = re.sub(r'^_+', '', savepoint.ident)
return super(OracleIdentifierPreparer, self).format_savepoint(savepoint, name)
class OracleExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq):
return int(self._execute_scalar("SELECT " +
self.dialect.identifier_preparer.format_sequence(seq) +
".nextval FROM DUAL"))
class OracleDialect(default.DefaultDialect):
name = 'oracle'
supports_alter = True
supports_unicode_statements = False
supports_unicode_binds = False
max_identifier_length = 30
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
supports_sequences = True
sequences_optional = False
postfetch_lastrowid = False
default_paramstyle = 'named'
colspecs = colspecs
ischema_names = ischema_names
requires_name_normalize = True
supports_default_values = False
supports_empty_insert = False
statement_compiler = OracleCompiler
ddl_compiler = OracleDDLCompiler
type_compiler = OracleTypeCompiler
preparer = OracleIdentifierPreparer
execution_ctx_cls = OracleExecutionContext
reflection_options = ('oracle_resolve_synonyms', )
def __init__(self,
use_ansi=True,
optimize_limits=False,
use_binds_for_limits=True,
**kwargs):
default.DefaultDialect.__init__(self, **kwargs)
self.use_ansi = use_ansi
self.optimize_limits = optimize_limits
self.use_binds_for_limits = use_binds_for_limits
def initialize(self, connection):
super(OracleDialect, self).initialize(connection)
self.implicit_returning = self.__dict__.get(
'implicit_returning',
self.server_version_info > (10, )
)
if self._is_oracle_8:
self.colspecs = self.colspecs.copy()
self.colspecs.pop(sqltypes.Interval)
self.use_ansi = False
@property
def _is_oracle_8(self):
return self.server_version_info and \
self.server_version_info < (9, )
@property
def _supports_char_length(self):
return not self._is_oracle_8
@property
def _supports_nchar(self):
return not self._is_oracle_8
def do_release_savepoint(self, connection, name):
# Oracle does not support RELEASE SAVEPOINT
pass
def has_table(self, connection, table_name, schema=None):
if not schema:
schema = self.default_schema_name
cursor = connection.execute(
sql.text("SELECT table_name FROM all_tables "
"WHERE table_name = :name AND owner = :schema_name"),
name=self.denormalize_name(table_name), schema_name=self.denormalize_name(schema))
return cursor.first() is not None
def has_sequence(self, connection, sequence_name, schema=None):
if not schema:
schema = self.default_schema_name
cursor = connection.execute(
sql.text("SELECT sequence_name FROM all_sequences "
"WHERE sequence_name = :name AND sequence_owner = :schema_name"),
name=self.denormalize_name(sequence_name), schema_name=self.denormalize_name(schema))
return cursor.first() is not None
def normalize_name(self, name):
if name is None:
return None
# Py2K
if isinstance(name, str):
name = name.decode(self.encoding)
# end Py2K
if name.upper() == name and \
not self.identifier_preparer._requires_quotes(name.lower()):
return name.lower()
else:
return name
def denormalize_name(self, name):
if name is None:
return None
elif name.lower() == name and not self.identifier_preparer._requires_quotes(name.lower()):
name = name.upper()
# Py2K
if not self.supports_unicode_binds:
name = name.encode(self.encoding)
else:
name = unicode(name)
# end Py2K
return name
def _get_default_schema_name(self, connection):
return self.normalize_name(connection.execute(u'SELECT USER FROM DUAL').scalar())
def _resolve_synonym(self, connection, desired_owner=None, desired_synonym=None, desired_table=None):
"""search for a local synonym matching the given desired owner/name.
if desired_owner is None, attempts to locate a distinct owner.
returns the actual name, owner, dblink name, and synonym name if found.
"""
q = "SELECT owner, table_owner, table_name, db_link, synonym_name FROM all_synonyms WHERE "
clauses = []
params = {}
if desired_synonym:
clauses.append("synonym_name = :synonym_name")
params['synonym_name'] = desired_synonym
if desired_owner:
clauses.append("table_owner = :desired_owner")
params['desired_owner'] = desired_owner
if desired_table:
clauses.append("table_name = :tname")
params['tname'] = desired_table
q += " AND ".join(clauses)
result = connection.execute(sql.text(q), **params)
if desired_owner:
row = result.first()
if row:
return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name']
else:
return None, None, None, None
else:
rows = result.fetchall()
if len(rows) > 1:
raise AssertionError("There are multiple tables visible to the schema, you must specify owner")
elif len(rows) == 1:
row = rows[0]
return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name']
else:
return None, None, None, None
@reflection.cache
def _prepare_reflection_args(self, connection, table_name, schema=None,
resolve_synonyms=False, dblink='', **kw):
if resolve_synonyms:
actual_name, owner, dblink, synonym = self._resolve_synonym(
connection,
desired_owner=self.denormalize_name(schema),
desired_synonym=self.denormalize_name(table_name)
)
else:
actual_name, owner, dblink, synonym = None, None, None, None
if not actual_name:
actual_name = self.denormalize_name(table_name)
if not dblink:
dblink = ''
if not owner:
owner = self.denormalize_name(schema or self.default_schema_name)
return (actual_name, owner, dblink, synonym)
@reflection.cache
def get_schema_names(self, connection, **kw):
s = "SELECT username FROM all_users ORDER BY username"
cursor = connection.execute(s,)
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
schema = self.denormalize_name(schema or self.default_schema_name)
# note that table_names() isn't loading DBLINKed or synonym'ed tables
if schema is None:
schema = self.default_schema_name
s = sql.text(
"SELECT table_name FROM all_tables "
"WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') "
"AND OWNER = :owner "
"AND IOT_NAME IS NULL")
cursor = connection.execute(s, owner=schema)
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
schema = self.denormalize_name(schema or self.default_schema_name)
s = sql.text("SELECT view_name FROM all_views WHERE owner = :owner")
cursor = connection.execute(s, owner=self.denormalize_name(schema))
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
"""
kw arguments can be:
oracle_resolve_synonyms
dblink
"""
resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
dblink = kw.get('dblink', '')
info_cache = kw.get('info_cache')
(table_name, schema, dblink, synonym) = \
self._prepare_reflection_args(connection, table_name, schema,
resolve_synonyms, dblink,
info_cache=info_cache)
columns = []
if self._supports_char_length:
char_length_col = 'char_length'
else:
char_length_col = 'data_length'
c = connection.execute(sql.text(
"SELECT column_name, data_type, %(char_length_col)s, data_precision, data_scale, "
"nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "
"WHERE table_name = :table_name AND owner = :owner "
"ORDER BY column_id" % {'dblink': dblink, 'char_length_col':char_length_col}),
table_name=table_name, owner=schema)
for row in c:
(colname, orig_colname, coltype, length, precision, scale, nullable, default) = \
(self.normalize_name(row[0]), row[0], row[1], row[2], row[3], row[4], row[5]=='Y', row[6])
if coltype == 'NUMBER' :
coltype = NUMBER(precision, scale)
elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'):
coltype = self.ischema_names.get(coltype)(length)
elif 'WITH TIME ZONE' in coltype:
coltype = TIMESTAMP(timezone=True)
else:
coltype = re.sub(r'\(\d+\)', '', coltype)
try:
coltype = self.ischema_names[coltype]
except KeyError:
util.warn("Did not recognize type '%s' of column '%s'" %
(coltype, colname))
coltype = sqltypes.NULLTYPE
cdict = {
'name': colname,
'type': coltype,
'nullable': nullable,
'default': default,
}
if orig_colname.lower() == orig_colname:
cdict['quote'] = True
columns.append(cdict)
return columns
@reflection.cache
def get_indexes(self, connection, table_name, schema=None,
resolve_synonyms=False, dblink='', **kw):
info_cache = kw.get('info_cache')
(table_name, schema, dblink, synonym) = \
self._prepare_reflection_args(connection, table_name, schema,
resolve_synonyms, dblink,
info_cache=info_cache)
indexes = []
q = sql.text("""
SELECT a.index_name, a.column_name, b.uniqueness
FROM ALL_IND_COLUMNS%(dblink)s a,
ALL_INDEXES%(dblink)s b
WHERE
a.index_name = b.index_name
AND a.table_owner = b.table_owner
AND a.table_name = b.table_name
AND a.table_name = :table_name
AND a.table_owner = :schema
ORDER BY a.index_name, a.column_position""" % {'dblink': dblink})
rp = connection.execute(q, table_name=self.denormalize_name(table_name),
schema=self.denormalize_name(schema))
indexes = []
last_index_name = None
pkeys = self.get_primary_keys(connection, table_name, schema,
resolve_synonyms=resolve_synonyms,
dblink=dblink,
info_cache=kw.get('info_cache'))
uniqueness = dict(NONUNIQUE=False, UNIQUE=True)
oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE)
def upper_name_set(names):
return set([i.upper() for i in names])
pk_names = upper_name_set(pkeys)
def remove_if_primary_key(index):
# don't include the primary key index
if index is not None and \
upper_name_set(index['column_names']) == pk_names:
indexes.pop()
index = None
for rset in rp:
if rset.index_name != last_index_name:
remove_if_primary_key(index)
index = dict(name=self.normalize_name(rset.index_name), column_names=[])
indexes.append(index)
index['unique'] = uniqueness.get(rset.uniqueness, False)
# filter out Oracle SYS_NC names. could also do an outer join
# to the all_tab_columns table and check for real col names there.
if not oracle_sys_col.match(rset.column_name):
index['column_names'].append(self.normalize_name(rset.column_name))
last_index_name = rset.index_name
remove_if_primary_key(index)
return indexes
@reflection.cache
def _get_constraint_data(self, connection, table_name, schema=None,
dblink='', **kw):
rp = connection.execute(
sql.text("""SELECT
ac.constraint_name,
ac.constraint_type,
loc.column_name AS local_column,
rem.table_name AS remote_table,
rem.column_name AS remote_column,
rem.owner AS remote_owner,
loc.position as loc_pos,
rem.position as rem_pos
FROM all_constraints%(dblink)s ac,
all_cons_columns%(dblink)s loc,
all_cons_columns%(dblink)s rem
WHERE ac.table_name = :table_name
AND ac.constraint_type IN ('R','P')
AND ac.owner = :owner
AND ac.owner = loc.owner
AND ac.constraint_name = loc.constraint_name
AND ac.r_owner = rem.owner(+)
AND ac.r_constraint_name = rem.constraint_name(+)
AND (rem.position IS NULL or loc.position=rem.position)
ORDER BY ac.constraint_name, loc.position""" % {'dblink': dblink}),
table_name=table_name, owner=schema)
constraint_data = rp.fetchall()
return constraint_data
def get_primary_keys(self, connection, table_name, schema=None, **kw):
"""
kw arguments can be:
oracle_resolve_synonyms
dblink
"""
return self._get_primary_keys(connection, table_name, schema, **kw)[0]
@reflection.cache
def _get_primary_keys(self, connection, table_name, schema=None, **kw):
resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
dblink = kw.get('dblink', '')
info_cache = kw.get('info_cache')
(table_name, schema, dblink, synonym) = \
self._prepare_reflection_args(connection, table_name, schema,
resolve_synonyms, dblink,
info_cache=info_cache)
pkeys = []
constraint_name = None
constraint_data = self._get_constraint_data(connection, table_name,
schema, dblink,
info_cache=kw.get('info_cache'))
for row in constraint_data:
#print "ROW:" , row
(cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \
row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
if cons_type == 'P':
if constraint_name is None:
constraint_name = self.normalize_name(cons_name)
pkeys.append(local_column)
return pkeys, constraint_name
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
cols, name = self._get_primary_keys(connection, table_name, schema=schema, **kw)
return {
'constrained_columns':cols,
'name':name
}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
"""
kw arguments can be:
oracle_resolve_synonyms
dblink
"""
requested_schema = schema # to check later on
resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
dblink = kw.get('dblink', '')
info_cache = kw.get('info_cache')
(table_name, schema, dblink, synonym) = \
self._prepare_reflection_args(connection, table_name, schema,
resolve_synonyms, dblink,
info_cache=info_cache)
constraint_data = self._get_constraint_data(connection, table_name,
schema, dblink,
info_cache=kw.get('info_cache'))
def fkey_rec():
return {
'name' : None,
'constrained_columns' : [],
'referred_schema' : None,
'referred_table' : None,
'referred_columns' : []
}
fkeys = util.defaultdict(fkey_rec)
for row in constraint_data:
(cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \
row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
if cons_type == 'R':
if remote_table is None:
# ticket 363
util.warn(
("Got 'None' querying 'table_name' from "
"all_cons_columns%(dblink)s - does the user have "
"proper rights to the table?") % {'dblink':dblink})
continue
rec = fkeys[cons_name]
rec['name'] = cons_name
local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns']
if not rec['referred_table']:
if resolve_synonyms:
ref_remote_name, ref_remote_owner, ref_dblink, ref_synonym = \
self._resolve_synonym(
connection,
desired_owner=self.denormalize_name(remote_owner),
desired_table=self.denormalize_name(remote_table)
)
if ref_synonym:
remote_table = self.normalize_name(ref_synonym)
remote_owner = self.normalize_name(ref_remote_owner)
rec['referred_table'] = remote_table
if requested_schema is not None or self.denormalize_name(remote_owner) != schema:
rec['referred_schema'] = remote_owner
local_cols.append(local_column)
remote_cols.append(remote_column)
return fkeys.values()
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None,
resolve_synonyms=False, dblink='', **kw):
info_cache = kw.get('info_cache')
(view_name, schema, dblink, synonym) = \
self._prepare_reflection_args(connection, view_name, schema,
resolve_synonyms, dblink,
info_cache=info_cache)
s = sql.text("""
SELECT text FROM all_views
WHERE owner = :schema
AND view_name = :view_name
""")
rp = connection.execute(s,
view_name=view_name, schema=schema).scalar()
if rp:
return rp.decode(self.encoding)
else:
return None
class _OuterJoinColumn(sql.ClauseElement):
__visit_name__ = 'outer_join_column'
def __init__(self, column):
self.column = column
| jokajak/itweb | data/env/lib/python2.6/site-packages/SQLAlchemy-0.6.7-py2.6.egg/sqlalchemy/dialects/oracle/base.py | Python | gpl-3.0 | 43,930 | 0.005304 |
import logging
import traceback
import numpy as np
from eemeter.structures import EnergyTrace
logger = logging.getLogger(__name__)
class SplitModeledEnergyTrace(object):
''' Light wrapper around models applicable to a single trace which
fits and predicts multiple models for different segments.
Parameters
----------
trace : eemeter.structures.EnergyTrace
Trace to be modeled.
formatter : eemeter.modeling.formatter.Formatter
Formatter to prep trace data for modeling.
model_mapping : dict
Items of this dictionary map `modeling_period_label` s to models
modeling_period_set : eemeter.structures.ModelingPeriodSet
The set of modeling periods over which models should be applicable.
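For illustration, a minimal construction and fitting sketch (the formatter,
model, label, and weather_source objects are hypothetical placeholders
assumed to be built elsewhere):
.. code-block:: python
    >>> split_trace = SplitModeledEnergyTrace(
    ...     trace, formatter,
    ...     model_mapping={"baseline": baseline_model,
    ...                    "reporting": reporting_model},
    ...     modeling_period_set=modeling_period_set)
    >>> outputs = split_trace.fit(weather_source)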
'''
def __init__(self, trace, formatter, model_mapping, modeling_period_set):
self.trace = trace
self.formatter = formatter
self.model_mapping = model_mapping
self.modeling_period_set = modeling_period_set
self.fit_outputs = {}
def __repr__(self):
return (
"SplitModeledEnergyTrace(trace={}, formatter={},"
" model_mapping={}, modeling_period_set={})"
.format(self.trace, self.formatter, self.model_mapping,
self.modeling_period_set)
)
def fit(self, weather_source):
''' Fit all models associated with this trace.
Parameters
----------
weather_source : eemeter.weather.ISDWeatherSource
Weather source to use in creating covariate data.
'''
for modeling_period_label, modeling_period in \
self.modeling_period_set.iter_modeling_periods():
filtered_data = self._filter_by_modeling_period(
self.trace, modeling_period)
filtered_trace = EnergyTrace(
self.trace.interpretation, data=filtered_data,
unit=self.trace.unit)
model = self.model_mapping[modeling_period_label]
try:
input_data = self.formatter.create_input(
filtered_trace, weather_source)
except:
logger.warn(
'For trace "{}" and modeling_period "{}", was not'
' able to format input data for {}.'
.format(self.trace.interpretation, modeling_period_label,
model)
)
self.fit_outputs[modeling_period_label] = {
"status": "FAILURE",
"traceback": traceback.format_exc(),
"start_date": None,
"end_date": None,
"rows": None,
}
continue
else:
input_description = self.formatter.describe_input(input_data)
outputs = {
"start_date": input_description.get('start_date'),
"end_date": input_description.get('end_date'),
"n_rows": input_description.get('n_rows'),
}
try:
outputs.update(model.fit(input_data))
except:
logger.warn(
'For trace "{}" and modeling_period "{}", {} was not'
' able to fit using input data: {}'
.format(self.trace.interpretation, modeling_period_label,
model, input_data)
)
outputs.update({
"status": "FAILURE",
"traceback": traceback.format_exc(),
})
else:
logger.info(
'Successfully fitted {} to formatted input data for'
' trace "{}" and modeling_period "{}".'
.format(model, self.trace.interpretation,
modeling_period_label)
)
outputs.update({"status": "SUCCESS"})
self.fit_outputs[modeling_period_label] = outputs
return self.fit_outputs
def predict(self, modeling_period_label, demand_fixture_data,
params=None):
''' Predict for any one of the modeling_periods associated with this
trace. Light wrapper around :code:`model.predict(` method.
Parameters
----------
modeling_period_label : str
Modeling period indicating which model to use in making the
prediction.
demand_fixture_data : object
Data (formatted by :code:`self.formatter`) over which prediction
should be made.
params : object, default None
Fitted parameters for the model. If :code:`None`, use parameters
found when :code:`.fit(` method was called.
'''
outputs = self.fit_outputs[modeling_period_label]
if outputs["status"] == "FAILURE":
logger.warn(
'Skipping prediction for modeling_period "{}" because'
' model fit failed.'.format(modeling_period_label)
)
return None
if params is None:
params = outputs["model_params"]
return self.model_mapping[modeling_period_label].predict(
demand_fixture_data, params)
def compute_derivative(self, modeling_period_label, derivative_callable,
**kwargs):
''' Compute a modeling derivative for this modeling period.
Parameters
----------
modeling_period_label : str
Label for modeling period for which derivative should be computed.
derivative_callable : callable
Callable which can be used as follows:
.. code-block:: python
>>> derivative_callable(formatter, model, **kwargs)
**kwargs
Arbitrary keyword arguments to be passed to the derivative callable
'''
outputs = self.fit_outputs[modeling_period_label]
if outputs["status"] == "FAILURE":
return None
model = self.model_mapping[modeling_period_label]
try:
derivative = derivative_callable(self.formatter, model, **kwargs)
except Exception:
logger.exception("Derivative computation failed.")
return None
return derivative
@staticmethod
def _filter_by_modeling_period(trace, modeling_period):
start = modeling_period.start_date
end = modeling_period.end_date
if start is None:
if end is None:
filtered_df = trace.data.copy()
else:
filtered_df = trace.data[:end].copy()
else:
if end is None:
filtered_df = trace.data[start:].copy()
else:
filtered_df = trace.data[start:end].copy()
# require NaN last data point as cap
if filtered_df.shape[0] > 0:
filtered_df.value.iloc[-1] = np.nan
filtered_df.estimated.iloc[-1] = False
return filtered_df
| impactlab/eemeter | eemeter/modeling/split.py | Python | mit | 7,118 | 0.000281 |
"""For seeding individual ops based on a graph-level seed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
_DEFAULT_GRAPH_SEED = 87654321
def get_seed(op_seed):
"""Returns the local seeds an operation should use given an op-specific seed.
Given an operation-specific seed, `op_seed`, this helper function returns two
seeds derived from the graph-level and op-level seeds. Many random operations
internally use the two seeds to allow the user to change the seed globally for a
graph, or for only specific operations.
For details on how the graph-level seed interacts with op seeds, see
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed).
Args:
op_seed: integer.
Returns:
A tuple of two integers that should be used for the local seed of this
operation.
"""
graph_seed = ops.get_default_graph().seed
if graph_seed is not None:
if op_seed is not None:
return graph_seed, op_seed
else:
return graph_seed, ops.get_default_graph()._last_id
else:
if op_seed is not None:
return _DEFAULT_GRAPH_SEED, op_seed
else:
return None, None
def set_random_seed(seed):
"""Sets the graph-level random seed.
Operations that rely on a random seed actually derive it from two seeds:
the graph-level and operation-level seeds. This sets the graph-level seed.
Its interactions with operation-level seeds are as follows:
1. If neither the graph-level nor the operation seed is set:
A random seed is used for this op.
2. If the graph-level seed is set, but the operation seed is not:
The system deterministically picks an operation seed in conjunction
with the graph-level seed so that it gets a unique random sequence.
3. If the graph-level seed is not set, but the operation seed is set:
A default graph-level seed and the specified operation seed are used to
determine the random sequence.
4. If both the graph-level and the operation seed are set:
Both seeds are used in conjunction to determine the random sequence.
To illustrate the user-visible effects, consider these examples:
To generate different sequences across sessions, set neither
graph-level nor op-level seeds:
```python
a = tf.random_uniform([1])
b = tf.random_normal([1])
print "Session 1"
with tf.Session() as sess1:
print sess1.run(a) # generates 'A1'
print sess1.run(a) # generates 'A2'
print sess1.run(b) # generates 'B1'
print sess1.run(b) # generates 'B2'
print "Session 2"
with tf.Session() as sess2:
print sess2.run(a) # generates 'A3'
print sess2.run(a) # generates 'A4'
print sess2.run(b) # generates 'B3'
print sess2.run(b) # generates 'B4'
```
To generate the same repeatable sequence for an op across sessions, set the
seed for the op:
```python
a = tf.random_uniform([1], seed=1)
b = tf.random_normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequence of values for 'a', but different sequences of values for 'b'.
print "Session 1"
with tf.Session() as sess1:
print sess1.run(a) # generates 'A1'
print sess1.run(a) # generates 'A2'
print sess1.run(b) # generates 'B1'
print sess1.run(b) # generates 'B2'
print "Session 2"
with tf.Session() as sess2:
print sess2.run(a) # generates 'A1'
print sess2.run(a) # generates 'A2'
print sess2.run(b) # generates 'B3'
print sess2.run(b) # generates 'B4'
```
To make the random sequences generated by all ops be repeatable across
sessions, set a graph-level seed:
```python
tf.set_random_seed(1234)
a = tf.random_uniform([1])
b = tf.random_normal([1])
# Repeatedly running this block with the same graph will generate different
# sequences of 'a' and 'b'.
print "Session 1"
with tf.Session() as sess1:
print sess1.run(a) # generates 'A1'
print sess1.run(a) # generates 'A2'
print sess1.run(b) # generates 'B1'
print sess1.run(b) # generates 'B2'
print "Session 2"
with tf.Session() as sess2:
print sess2.run(a) # generates 'A1'
print sess2.run(a) # generates 'A2'
print sess2.run(b) # generates 'B1'
print sess2.run(b) # generates 'B2'
```
Args:
seed: integer.
"""
ops.get_default_graph().seed = seed
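# --- Illustrative sketch (editor's addition, not part of TensorFlow) ---
# A graph-free mirror of the branching in get_seed() above, to make the four
# documented cases concrete. _derive_seeds and fallback_op_id are hypothetical
# names used only for this illustration; the real helper reads the default
# graph instead of taking the graph seed as an argument.
def _derive_seeds(graph_seed, op_seed, fallback_op_id=42):
    if graph_seed is not None:
        if op_seed is not None:
            return graph_seed, op_seed             # case 4: both seeds set
        return graph_seed, fallback_op_id          # case 2: graph-level only
    if op_seed is not None:
        return _DEFAULT_GRAPH_SEED, op_seed        # case 3: op-level only
    return None, None                              # case 1: neither set

assert _derive_seeds(None, None) == (None, None)
assert _derive_seeds(1234, None) == (1234, 42)
assert _derive_seeds(None, 7) == (_DEFAULT_GRAPH_SEED, 7)
assert _derive_seeds(1234, 7) == (1234, 7)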
|
arunhotra/tensorflow
|
tensorflow/python/framework/random_seed.py
|
Python
|
apache-2.0
| 4,427
| 0.002259
|
def address(self, data):
self.irc.send(self.privmsg("512 Shaw Court #105, Severn, MD 21144"))
|
Unallocated/UAS_IRC_Bot
|
modules/address.py
|
Python
|
gpl-3.0
| 195
| 0.010256
|
from django.db.models.fields import related
def _is_many_to_many_relation(field):
"""Check if a field specified a many-to-many relationship as defined by django.
This is the case if the field is an instance of the ManyToManyDescriptor as generated
by the django framework
Args:
field (django.db.models.fields): The field to check
Returns:
bool: true if the field is a many-to-many relationship
"""
return isinstance(field, related.ManyToManyDescriptor)
def _is_one_to_one_relation(field):
"""Check if a field specified a one-to-one relationship as defined by django.
This is the case if the field is an instance of the ForwardManyToOneDescriptor as generated
by the django framework
Args:
field (django.db.models.fields): The field to check
Returns:
bool: true if the field is a one-to-one relationship
"""
return isinstance(field, related.ForwardManyToOneDescriptor)
def _get_prefetchable_fields(serializer):
"""Get the fields that are prefetchable according to the serializer description.
Method mainly used for automatic schema generation.
Args:
serializer (Serializer): The serializer whose model is inspected for prefetchable fields.
"""
def _is_field_prefetchable(field):
return _is_one_to_one_relation(field) or _is_many_to_many_relation(field)
meta = getattr(serializer, "Meta", None)
if meta is None:
return []
model = getattr(meta, "model", None)
if model is None:
return []
fields = []
for field_name in dir(model):
field = getattr(model, field_name)
if _is_field_prefetchable(field):
# ManyToMany relationship can be reverse
if hasattr(field, 'reverse') and field.reverse:
fields.append((field_name, field.field.model))
else:
fields.append((field_name, field.field.related_model))
return fields
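# --- Illustrative sketch (editor's addition, not part of django-DefectDojo) ---
# With no Meta/model attribute on the serializer, _get_prefetchable_fields()
# falls back to an empty list, and plain values are not relation descriptors.
# DummySerializer is a hypothetical class used only for this check.
class DummySerializer(object):
    pass

assert _get_prefetchable_fields(DummySerializer()) == []
assert not _is_many_to_many_relation("not a descriptor")
assert not _is_one_to_one_relation(42)
# For a real DRF serializer whose Meta.model declares ForeignKey or
# ManyToManyField relations, the function instead returns
# (field_name, related_model) pairs suitable for queryset prefetching.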
|
rackerlabs/django-DefectDojo
|
dojo/api_v2/prefetch/utils.py
|
Python
|
bsd-3-clause
| 1,916
| 0.003132
|
"""
Evaluation of Python code in |jedi| is based on three assumptions:
* The code uses as few side effects as possible. Jedi understands certain
list/tuple/set modifications, but there's no guarantee that Jedi detects
everything (list.append in different modules for example).
* No magic is being used:
- metaclasses
- ``setattr()`` / ``__import__()``
- writing to ``globals()``, ``locals()``, ``object.__dict__``
* The programmer is not a total dick, e.g. like `this
<https://github.com/davidhalter/jedi/issues/24>`_ :-)
The actual algorithm is based on a principle called lazy evaluation. If you
don't know about it, google it. That said, the typical entry point for static
analysis is calling ``eval_statement``. There's separate logic for
autocompletion in the API; the evaluator is all about evaluating an expression.
Now you need to understand what follows after ``eval_statement``. Let's
make an example::
import datetime
datetime.date.toda# <-- cursor here
First of all, this module doesn't care about completion. It really just cares
about ``datetime.date``. At the end of the procedure ``eval_statement`` will
return the ``date`` class.
To *visualize* this (simplified):
- ``Evaluator.eval_statement`` doesn't do much, because there's no assignment.
- ``Evaluator.eval_element`` cares for resolving the dotted path
- ``Evaluator.find_types`` searches for global definitions of datetime, which
it finds in the definition of an import, by scanning the syntax tree.
- Using the import logic, the datetime module is found.
- Now ``find_types`` is called again by ``eval_element`` to find ``date``
inside the datetime module.
Now what would happen if we wanted ``datetime.date.foo.bar``? Two more
calls to ``find_types``. However the second call would be ignored, because the
first one would return nothing (there's no foo attribute in ``date``).
What if the import would contain another ``ExprStmt`` like this::
from foo import bar
Date = bar.baz
Well... You get it. Just another ``eval_statement`` recursion. It's really
easy. Python can obviously get way more complicated than this. To understand
tuple assignments, list comprehensions and everything else, a lot more code had
to be written.
Jedi has been tested very well, so you can just start modifying code. It's best
to write your own test first for your "new" feature. Don't be scared of
breaking stuff. As long as the tests pass, you're most likely to be fine.
I need to mention now that lazy evaluation is really good because it
only *evaluates* what needs to be *evaluated*. All the statements and modules
that are not used are just being ignored.
"""
import copy
import sys
from jedi.parser.python import tree
from jedi import debug
from jedi.common import unite
from jedi.evaluate import representation as er
from jedi.evaluate import imports
from jedi.evaluate import recursion
from jedi.evaluate import iterable
from jedi.evaluate.cache import memoize_default
from jedi.evaluate import stdlib
from jedi.evaluate import finder
from jedi.evaluate import compiled
from jedi.evaluate import precedence
from jedi.evaluate import param
from jedi.evaluate import helpers
from jedi.evaluate import pep0484
from jedi.evaluate.filters import TreeNameDefinition, ParamName
from jedi.evaluate.instance import AnonymousInstance, BoundMethod
from jedi.evaluate.context import ContextualizedName, ContextualizedNode
class Evaluator(object):
def __init__(self, grammar, sys_path=None):
self.grammar = grammar
self.memoize_cache = {} # for memoize decorators
# To memorize modules -> equals `sys.modules`.
self.modules = {} # like `sys.modules`.
self.compiled_cache = {} # see `evaluate.compiled.create()`
self.mixed_cache = {} # see `evaluate.compiled.mixed.create()`
self.analysis = []
self.dynamic_params_depth = 0
self.is_analysis = False
self.python_version = sys.version_info[:2]
if sys_path is None:
sys_path = sys.path
self.sys_path = copy.copy(sys_path)
try:
self.sys_path.remove('')
except ValueError:
pass
self.reset_recursion_limitations()
# Constants
self.BUILTINS = compiled.get_special_object(self, 'BUILTINS')
def reset_recursion_limitations(self):
self.recursion_detector = recursion.RecursionDetector()
self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self)
def find_types(self, context, name_or_str, name_context, position=None,
search_global=False, is_goto=False):
"""
This is the search function. The most important part to debug.
`remove_statements` and `filter_statements` really are the core part of
this completion.
:param position: Position of the last statement -> tuple of line, column
:return: List of Names. Their parents are the types.
"""
f = finder.NameFinder(self, context, name_context, name_or_str, position)
filters = f.get_filters(search_global)
if is_goto:
return f.filter_name(filters)
return f.find(filters, attribute_lookup=not search_global)
def eval_statement(self, context, stmt, seek_name=None):
with recursion.execution_allowed(self, stmt) as allowed:
if allowed or context.get_root_context() == self.BUILTINS:
return self._eval_stmt(context, stmt, seek_name)
return set()
#@memoize_default(default=[], evaluator_is_first_arg=True)
@debug.increase_indent
def _eval_stmt(self, context, stmt, seek_name=None):
"""
The starting point of the completion. A statement always owns a call
list, which are the calls, that a statement does. In case multiple
names are defined in the statement, `seek_name` returns the result for
this name.
:param stmt: A `tree.ExprStmt`.
"""
debug.dbg('eval_statement %s (%s)', stmt, seek_name)
rhs = stmt.get_rhs()
types = self.eval_element(context, rhs)
if seek_name:
c_node = ContextualizedName(context, seek_name)
types = finder.check_tuple_assignments(self, c_node, types)
first_operation = stmt.first_operation()
if first_operation not in ('=', None) and first_operation.type == 'operator':
# `=` is always the last character in aug assignments -> -1
operator = copy.copy(first_operation)
operator.value = operator.value[:-1]
name = str(stmt.get_defined_names()[0])
left = context.py__getattribute__(
name, position=stmt.start_pos, search_global=True)
for_stmt = tree.search_ancestor(stmt, 'for_stmt')
if for_stmt is not None and for_stmt.type == 'for_stmt' and types \
and for_stmt.defines_one_name():
# Iterate through result and add the values, that's possible
# only in for loops without clutter, because they are
# predictable. Also only do it, if the variable is not a tuple.
node = for_stmt.get_input_node()
cn = ContextualizedNode(context, node)
ordered = list(iterable.py__iter__(self, cn.infer(), cn))
for lazy_context in ordered:
dct = {str(for_stmt.children[1]): lazy_context.infer()}
with helpers.predefine_names(context, for_stmt, dct):
t = self.eval_element(context, rhs)
left = precedence.calculate(self, context, left, operator, t)
types = left
else:
types = precedence.calculate(self, context, left, operator, types)
debug.dbg('eval_statement result %s', types)
return types
def eval_element(self, context, element):
if isinstance(context, iterable.CompForContext):
return self._eval_element_not_cached(context, element)
if_stmt = element
while if_stmt is not None:
if_stmt = if_stmt.parent
if if_stmt.type in ('if_stmt', 'for_stmt'):
break
if if_stmt.is_scope():
if_stmt = None
break
predefined_if_name_dict = context.predefined_names.get(if_stmt)
if predefined_if_name_dict is None and if_stmt and if_stmt.type == 'if_stmt':
if_stmt_test = if_stmt.children[1]
name_dicts = [{}]
# If we already did a check, we don't want to do it again -> If
# context.predefined_names is filled, we stop.
# We don't want to check the if stmt itself, it's just about
# the content.
if element.start_pos > if_stmt_test.end_pos:
# Now we need to check if the names in the if_stmt match the
# names in the suite.
if_names = helpers.get_names_of_node(if_stmt_test)
element_names = helpers.get_names_of_node(element)
str_element_names = [str(e) for e in element_names]
if any(str(i) in str_element_names for i in if_names):
for if_name in if_names:
definitions = self.goto_definitions(context, if_name)
# Every name that has multiple different definitions
# causes the complexity to rise. The complexity should
# never fall below 1.
if len(definitions) > 1:
if len(name_dicts) * len(definitions) > 16:
debug.dbg('Too many options for if branch evaluation %s.', if_stmt)
# There's only a certain amount of branches
# Jedi can evaluate, otherwise it will take too
# long.
name_dicts = [{}]
break
original_name_dicts = list(name_dicts)
name_dicts = []
for definition in definitions:
new_name_dicts = list(original_name_dicts)
for i, name_dict in enumerate(new_name_dicts):
new_name_dicts[i] = name_dict.copy()
new_name_dicts[i][str(if_name)] = set([definition])
name_dicts += new_name_dicts
else:
for name_dict in name_dicts:
name_dict[str(if_name)] = definitions
if len(name_dicts) > 1:
result = set()
for name_dict in name_dicts:
with helpers.predefine_names(context, if_stmt, name_dict):
result |= self._eval_element_not_cached(context, element)
return result
else:
return self._eval_element_if_evaluated(context, element)
else:
if predefined_if_name_dict:
return self._eval_element_not_cached(context, element)
else:
return self._eval_element_if_evaluated(context, element)
def _eval_element_if_evaluated(self, context, element):
"""
TODO This function is temporary: Merge with eval_element.
"""
parent = element
while parent is not None:
parent = parent.parent
predefined_if_name_dict = context.predefined_names.get(parent)
if predefined_if_name_dict is not None:
return self._eval_element_not_cached(context, element)
return self._eval_element_cached(context, element)
@memoize_default(default=set(), evaluator_is_first_arg=True)
def _eval_element_cached(self, context, element):
return self._eval_element_not_cached(context, element)
@debug.increase_indent
def _eval_element_not_cached(self, context, element):
debug.dbg('eval_element %s@%s', element, element.start_pos)
types = set()
typ = element.type
if typ in ('name', 'number', 'string', 'atom'):
types = self.eval_atom(context, element)
elif typ == 'keyword':
# For False/True/None
if element.value in ('False', 'True', 'None'):
types.add(compiled.builtin_from_name(self, element.value))
# else: print e.g. could be evaluated like this in Python 2.7
elif typ == 'lambda':
types = set([er.FunctionContext(self, context, element)])
elif typ == 'expr_stmt':
types = self.eval_statement(context, element)
elif typ in ('power', 'atom_expr'):
first_child = element.children[0]
if not (first_child.type == 'keyword' and first_child.value == 'await'):
types = self.eval_atom(context, first_child)
for trailer in element.children[1:]:
if trailer == '**': # has a power operation.
right = self.eval_element(context, element.children[2])
types = set(precedence.calculate(self, context, types, trailer, right))
break
types = self.eval_trailer(context, types, trailer)
elif typ in ('testlist_star_expr', 'testlist',):
# The implicit tuple in statements.
types = set([iterable.SequenceLiteralContext(self, context, element)])
elif typ in ('not_test', 'factor'):
types = self.eval_element(context, element.children[-1])
for operator in element.children[:-1]:
types = set(precedence.factor_calculate(self, types, operator))
elif typ == 'test':
# `x if foo else y` case.
types = (self.eval_element(context, element.children[0]) |
self.eval_element(context, element.children[-1]))
elif typ == 'operator':
# Must be an ellipsis, other operators are not evaluated.
assert element.value == '...'
types = set([compiled.create(self, Ellipsis)])
elif typ == 'dotted_name':
types = self.eval_atom(context, element.children[0])
for next_name in element.children[2::2]:
# TODO add search_global=True?
types = unite(
typ.py__getattribute__(next_name, name_context=context)
for typ in types
)
types = types
elif typ == 'eval_input':
types = self._eval_element_not_cached(context, element.children[0])
elif typ == 'annassign':
types = pep0484._evaluate_for_annotation(context, element.children[1])
else:
types = precedence.calculate_children(self, context, element.children)
debug.dbg('eval_element result %s', types)
return types
def eval_atom(self, context, atom):
"""
Basically to process ``atom`` nodes. The parser sometimes doesn't
generate the node (because it has just one child). In that case an atom
might be a name or a literal as well.
"""
if atom.type == 'name':
# This is the first global lookup.
stmt = atom.get_definition()
if stmt.type == 'comp_for':
stmt = tree.search_ancestor(stmt, ('expr_stmt', 'lambda', 'funcdef', 'classdef'))
if stmt is None or stmt.type != 'expr_stmt':
# We only need to adjust the start_pos for statements, because
# there the name cannot be used.
stmt = atom
return context.py__getattribute__(
name_or_str=atom,
position=stmt.start_pos,
search_global=True
)
elif isinstance(atom, tree.Literal):
return set([compiled.create(self, atom.eval())])
else:
c = atom.children
if c[0].type == 'string':
# Will be one string.
types = self.eval_atom(context, c[0])
for string in c[1:]:
right = self.eval_atom(context, string)
types = precedence.calculate(self, context, types, '+', right)
return types
# Parentheses without commas are not tuples.
elif c[0] == '(' and not len(c) == 2 \
and not(c[1].type == 'testlist_comp' and
len(c[1].children) > 1):
return self.eval_element(context, c[1])
try:
comp_for = c[1].children[1]
except (IndexError, AttributeError):
pass
else:
if comp_for == ':':
# Dict comprehensions have a colon at the 3rd index.
try:
comp_for = c[1].children[3]
except IndexError:
pass
if comp_for.type == 'comp_for':
return set([iterable.Comprehension.from_atom(self, context, atom)])
# It's a dict/list/tuple literal.
array_node = c[1]
try:
array_node_c = array_node.children
except AttributeError:
array_node_c = []
if c[0] == '{' and (array_node == '}' or ':' in array_node_c):
context = iterable.DictLiteralContext(self, context, atom)
else:
context = iterable.SequenceLiteralContext(self, context, atom)
return set([context])
def eval_trailer(self, context, types, trailer):
trailer_op, node = trailer.children[:2]
if node == ')': # `arglist` is optional.
node = ()
new_types = set()
if trailer_op == '[':
new_types |= iterable.py__getitem__(self, context, types, trailer)
else:
for typ in types:
debug.dbg('eval_trailer: %s in scope %s', trailer, typ)
if trailer_op == '.':
new_types |= typ.py__getattribute__(
name_context=context,
name_or_str=node
)
elif trailer_op == '(':
arguments = param.TreeArguments(self, context, node, trailer)
new_types |= self.execute(typ, arguments)
return new_types
@debug.increase_indent
def execute(self, obj, arguments):
if not isinstance(arguments, param.AbstractArguments):
raise NotImplementedError
arguments = param.Arguments(self, arguments)
if self.is_analysis:
arguments.eval_all()
debug.dbg('execute: %s %s', obj, arguments)
try:
# Some stdlib functions like super(), namedtuple(), etc. have been
# hard-coded in Jedi to support them.
return stdlib.execute(self, obj, arguments)
except stdlib.NotInStdLib:
pass
try:
func = obj.py__call__
except AttributeError:
debug.warning("no execution possible %s", obj)
return set()
else:
types = func(arguments)
debug.dbg('execute result: %s in %s', types, obj)
return types
def goto_definitions(self, context, name):
def_ = name.get_definition()
is_simple_name = name.parent.type not in ('power', 'trailer')
if is_simple_name:
if name.parent.type == 'classdef' and name.parent.name == name:
return [er.ClassContext(self, name.parent, context)]
elif name.parent.type == 'funcdef':
return [er.FunctionContext(self, context, name.parent)]
elif name.parent.type == 'file_input':
raise NotImplementedError
if def_.type == 'expr_stmt' and name in def_.get_defined_names():
return self.eval_statement(context, def_, name)
elif def_.type == 'for_stmt':
container_types = self.eval_element(context, def_.children[3])
cn = ContextualizedNode(context, def_.children[3])
for_types = iterable.py__iter__types(self, container_types, cn)
c_node = ContextualizedName(context, name)
return finder.check_tuple_assignments(self, c_node, for_types)
elif def_.type in ('import_from', 'import_name'):
return imports.infer_import(context, name)
return helpers.evaluate_call_of_leaf(context, name)
def goto(self, context, name):
stmt = name.get_definition()
par = name.parent
if par.type == 'argument' and par.children[1] == '=' and par.children[0] == name:
# Named param goto.
trailer = par.parent
if trailer.type == 'arglist':
trailer = trailer.parent
if trailer.type != 'classdef':
if trailer.type == 'decorator':
types = self.eval_element(context, trailer.children[1])
else:
i = trailer.parent.children.index(trailer)
to_evaluate = trailer.parent.children[:i]
types = self.eval_element(context, to_evaluate[0])
for trailer in to_evaluate[1:]:
types = self.eval_trailer(context, types, trailer)
param_names = []
for typ in types:
try:
get_param_names = typ.get_param_names
except AttributeError:
pass
else:
for param_name in get_param_names():
if param_name.string_name == name.value:
param_names.append(param_name)
return param_names
elif par.type == 'expr_stmt' and name in par.get_defined_names():
# Only take the parent, because if it's more complicated than just
# a name it's something you can "goto" again.
return [TreeNameDefinition(context, name)]
elif par.type == 'param' and par.name:
return [ParamName(context, name)]
elif isinstance(par, (tree.Param, tree.Function, tree.Class)) and par.name is name:
return [TreeNameDefinition(context, name)]
elif isinstance(stmt, tree.Import):
module_names = imports.infer_import(context, name, is_goto=True)
return module_names
elif par.type == 'dotted_name': # Is a decorator.
index = par.children.index(name)
if index > 0:
new_dotted = helpers.deep_ast_copy(par)
new_dotted.children[index - 1:] = []
values = self.eval_element(context, new_dotted)
return unite(
value.py__getattribute__(name, name_context=context, is_goto=True)
for value in values
)
if par.type == 'trailer' and par.children[0] == '.':
values = helpers.evaluate_call_of_leaf(context, name, cut_own_trailer=True)
return unite(
value.py__getattribute__(name, name_context=context, is_goto=True)
for value in values
)
else:
if stmt.type != 'expr_stmt':
# We only need to adjust the start_pos for statements, because
# there the name cannot be used.
stmt = name
return context.py__getattribute__(
name,
position=stmt.start_pos,
search_global=True, is_goto=True
)
def create_context(self, base_context, node, node_is_context=False, node_is_object=False):
def parent_scope(node):
while True:
node = node.parent
if node.is_scope():
return node
elif node.type in ('argument', 'testlist_comp'):
if node.children[1].type == 'comp_for':
return node.children[1]
elif node.type == 'dictorsetmaker':
for n in node.children[1:4]:
# In dictionaries it can be pretty much anything.
if n.type == 'comp_for':
return n
def from_scope_node(scope_node, child_is_funcdef=None, is_nested=True, node_is_object=False):
if scope_node == base_node:
return base_context
is_funcdef = scope_node.type in ('funcdef', 'lambda')
parent_scope = scope_node.get_parent_scope()
parent_context = from_scope_node(parent_scope, child_is_funcdef=is_funcdef)
if is_funcdef:
if isinstance(parent_context, AnonymousInstance):
func = BoundMethod(
self, parent_context, parent_context.class_context,
parent_context.parent_context, scope_node
)
else:
func = er.FunctionContext(
self,
parent_context,
scope_node
)
if is_nested and not node_is_object:
return func.get_function_execution()
return func
elif scope_node.type == 'classdef':
class_context = er.ClassContext(self, scope_node, parent_context)
if child_is_funcdef:
# anonymous instance
return AnonymousInstance(self, parent_context, class_context)
else:
return class_context
elif scope_node.type == 'comp_for':
if node.start_pos >= scope_node.children[-1].start_pos:
return parent_context
return iterable.CompForContext.from_comp_for(parent_context, scope_node)
raise Exception("There's a scope that was not managed.")
base_node = base_context.tree_node
if node_is_context and node.is_scope():
scope_node = node
else:
if node.parent.type in ('funcdef', 'classdef'):
# When we're on class/function names/leafs that define the
# object itself and not its contents.
node = node.parent
scope_node = parent_scope(node)
return from_scope_node(scope_node, is_nested=True, node_is_object=node_is_object)
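# --- Illustrative sketch (editor's addition, not part of jedi) ---
# The module docstring above walks through how ``datetime.date.toda`` gets
# resolved. Driving the same machinery through jedi's public API looks roughly
# like the snippet below; the file name 'example.py' is arbitrary and the
# guard keeps the sketch from running when the package is imported.
if __name__ == '__main__':
    import jedi
    source = "import datetime\ndatetime.date.toda"
    script = jedi.Script(source, 2, len("datetime.date.toda"), 'example.py')
    print([c.name for c in script.completions()])  # expected to include 'today'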
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/jedi/evaluate/__init__.py
|
Python
|
bsd-2-clause
| 26,873
| 0.001191
|
import os
import sys
import shutil
import straight.plugin
import numpy as np
import pkg_resources
from os import path
from core import utils
from core import argparser
from core import log
from core import parser
def main():
## Parse arguments
ap = argparser.init_arg_parser()
options = ap.parse_args()
## Collect input gbks from folder
input_files = []
if not path.isdir(options["input_folder"]):
log.error("Specified folder didn't exist '%s'" % (options["input_folder"]))
sys.exit(1)
else:
for filename in os.listdir(options["input_folder"]):
filepath = path.join(options["input_folder"], filename)
if not path.isdir(filepath):
ext = path.splitext(filepath)[1][1:]
if ext in ["gbk"]:
input_files.append(filename)
## Initial check parameters
metadata = {}
if options["mode"] == "train":
## check and load metadata file
if not path.exists(options["training_metadata"]):
log.error("Specified file didn't exist '%s'" % (options["training_metadata"]))
sys.exit(1)
else:
metadata = parser.parse_training_metadata(options["training_metadata"])
options["single_values"] = [[]] * len(input_files)
options["train_set"] = []
options["test_set"] = []
# remove GBKs not listed in metadata
input_files[:] = [bgc for bgc in input_files if utils.get_bgc_name(bgc) in metadata["bgc"]]
# features
if "features" not in options:
if "features" not in metadata:
options["features"] = [{"name": plugin.name, "params": [], "subs": [sub for sub in plugin.features]} for plugin in utils.load_plugins("feature_extraction")]
else:
options["features"] = metadata["features"]
# algorithm mode (classification / regression)
if metadata["mode"] == "CLASSIFICATION":
options["algo_mode"] = "classification"
if "algorithm" not in options:
if "algorithm" not in metadata:
options["algorithm"] = {"name": "svm", "params": []}
else:
options["algorithm"] = metadata["algorithm"]
elif metadata["mode"] == "REGRESSION":
options["algo_mode"] = "regression"
if "algorithm" not in options:
if "algorithm" not in metadata:
options["algorithm"] = {"name": "linear_regression", "params": []}
else:
options["algorithm"] = metadata["algorithm"]
else:
log.error("Incorrect metadata file format '%s'" % (options["training_metadata"]))
sys.exit(1)
# single values (from right hand side of data column) & train/test set distribution
for i, fp in enumerate(input_files):
bgc_id = utils.get_bgc_name(fp)
if bgc_id in metadata["bgc"]:
idx_meta = metadata["bgc"].index(bgc_id)
options["single_values"][i] = metadata["single_values"][idx_meta]
if idx_meta in metadata["train_set"]:
options["train_set"].append(i)
if idx_meta in metadata["test_set"]:
options["test_set"].append(i)
else:
log.error("'%s' is not included in your metadata" % (bgc_id))
sys.exit(1)
# pair values for training set (from its own table from the metadata)
options["train_pair_values"] = [[None] * len(options["train_set"]) for _ in range(len(options["train_set"]))]
for i, idx1 in enumerate(options["train_set"]):
for j, idx2 in enumerate(options["train_set"]):
if len(metadata["train_pair_values"]) > i and len(metadata["train_pair_values"][i]) > j:
options["train_pair_values"][i][j] = metadata["train_pair_values"][i][j]
# pair values for test set (from its own table from the metadata)
options["test_pair_values"] = [[None] * len(options["test_set"]) for _ in range(len(options["test_set"]))]
for i, idx1 in enumerate(options["test_set"]):
for j, idx2 in enumerate(options["test_set"]):
if len(metadata["test_pair_values"]) > i and len(metadata["test_pair_values"][i]) > j:
options["test_pair_values"][i][j] = metadata["test_pair_values"][i][j]
if options["mode"] == "predict":
## check and load model file
print "..."
## further checks..
algo_type = utils.get_algo_type(options["algorithm"]["name"])
if algo_type not in ["classification", "regression"]:
log.error("Selected algorithm '%s' did not exist" % (algo["name"]))
sys.exit(1)
if options["algo_mode"] != algo_type:
log.error("Selected algorithm '%s' is for %s, but the provided data is for %s." % (options["algorithm"]["name"], algo_type, options["algo_mode"]))
sys.exit(1)
options["features_scope"] = ""
for idx, feature in enumerate(options["features"]):
for plugin in utils.load_plugins("feature_extraction"):
if plugin.name == feature["name"]:
if len(options["features_scope"]) > 0 and plugin.scope != options["features_scope"]:
log.error("You selected features of different scope ('%s:%s', '%s:%s'). Please select only combination of features with the same scope." % (feature["name"], plugin.scope, options["features"][idx - 1]["name"], options["features_scope"]))
sys.exit(1)
options["features_scope"] = plugin.scope
break
if len(feature["subs"]) < 1:
for plugin in utils.load_plugins("feature_extraction"):
if plugin.name == feature["name"]:
feature["subs"].extend(plugin.features)
break
for sub in feature["subs"]:
for plugin in utils.load_plugins("feature_extraction"):
if plugin.name == feature["name"]:
if sub not in plugin.features:
log.error("Feature unknown: '%s'" % sub)
sys.exit(1)
## Check output folder
if not options["output_folder"]:
options["output_folder"] = path.join(os.getcwd(), path.basename(options["input_folder"]))
if path.isdir(options["output_folder"]):
# output folder exist, probable disrupted job
if not options["continue"] and not options["overwrite"]:
log.error("Output folder '%s' exist. Previous run? use --continue to continue, or --overwrite to start over." % options["output_folder"])
sys.exit(1)
elif options["overwrite"]:
shutil.rmtree(options["output_folder"])
os.makedirs(options["output_folder"])
elif options["reset_preprocesses"]:
bgcjsonpath = path.join(options["output_folder"], "bgcjson")
if path.exists(bgcjsonpath):
shutil.rmtree(bgcjsonpath)
else:
os.makedirs(options["output_folder"])
## Parse gbks
## TODO: multi-threading?
log.info("Started preprocessing input files..")
utils.print_progress(0, len(input_files), prefix='Preprocessing input GBKs..', suffix='', decimals=1)
for i, filename in enumerate(input_files):
filepath = path.join(options["input_folder"], filename)
if not (path.exists(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(filepath)))):
bgc = parser.parse_gbk(filepath)
if bgc is not None:
utils.save_bgcjson(bgc, options["output_folder"])
utils.print_progress(i + 1, len(input_files), prefix='Preprocessing input GBKs..', suffix='', decimals=1, bar_length=100)
log.info("Finished preprocessing input files..")
## Do feature extraction
# step 1: make folder structure & index file
feature_folder = utils.create_feature_folder(input_files, options["output_folder"])
# step 2: traverse FE modules and run algorithms, then save the results
feature_extraction_plugins = []
for plugin in utils.load_plugins("feature_extraction"):
if ("features" not in options) or (plugin.name in [feature["name"] for feature in options["features"]]):
feature_extraction_plugins.append(plugin)
# calculate features
options["feature_values"] = {}
if options["features_scope"] == "pair":
log.info("Started feature extraction for all BGC pairs..")
nrcomb = len(input_files) * (len(input_files) - 1) / 2
count = 0
utils.print_progress(0, nrcomb, prefix='Feature extraction..', suffix='', decimals=1)
for i, fn1 in enumerate(input_files):
for j, fn2 in enumerate(input_files):
if i < j:
bgc1 = parser.parse_bgcjson(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(fn1)))
bgc2 = parser.parse_bgcjson(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(fn2)))
for plugin in feature_extraction_plugins:
if plugin.name not in options["feature_values"]:
options["feature_values"][plugin.name] = {}
results = plugin.calculate(bgc1, bgc2)
options["feature_values"][plugin.name]["%d+%d" % (i, j)] = [float(result) for result in results]
count += 1
utils.print_progress(count, nrcomb, prefix='Feature extraction..', suffix='', decimals=1)
elif options["features_scope"] == "single":
log.info("Started feature extraction for all BGCs..")
count = 0
utils.print_progress(0, len(input_files), prefix='Feature extraction..', suffix='', decimals=1)
for i, fn in enumerate(input_files):
bgc = parser.parse_bgcjson(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(fn)))
for plugin in feature_extraction_plugins:
if plugin.name not in options["feature_values"]:
options["feature_values"][plugin.name] = {}
results = plugin.calculate(bgc)
options["feature_values"][plugin.name]["%d" % (i)] = [float(result) for result in results]
count += 1
utils.print_progress(count, len(input_files), prefix='Feature extraction..', suffix='', decimals=1)
else:
log.error("Invalid features scope: '%s'" % options["features_scope"])
sys.exit(1)
## Load features & value matrix
features_rows = []
if options["features_scope"] == "pair":
for i, fn1 in enumerate(input_files):
for j, fn2 in enumerate(input_files):
if i < j:
features_rows.append([i, j])
elif options["features_scope"] == "single":
for i in xrange(0, len(input_files)):
features_rows.append([i])
else:
log.error("Invalid features scope: '%s'" % options["features_scope"])
sys.exit(1)
if "features_columns" not in options:
options["features_columns"] = []
for feature in options["features"]:
for sub in feature["subs"]:
options["features_columns"].append("%s.%s" % (feature["name"], sub))
features_matrix = {}
for row_ids in ["+".join([str(row_id) for row_id in row_ids]) for row_ids in features_rows]:
row = [None] * len(options["features_columns"])
for plugin in feature_extraction_plugins:
plugin_folder = path.join(feature_folder, plugin.name)
values = options["feature_values"][plugin.name][row_ids]
if (len(values) != len(plugin.features)):
# technically impossible to reach this, unless output from calculate != #of results expected
log.error("...")
sys.exit(1)
else:
for n, col in enumerate(plugin.features):
colname = ("%s.%s" % (plugin.name, col))
if colname in options["features_columns"]:
row[options["features_columns"].index(colname)] = values[n]
features_matrix[row_ids] = row
## Execute algorithms & save results
if options["mode"] == "train":
## Fetch feature & values training matrix
training_matrix = []
training_target = []
training_rownames = []
if options["features_scope"] == "pair":
for i, idx1 in enumerate(options["train_set"]):
for j, idx2 in enumerate(options["train_set"]):
if idx1 < idx2:
training_matrix.append(features_matrix["%d+%d" % (idx1, idx2)])
training_rownames.append("%s+%s" % (utils.get_bgc_name(input_files[idx1]), utils.get_bgc_name(input_files[idx2])))
if options["algo_mode"] == "classification":
class1 = options["single_values"][idx1].split(",")
class2 = options["single_values"][idx2].split(",")
training_target.append(int(len(set(class1) & set(class2)) > 0))
elif options["algo_mode"] == "regression":
training_target.append(float(options["train_pair_values"][i][j]))
elif options["features_scope"] == "single":
for idx in options["train_set"]:
training_matrix.append(features_matrix["%d" % (idx)])
training_rownames.append("%s" % (utils.get_bgc_name(input_files[idx])))
training_target.append(options["single_values"][idx])
training_matrix = np.array(training_matrix)
training_target = np.array(training_target)
## Fetch feature & values testing matrix
testing_matrix = []
testing_target = []
testing_rownames = []
if options["features_scope"] == "pair":
for i, idx1 in enumerate(options["test_set"]):
for j, idx2 in enumerate(options["test_set"]):
if idx1 < idx2:
testing_matrix.append(features_matrix["%d+%d" % (idx1, idx2)])
testing_rownames.append("%s+%s" % (utils.get_bgc_name(input_files[idx1]), utils.get_bgc_name(input_files[idx2])))
if options["algo_mode"] == "classification":
class1 = options["single_values"][idx1].split(",")
class2 = options["single_values"][idx2].split(",")
testing_target.append(int(len(set(class1) & set(class2)) > 0))
elif options["algo_mode"] == "regression":
testing_target.append(float(options["test_pair_values"][i][j]))
elif options["features_scope"] == "single":
for idx in options["test_set"]:
testing_matrix.append(features_matrix["%d" % (idx)])
testing_rownames.append("%s" % (utils.get_bgc_name(input_files[idx])))
testing_target.append(options["single_values"][idx])
testing_matrix = np.array(testing_matrix)
testing_target = np.array(testing_target)
## Load the training model
module = None
for plugin in utils.load_plugins(options["algo_mode"]):
if plugin.name == options["algorithm"]["name"]:
module = plugin
break
if module == None:
log.error("Failed to load module: '%s.%s'" % (options["algo_mode"], options["algorithm"]["name"]))
sys.exit(1)
else:
log.info("Training model...")
classifier = module.train(training_matrix, training_target, options["algorithm"]["params"])
# save model & its metadata to file
model_metadata = {
"mode": options["algo_mode"],
"algorithm": options["algorithm"],
"features": options["features"],
"columns": options["features_columns"],
"training_data_count": len(training_matrix),
"environment": {
"bgc-learn": utils.get_version(),
"scikit-learn": pkg_resources.get_distribution("scikit-learn").version,
"numpy": pkg_resources.get_distribution("numpy").version,
"scipy": pkg_resources.get_distribution("scipy").version,
}
}
save_name = utils.save_result_model(classifier, model_metadata, options["output_folder"])
# calculate accuracies & save summaries
result_training = ({}, [])
if len(training_matrix) > 0:
result_training = module.test(training_matrix, training_target, classifier)
utils.save_result_testing("training-%s" % (save_name), training_rownames, options["features_columns"], training_matrix, training_target, result_training, options["output_folder"])
result_testing = ({}, [])
if len(testing_matrix) > 0:
result_testing = module.test(testing_matrix, testing_target, classifier)
utils.save_result_testing("testing-%s" % (save_name), testing_rownames, options["features_columns"], testing_matrix, testing_target, result_testing, options["output_folder"])
elif options["mode"] == "predict":
print "..."
## Cleanup
log.info("Cleaning up..")
shutil.rmtree(feature_folder) # remove feature folder
## done
log.info("Analysis done. your result is available inside the folder '%s'." % options["output_folder"])
if __name__ == "__main__":
main()
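# --- Illustrative sketch (editor's addition, not part of bgc-learn) ---
# Feature rows above are keyed by BGC index: "i+j" for pair-scoped features and
# "i" for single-scoped ones, mirroring the loops in main(). A toy stand-alone
# illustration of that keying scheme (_row_keys is a hypothetical helper):
def _row_keys(n_inputs, scope):
    if scope == "pair":
        return ["%d+%d" % (i, j)
                for i in range(n_inputs)
                for j in range(n_inputs) if i < j]
    return ["%d" % i for i in range(n_inputs)]

assert _row_keys(3, "pair") == ["0+1", "0+2", "1+2"]
assert _row_keys(3, "single") == ["0", "1", "2"]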
|
satriaphd/bgc-learn
|
bgc-learn.py
|
Python
|
gpl-3.0
| 18,044
| 0.004434
|
# Copyright (c) 2011-2013 Peng Sun. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYRIGHT file.
# hone_lib.py
# provide library for mgmt program to create dataflow
import inspect
from cStringIO import StringIO
import hone_rts
from hone_util import LogUtil
from hone_message import *
globalFlowId = 0
''' class for data flow '''
def getNextFlowId():
global globalFlowId
globalFlowId += 1
return globalFlowId
class HoneDataFlow:
def __init__(self, q, operator):
self.flow = []
self.subFlows = [] #list of class HoneDataFlow items. Merged flows
self.flowId = getNextFlowId()
if (q != None):
self.flow.append(q)
if (operator != None):
self.flow.append(operator)
#debugLog('lib', 'new HoneDataFlow', self.flow)
def __rshift__(self, other):
#debugLog('lib', 'In rshift of HoneDataFlow', 'self', self.flow, 'other', \
# other.flow)
self.flow = self.flow + other.flow
return self
def addSubFlow(self, x):
self.subFlows.append(x)
def printDataFlow(self):
buf = StringIO()
print >>buf, 'flow id: ',self.flowId
if (isinstance(self.flow[0], HoneQuerySerialized)):
print >>buf, 'Select:',self.flow[0].se
print >>buf, 'From:',self.flow[0].ft
print >>buf, 'Where:',self.flow[0].wh
print >>buf, 'Groupby:',self.flow[0].gp
print >>buf, 'Every:',self.flow[0].ev
print >>buf, 'Aggregate:',self.flow[0].agg
print >>buf, self.flow[1:]
else:
print >>buf, self.flow
print >>buf, '\n'
ret = buf.getvalue()
buf.close()
for subFlow in self.subFlows:
ret += subFlow.printDataFlow()
return ret
def getFlowCriterion(self):
return self.flow[0].wh
''' query part '''
class HoneQuery:
def __init__(self,var,ft,wh,gp,every,agg,compose):
self.complete = False
self.var = var
self.ft = ft
self.wh = wh
self.gp = gp
self.every = every
self.agg = agg
self.compose = compose
def __rshift__(self, other):
HoneQuerySyntaxCheck(self)
#debugLog('lib', 'new HoneQuery instance created', self.printQuery())
return self.convertToHoneDataFlow() >> other
def __mul__(self, other):
otherName = other.__class__.__name__
if otherName=='HoneQuery':
return other.compose(self)
else:
raise Exception('HoneQuery cannot compose with %s' % otherName)
def printQuery(self):
ret = StringIO()
print >>ret, 'HoneQuery Select:',self.var
print >>ret, 'HoneQuery From:',self.ft
print >>ret, 'HoneQuery Where:',self.wh
print >>ret, 'HoneQuery Groupby:',self.gp
print >>ret, 'HoneQuery Every:',self.every
print >>ret, 'HoneQuery Aggregate:',self.agg
return ret.getvalue()
def convertToHoneDataFlow(self):
query = HoneQuerySerialized()
query.se = self.var
query.ft = self.ft
query.wh = self.wh
query.gp = self.gp
query.ev = self.every
query.agg = self.agg
return HoneDataFlow(query, None)
def Select(x):
def compose(q):
if q.var == None:
q.var = []
q.var = q.var+x
return q
agg = None
for i in range(0,len(x)):
if (type(x[i]) == type(tuple())):
if (agg == None):
agg = []
agg.append(x[i])
x[i] = x[i][0]
return HoneQuery(x,None,None,None,1000,agg,compose)
def From(ft):
def compose(q):
q.ft = ft
return q
return HoneQuery(None,ft,None,None,None,None,compose)
def Where(wh):
def compose(q):
if q.wh == None:
q.wh = []
q.wh = q.wh + wh
return q
return HoneQuery(None,None,wh,None,None,None,compose)
def Groupby(gp):
def compose(q):
if q.gp == None:
q.gp = []
q.gp = q.gp + gp
return q
return HoneQuery(None,None,None,gp,None,None,compose)
def Every(every):
def compose(q):
q.every = every
return q
return HoneQuery(None,None,None,None,every,None,compose)
def HoneQuerySyntaxCheck(q):
#debugLog('lib', 'syntax check of query', q.printQuery())
varOnlySupportEqualInWhere = ['app', 'srcIP', 'dstIP', 'srcPort', 'dstPort']
if q.var is None:
raise Exception('HoneQuery must at least have a Select')
if q.ft is None:
raise Exception('HoneQuery must have a From table')
if not hone_rts.HoneTableTypes.has_key(q.ft):
raise Exception('HoneQuery: No such From Table {}'.format(q.ft))
varName = []
for typ in q.var:
varName.append(typ)
if not (q.wh is None):
for (typ, op, value) in q.wh:
if not typ in varName:
raise Exception('HoneQuery: Where of not-Selected columns')
if (typ in varOnlySupportEqualInWhere) and (not (op == '==')):
raise Exception('Var {} only support == in Where clause'.format(typ))
if not (q.gp is None):
for typ in q.gp:
if not typ in varName:
raise Exception('HoneQuery: Groupby of not-Selected columns')
for typ in varName:
if not (typ in hone_rts.HoneTableTypes[q.ft]):
raise Exception('HoneQuery No type {} in Table {}'.format(typ, q.ft))
if q.agg is not None:
for (typ, op) in q.agg:
if not op in ['max', 'min', 'sum', 'avg']:
raise Exception('Only max, min, sum, avg are supported in Select {}'.format(typ))
if (q.ft == 'AppStatus'):
if 'app' not in varName:
#debugLog('lib', 'syntax check', q.printQuery())
raise Exception('Must Select \'app\' in AppStatus table')
''' operator part '''
def MapStreamSet(f):
if (isinstance(f,HoneDataFlow)):
return HoneDataFlow(None,['MapStreamSet'] + f.flow[0])
else:
return HoneDataFlow(None,['MapStreamSet', f.__name__])
def MapStream(f):
if (isinstance(f,HoneDataFlow)):
return HoneDataFlow(None,['MapStream'] + f.flow[0])
else:
return HoneDataFlow(None,['MapStream', f.__name__])
def MapList(f):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['MapList'] + f.flow[0])
else:
return HoneDataFlow(None,['MapList', f.__name__])
def FilterStreamSet(f):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['FilterStreamSet'] + f.flow[0])
else:
return HoneDataFlow(None,['FilterStreamSet', f.__name__])
def FilterStream(f):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['FilterStream'] + f.flow[0])
else:
return HoneDataFlow(None,['FilterStream', f.__name__])
def FilterList(f):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['FilterList'] + f.flow[0])
else:
return HoneDataFlow(None,['FilterList', f.__name__])
def ReduceStreamSet(f, init):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['ReduceStreamSet', init] + f.flow[0])
else:
return HoneDataFlow(None,['ReduceStreamSet', init, f.__name__])
def ReduceStream(f, init):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['ReduceStream', init] + f.flow[0])
else:
return HoneDataFlow(None,['ReduceStream', init, f.__name__])
def ReduceList(f, init):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['ReduceList', init] + f.flow[0])
else:
return HoneDataFlow(None,['ReduceList', init, f.__name__])
def MergeHosts():
return HoneDataFlow(None,['MergeHosts'])
def MergeStreams(stream1, stream2):
if isinstance(stream1, HoneQuery):
stream1 = stream1.convertToHoneDataFlow()
if isinstance(stream2, HoneQuery):
stream2 = stream2.convertToHoneDataFlow()
operator = ['MergeStreams']
stream1.addSubFlow(stream2)
operator.append(stream2.flowId)
stream1.flow.append(operator)
return stream1
def MergeStreamsForSet(stream1, stream2):
if isinstance(stream1, HoneQuery):
stream1 = stream1.convertToHoneDataFlow()
if isinstance(stream2, HoneQuery):
stream2 = stream2.convertToHoneDataFlow()
operator = ['MergeStreamsForSet']
stream1.addSubFlow(stream2)
operator.append(stream2.flowId)
stream1.flow.append(operator)
return stream1
def Print(f=None):
if f:
return HoneDataFlow(None, ['Print', f.__name__])
else:
return HoneDataFlow(None, ['Print'])
def RegisterPolicy(f=None):
return HoneDataFlow(None, ['RegisterPolicy'])
def RateLimit(rate):
return HoneDataFlow(None, ['RateLimit', rate])
def Forward(path):
return HoneDataFlow(None, ['Forward', path])
def TreeMerge(f):
return HoneDataFlow(None, ['TreeMerge', f.__name__])
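# --- Illustrative sketch (editor's addition, not part of HONE) ---
# The query combinators above compose with ``*`` and attach streaming operators
# with ``>>``. A hypothetical management-program snippet is sketched below as
# comments only, because valid column and table names depend on
# hone_rts.HoneTableTypes and are not guaranteed here:
#
#   def count_conns(acc, item):
#       return acc + 1
#
#   query = (Select(['app', 'srcPort']) *
#            From('HostConnection') *
#            Where([('app', '==', 'firefox')]) *
#            Every(2000))
#   flow = query >> ReduceStream(count_conns, 0) >> MergeHosts() >> Print()
#   print flow.printDataFlow()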
|
pupeng/hone
|
Controller/hone_lib.py
|
Python
|
bsd-3-clause
| 9,045
| 0.015478
|
""" Needed Tests
clip_to_rect() tests
--------------------
DONE *. clip_to_rect is inclusive on lower end and exclusive on upper end.
DONE *. clip_to_rect behaves intelligently under scaled ctm.
DONE *. clip_to_rect intersects input rect with the existing clipping rect.
DONE *. current rectangular clipping path is saved/restored to the stack when
save_state/restore_state are called.
DONE *. clip_to_rect clears current path.
DONE *. clip_to_rect raises NotImplementedError under a rotated ctm.
clip_to_rects() tests
---------------------
DONE *. Test that clip_to_rects raises not implemented, or whatever.
"""
import unittest
from numpy import array, transpose
import nose
from kiva.agg import GraphicsContextArray
import kiva
from test_utils import Utils
class ClipToRectTestCase(unittest.TestCase, Utils):
#------------------------------------------------------------------------
# Simple Clipping to a single rectangle.
#------------------------------------------------------------------------
def clip_to_rect_helper(self, desired, scale, clip_rects):
""" desired -- 2D array with a single channels expected byte pattern.
scale -- used in scale_ctm() to change the ctm.
clip_args -- passed in as *clip_args to clip_to_rect.
"""
shp = tuple(transpose(desired.shape))
gc = GraphicsContextArray(shp, pix_format="rgb24")
gc.scale_ctm(scale, scale)
# clear background to white values (255, 255, 255)
gc.clear((1.0, 1.0, 1.0))
if isinstance(clip_rects, tuple):
gc.clip_to_rect(*clip_rects)
else:
for rect in clip_rects:
gc.clip_to_rect(*rect)
gc.rect(0, 0, 4, 4)
# These settings allow the fastest path.
gc.set_fill_color((0.0, 0.0, 0.0)) # black
gc.fill_path()
# test a single color channel
actual = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired, actual)
def test_clip_to_rect_simple(self):
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rect = (1, 1, 2, 2)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple2(self):
desired = array([[255, 255, 255, 255],
[255, 255, 255, 255],
[255, 0, 255, 255],
[255, 255, 255, 255]])
clip_rect = (1, 1, 1, 1)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_negative(self):
desired = array([[255, 255, 255, 255],
[ 0, 0, 0, 255],
[ 0, 0, 0, 255],
[ 0, 0, 0, 255]])
clip_rect = (-1, -1, 4, 4)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple3(self):
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rect = (1, 1, 2.49, 2.49)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple4(self):
desired = array([[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 255, 255, 255]])
clip_rect = (1, 1, 2.5, 2.5)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple5(self):
# This tests clipping with a larger rectangle
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rects = [(1, 1, 2, 2), (0, 0, 4, 4)]
self.clip_to_rect_helper(desired, 1, clip_rects)
def test_empty_clip_region(self):
# This tests when the clipping region is clipped down to nothing.
desired = array([[255, 255, 255, 255],
[255, 255, 255, 255],
[255, 255, 255, 255],
[255, 255, 255, 255]])
clip_rects = [(1,1,4,4), (3,3,1,1), (1,1,1,1)]
self.clip_to_rect_helper(desired, 1, clip_rects)
def test_clip_to_rect_scaled(self):
desired = array([[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255]])
clip_rect = (1, 1, 2, 2)
self.clip_to_rect_helper(desired, 2.0, clip_rect)
def test_clip_to_rect_scaled2(self):
desired = array([[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255]])
clip_rect = (1, 1, 2.25, 2.25)
self.clip_to_rect_helper(desired, 2.0, clip_rect)
def test_save_restore_clip_state(self):
desired1 = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
desired2 = array([[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 255, 255, 255]])
gc = GraphicsContextArray((4,4), pix_format="rgb24")
gc.clear((1.0, 1.0, 1.0))
gc.set_fill_color((0.0, 0.0, 0.0))
gc.clip_to_rect(1, 1, 3, 3)
gc.save_state()
gc.clip_to_rect(1, 1, 2, 2)
gc.rect(0, 0, 4, 4)
gc.fill_path()
actual1 = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired1, actual1)
gc.restore_state()
gc.rect(0, 0, 4, 4)
gc.fill_path()
actual2 = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired2, actual2)
def test_clip_to_rect_rotated(self):
# FIXME: test skipped
# This test raises an exception currently because the
# underlying library doesn't handle clipping to a rotated
# rectangle. For now, we catch the case with an
# exception, so that people can't screw up. In the future,
# we should actually support this functionality.
raise nose.SkipTest
gc = GraphicsContextArray((1,1), pix_format="rgb24")
gc.rotate_ctm(1.0)
self.failUnlessRaises(NotImplementedError,
gc.clip_to_rect, 0, 0, 1, 1)
#------------------------------------------------------------------------
# Successive Clipping of multiple rectangles.
#------------------------------------------------------------------------
def successive_clip_helper(self, desired, scale,
clip_rect1, clip_rect2):
""" desired -- 2D array with a single channels expected byte pattern.
scale -- used in scale_ctm() to change the ctm.
clip_rect1 -- 1st clipping path.
clip_rect2 -- 2nd clipping path.
"""
shp = tuple(transpose(desired.shape))
gc = GraphicsContextArray(shp, pix_format="rgb24")
gc.scale_ctm(scale, scale)
# clear background to white values (255, 255, 255)
gc.clear((1.0, 1.0, 1.0))
gc.clip_to_rect(*clip_rect1)
gc.clip_to_rect(*clip_rect2)
gc.rect(0, 0, 4, 4)
# These settings allow the fastest path.
gc.set_fill_color((0.0, 0.0, 0.0)) # black
gc.fill_path()
# test a single color channel
actual = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired, actual)
def test_clip_successive_rects(self):
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rect1 = (1, 1, 20, 20)
clip_rect2 = (0, 0, 3, 3)
self.successive_clip_helper(desired, 1.0, clip_rect1, clip_rect2)
def test_clip_successive_rects2(self):
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rect1 = (1, 1, 20, 20)
clip_rect2 = (-1, -1, 4, 4)
self.successive_clip_helper(desired, 1.0, clip_rect1, clip_rect2)
#------------------------------------------------------------------------
# Save/Restore clipping path.
#------------------------------------------------------------------------
def test_save_restore_clip_path(self):
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
# this is the clipping path we hope to see.
clip_rect1 = (1, 1, 2, 2)
# this is a second clipping path that is pushed and popped and should
# never be seen.
clip_rect2 = (1, 1, 1, 1)
shp = tuple(transpose(desired.shape))
gc = GraphicsContextArray(shp, pix_format="rgb24")
# clear background to white values (255, 255, 255)
gc.clear((1.0, 1.0, 1.0))
gc.clip_to_rect(*clip_rect1)
# push and then pop a path that shouldn't affect the drawing
gc.save_state()
gc.clip_to_rect(*clip_rect2)
gc.restore_state()
gc.rect(0, 0, 4, 4)
# These settings allow the fastest path.
gc.set_fill_color((0.0, 0.0, 0.0)) # black
gc.fill_path()
# test a single color channel
actual = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired, actual)
def test_reset_path(self):
""" clip_to_rect() should clear the current path.
This is to maintain compatibility with the version
of kiva that sits on top of Apple's Quartz engine.
"""
desired = array([[255, 255, 0, 0],
[255, 255, 0, 0],
[255, 255, 0, 0],
[255, 255, 0, 0]])
shp = tuple(transpose(desired.shape))
gc = GraphicsContextArray(shp, pix_format="rgb24")
# clear background to white values (255, 255, 255)
gc.clear((1.0, 1.0, 1.0))
gc.rect(0, 0, 2, 4)
gc.clip_to_rect(0, 0, 4, 4)
gc.rect(2, 0, 2, 4)
# These settings allow the fastest path.
gc.set_fill_color((0.0, 0.0, 0.0)) # black
gc.fill_path()
# test a single color channel
actual = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired, actual)
class ClipToRectsTestCase(unittest.TestCase):
def test_not_implemented(self):
""" fix me: Currently not implemented, so we just ensure that
any call to it throws an exception.
"""
gc = GraphicsContextArray((1,1), pix_format="rgb24")
gc.rotate_ctm(1.0)
#self.failUnlessRaises(NotImplementedError, gc.clip_to_rects, [[0, 0, 1, 1]])
if __name__ == "__main__":
unittest.main()
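# A minimal sketch (not part of the original test suite) of the pattern the helpers
# above exercise: successive clip_to_rect() calls intersect, so only the overlap is
# filled. The 4x4 size and the rectangles below are illustrative.
def _successive_clip_sketch():
    gc = GraphicsContextArray((4, 4), pix_format="rgb24")
    gc.clear((1.0, 1.0, 1.0))            # white background
    gc.clip_to_rect(1, 1, 20, 20)
    gc.clip_to_rect(0, 0, 3, 3)          # effective clip is the intersection
    gc.rect(0, 0, 4, 4)
    gc.set_fill_color((0.0, 0.0, 0.0))   # black fill
    gc.fill_path()
    return gc.bmp_array[:, :, 0]         # 0 inside the clipped region, 255 elsewhere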
|
tommy-u/enable
|
kiva/agg/tests/clip_to_rect_test_case.py
|
Python
|
bsd-3-clause
| 12,045
| 0.003653
|
"""The tests for the Splunk component."""
import unittest
from unittest import mock
from homeassistant.setup import setup_component
import homeassistant.components.splunk as splunk
from homeassistant.const import STATE_ON, STATE_OFF, EVENT_STATE_CHANGED
from tests.common import get_test_home_assistant
class TestSplunk(unittest.TestCase):
"""Test the Splunk component."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_setup_config_full(self):
"""Test setup with all data."""
config = {
'splunk': {
'host': 'host',
'port': 123,
'token': 'secret',
'ssl': 'False',
'name': 'hostname',
}
}
self.hass.bus.listen = mock.MagicMock()
self.assertTrue(setup_component(self.hass, splunk.DOMAIN, config))
self.assertTrue(self.hass.bus.listen.called)
self.assertEqual(EVENT_STATE_CHANGED,
self.hass.bus.listen.call_args_list[0][0][0])
def test_setup_config_defaults(self):
"""Test setup with defaults."""
config = {
'splunk': {
'host': 'host',
'token': 'secret',
}
}
self.hass.bus.listen = mock.MagicMock()
self.assertTrue(setup_component(self.hass, splunk.DOMAIN, config))
self.assertTrue(self.hass.bus.listen.called)
self.assertEqual(EVENT_STATE_CHANGED,
self.hass.bus.listen.call_args_list[0][0][0])
def _setup(self, mock_requests):
"""Test the setup."""
self.mock_post = mock_requests.post
self.mock_request_exception = Exception
mock_requests.exceptions.RequestException = self.mock_request_exception
config = {
'splunk': {
'host': 'host',
'token': 'secret',
'port': 8088,
}
}
self.hass.bus.listen = mock.MagicMock()
setup_component(self.hass, splunk.DOMAIN, config)
self.handler_method = self.hass.bus.listen.call_args_list[0][0][1]
@mock.patch.object(splunk, 'requests')
@mock.patch('json.dumps')
def test_event_listener(self, mock_dump, mock_requests):
"""Test event listener."""
mock_dump.side_effect = lambda x: x
self._setup(mock_requests)
valid = {'1': 1,
'1.0': 1.0,
STATE_ON: 1,
STATE_OFF: 0,
'foo': 'foo',
}
for in_, out in valid.items():
state = mock.MagicMock(state=in_,
domain='fake',
object_id='entity',
attributes={})
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'domain': 'fake',
'entity_id': 'entity',
'attributes': {},
'time': '12345',
'value': out,
'host': 'HASS',
}]
payload = {'host': 'http://host:8088/services/collector/event',
'event': body}
self.handler_method(event)
self.assertEqual(self.mock_post.call_count, 1)
self.assertEqual(
self.mock_post.call_args,
mock.call(
payload['host'], data=payload,
headers={'Authorization': 'Splunk secret'},
timeout=10
)
)
self.mock_post.reset_mock()
|
MungoRae/home-assistant
|
tests/components/test_splunk.py
|
Python
|
apache-2.0
| 3,879
| 0
|
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class ParameterGroup(dict):
def __init__(self, connection=None):
dict.__init__(self)
self.connection = connection
self.name = None
self.description = None
self.engine = None
self._current_param = None
def __repr__(self):
return 'ParameterGroup:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'Parameter':
if self._current_param:
self[self._current_param.name] = self._current_param
self._current_param = Parameter(self)
return self._current_param
def endElement(self, name, value, connection):
if name == 'DBParameterGroupName':
self.name = value
elif name == 'Description':
self.description = value
elif name == 'Engine':
self.engine = value
else:
setattr(self, name, value)
def modifiable(self):
mod = []
for key in self:
p = self[key]
if p.is_modifiable:
mod.append(p)
return mod
def get_params(self):
pg = self.connection.get_all_dbparameters(self.name)
self.update(pg)
def add_param(self, name, value, apply_method):
param = Parameter()
param.name = name
param.value = value
param.apply_method = apply_method
self[name] = param
class Parameter(object):
"""
Represents a RDS Parameter
"""
ValidTypes = {'integer' : int,
'string' : str,
'boolean' : bool}
ValidSources = ['user', 'system', 'engine-default']
ValidApplyTypes = ['static', 'dynamic']
ValidApplyMethods = ['immediate', 'pending-reboot']
def __init__(self, group=None, name=None):
self.group = group
self.name = name
self._value = None
self.type = str
self.source = None
self.is_modifiable = True
self.description = None
self.apply_method = None
self.allowed_values = None
def __repr__(self):
return 'Parameter:%s' % self.name
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'ParameterName':
self.name = value
elif name == 'ParameterValue':
self._value = value
elif name == 'DataType':
if value in self.ValidTypes:
self.type = value
elif name == 'Source':
if value in self.ValidSources:
self.source = value
elif name == 'IsModifiable':
if value.lower() == 'true':
self.is_modifiable = True
else:
self.is_modifiable = False
elif name == 'Description':
self.description = value
elif name == 'ApplyType':
if value in self.ValidApplyTypes:
self.apply_type = value
elif name == 'AllowedValues':
self.allowed_values = value
else:
setattr(self, name, value)
def merge(self, d, i):
prefix = 'Parameters.member.%d.' % i
if self.name:
d[prefix+'ParameterName'] = self.name
if self._value is not None:
d[prefix+'ParameterValue'] = self._value
if self.apply_type:
d[prefix+'ApplyMethod'] = self.apply_method
def _set_string_value(self, value):
if not isinstance(value, (str, unicode)):
raise ValueError, 'value must be of type str'
if self.allowed_values:
choices = self.allowed_values.split(',')
if value not in choices:
raise ValueError, 'value must be in %s' % self.allowed_values
self._value = value
def _set_integer_value(self, value):
if isinstance(value, str) or isinstance(value, unicode):
value = int(value)
if isinstance(value, int) or isinstance(value, long):
if self.allowed_values:
min, max = self.allowed_values.split('-')
if value < int(min) or value > int(max):
raise ValueError, 'range is %s' % self.allowed_values
self._value = value
else:
raise ValueError, 'value must be integer'
def _set_boolean_value(self, value):
if isinstance(value, bool):
self._value = value
elif isinstance(value, str) or isinstance(value, unicode):
if value.lower() == 'true':
self._value = True
else:
self._value = False
else:
raise ValueError, 'value must be boolean'
def set_value(self, value):
if self.type == 'string':
self._set_string_value(value)
elif self.type == 'integer':
self._set_integer_value(value)
elif self.type == 'boolean':
self._set_boolean_value(value)
else:
raise TypeError, 'unknown type (%s)' % self.type
def get_value(self):
if self._value == None:
return self._value
if self.type == 'string':
return self._value
elif self.type == 'integer':
if not isinstance(self._value, int) and not isinstance(self._value, long):
self._set_integer_value(self._value)
return self._value
elif self.type == 'boolean':
if not isinstance(self._value, bool):
self._set_boolean_value(self._value)
return self._value
else:
raise TypeError, 'unknown type (%s)' % self.type
value = property(get_value, set_value, 'The value of the parameter')
def apply(self, immediate=False):
if immediate:
self.apply_method = 'immediate'
else:
self.apply_method = 'pending-reboot'
self.group.connection.modify_parameter_group(self.group.name, [self])
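# A minimal usage sketch (not part of the original module). The region, parameter
# group name and parameter name are hypothetical; get_all_dbparameters() and
# modify_parameter_group() are the connection calls already referenced above.
def _example_usage():
    import boto.rds
    conn = boto.rds.connect_to_region('us-east-1')
    pg = conn.get_all_dbparameters('my-param-group')   # returns a ParameterGroup
    param = pg['max_connections']
    if param.is_modifiable:
        param.value = 250                 # validated by the type-specific setters above
        param.apply(immediate=False)      # queues the change as 'pending-reboot'
    return pg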
|
darcyliu/storyboard
|
boto/rds/parametergroup.py
|
Python
|
mit
| 7,126
| 0.002666
|
"""Task Utilities.
@see: Cake Build System (http://sourceforge.net/projects/cake-build)
@copyright: Copyright (c) 2010 Lewis Baker, Stuart McMahon.
@license: Licensed under the MIT license.
"""
import sys
import threading
_threadPool = None
_threadPoolLock = threading.Lock()
def setThreadPool(threadPool):
"""Set the default thread pool to use for executing new tasks.
@param threadPool: The new default thread pool.
@return: The previous default thread pool. This is initially None.
"""
global _threadPool, _threadPoolLock
_threadPoolLock.acquire()
try:
oldThreadPool = _threadPool
_threadPool = threadPool
finally:
_threadPoolLock.release()
return oldThreadPool
def getDefaultThreadPool():
"""Get the current default thread pool for new tasks.
If no default thread pool exists then one will be created automatically.
"""
global _threadPool, _threadPoolLock
if _threadPool is None:
import cake.threadpool
processorCount = cake.threadpool.getProcessorCount()
_threadPoolLock.acquire()
try:
if _threadPool is None:
_threadPool = cake.threadpool.ThreadPool(numWorkers=processorCount)
finally:
_threadPoolLock.release()
return _threadPool
class TaskError(Exception):
"""An exception type raised by the L{Task} class.
"""
pass
def _makeTasks(value):
if value is None:
return []
elif isinstance(value, Task):
return [value]
else:
return list(value)
class Task(object):
"""An operation that is performed on a background thread.
"""
class State(object):
"""A class that represents the state of a L{Task}.
"""
NEW = "new"
"""The task is in an uninitialised state."""
WAITING_FOR_START = "waiting for start"
"""The task is waiting to be started."""
RUNNING = "running"
"""The task is running."""
WAITING_FOR_COMPLETE = "waiting for complete"
"""The task is waiting to complete."""
SUCCEEDED = "succeeded"
"""The task has succeeded."""
FAILED = "failed"
"""The task has failed."""
_current = threading.local()
def __init__(self, func=None):
"""Construct a task given a function.
@param func: The function this task should run.
@type func: any callable
"""
self._func = func
self._immediate = None
self._threadPool = None
self._required = False
self._parent = Task.getCurrent()
self._state = Task.State.NEW
self._lock = threading.Lock()
self._startAfterCount = 0
self._startAfterFailures = False
self._startAfterDependencies = None
self._completeAfterCount = 0
self._completeAfterFailures = False
self._completeAfterDependencies = None
self._callbacks = []
@staticmethod
def getCurrent():
"""Get the currently executing task.
@return: The currently executing Task or None if no current task.
@rtype: Task or None
"""
return getattr(Task._current, "value", None)
@property
def state(self):
"""Get the state of this task.
"""
return self._state
@property
def parent(self):
"""Get the parent of this task.
The parent task is the task that created this task.
"""
return self._parent
@property
def required(self):
"""True if this task is required to execute, False if it
has not yet been required to execute.
"""
return self._required
@property
def started(self):
"""True if this task has been started.
A task is started if start(), startAfter(), lazyStart(),
lazyStartAfter() or cancel() has been called on it.
"""
return self._state is not Task.State.NEW
@property
def completed(self):
"""True if this task has finished execution or has been cancelled.
"""
s = self._state
return s is Task.State.SUCCEEDED or s is Task.State.FAILED
@property
def succeeded(self):
"""True if this task successfully finished execution.
"""
return self._state is Task.State.SUCCEEDED
@property
def failed(self):
"""True if this task failed or was cancelled.
"""
return self._state is Task.State.FAILED
@property
def result(self):
"""If the task has completed successfully then holds the
return value of the task, otherwise raises AttributeError.
"""
if self.succeeded:
task = self
while isinstance(task._result, Task):
task = task._result
return task._result
else:
raise AttributeError("result only available on successful tasks")
def lazyStart(self, threadPool=None):
"""Start this task only if required as a dependency of another 'required' task.
A 'required' task is a task that is started eagerly using L{start()} or L{startAfter()}
or a task that is a dependency of a 'required' task.
If no other required tasks have this task as a dependency then this task will never
be executed. i.e. it is a lazy task.
"""
self._start(other=None, immediate=False, required=False, threadPool=threadPool)
def lazyStartAfter(self, other, threadPool=None):
"""Start this task only if required as a dependency of another 'required' task.
But do not start this task until the 'other' tasks have completed.
If any of the other tasks complete with failure then this task will complete
with failure without being executed.
"""
self._start(other=other, immediate=False, required=False, threadPool=threadPool)
def start(self, immediate=False, threadPool=None):
"""Start this task now.
@param immediate: If True the task is pushed ahead of any other (waiting)
tasks on the task queue.
@type immediate: bool
@param threadPool: If specified then the task will be queued up to be
executed on the specified thread-pool. If not specified then the task
will be queued for execution on the default thread-pool.
@type threadPool: L{ThreadPool} or C{None}
@raise TaskError: If this task has already been started or
cancelled.
"""
self._start(other=None, immediate=immediate, required=True, threadPool=threadPool)
def startAfter(self, other, immediate=False, threadPool=None):
"""Start this task after other tasks have completed.
This task is cancelled (transition to Task.State.FAILED state) if any of the
other tasks fail.
@param other: The task or a list of tasks to start after.
@type other: L{Task} or C{list}(L{Task})
@param immediate: If True the task is pushed ahead of any other (waiting)
tasks on the task queue.
@type immediate: bool
@param threadPool: An optional thread pool to start this task on.
If not specified then the task is queued to the default thread-pool.
@type threadPool: L{ThreadPool} or None
@raise TaskError: If this task has already been started or
cancelled.
"""
self._start(other=other, immediate=immediate, required=True, threadPool=threadPool)
def _start(self, other, immediate, required, threadPool):
immediate = bool(immediate)
required = bool(required)
otherTasks = _makeTasks(other)
if threadPool is None:
threadPool = getDefaultThreadPool()
self._lock.acquire()
try:
if self._state is not Task.State.NEW:
raise TaskError("task already started")
self._state = Task.State.WAITING_FOR_START
self._startAfterCount = len(otherTasks) + 1
self._immediate = immediate
self._threadPool = threadPool
if required:
self._required = True
else:
required = self._required
if required:
completeAfterDependencies = self._completeAfterDependencies
self._completeAfterDependencies = None
else:
self._startAfterDependencies = otherTasks
finally:
self._lock.release()
if required:
for t in otherTasks:
t._require()
t.addCallback(lambda t=t: self._startAfterCallback(t))
if completeAfterDependencies:
for t in completeAfterDependencies:
t._require()
t.addCallback(lambda t=t: self._completeAfterCallback(t))
self._startAfterCallback(self)
def _require(self):
"""Flag this task as required.
If this task was started with a call to lazyStart/lazyStartAfter()
and has not yet been required by some other Task then this will
cause this task and all of its dependencies to become required.
"""
if self.required:
return
startAfterDependencies = None
completeAfterDependencies = None
self._lock.acquire()
try:
alreadyRequired = self.required
if not alreadyRequired:
startAfterDependencies = self._startAfterDependencies
completeAfterDependencies = self._completeAfterDependencies
self._startAfterDependencies = None
self._completeAfterDependencies = None
self._required = True
finally:
self._lock.release()
if not alreadyRequired:
if startAfterDependencies:
for t in startAfterDependencies:
t._require()
t.addCallback(lambda t=t: self._startAfterCallback(t))
if completeAfterDependencies:
for t in completeAfterDependencies:
t._require()
t.addCallback(lambda t=t: self._completeAfterCallback(t))
self._startAfterCallback(self)
def _startAfterCallback(self, task):
"""Callback that is called by each task we must start after.
"""
callbacks = None
self._lock.acquire()
try:
# If one task fails we should fail too
if task.failed:
self._startAfterFailures = True
# Wait for all other tasks to complete
self._startAfterCount -= 1
if self._startAfterCount > 0:
return
# Someone may have eg. cancelled us already
if self._state is not Task.State.WAITING_FOR_START:
return
if self._startAfterFailures:
self._state = Task.State.FAILED
callbacks = self._callbacks
self._callbacks = None
else:
self._state = Task.State.RUNNING
finally:
self._lock.release()
if callbacks is None:
# Task is ready to start executing, queue to thread-pool.
self._threadPool.queueJob(self._execute, front=self._immediate)
else:
# Task was cancelled, call callbacks now
for callback in callbacks:
callback()
def _execute(self):
"""Actually execute this task.
This should typically be run on a background thread.
"""
if self._state is not Task.State.RUNNING:
assert self._state is Task.State.FAILED, "should have been cancelled"
return
callbacks = None
try:
old = self.getCurrent()
self._current.value = self
# Don't hold onto the func after it has been executed so it can
# be garbage collected.
func = self._func
self._func = None
try:
if func is not None:
result = func()
else:
result = None
finally:
self._current.value = old
# If the result of the task was another task
# then our result will be the same as that other
# task's result. So make sure we don't complete
# before the other task does.
if isinstance(result, Task):
self.completeAfter(result)
self._lock.acquire()
try:
self._result = result
if self._state is Task.State.RUNNING:
if not self._completeAfterCount:
callbacks = self._callbacks
self._callbacks = None
if not self._completeAfterFailures:
self._state = Task.State.SUCCEEDED
else:
self._state = Task.State.FAILED
else:
self._state = Task.State.WAITING_FOR_COMPLETE
else:
assert self._state is Task.State.FAILED, "should have been cancelled"
finally:
self._lock.release()
except Exception, e:
trace = sys.exc_info()[2]
self._lock.acquire()
try:
self._exception = e
self._trace = trace
if self._state is Task.State.RUNNING:
if not self._completeAfterCount:
callbacks = self._callbacks
self._callbacks = None
self._state = Task.State.FAILED
else:
self._state = Task.State.WAITING_FOR_COMPLETE
else:
assert self._state is Task.State.FAILED, "should have been cancelled"
finally:
self._lock.release()
if callbacks:
for callback in callbacks:
callback()
def completeAfter(self, other):
"""Make sure this task doesn't complete until other tasks have completed.
@param other: The Task or list of Tasks to wait for.
@type other: L{Task} or C{list}(L{Task})
@raise TaskError: If this task has already finished executing.
"""
otherTasks = _makeTasks(other)
self._lock.acquire()
try:
if self.completed:
raise TaskError("Task function has already finished executing.")
required = self.required
if not required:
# This task is not yet required.
# Record its dependencies in case it later becomes required.
dependencies = self._completeAfterDependencies
if dependencies is None:
self._completeAfterDependencies = otherTasks
else:
dependencies.extend(otherTasks)
self._completeAfterCount += len(otherTasks)
finally:
self._lock.release()
if required:
# This task was already required so we'll require the new
# dependencies immediately.
for t in otherTasks:
t._require()
t.addCallback(lambda t=t: self._completeAfterCallback(t))
def _completeAfterCallback(self, task):
"""Callback that is called by each task we must complete after.
"""
callbacks = None
self._lock.acquire()
try:
self._completeAfterCount -= 1
if task.failed:
self._completeAfterFailures = True
if self._state is Task.State.WAITING_FOR_COMPLETE and self._completeAfterCount == 0:
if hasattr(self, "_result") and not self._completeAfterFailures:
self._state = Task.State.SUCCEEDED
else:
self._state = Task.State.FAILED
callbacks = self._callbacks
self._callbacks = None
finally:
self._lock.release()
if callbacks:
for callback in callbacks:
callback()
def cancel(self):
"""Cancel this task if it hasn't already started.
Completes the task, setting its state to Task.State.FAILED.
@raise TaskError: if the task has already completed.
"""
self._lock.acquire()
try:
if self.completed:
raise TaskError("Task already completed")
self._state = Task.State.FAILED
callbacks = self._callbacks
self._callbacks = None
finally:
self._lock.release()
for callback in callbacks:
callback()
def addCallback(self, callback):
"""Register a callback to be run when this task is complete.
@param callback: The callback to add.
@type callback: any callable
"""
if not self.completed:
self._lock.acquire()
try:
callbacks = self._callbacks
if callbacks is not None:
# Task is not yet complete, queue up callback to execute later.
callbacks.append(callback)
return
finally:
self._lock.release()
callback()
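# A minimal usage sketch (not part of the original module), using only the Task API
# defined above; the build/package functions are placeholder callables.
def _example_usage():
    def build():
        return "built"
    def package():
        return "packaged"
    buildTask = Task(build)
    packageTask = Task(package)
    buildTask.start()                    # queue on the default thread pool
    packageTask.startAfter(buildTask)    # runs only after buildTask succeeds
    packageTask.addCallback(
        lambda: sys.stdout.write("package task completed\n"))
    return buildTask, packageTask
# Once packageTask completes, packageTask.succeeded is True and
# packageTask.result == "packaged".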
|
lewissbaker/cake
|
src/cake/task.py
|
Python
|
mit
| 15,540
| 0.013964
|
# -*- coding: utf-8 -*-
#
# web_container.py
#
# Copyright © 2016-2017 Antergos
#
# This file is part of whither.
#
# whither is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# whither is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with whither; If not, see <http://www.gnu.org/licenses/>.
|
Antergos/whither
|
whither/toolkits/gtk/web_container.py
|
Python
|
gpl-3.0
| 985
| 0
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import network
ALIAS = 'os-floating-ip-pools'
authorize = extensions.os_compute_authorizer(ALIAS)
def _translate_floating_ip_view(pool_name):
return {
'name': pool_name,
}
def _translate_floating_ip_pools_view(pools):
return {
'floating_ip_pools': [_translate_floating_ip_view(pool_name)
for pool_name in pools]
}
class FloatingIPPoolsController(wsgi.Controller):
"""The Floating IP Pool API controller for the OpenStack API."""
def __init__(self):
self.network_api = network.API(skip_policy_check=True)
super(FloatingIPPoolsController, self).__init__()
@extensions.expected_errors(())
def index(self, req):
"""Return a list of pools."""
context = req.environ['nova.context']
authorize(context)
pools = self.network_api.get_floating_ip_pools(context)
return _translate_floating_ip_pools_view(pools)
class FloatingIpPools(extensions.V21APIExtensionBase):
"""Floating IPs support."""
name = "FloatingIpPools"
alias = ALIAS
version = 1
def get_resources(self):
resource = [extensions.ResourceExtension(ALIAS,
FloatingIPPoolsController())]
return resource
def get_controller_extensions(self):
"""It's an abstract function V21APIExtensionBase and the extension
will not be loaded without it.
"""
return []
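# Illustrative only (not part of the original module): how the view helpers above
# shape the index() response for a hypothetical list of pool names.
def _example_view():
    pools = ['nova', 'public']
    # => {'floating_ip_pools': [{'name': 'nova'}, {'name': 'public'}]}
    return _translate_floating_ip_pools_view(pools)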
|
scripnichenko/nova
|
nova/api/openstack/compute/floating_ip_pools.py
|
Python
|
apache-2.0
| 2,196
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
from django.conf import settings
import json
import sys
import csv
from datetime import datetime, date, time
from bson.code import Code
from bson.objectid import ObjectId
from bson.errors import InvalidId
from bson import json_util
from pymongo import MongoClient, DESCENDING
from collections import OrderedDict
def checkObjectId(s):
try:
ObjectId(s)
except InvalidId:
return False
return True
def run_aggregation_pipeline(database_name, collection_name, pipeline):
result = False
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url)
db = mc[str(database_name)]
collection = db[str(collection_name)]
# explain = db.command('aggregate', collection, pipeline=pipeline, explain=True)
# print explain
collection.aggregate(pipeline)
# print agg_result
result = True
return result
def to_json(results_dict):
return json.dumps(results_dict, indent=4, default=json_util.default)
def normalize_results(results_dict):
mydt = datetime.now()
myd = date.today()
myt = time(0, 0)
for r in results_dict['results']:
for k, v in r.items():
if isinstance(r[k], type(mydt)) or \
isinstance(r[k], type(myd)) or \
isinstance(r[k], type(myt)):
r[k] = v.__str__()
# print r[k]
return results_dict
def normalize_list(results_list):
mydt = datetime.now()
for r in results_list:
for k, v in r.items():
if isinstance(r[k], type(mydt)):
r[k] = v.__str__()
return results_list
def query_mongo(
database_name,
collection_name,
query={},
include_num_results="0",
skip=0,
sort=None,
limit=getattr(
settings,
'MONGO_LIMIT',
200),
cast_strings_to_integers=False,
return_keys=()):
"""return a response_dict with a list of search results"""
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
# Cast the query to integers
if cast_strings_to_integers:
query = cast_number_strings_to_integers(query)
# print query
if return_keys:
return_dict = {}
for k in return_keys:
return_dict[k] = 1
# print "returndict=",return_dict
mysearchresult = collection.find(
query, return_dict).skip(skip).limit(limit)
else:
mysearchresult = collection.find(query).skip(skip).limit(limit)
if sort:
mysearchresult.sort(sort)
response_dict['code'] = 200
if include_num_results == "1":
response_dict['num_results'] = int(
    mysearchresult.count(with_limit_and_skip=False))
if include_num_results == "2":
response_dict['num_results'] = int(
    mysearchresult.count(with_limit_and_skip=True))
response_dict['type'] = "search-results"
for d in mysearchresult:
d['id'] = d['_id'].__str__()
del d['_id']
l.append(d)
response_dict['results'] = l
except Exception:
print("Error reading from Mongo")
print(str(sys.exc_info()))
response_dict['num_results'] = 0
response_dict['code'] = 500
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict['message'] = str(sys.exc_info())
return response_dict
def query_mongo_sort_decend(
database_name,
collection_name,
query={},
skip=0,
limit=getattr(
settings,
'MONGO_LIMIT',
200),
return_keys=(),
sortkey=None):
"""return a response_dict with a list of search results in decending
order based on a sort key
"""
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
if return_keys:
return_dict = {}
for k in return_keys:
return_dict[k] = 1
# print "returndict=",return_dict
mysearchresult = collection.find(
query, return_dict).skip(skip).limit(limit).sort(
sortkey, DESCENDING)
else:
mysearchresult = collection.find(query).skip(
skip).limit(limit).sort(sortkey, DESCENDING)
# response_dict['num_results']=int(mysearchresult.count(with_limit_and_skip=False))
response_dict['code'] = 200
response_dict['type'] = "search-results"
for d in mysearchresult:
d['id'] = d['_id'].__str__()
del d['_id']
l.append(d)
response_dict['results'] = l
except Exception:
print("Error reading from Mongo")
print(str(sys.exc_info()))
response_dict['num_results'] = 0
response_dict['code'] = 500
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict['message'] = str(sys.exc_info())
return response_dict
def delete_mongo(database_name, collection_name,
query={}, just_one=False):
"""delete from mongo helper"""
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
mysearchresult = collection.remove(query, just_one)
response_dict['code'] = 200
response_dict['type'] = "remove-confirmation"
except Exception:
# print "Error reading from Mongo"
# print str(sys.exc_info())
response_dict['num_results'] = 0
response_dict['code'] = 500
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict['message'] = str(sys.exc_info())
return response_dict
def write_mongo(document, database_name,
collection_name, update=False):
"""Write a document to the collection. Return a response_dict containing
the written record. Method functions as both insert or update based on update
parameter"""
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
# Cast the query to integers
# if settings.CAST_STRINGS_TO_INTEGERS:
# query = cast_number_strings_to_integers(query)
potential_key_found = False
existing_transaction_id = None
existing_mongo_id = None
# enforce non-repudiation constraint on create
# if document.has_key("transaction_id"):
# existing_transaction_id = collection.find_one({'transaction_id':document['transaction_id']})
# if existing_transaction_id:
# potential_key_found = True
if "id" in document:
document["_id"] = ObjectId(document["id"])
del document["id"]
if "_id" in document:
existing_mongo_id = collection.find_one({'_id': document['_id']})
if existing_mongo_id:
potential_key_found = True
if not update and potential_key_found:
"""409 conflict"""
response_dict['code'] = 409
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict[
'message'] = "Perhaps you meant to perform an update instead?"
response_dict['errors'] = [
"Conflict. This transaction_id has already been created.", ]
return response_dict
elif update and potential_key_found: # this is an update
# set kwargs _id to the existing_id to force to overwrite existing
# document
# if existing_transaction_id:
#
# document['_id'] = ObjectId(existing_transaction_id['_id'])
# document['history']=True
# history_collection_name = "%s_history" % str(collection_name)
# history_collection = db[str(history_collection_name)]
#
# history_object = existing_transaction_id
# history_object['historical_id'] = existing_transaction_id['_id']
# del history_object['_id']
# #now write the record to the historical collection
# written_object = history_collection.insert(history_object)
if existing_mongo_id:
document['_id'] = ObjectId(existing_mongo_id['_id'])
document['history'] = True
history_collection_name = "%s_history" % str(collection_name)
history_collection = db[str(history_collection_name)]
# print history_collection
# print existing_mongo_id
history_object = existing_mongo_id
history_object['historical_id'] = existing_mongo_id['_id']
del history_object['_id']
# print history_object
# now write the record to the historical collection
written_object = history_collection.insert(history_object)
# update the record
myobjectid = collection.save(document)
else:
# this is new so perform an insert.
myobjectid = collection.insert(document)
# now fetch the record we just wrote so that we write it back to the
# DB.
myobject = collection.find_one({'_id': myobjectid})
response_dict['code'] = 200
response_dict['type'] = "write-results"
myobject['id'] = myobject['_id'].__str__()
del myobject['_id']
l.append(myobject)
response_dict['results'] = l
except Exception:
# print "Error reading from Mongo"
# print str(sys.exc_info())
response_dict['code'] = 400
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict['message'] = str(sys.exc_info())
return response_dict
def bulk_csv_import_mongo(csvfile, database_name, collection_name,
delete_collection_before_import=False):
"""return a response_dict with a list of search results"""
"""method can be insert or update"""
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mconnection = MongoClient(
mongodb_client_url, document_class=OrderedDict)
db = mconnection[database_name]
collection = db[collection_name]
if delete_collection_before_import:
myobjectid = collection.remove({})
# open the csv file.
csvhandle = csv.reader(open(csvfile._get_path(), 'rb'), delimiter=',')
rowindex = 0
errors = 0
error_list = []
success = 0
for row in csvhandle:
if rowindex == 0:
column_headers = row
cleaned_headers = []
for c in column_headers:
c = c.replace(".", "")
c = c.replace("$", "-")
c = c.replace(" ", "_")
cleaned_headers.append(c)
else:
record = OrderedDict(zip(cleaned_headers, row))
# if there is no values, skip the key value pair
kwargs = OrderedDict()
# Only populate fields that are not blank.
for k, v in record.items():
if v:
if v.isdigit():
kwargs[k] = int(v)
else:
kwargs[k] = v
try:
myobjectid = collection.insert(kwargs)
success += 1
except Exception:
error_message = "Error on row " + \
rowindex + ". " + str(sys.exc_info())
error_list.append(str(sys.exc_info()))
rowindex += 1
if error_list:
response_dict = {}
response_dict['num_rows_imported'] = rowindex
response_dict['num_rows_errors'] = len(error_list)
response_dict['errors'] = error_list
response_dict['code'] = 400
response_dict['message'] = "Completed with errors"
else:
response_dict = {}
response_dict['num_rows_imported'] = success
response_dict['code'] = 200
response_dict['message'] = "Completed."
return response_dict
except Exception:
# print "Error reading from Mongo"
# print str(sys.exc_info())
response_dict['num_results'] = 0
response_dict['code'] = 400
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict['message'] = str(sys.exc_info())
return response_dict
def build_non_observational_key(k):
if str(k).__contains__("__"):
model_field_split = str(k).split("__")
newlabel = "%s_" % (model_field_split[0])
field_occurence_split = str(model_field_split[1]).split("_")
for i in field_occurence_split[:-1]:
newlabel = "%s_%s" % (newlabel, i)
return newlabel
return k
def get_collection_keys(database_name, collection_name):
l = []
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mconnection = MongoClient(
mongodb_client_url, document_class=OrderedDict)
db = mconnection[database_name]
ckey_collection = "%s_keys" % (collection_name)
collection = db[ckey_collection]
result = collection.find({}).distinct("_id")
for r in result:
l.append(r)
if getattr(settings, 'SORTCOLUMNS', False):
nl = [] # new list list
# sort the list according to our list
for i in getattr(settings, 'SORTCOLUMNS', False):
for j in l:
if j.__contains__(i):
nl.append(j)
difflist = list(set(l) - set(nl))
for i in difflist:
nl.append(i)
return nl
else:
return sorted(l)
except Exception:
print("Error.", str(sys.exc_info()))
return []
def build_keys_with_mapreduce(database_name, collection_name):
map = Code("function() { "
" for (var key in this)"
" { emit(key, null); } }"
)
reduce = Code("function(key, stuff)"
"{ return null; }"
)
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
db = mc[database_name]
collection = db[collection_name]
result_collection_name = "%s_keys" % (collection_name)
result = collection.map_reduce(map, reduce, result_collection_name)
return None
def raw_query_mongo_db(kwargs, database_name, collection_name):
# for key in kwargs:
# print "arg: %s: %s" % (key, kwargs[key])
"""return a result list or an empty list"""
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
db = mc[database_name]
transactions = db[collection_name]
mysearchresult = transactions.find(kwargs)
mysearchcount = mysearchresult.count()
if mysearchcount > 0:
response_dict['code'] = 200
for d in mysearchresult:
l.append(d)
response_dict['results'] = l
except Exception:
# print "Error reading from Mongo"
# print str(sys.exc_info())
response_dict['code'] = 400
response_dict['type'] = "Error"
response_dict['message'] = str(sys.exc_info())
return response_dict
def cast_number_strings_to_integers(d):
"""d is a dict"""
for k, v in d.items():
# print type(v)
if determine_if_str_or_unicode(v):
if v.isdigit():
d[k] = int(v)
return d
def determine_if_str_or_unicode(s):
# if str or unicode return True, else False.
if isinstance(s, str) or isinstance(s, unicode):
return True
return False
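# A minimal usage sketch (not part of the original module). The database, collection
# and document values are hypothetical; the helpers read MONGODB_CLIENT from Django
# settings as shown above.
def _example_usage():
    doc = {'invoice': '42', 'amount': '100'}
    written = write_mongo(doc, 'billing', 'invoices')          # insert (update=False)
    found = query_mongo('billing', 'invoices',
                        query={'invoice': '42'},
                        return_keys=('invoice', 'amount'))
    for row in found['results']:
        print(row['id'], row.get('amount'))
    return written, found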
|
videntity/django-djmongo
|
djmongo/mongoutils.py
|
Python
|
gpl-2.0
| 17,724
| 0.00079
|
import re
import csv
from urllib import parse
import lxml.html
from pupa.scrape import Person, Scraper
class NoDetails(Exception):
pass
SESSION_NUMBERS = {
'2011': '62nd',
'2013': '63rd',
'2015': '64th',
'2017': '65th',
}
class MTPersonScraper(Scraper):
def url_xpath(self, url):
# Montana's legislator page was returning valid content with 500
# code as of 1/9/2013. Previous discussions with them after similar
# incidents in the past suggest some external part of their stack
# is having some issue and the error is bubbling up to the ret code.
self.raise_errors = False
html = self.get(url).text
doc = lxml.html.fromstring(html)
self.raise_errors = True
return doc
def scrape(self, chamber=None, session=None):
if not session:
session = max(SESSION_NUMBERS.keys())
session_number = SESSION_NUMBERS[session]
chambers = [chamber] if chamber else ['upper', 'lower']
for chamber in chambers:
url = 'http://leg.mt.gov/content/sessions/{}/{}{}Members.txt'.format(
session_number, session, 'Senate' if chamber == 'upper' else 'House'
)
yield from self.scrape_legislators(url, chamber=chamber)
def scrape_legislators(self, url, chamber):
data = self.get(url).text
data = data.replace('"""', '"') # weird triple quotes
data = data.splitlines()
fieldnames = ['last_name', 'first_name', 'party', 'district',
'address', 'city', 'state', 'zip']
csv_parser = csv.DictReader(data, fieldnames)
district_leg_urls = self._district_legislator_dict()
# Toss the row headers.
next(csv_parser)
for entry in csv_parser:
if not entry:
continue
# District.
district = entry['district']
hd_or_sd, district = district.split()
# Party.
party_letter = entry['party']
party = {'D': 'Democratic', 'R': 'Republican'}[party_letter]
# Get full name properly capped.
fullname = '%s %s' % (entry['first_name'].capitalize(),
entry['last_name'].capitalize())
# Get any info at the legislator's detail_url.
detail_url = district_leg_urls[hd_or_sd][district]
# Get the office.
address = '\n'.join([
entry['address'],
'%s, %s %s' % (entry['city'].title(), entry['state'], entry['zip'])
])
try:
deets = self._scrape_details(detail_url)
except NoDetails:
self.logger.warning("No details found at %r" % detail_url)
continue
legislator = Person(name=fullname, primary_org=chamber, district=district,
party=party, image=entry.get('photo_url', ''))
legislator.add_source(detail_url)
legislator.add_source(url)
legislator.add_link(detail_url)
legislator.add_contact_detail(type='address', value=address, note='District Office')
phone = deets.get('phone')
fax = deets.get('fax')
email = deets.get('email')
if phone:
legislator.add_contact_detail(type='voice', value=phone, note='District Office')
if fax:
legislator.add_contact_detail(type='fax', value=fax, note='District Office')
if email:
legislator.add_contact_detail(type='email', value=email, note='District Office')
yield legislator
def _district_legislator_dict(self):
'''Create a mapping of districts to the legislator who represents
each district in each house.
Used to get properly capitalized names in the legislator scraper.
'''
res = {'HD': {}, 'SD': {}}
url = 'http://leg.mt.gov/css/find%20a%20legislator.asp'
# Get base url.
parts = parse.urlparse(url)
parts = parts._replace(path='')
baseurl = parts.geturl()
# Go the find-a-legislator page.
doc = self.url_xpath(url)
doc.make_links_absolute(baseurl)
# Get the link to the current member roster.
url = doc.xpath('//a[contains(@href, "roster.asp")]/@href')[0]
# Fetch it.
self.raise_errors = False
html = self.get(url).text
doc = lxml.html.fromstring(html)
self.raise_errors = True
# Get the new baseurl, like 'http://leg.mt.gov/css/Sessions/62nd/'
parts = parse.urlparse(url)
path, _, _ = parts.path.rpartition('/')
parts = parts._replace(path=path)
baseurl = parts.geturl()
doc.make_links_absolute(baseurl)
table = doc.xpath('//table[@name="Legislators"]')[0]
for tr in table.xpath('tr'):
td1, td2 = tr.xpath('td')
# Skip header rows and retired legislators
if not td2.text_content().strip() or 'Resigned' in tr.text_content():
continue
# Get link to the member's page.
detail_url = td1.xpath('h4/a/@href')[0]
# Get the members district so we can match the
# profile page with its csv record.
house, district = td2.text_content().split()
res[house][district] = detail_url
return res
def _scrape_details(self, url):
'''Scrape the member's bio page.
Things available but not currently scraped are office address,
and waaay too much contact info, including personal email, phone.
'''
doc = self.url_xpath(url)
# Get base url.
parts = parse.urlparse(url)
parts = parts._replace(path='')
baseurl = parts.geturl()
doc.make_links_absolute(baseurl)
xpath = '//img[contains(@src, "legislator")]/@src'
try:
photo_url = doc.xpath(xpath).pop()
except IndexError:
raise NoDetails('No details found at %r' % url)
details = {'photo_url': photo_url}
# # Parse address.
elements = list(doc.xpath('//b[contains(., "Address")]/..')[0])
# # MT's website currently has a typo that places the "address"
# # heading inline with the "Information Office" phone number.
# # This hack temporarily makes things work.
elements = elements[3:]
chunks = []
for br in elements:
chunks.extend(filter(None, [br.text, br.tail]))
# As far as I can tell, MT legislators don't have capital offices.
for line in chunks[2:]:
if not line.strip():
continue
key = None
for k in ('ph', 'fax'):
    if k in line.lower():
        key = {'ph': 'phone'}.get(k, k)
        break
number = re.search(r'\(\d{3}\) \d{3}-\d{4}', line)
if number:
number = number.group()
if key:
# Used to set this on the office.
details[key] = number
try:
email = doc.xpath('//b[contains(., "Email")]/..')[0]
except IndexError:
pass
else:
if email:
html = lxml.html.tostring(email.getparent()).decode()
match = re.search(r'[a-zA-Z0-9\.\_\%\+\-]+@\w+\.[a-z]+', html)
if match:
details['email'] = match.group()
return details
|
cliftonmcintosh/openstates
|
openstates/mt/people.py
|
Python
|
gpl-3.0
| 7,587
| 0.001977
|
import copy
import pytest
from peek.line import InvalidIpAddressException, Line, InvalidStatusException
# 127.0.0.1 - - [01/Jan/1970:00:00:01 +0000] "GET / HTTP/1.1" 200 193 "-" "Python"
test_line_contents = {
'ip_address': '127.0.0.1',
'timestamp': '[01/Jan/1970:00:00:01 +0000]',
'verb': 'GET',
'path': '/',
'status': '200',
'size': '193',
'referrer': '-',
'user_agent': 'Python'
}
def get_updated_line_contents(updates=None):
test_contents = copy.deepcopy(test_line_contents)
if updates is not None:
test_contents.update(updates)
return test_contents
test_line = Line(line_contents=test_line_contents)
class TestLineInstantiation:
@pytest.mark.parametrize('expected,actual', [
('127.0.0.1', test_line.ip_address),
(1, test_line.timestamp),
('GET', test_line.verb),
('/', test_line.path),
(200, test_line.status),
(193, test_line.byte_count),
('-', test_line.referrer),
('Python', test_line.user_agent)
])
def test_retrieval(self, expected, actual):
assert expected == actual
class TestLineExceptions:
def test_passing_invalid_ip_address_throws_exception(self):
with pytest.raises(InvalidIpAddressException):
line = Line(line_contents=get_updated_line_contents({'ip_address': 'foobar'}))
def test_passing_non_parseable_status_throws_exception(self):
with pytest.raises(InvalidStatusException):
Line(line_contents=get_updated_line_contents({'status': 'foobar'}))
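# Purely illustrative and NOT part of peek: one way the raw access-log line quoted in
# the comment near the top of this file could be split into the same keys that
# test_line_contents builds by hand. The pattern below is an assumption, not peek's
# own parser.
import re as _re
_LOG_PATTERN = _re.compile(
    r'(?P<ip_address>\S+) \S+ \S+ (?P<timestamp>\[[^\]]+\]) '
    r'"(?P<verb>\S+) (?P<path>\S+) [^"]*" (?P<status>\d{3}) (?P<size>\d+) '
    r'"(?P<referrer>[^"]*)" "(?P<user_agent>[^"]*)"')
_raw = '127.0.0.1 - - [01/Jan/1970:00:00:01 +0000] "GET / HTTP/1.1" 200 193 "-" "Python"'
_parsed = _LOG_PATTERN.match(_raw).groupdict()   # yields the same keys as test_line_contents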
|
purrcat259/peek
|
tests/unit/test_line.py
|
Python
|
mit
| 1,585
| 0.001262
|
from flask import request, render_template
from flask.ext.login import current_user, login_user
from mysite.weibo import Client
from mysite import app, db
from mysite.models import Wuser, User
from . import weibo
@weibo.route('/oauthreturn')
def oauthreturn():
code = request.args.get('code', '')
if code:
client = Client(app.config['API_KEY'], app.config['API_SECRET'], app.config['REDIRECT_URI'])
client.set_code(code)
uid = client.token['uid']
profile = client.get('users/show', access_token=client.access_token, uid=uid)
wuser = Wuser.query.filter_by(uid=uid).first()
if wuser:
login_user(wuser.user)
else:
user = User()
wuser = Wuser(uid=uid)
wuser.user = user
db.session.add(user)
login_user(user)
wuser.update_access_token(client.token['access_token'])
wuser.update_profile(profile)
db.session.add(wuser)
db.session.commit()
return render_template("weibo/profile.html", wuser=wuser)
|
liyigerry/caixiang
|
mysite/views/weibo/oauthreturn.py
|
Python
|
mit
| 941
| 0.026567
|
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import timeutils
import webob.exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('volume', 'services')
class ServicesIndexTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('services')
elem = xmlutil.SubTemplateElement(root, 'service', selector='services')
elem.set('binary')
elem.set('host')
elem.set('zone')
elem.set('status')
elem.set('state')
elem.set('update_at')
elem.set('disabled_reason')
return xmlutil.MasterTemplate(root, 1)
class ServicesUpdateTemplate(xmlutil.TemplateBuilder):
def construct(self):
# TODO(uni): template elements of 'host', 'service' and 'disabled'
# should be deprecated to make ServicesUpdateTemplate consistent
# with ServicesIndexTemplate. Still keeping it here for API
# compatibility sake.
root = xmlutil.TemplateElement('host')
root.set('host')
root.set('service')
root.set('disabled')
root.set('binary')
root.set('status')
root.set('disabled_reason')
return xmlutil.MasterTemplate(root, 1)
class ServiceController(wsgi.Controller):
def __init__(self, ext_mgr=None):
self.ext_mgr = ext_mgr
super(ServiceController, self).__init__()
@wsgi.serializers(xml=ServicesIndexTemplate)
def index(self, req):
"""Return a list of all running services.
Filter by host & service name.
"""
context = req.environ['cinder.context']
authorize(context, action='index')
detailed = self.ext_mgr.is_loaded('os-extended-services')
now = timeutils.utcnow(with_timezone=True)
services = objects.ServiceList.get_all(context)
host = ''
if 'host' in req.GET:
host = req.GET['host']
service = ''
if 'service' in req.GET:
service = req.GET['service']
versionutils.report_deprecated_feature(LOG, _(
"Query by service parameter is deprecated. "
"Please use binary parameter instead."))
binary = ''
if 'binary' in req.GET:
binary = req.GET['binary']
if host:
services = [s for s in services if s.host == host]
# NOTE(uni): deprecating service request key, binary takes precedence
binary_key = binary or service
if binary_key:
services = [s for s in services if s.binary == binary_key]
svcs = []
for svc in services:
updated_at = svc.updated_at
delta = now - (svc.updated_at or svc.created_at)
delta_sec = delta.total_seconds()
if svc.modified_at:
delta_mod = now - svc.modified_at
if abs(delta_sec) >= abs(delta_mod.total_seconds()):
updated_at = svc.modified_at
alive = abs(delta_sec) <= CONF.service_down_time
art = (alive and "up") or "down"
active = 'enabled'
if svc.disabled:
active = 'disabled'
ret_fields = {'binary': svc.binary, 'host': svc.host,
'zone': svc.availability_zone,
'status': active, 'state': art,
'updated_at': timeutils.normalize_time(updated_at)}
if detailed:
ret_fields['disabled_reason'] = svc.disabled_reason
svcs.append(ret_fields)
return {'services': svcs}
def _is_valid_as_reason(self, reason):
if not reason:
return False
try:
utils.check_string_length(reason.strip(), 'Disabled reason',
min_length=1, max_length=255)
except exception.InvalidInput:
return False
return True
@wsgi.serializers(xml=ServicesUpdateTemplate)
def update(self, req, id, body):
"""Enable/Disable scheduling for a service."""
context = req.environ['cinder.context']
authorize(context, action='update')
ext_loaded = self.ext_mgr.is_loaded('os-extended-services')
ret_val = {}
if id == "enable":
disabled = False
status = "enabled"
if ext_loaded:
ret_val['disabled_reason'] = None
elif (id == "disable" or
(id == "disable-log-reason" and ext_loaded)):
disabled = True
status = "disabled"
else:
raise webob.exc.HTTPNotFound(explanation=_("Unknown action"))
try:
host = body['host']
except (TypeError, KeyError):
msg = _("Missing required element 'host' in request body.")
raise webob.exc.HTTPBadRequest(explanation=msg)
ret_val['disabled'] = disabled
if id == "disable-log-reason" and ext_loaded:
reason = body.get('disabled_reason')
if not self._is_valid_as_reason(reason):
msg = _('Disabled reason contains invalid characters '
'or is too long')
raise webob.exc.HTTPBadRequest(explanation=msg)
ret_val['disabled_reason'] = reason
# NOTE(uni): deprecating service request key, binary takes precedence
# Still keeping service key here for API compatibility sake.
service = body.get('service', '')
binary = body.get('binary', '')
binary_key = binary or service
if not binary_key:
raise webob.exc.HTTPBadRequest()
try:
svc = objects.Service.get_by_args(context, host, binary_key)
if not svc:
raise webob.exc.HTTPNotFound(explanation=_('Unknown service'))
svc.disabled = ret_val['disabled']
if 'disabled_reason' in ret_val:
svc.disabled_reason = ret_val['disabled_reason']
svc.save()
except exception.ServiceNotFound:
raise webob.exc.HTTPNotFound(explanation=_("service not found"))
ret_val.update({'host': host, 'service': service,
'binary': binary, 'status': status})
return ret_val
class Services(extensions.ExtensionDescriptor):
"""Services support."""
name = "Services"
alias = "os-services"
namespace = "http://docs.openstack.org/volume/ext/services/api/v2"
updated = "2012-10-28T00:00:00-00:00"
def get_resources(self):
resources = []
controller = ServiceController(self.ext_mgr)
resource = extensions.ResourceExtension('os-services', controller)
resources.append(resource)
return resources
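# Illustrative only (not part of the original extension): example request bodies
# accepted by ServiceController.update(), based on the keys it reads above ('host',
# 'binary' or the deprecated 'service', and 'disabled_reason'). The host and binary
# values are hypothetical.
_EXAMPLE_ENABLE_BODY = {'host': 'cinder-host-01', 'binary': 'cinder-volume'}
_EXAMPLE_DISABLE_LOG_REASON_BODY = {'host': 'cinder-host-01',
                                    'binary': 'cinder-volume',
                                    'disabled_reason': 'maintenance window'}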
|
nikesh-mahalka/cinder
|
cinder/api/contrib/services.py
|
Python
|
apache-2.0
| 7,673
| 0
|
# Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, subprocess, glob
script_dir = os.path.dirname(os.path.realpath(__file__))
publisher_command = os.environ.get("SIMPLE_COMMUNICATION_PUBLISHER_BIN")
if not publisher_command:
publisher_files = glob.glob(os.path.join(script_dir, "**/SimpleCommunicationPublisher*"), recursive=True)
publisher_command = next(iter(publisher_files), None)
assert publisher_command
subscriber_command = os.environ.get("SIMPLE_COMMUNICATION_SUBSCRIBER_BIN")
if not subscriber_command:
subscriber_files = glob.glob(os.path.join(script_dir, "**/SimpleCommunicationSubscriber*"), recursive=True)
subscriber_command = next(iter(subscriber_files), None)
assert subscriber_command
xml_file = os.environ.get("XML_FILE")
if xml_file:
real_xml_file = os.path.join(script_dir, xml_file)
else:
real_xml_file = os.path.join(script_dir, "liveliness_assertion.xml")
subscriber_proc = subprocess.Popen([subscriber_command, "--seed", str(os.getpid()), "--notexit",
"--xmlfile", real_xml_file])
publisher_proc = subprocess.Popen([publisher_command, "--seed", str(os.getpid()), "--exit_on_lost_liveliness",
"--xmlfile", real_xml_file], stdout=subprocess.PIPE)
while True:
line = publisher_proc.stdout.readline()
if line.strip().decode('utf-8').startswith('Publisher matched with subscriber '):
print("Subscriber matched.")
break
subscriber_proc.kill()
publisher_proc.communicate()
retvalue = publisher_proc.returncode
if retvalue != 0:
print("Test failed: " + str(retvalue))
else:
print("Test successed")
sys.exit(retvalue)
|
eProsima/Fast-DDS
|
test/communication/liveliness_assertion.py
|
Python
|
apache-2.0
| 2,185
| 0.003661
|
# Copyright 2005-2006 Daniel Henninger <jadestorm@nc.rr.com>
# Licensed for distribution under the GPL version 2, check COPYING for details
import utils
from twisted.internet import reactor
from twisted.words.xish.domish import Element
import jabw
import config
from debug import LogEvent, INFO, WARN, ERROR
import lang
import sha
import legacy
import globals
import base64
if not config.disableAvatars:
import Image
import StringIO
class Contact:
""" Represents a Jabber contact """
def __init__(self, jid, sub, contactList):
self.jid = jid
self.contactList = contactList
self.groups = []
self.sub = sub
self.nickname = ""
self.avatar = None
self.show = ""
self.status = ""
self.url = ""
self.ptype = "unavailable"
def removeMe(self):
""" Destroys this object. Does not remove the contact from the server's list. """
self.contactList = None
self.avatar = None
def syncContactGrantedAuth(self):
""" Since last using the transport the user has been granted authorisation by this contact.
Call this to synchronise the user's Jabber list with their legacy list after logon. """
if self.sub == "none":
self.sub = "to"
elif self.sub == "from":
self.sub = "both"
else:
return
self.updateRoster("subscribe")
def syncContactRemovedAuth(self):
""" Since last using the transport the user has been blocked by this contact.
Call this to synchronise the user's Jabber list with their legacy list after logon. """
if self.sub == "to":
self.sub = "none"
elif self.sub == "both":
self.sub = "from"
else:
return
self.updateRoster("unsubscribed")
def syncUserGrantedAuth(self):
""" Since last using the transport the user has granted authorisation to this contact.
Call this to synchronise the user's Jabber list with their legacy list after logon. """
if self.sub == "none":
self.sub = "from"
elif self.sub == "to":
self.sub = "both"
else:
return
self.updateRoster("subscribe")
def syncUserRemovedAuth(self):
""" Since last using the transport the user has removed this contact's authorisation.
Call this to synchronise the user's Jabber list with their legacy list after logon. """
if self.sub == "from":
self.sub = "none"
elif self.sub == "both":
self.sub = "to"
else:
return
self.updateRoster("unsubscribe")
def syncGroups(self, groups, push=True):
""" Set the groups that this contact is in on the legacy service.
By default this pushes the groups out with a presence subscribed packet. """
self.groups = groups
if push: self.updateRoster("subscribed");
def contactGrantsAuth(self):
""" Live roster event """
if self.sub == "none":
self.sub = "to"
elif self.sub == "from":
self.sub = "both"
self.sendSub("subscribed")
self.sendPresence()
def contactRemovesAuth(self):
""" Live roster event """
if self.sub == "to":
self.sub = "none"
elif self.sub == "both":
self.sub = "from"
self.sendSub("unsubscribed")
def contactRequestsAuth(self):
""" Live roster event """
self.sendSub("subscribe")
def contactDerequestsAuth(self):
""" Live roster event """
self.sendSub("unsubscribe")
def jabberSubscriptionReceived(self, subtype):
""" Updates the subscription state internally and pushes the update to the legacy server """
if subtype == "subscribe":
if self.sub == "to" or self.sub == "both":
self.sendSub("subscribed")
self.contactList.legacyList.addContact(self.jid)
elif subtype == "subscribed":
if self.sub == "none":
self.sub = "from"
if self.sub == "to":
self.sub = "both"
self.contactList.legacyList.authContact(self.jid)
elif(subtype == "unsubscribe"):
if self.sub == "none" and self.sub == "from":
self.sendSub("unsubscribed")
if self.sub == "both":
self.sub = "from"
if self.sub == "to":
self.sub = "none"
self.contactList.legacyList.removeContact(self.jid)
elif(subtype == "unsubscribed"):
if self.sub == "both":
self.sub = "to"
if self.sub == "from":
self.sub = "none"
self.contactList.legacyList.deauthContact(self.jid)
def updateNickname(self, nickname, push=True):
try:
decodednickname = unicode(self.nickname, errors='replace')
except:
decodednickname = self.nickname
		if decodednickname != nickname:
self.nickname = nickname
# will re-remove this if it's removed from JEP-0172.
#self.sendNickname()
if push: self.sendPresence()
#n = Element((None, "nick"))
#n.attributes["xmlns"] = globals.NICK
#n.addContent(nickname)
#self.contactList.session.pytrans.pubsub.localPublish(self.jid, globals.NICK, "current", n)
def updatePresence(self, show, status, ptype, force=False, tojid=None, url=None):
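		# Only push a presence update when show/status/type actually changed (or force is set).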
updateFlag = (self.show != show or self.status != status or self.ptype != ptype or force)
self.show = show
self.status = status
self.ptype = ptype
self.url = url
if updateFlag:
self.sendPresence(tojid)
def updateAvatar(self, avatar=None, push=True):
if config.disableAvatars: return
if self.avatar == avatar: return
self.avatar = avatar
if push: self.sendPresence()
#if self.avatar and not config.disableAvatars and not config.disablePEPAvatars:
#avatarHash = self.avatar.getImageHash()
#avatarData = self.avatar.getImageData()
#inbuff = StringIO.StringIO(avatarData)
#img = Image.open(inbuff)
#d = Element((None, "data"))
#d.attributes["xmlns"] = globals.AVATARDATA
#d.addContent(base64.encodestring(avatarData).replace("\n",""))
#self.contactList.session.pytrans.pubsub.localPublish(self.jid, globals.AVATARDATA, avatarHash, d)
#m = Element((None, "metadata"))
#m.attributes["xmlns"] = globals.AVATARMETADATA
#mi = m.addElement("info")
#mi.attributes["id"] = avatarHash
#mi.attributes["type"] = "image/png"
#mi.attributes["bytes"] = str(len(avatarData))
#mi.attributes["height"] = str(img.size[0])
#mi.attributes["width"] = str(img.size[1])
#self.contactList.session.pytrans.pubsub.localPublish(self.jid, globals.AVATARMETADATA, avatarHash, m)
def sendSub(self, ptype):
self.contactList.session.sendPresence(to=self.contactList.session.jabberID, fro=self.jid, ptype=ptype)
def sendNickname(self, tojid=None):
if not tojid:
tojid=self.contactList.session.jabberID
if self.nickname:
el = Element((None, "message"))
el.attributes["to"] = tojid
el.attributes["from"] = self.jid
nick = el.addElement("nick")
nick.attributes["xmlns"] = globals.NICK
nick.addContent(self.nickname)
self.contactList.session.pytrans.send(el)
def sendPresence(self, tojid=None):
avatarHash = ""
if self.avatar and not config.disableAvatars:
avatarHash = self.avatar.getImageHash()
caps = Element((None, "c"))
caps.attributes["xmlns"] = globals.CAPS
caps.attributes["node"] = legacy.url + "/protocol/caps"
caps.attributes["ver"] = legacy.version
if not tojid:
tojid=self.contactList.session.jabberID
self.contactList.session.sendPresence(to=tojid, fro=self.jid, ptype=self.ptype, show=self.show, status=self.status, avatarHash=avatarHash, nickname=self.nickname, payload=[caps], url=self.url)
def updateRoster(self, ptype):
self.contactList.session.sendRosterImport(jid=self.jid, ptype=ptype, sub=self.sub, groups=self.groups)
def fillvCard(self, vCard, jid):
if self.nickname:
NICKNAME = vCard.addElement("NICKNAME")
NICKNAME.addContent(self.nickname)
if self.avatar and not config.disableAvatars and not config.disableVCardAvatars:
PHOTO = self.avatar.makePhotoElement()
vCard.addChild(PHOTO)
user = jid.split('@')[0]
return self.contactList.session.legacycon.jabberVCardRequest(vCard, user)
class ContactList:
""" Represents the Jabber contact list """
def __init__(self, session):
LogEvent(INFO, session.jabberID)
self.session = session
self.contacts = {}
def removeMe(self):
""" Cleanly removes the object """
LogEvent(INFO, self.session.jabberID)
for jid in self.contacts:
self.contacts[jid].updatePresence("", "", "unavailable")
self.contacts[jid].removeMe()
self.contacts = {}
self.session = None
self.legacyList = None
def resendLists(self, tojid=None):
for jid in self.contacts:
if self.contacts[jid].status != "unavailable":
self.contacts[jid].sendPresence(tojid)
LogEvent(INFO, self.session.jabberID)
def createContact(self, jid, sub):
""" Creates a contact object. Use this to initialise the contact list
Returns a Contact object which you can call sync* methods on to synchronise
the user's legacy contact list with their Jabber list """
LogEvent(INFO, self.session.jabberID)
c = Contact(jid, sub, self)
self.contacts[jid] = c
return c
def getContact(self, jid):
""" Finds the contact. If one doesn't exist then a new one is created, with sub set to "none" """
if not self.contacts.has_key(jid):
self.contacts[jid] = Contact(jid, "none", self)
return self.contacts[jid]
def findContact(self, jid):
if self.contacts.has_key(jid):
return self.contacts[jid]
return None
def jabberSubscriptionReceived(self, jid, subtype):
self.getContact(jid).jabberSubscriptionReceived(subtype)
|
Alwnikrotikz/pyicqt
|
src/contact.py
|
Python
|
gpl-2.0
| 9,153
| 0.037365
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2017 AT&T Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.lib.api_schema.response.compute.v2_1 import \
security_groups as security_groups_schema
from tempest.lib.api_schema.response.compute.v2_1 import servers as schema
from tempest.lib.api_schema.response.compute.v2_16 import servers as schemav216
from tempest.lib.api_schema.response.compute.v2_19 import servers as schemav219
from tempest.lib.api_schema.response.compute.v2_26 import servers as schemav226
from tempest.lib.api_schema.response.compute.v2_3 import servers as schemav23
from tempest.lib.api_schema.response.compute.v2_47 import servers as schemav247
from tempest.lib.api_schema.response.compute.v2_48 import servers as schemav248
from tempest.lib.api_schema.response.compute.v2_6 import servers as schemav26
from tempest.lib.api_schema.response.compute.v2_9 import servers as schemav29
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
class ServersClient(base_compute_client.BaseComputeClient):
"""Service client for the resource /servers"""
schema_versions_info = [
{'min': None, 'max': '2.2', 'schema': schema},
{'min': '2.3', 'max': '2.5', 'schema': schemav23},
{'min': '2.6', 'max': '2.8', 'schema': schemav26},
{'min': '2.9', 'max': '2.15', 'schema': schemav29},
{'min': '2.16', 'max': '2.18', 'schema': schemav216},
{'min': '2.19', 'max': '2.25', 'schema': schemav219},
{'min': '2.26', 'max': '2.46', 'schema': schemav226},
{'min': '2.47', 'max': '2.47', 'schema': schemav247},
{'min': '2.48', 'max': None, 'schema': schemav248}]
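    # Each response is validated against the schema whose min/max microversion range
    # covers the compute API microversion in use (None meaning unbounded).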
def __init__(self, auth_provider, service, region,
enable_instance_password=True, **kwargs):
super(ServersClient, self).__init__(
auth_provider, service, region, **kwargs)
self.enable_instance_password = enable_instance_password
def create_server(self, **kwargs):
"""Create server.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/compute/#create-server
:param name: Server name
:param imageRef: Image reference (UUID)
:param flavorRef: Flavor reference (UUID or full URL)
Most parameters except the following are passed to the API without
any changes.
:param disk_config: The name is changed to OS-DCF:diskConfig
:param scheduler_hints: The name is changed to os:scheduler_hints and
the parameter is set in the same level as the parameter 'server'.
"""
body = copy.deepcopy(kwargs)
if body.get('disk_config'):
body['OS-DCF:diskConfig'] = body.pop('disk_config')
hints = None
if body.get('scheduler_hints'):
hints = {'os:scheduler_hints': body.pop('scheduler_hints')}
post_body = {'server': body}
if hints:
post_body.update(hints)
post_body = json.dumps(post_body)
resp, body = self.post('servers', post_body)
body = json.loads(body)
# NOTE(maurosr): this deals with the case of multiple server create
# with return reservation id set True
if 'reservation_id' in body:
return rest_client.ResponseBody(resp, body)
if self.enable_instance_password:
create_schema = schema.create_server_with_admin_pass
else:
create_schema = schema.create_server
self.validate_response(create_schema, resp, body)
return rest_client.ResponseBody(resp, body)
def update_server(self, server_id, **kwargs):
"""Update server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#update-server
Most parameters except the following are passed to the API without
any changes.
:param disk_config: The name is changed to OS-DCF:diskConfig
"""
if 'disk_config' in kwargs:
kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
post_body = json.dumps({'server': kwargs})
resp, body = self.put("servers/%s" % server_id, post_body)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.update_server, resp, body)
return rest_client.ResponseBody(resp, body)
def show_server(self, server_id):
"""Get server details.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref-compute-v2.1.html#showServer
"""
resp, body = self.get("servers/%s" % server_id)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.get_server, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_server(self, server_id):
"""Delete server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#delete-server
"""
resp, body = self.delete("servers/%s" % server_id)
self.validate_response(schema.delete_server, resp, body)
return rest_client.ResponseBody(resp, body)
def list_servers(self, detail=False, **params):
"""List servers.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#list-servers
https://developer.openstack.org/api-ref/compute/#list-servers-detailed
"""
url = 'servers'
schema = self.get_schema(self.schema_versions_info)
_schema = schema.list_servers
if detail:
url += '/detail'
_schema = schema.list_servers_detail
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(_schema, resp, body)
return rest_client.ResponseBody(resp, body)
def list_addresses(self, server_id):
"""Lists all addresses for a server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#list-ips
"""
resp, body = self.get("servers/%s/ips" % server_id)
body = json.loads(body)
self.validate_response(schema.list_addresses, resp, body)
return rest_client.ResponseBody(resp, body)
def list_addresses_by_network(self, server_id, network_id):
"""Lists all addresses of a specific network type for a server."""
resp, body = self.get("servers/%s/ips/%s" %
(server_id, network_id))
body = json.loads(body)
self.validate_response(schema.list_addresses_by_network, resp, body)
return rest_client.ResponseBody(resp, body)
def action(self, server_id, action_name,
schema=schema.server_actions_common_schema,
**kwargs):
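        # Every server action is a POST to servers/{id}/action whose body is a single-key
        # dict {action_name: kwargs}; the response is validated against the given schema.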
post_body = json.dumps({action_name: kwargs})
resp, body = self.post('servers/%s/action' % server_id,
post_body)
if body:
body = json.loads(body)
self.validate_response(schema, resp, body)
return rest_client.ResponseBody(resp, body)
def create_backup(self, server_id, **kwargs):
"""Backup a server instance.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#create-server-back-up-createbackup-action
"""
return self.action(server_id, "createBackup", **kwargs)
def change_password(self, server_id, **kwargs):
"""Change the root password for the server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#change-administrative-password-changepassword-action
"""
return self.action(server_id, 'changePassword', **kwargs)
def show_password(self, server_id):
resp, body = self.get("servers/%s/os-server-password" %
server_id)
body = json.loads(body)
self.validate_response(schema.show_password, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_password(self, server_id):
"""Removes the encrypted server password from the metadata server
Note that this does not actually change the instance server
password.
"""
resp, body = self.delete("servers/%s/os-server-password" %
server_id)
self.validate_response(schema.server_actions_delete_password,
resp, body)
return rest_client.ResponseBody(resp, body)
def reboot_server(self, server_id, **kwargs):
"""Reboot a server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#reboot-server-reboot-action
"""
return self.action(server_id, 'reboot', **kwargs)
def rebuild_server(self, server_id, image_ref, **kwargs):
"""Rebuild a server with a new image.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#rebuild-server-rebuild-action
Most parameters except the following are passed to the API without
any changes.
:param disk_config: The name is changed to OS-DCF:diskConfig
"""
kwargs['imageRef'] = image_ref
if 'disk_config' in kwargs:
kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
schema = self.get_schema(self.schema_versions_info)
if self.enable_instance_password:
rebuild_schema = schema.rebuild_server_with_admin_pass
else:
rebuild_schema = schema.rebuild_server
return self.action(server_id, 'rebuild',
rebuild_schema, **kwargs)
def resize_server(self, server_id, flavor_ref, **kwargs):
"""Change the flavor of a server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#resize-server-resize-action
Most parameters except the following are passed to the API without
any changes.
:param disk_config: The name is changed to OS-DCF:diskConfig
"""
kwargs['flavorRef'] = flavor_ref
if 'disk_config' in kwargs:
kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
return self.action(server_id, 'resize', **kwargs)
def confirm_resize_server(self, server_id, **kwargs):
"""Confirm the flavor change for a server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#confirm-resized-server-confirmresize-action
"""
return self.action(server_id, 'confirmResize',
schema.server_actions_confirm_resize,
**kwargs)
def revert_resize_server(self, server_id, **kwargs):
"""Revert a server back to its original flavor.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#revert-resized-server-revertresize-action
"""
return self.action(server_id, 'revertResize', **kwargs)
def list_server_metadata(self, server_id):
"""Lists all metadata for a server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#list-all-metadata
"""
resp, body = self.get("servers/%s/metadata" % server_id)
body = json.loads(body)
self.validate_response(schema.list_server_metadata, resp, body)
return rest_client.ResponseBody(resp, body)
def set_server_metadata(self, server_id, meta, no_metadata_field=False):
"""Sets one or more metadata items for a server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#create-or-replace-metadata-items
"""
if no_metadata_field:
post_body = ""
else:
post_body = json.dumps({'metadata': meta})
resp, body = self.put('servers/%s/metadata' % server_id,
post_body)
body = json.loads(body)
self.validate_response(schema.set_server_metadata, resp, body)
return rest_client.ResponseBody(resp, body)
def update_server_metadata(self, server_id, meta):
"""Updates one or more metadata items for a server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#update-metadata-items
"""
post_body = json.dumps({'metadata': meta})
resp, body = self.post('servers/%s/metadata' % server_id,
post_body)
body = json.loads(body)
self.validate_response(schema.update_server_metadata,
resp, body)
return rest_client.ResponseBody(resp, body)
def show_server_metadata_item(self, server_id, key):
"""Shows details for a metadata item, by key, for a server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#show-metadata-item-details
"""
resp, body = self.get("servers/%s/metadata/%s" % (server_id, key))
body = json.loads(body)
self.validate_response(schema.set_show_server_metadata_item,
resp, body)
return rest_client.ResponseBody(resp, body)
def set_server_metadata_item(self, server_id, key, meta):
"""Sets a metadata item, by key, for a server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#create-or-update-metadata-item
"""
post_body = json.dumps({'meta': meta})
resp, body = self.put('servers/%s/metadata/%s' % (server_id, key),
post_body)
body = json.loads(body)
self.validate_response(schema.set_show_server_metadata_item,
resp, body)
return rest_client.ResponseBody(resp, body)
def delete_server_metadata_item(self, server_id, key):
"""Deletes a metadata item, by key, from a server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#delete-metadata-item
"""
resp, body = self.delete("servers/%s/metadata/%s" %
(server_id, key))
self.validate_response(schema.delete_server_metadata_item,
resp, body)
return rest_client.ResponseBody(resp, body)
def stop_server(self, server_id, **kwargs):
"""Stops a running server and changes its status to SHUTOFF.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#stop-server-os-stop-action
"""
return self.action(server_id, 'os-stop', **kwargs)
def start_server(self, server_id, **kwargs):
"""Starts a stopped server and changes its status to ACTIVE.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#start-server-os-start-action
"""
return self.action(server_id, 'os-start', **kwargs)
def attach_volume(self, server_id, **kwargs):
"""Attaches a volume to a server instance.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#attach-a-volume-to-an-instance
"""
post_body = json.dumps({'volumeAttachment': kwargs})
resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
post_body)
body = json.loads(body)
self.validate_response(schema.attach_volume, resp, body)
return rest_client.ResponseBody(resp, body)
def update_attached_volume(self, server_id, attachment_id, **kwargs):
"""Swaps a volume attached to an instance for another volume"""
post_body = json.dumps({'volumeAttachment': kwargs})
resp, body = self.put('servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id),
post_body)
self.validate_response(schema.update_attached_volume, resp, body)
return rest_client.ResponseBody(resp, body)
def detach_volume(self, server_id, volume_id): # noqa
"""Detaches a volume from a server instance.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#detach-a-volume-from-an-instance
"""
resp, body = self.delete('servers/%s/os-volume_attachments/%s' %
(server_id, volume_id))
self.validate_response(schema.detach_volume, resp, body)
return rest_client.ResponseBody(resp, body)
def show_volume_attachment(self, server_id, volume_id):
"""Return details about the given volume attachment.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#show-a-detail-of-a-volume-attachment
"""
resp, body = self.get('servers/%s/os-volume_attachments/%s' % (
server_id, volume_id))
body = json.loads(body)
self.validate_response(schema.show_volume_attachment, resp, body)
return rest_client.ResponseBody(resp, body)
def list_volume_attachments(self, server_id):
"""Returns the list of volume attachments for a given instance.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#list-volume-attachments-for-an-instance
"""
resp, body = self.get('servers/%s/os-volume_attachments' % (
server_id))
body = json.loads(body)
self.validate_response(schema.list_volume_attachments, resp, body)
return rest_client.ResponseBody(resp, body)
def add_security_group(self, server_id, **kwargs):
"""Add a security group to the server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#add-security-group-to-a-server-addsecuritygroup-action
"""
return self.action(server_id, 'addSecurityGroup', **kwargs)
def remove_security_group(self, server_id, **kwargs):
"""Remove a security group from the server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#remove-security-group-from-a-server-removesecuritygroup-action
"""
return self.action(server_id, 'removeSecurityGroup', **kwargs)
def live_migrate_server(self, server_id, **kwargs):
"""This should be called with administrator privileges.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#live-migrate-server-os-migratelive-action
"""
return self.action(server_id, 'os-migrateLive', **kwargs)
def migrate_server(self, server_id, **kwargs):
"""Migrate a server to a new host.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#migrate-server-migrate-action
"""
return self.action(server_id, 'migrate', **kwargs)
def lock_server(self, server_id, **kwargs):
"""Lock the given server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#lock-server-lock-action
"""
return self.action(server_id, 'lock', **kwargs)
def unlock_server(self, server_id, **kwargs):
"""UNlock the given server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#unlock-server-unlock-action
"""
return self.action(server_id, 'unlock', **kwargs)
def suspend_server(self, server_id, **kwargs):
"""Suspend the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#suspend-server-suspend-action
"""
return self.action(server_id, 'suspend', **kwargs)
def resume_server(self, server_id, **kwargs):
"""Un-suspend the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#resume-suspended-server-resume-action
"""
return self.action(server_id, 'resume', **kwargs)
def pause_server(self, server_id, **kwargs):
"""Pause the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#pause-server-pause-action
"""
return self.action(server_id, 'pause', **kwargs)
def unpause_server(self, server_id, **kwargs):
"""Un-pause the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#unpause-server-unpause-action
"""
return self.action(server_id, 'unpause', **kwargs)
def reset_state(self, server_id, **kwargs):
"""Reset the state of a server to active/error.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#reset-server-state-os-resetstate-action
"""
return self.action(server_id, 'os-resetState', **kwargs)
def shelve_server(self, server_id, **kwargs):
"""Shelve the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#shelve-server-shelve-action
"""
return self.action(server_id, 'shelve', **kwargs)
def unshelve_server(self, server_id, **kwargs):
"""Un-shelve the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#unshelve-restore-shelved-server-unshelve-action
"""
return self.action(server_id, 'unshelve', **kwargs)
def shelve_offload_server(self, server_id, **kwargs):
"""Shelve-offload the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#shelf-offload-remove-server-shelveoffload-action
"""
return self.action(server_id, 'shelveOffload', **kwargs)
def get_console_output(self, server_id, **kwargs):
"""Get console output.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#show-console-output-os-getconsoleoutput-action
"""
return self.action(server_id, 'os-getConsoleOutput',
schema.get_console_output, **kwargs)
def get_remote_console(self, server_id, console_type, protocol, **kwargs):
"""Get a remote console.
For a full list of available parameters, please refer to the official
API reference:
TODO (markus_z) The api-ref for that isn't yet available, update this
here when the docs in Nova are updated. The old API is at
http://developer.openstack.org/api-ref/compute/#get-serial-console-os-getserialconsole-action
"""
param = {
'remote_console': {
'type': console_type,
'protocol': protocol,
}
}
post_body = json.dumps(param)
resp, body = self.post("servers/%s/remote-consoles" % server_id,
post_body)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.get_remote_consoles, resp, body)
return rest_client.ResponseBody(resp, body)
def list_virtual_interfaces(self, server_id):
"""List the virtual interfaces used in an instance."""
resp, body = self.get('/'.join(['servers', server_id,
'os-virtual-interfaces']))
body = json.loads(body)
self.validate_response(schema.list_virtual_interfaces, resp, body)
return rest_client.ResponseBody(resp, body)
def rescue_server(self, server_id, **kwargs):
"""Rescue the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#rescue-server-rescue-action
"""
if self.enable_instance_password:
rescue_schema = schema.rescue_server_with_admin_pass
else:
rescue_schema = schema.rescue_server
return self.action(server_id, 'rescue', rescue_schema, **kwargs)
def unrescue_server(self, server_id):
"""Unrescue the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#unrescue-server-unrescue-action
"""
return self.action(server_id, 'unrescue')
def show_server_diagnostics(self, server_id):
"""Get the usage data for a server."""
resp, body = self.get("servers/%s/diagnostics" % server_id)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.show_server_diagnostics, resp, body)
return rest_client.ResponseBody(resp, body)
def list_instance_actions(self, server_id):
"""List the provided server action."""
resp, body = self.get("servers/%s/os-instance-actions" %
server_id)
body = json.loads(body)
self.validate_response(schema.list_instance_actions, resp, body)
return rest_client.ResponseBody(resp, body)
def show_instance_action(self, server_id, request_id):
"""Returns the action details of the provided server."""
resp, body = self.get("servers/%s/os-instance-actions/%s" %
(server_id, request_id))
body = json.loads(body)
self.validate_response(schema.show_instance_action, resp, body)
return rest_client.ResponseBody(resp, body)
def force_delete_server(self, server_id, **kwargs):
"""Force delete a server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#force-delete-server-forcedelete-action
"""
return self.action(server_id, 'forceDelete', **kwargs)
def restore_soft_deleted_server(self, server_id, **kwargs):
"""Restore a soft-deleted server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#restore-soft-deleted-instance-restore-action
"""
return self.action(server_id, 'restore', **kwargs)
def reset_network(self, server_id, **kwargs):
"""Reset the Network of a server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#reset-networking-on-a-server-resetnetwork-action
"""
return self.action(server_id, 'resetNetwork', **kwargs)
def inject_network_info(self, server_id, **kwargs):
"""Inject the Network Info into server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#inject-network-information-injectnetworkinfo-action
"""
return self.action(server_id, 'injectNetworkInfo', **kwargs)
def get_vnc_console(self, server_id, **kwargs):
"""Get URL of VNC console.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#get-vnc-console-os-getvncconsole-action
"""
return self.action(server_id, "os-getVNCConsole",
schema.get_vnc_console, **kwargs)
def add_fixed_ip(self, server_id, **kwargs):
"""Add a fixed IP to server instance.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#add-associate-fixed-ip-addfixedip-action
"""
return self.action(server_id, 'addFixedIp', **kwargs)
def remove_fixed_ip(self, server_id, **kwargs):
"""Remove input fixed IP from input server instance.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#remove-disassociate-fixed-ip-removefixedip-action
"""
return self.action(server_id, 'removeFixedIp', **kwargs)
def list_security_groups_by_server(self, server_id):
"""Lists security groups for a server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#list-security-groups-by-server
"""
resp, body = self.get("servers/%s/os-security-groups" % server_id)
body = json.loads(body)
self.validate_response(security_groups_schema.list_security_groups,
resp, body)
return rest_client.ResponseBody(resp, body)
def list_tags(self, server_id):
"""Lists all tags for a server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#list-tags
"""
url = 'servers/%s/tags' % server_id
resp, body = self.get(url)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.list_tags, resp, body)
return rest_client.ResponseBody(resp, body)
def update_all_tags(self, server_id, tags):
"""Replaces all tags on specified server with the new set of tags.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#replace-tags
:param tags: List of tags to replace current server tags with.
"""
url = 'servers/%s/tags' % server_id
put_body = {'tags': tags}
resp, body = self.put(url, json.dumps(put_body))
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.update_all_tags, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_all_tags(self, server_id):
"""Deletes all tags from the specified server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#delete-all-tags
"""
url = 'servers/%s/tags' % server_id
resp, body = self.delete(url)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.delete_all_tags, resp, body)
return rest_client.ResponseBody(resp, body)
def check_tag_existence(self, server_id, tag):
"""Checks tag existence on the server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#check-tag-existence
:param tag: Check for existence of tag on specified server.
"""
url = 'servers/%s/tags/%s' % (server_id, tag)
resp, body = self.get(url)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.check_tag_existence, resp, body)
return rest_client.ResponseBody(resp, body)
def update_tag(self, server_id, tag):
"""Adds a single tag to the server if server has no specified tag.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#add-a-single-tag
:param tag: Tag to be added to the specified server.
"""
url = 'servers/%s/tags/%s' % (server_id, tag)
resp, body = self.put(url, None)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.update_tag, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_tag(self, server_id, tag):
"""Deletes a single tag from the specified server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#delete-a-single-tag
:param tag: Tag to be removed from the specified server.
"""
url = 'servers/%s/tags/%s' % (server_id, tag)
resp, body = self.delete(url)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.delete_tag, resp, body)
return rest_client.ResponseBody(resp, body)
def evacuate_server(self, server_id, **kwargs):
"""Evacuate the given server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#evacuate-server-evacuate-action
"""
if self.enable_instance_password:
evacuate_schema = schema.evacuate_server_with_admin_pass
else:
evacuate_schema = schema.evacuate_server
return self.action(server_id, 'evacuate',
evacuate_schema,
**kwargs)
|
Juniper/tempest
|
tempest/lib/services/compute/servers_client.py
|
Python
|
apache-2.0
| 36,704
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('article', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='comments',
name='comments_date',
field=models.DateTimeField(default=datetime.datetime(2015, 2, 17, 12, 54, 47, 78000, tzinfo=utc)),
preserve_default=False,
),
]
|
Timurdov/bionicprojectpython
|
shadrus/article/migrations/0002_comments_comments_date.py
|
Python
|
apache-2.0
| 548
| 0.001825
|
NAME='gif'
GCC_LIST=['gif']
|
unbit/uwsgi-gif
|
uwsgiplugin.py
|
Python
|
mit
| 28
| 0.071429
|
from hashlib import sha256
from .etl import ETL
from kombu.mixins import ConsumerMixin
from kombu import Connection
import traceback
import Queue
import json
import time
import pytz
from datetime import datetime
from tzlocal import get_localzone
import socket
import logging
import os
class KnownHosts(object):
HOST_FILE = "/etc/hosts"
def __init__(self, filename=HOST_FILE):
self.filename = filename
try:
os.stat(self.filename)
except:
raise
self.mapping = self.read_hosts_file(filename)
@classmethod
def read_hosts_file(cls, filename):
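        # Parse an /etc/hosts-style file: skip blank and comment lines, then record a
        # two-way mapping between each line's IP address and its hostnames.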
mapping = {}
for line in open(filename).readlines():
if line.strip() == '':
continue
elif line.strip().find('#') == 0:
continue
elif len(line.split()) < 2:
continue
l = line.strip()
ip = l.split()[0]
host_names = l.split()[1:]
if len(host_names) == 0:
continue
# FIXME this means the expected mapping[ip] = host
# may not be right
ip_host_mappings = [(ip, h) for h in host_names]
for ip, host in ip_host_mappings:
mapping[host.strip()] = ip.strip()
mapping[ip.strip()] = host.strip()
return mapping
def is_ip(self, ip):
# FIXME track down a regex and use that
d = ip.split('.')
        if len(d) != 4:
return False
if not all([i.isdigit() for i in d]):
return False
if not all([int(i, 10) >= 0 for i in d]):
return False
if not all([int(i, 10) <= 255 for i in d]):
return False
return True
def resolve_host(self, ip_host):
if ip_host in self.mapping and \
not self.is_ip(ip_host):
return self.mapping[ip_host]
name = ip_host
try:
            name, _, _ = socket.gethostbyaddr(ip_host)
self.mapping[ip_host] = name
self.mapping[name] = ip_host
except:
name = ip_host
self.mapping[ip_host] = name
return name
class HitterService(ConsumerMixin):
NAME = 'processor'
BROKER_URI = "redis://127.0.0.1:6379"
BROKER_QUEUE = "mystified-catcher"
KNOWN_HOSTS = KnownHosts()
LOGSTASH_QUEUE = "logstash-results"
SYSLOG_MSG_TYPE = {
0: "EMERGENCY",
1: "ALERT",
2: "CRITICAL",
3: "ERROR",
4: "WARNING",
5: "NOTICE",
6: "INFORMATIONAL",
7: "DEBUG",
}
MY_TZ = os.environ.get('CATCHER_TZ', 'NOT_SET')
TZ_INFO = pytz.timezone(MY_TZ) if MY_TZ != 'NOT_SET' else None
def __init__(self, broker_uri=BROKER_URI, broker_queue=BROKER_QUEUE,
hosts_file=None, mongo_backend=None,
etl_backend=ETL, msg_limit=100,
# leaving it open to use kombu to buffer messages
store_uri=BROKER_URI,
store_queue=LOGSTASH_QUEUE):
if hosts_file is not None:
self.KNOWN_HOSTS = KnownHosts(filename=hosts_file)
self.broker_uri = broker_uri
self.broker_queue = broker_queue
self.store_uri = store_uri
self.store_queue = store_queue
self.mongo_backend = mongo_backend
self.etl_backend = etl_backend
self.keep_running = False
self.msg_limit = msg_limit
@classmethod
def split_alert_message(cls, data):
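        # Syslog messages are framed as "<PRI>message"; return the numeric PRI string and
        # the remaining message text, or ('', data) when no valid PRI prefix is present.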
t = ''
msg = data
end = data.find('>')
start = data.find('<')
if len(data) < end+1:
return '', msg
if start == 0 and end > 0 and end < 10:
t = data[start+1:end]
if not t.isdigit():
return '', data
else:
msg = data[end+1:]
return t, msg
@classmethod
def calculate_msg_type(cls, data):
t, msg = cls.split_alert_message(data)
if len(t) == 0:
return "UNKNOWN"
v = int(t, 10)
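        # PRI = facility * 8 + severity; masking with 0x7 keeps only the severity bits.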
if v > 7:
v &= 0x7
return cls.SYSLOG_MSG_TYPE[v]
@classmethod
    def format_timestamp(cls, tstamp):
        if cls.TZ_INFO is not None:
            local_tz = cls.TZ_INFO.localize(tstamp, is_dst=None)
            utc_tz = local_tz.astimezone(pytz.utc)
            return str(utc_tz.strftime("%Y-%m-%dT%H:%M:%S") +\
                ".%03d" % (tstamp.microsecond / 1000) + "Z")
        return str(tstamp.strftime("%Y-%m-%dT%H:%M:%S") +\
            ".%03d" % (tstamp.microsecond / 1000))
@classmethod
def get_base_json(cls, syslog_msg, syslog_server_ip,
catcher_name, catcher_host, catcher_tz):
r = {'source': "syslog", 'raw': syslog_msg,
'type': 'json',
'_id': sha256(syslog_msg).hexdigest(),
'@timestamp': cls.format_timestamp(datetime.now()),
'@version': "1",
'message': "transformed syslog",
'path': '',
'tags': [],
'catcher_tz': catcher_tz,
'catcher_host': catcher_host,
'catcher_name': catcher_name
}
t, msg = cls.split_alert_message(syslog_msg)
r['syslog_level'] = cls.calculate_msg_type(syslog_msg)
r['syslog_msg'] = msg
r['syslog_tag'] = t
r['syslog_server'] = cls.resolve_host(syslog_server_ip)
r['syslog_server_ip'] = syslog_server_ip
r['syslog_catcher'] = catcher_name
return r
@classmethod
def resolve_host(cls, ip_host):
return cls.KNOWN_HOSTS.resolve_host(ip_host)
def process_message(self, syslog_msg,
syslog_server_ip,
catcher_name, catcher_host, catcher_tz):
m = "Extracting and converting msg from %s msg (syslog: %s)" % (syslog_server_ip, catcher_name)
logging.debug(m)
r = self.get_base_json(syslog_msg, syslog_server_ip,
catcher_name, catcher_host, catcher_tz)
sm = {}
try:
result = self.etl_backend.syslog_et(syslog_msg)
sm.update(result.get('rule_results', result))
if 'rule_name' in result:
sm['rule_name'] = result.get('rule_name')
sm['tags'] = []
if sm.get('syslog_level', None) is not None:
sm['tags'].append(sm['syslog_level'])
if sm.get('rule_name', None) is not None:
sm['tags'].append(sm['rule_name'])
except:
tb = traceback.format_exc()
logging.debug("[XXX] Error: "+tb)
r.update(sm)
return r
def extract_message_components(self, msg_dict):
syslog_msg = msg_dict.get('syslog_msg', '')
syslog_server_ip = msg_dict.get('syslog_server_ip', '')
catcher_host = msg_dict.get('catcher_host', '')
catcher_name = msg_dict.get('catcher_name', '')
catcher_tz = msg_dict.get('catcher_tz', str(get_localzone()))
return self.process_message(syslog_msg,
syslog_server_ip,
catcher_name, catcher_host, catcher_tz)
def process_and_report(self, incoming_msg):
logging.debug("Processing and report syslog_msg")
message = incoming_msg
if isinstance(incoming_msg, str):
try:
message = json.loads(incoming_msg)
except:
message = {}
tb = traceback.format_exc()
logging.debug("[XXX] Error: "+tb)
raise
etl_data = self.extract_message_components(message)
syslog_msg = etl_data['raw']
self.store_results(syslog_msg, etl_data)
return etl_data
def _read_messages(self, uri, queue, callback=None, cnt=1):
msgs = []
read_all = False
if cnt < 1:
read_all = True
try:
logging.debug("Reading the messages")
with Connection(uri) as conn:
q = conn.SimpleQueue(queue)
while cnt > 0 or read_all:
cnt += -1
try:
message = q.get(block=False)
if callback is not None:
data = callback(message.payload)
msgs.append(data)
logging.debug("made it here 2")
logging.debug(data)
message.ack()
except Queue.Empty:
logging.debug("%s queue is empty" % queue)
break
except:
tb = traceback.format_exc()
logging.debug("[XXX] Error: "+tb)
logging.debug("Successfully read %d messages" % len(msgs))
except:
tb = traceback.format_exc()
logging.debug("[XXX] Error: "+tb)
logging.debug("Failed to read message")
return msgs
def store_mongo(self, syslog_msg, etl_data):
if self.mongo_backend is not None:
m = "Sending results to mongo"
logging.debug(m)
raw_insert, json_insert = self.mongo_backend.insert(
syslog_msg,
etl_data)
if not raw_insert:
logging.debug("Failed to insert the raw syslog information in mongo")
if not json_insert:
logging.debug("Failed to insert the processed syslog information in mongo")
def store_kombu(self, etl_data):
logging.debug("Storing message in logstash queue")
try:
with Connection(self.store_uri) as conn:
q = conn.SimpleQueue(self.store_queue)
q.put(etl_data)
q.close()
logging.debug("Storing message in logstash success")
except:
tb = traceback.format_exc()
logging.debug("[XXX] Error: "+tb)
logging.debug("Storing message in logstash queue failed")
def store_results(self, syslog_msg, etl_data):
self.store_mongo(syslog_msg, etl_data)
self.store_kombu(etl_data)
def read_messages(self):
msgs = self._read_messages(self.broker_uri, self.broker_queue,
cnt=self.msg_limit,
callback=self.process_and_report)
return msgs
def serve_forever(self, poll_interval=1.0):
self.keep_running = True
while self.keep_running:
try:
self.read_messages()
time.sleep(poll_interval)
except KeyboardInterrupt:
break
|
deeso/slow-hitter
|
src/slow/hitter.py
|
Python
|
apache-2.0
| 10,877
| 0.001747
|
# -*- coding: utf-'8' "-*-"
import base64
import json
from hashlib import sha1
import hmac
import logging
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_adyen.controllers.main import AdyenController
from openerp.osv import osv, fields
from openerp.tools import float_round
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class AcquirerAdyen(osv.Model):
_inherit = 'payment.acquirer'
def _get_adyen_urls(self, cr, uid, environment, context=None):
""" Adyen URLs
- yhpp: hosted payment page: pay.shtml for single, select.shtml for multiple
"""
return {
'adyen_form_url': 'https://%s.adyen.com/hpp/pay.shtml' % ('live' if environment == 'prod' else environment),
}
def _get_providers(self, cr, uid, context=None):
providers = super(AcquirerAdyen, self)._get_providers(cr, uid, context=context)
providers.append(['adyen', 'Adyen'])
return providers
_columns = {
'adyen_merchant_account': fields.char('Merchant Account', required_if_provider='adyen'),
'adyen_skin_code': fields.char('Skin Code', required_if_provider='adyen'),
'adyen_skin_hmac_key': fields.char('Skin HMAC Key', required_if_provider='adyen'),
}
def _adyen_generate_merchant_sig(self, acquirer, inout, values):
""" Generate the shasign for incoming or outgoing communications.
:param browse acquirer: the payment.acquirer browse record. It should
        have the skin HMAC key configured (used to sign messages in and out)
        :param string inout: 'in' (openerp contacting adyen) or 'out' (adyen
contacting openerp). In this last case only some
fields should be contained (see e-Commerce basic)
:param dict values: transaction values
:return string: shasign
"""
assert inout in ('in', 'out')
assert acquirer.provider == 'adyen'
if inout == 'in':
keys = "paymentAmount currencyCode shipBeforeDate merchantReference skinCode merchantAccount sessionValidity shopperEmail shopperReference recurringContract allowedMethods blockedMethods shopperStatement merchantReturnData billingAddressType deliveryAddressType offset".split()
else:
keys = "authResult pspReference merchantReference skinCode merchantReturnData".split()
def get_value(key):
if values.get(key):
return values[key]
return ''
sign = ''.join('%s' % get_value(k) for k in keys).encode('ascii')
key = acquirer.adyen_skin_hmac_key.encode('ascii')
return base64.b64encode(hmac.new(key, sign, sha1).digest())
def adyen_form_generate_values(self, cr, uid, id, values, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
acquirer = self.browse(cr, uid, id, context=context)
# tmp
import datetime
from dateutil import relativedelta
tmp_date = datetime.date.today() + relativedelta.relativedelta(days=1)
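        # Adyen expects amounts in minor units (e.g. cents), hence the * 100 below.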
values.update({
'merchantReference': values['reference'],
'paymentAmount': '%d' % int(float_round(values['amount'], 2) * 100),
'currencyCode': values['currency'] and values['currency'].name or '',
'shipBeforeDate': tmp_date,
'skinCode': acquirer.adyen_skin_code,
'merchantAccount': acquirer.adyen_merchant_account,
'shopperLocale': values.get('partner_lang'),
'sessionValidity': tmp_date,
'resURL': '%s' % urlparse.urljoin(base_url, AdyenController._return_url),
'merchantReturnData': json.dumps({'return_url': '%s' % values.pop('return_url')}) if values.get('return_url') else False,
'merchantSig': self._adyen_generate_merchant_sig(acquirer, 'in', values),
})
return values
def adyen_get_form_action_url(self, cr, uid, id, context=None):
acquirer = self.browse(cr, uid, id, context=context)
return self._get_adyen_urls(cr, uid, acquirer.environment, context=context)['adyen_form_url']
class TxAdyen(osv.Model):
_inherit = 'payment.transaction'
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _adyen_form_get_tx_from_data(self, cr, uid, data, context=None):
reference, pspReference = data.get('merchantReference'), data.get('pspReference')
if not reference or not pspReference:
error_msg = _('Adyen: received data with missing reference (%s) or missing pspReference (%s)') % (reference, pspReference)
_logger.info(error_msg)
raise ValidationError(error_msg)
# find tx -> @TDENOTE use pspReference ?
tx_ids = self.pool['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
if not tx_ids or len(tx_ids) > 1:
error_msg = _('Adyen: received data for reference %s') % (reference)
if not tx_ids:
error_msg += _('; no order found')
else:
                error_msg += _('; multiple orders found')
_logger.info(error_msg)
raise ValidationError(error_msg)
tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
# verify shasign
shasign_check = self.pool['payment.acquirer']._adyen_generate_merchant_sig(tx.acquirer_id, 'out', data)
if shasign_check != data.get('merchantSig'):
error_msg = _('Adyen: invalid merchantSig, received %s, computed %s') % (data.get('merchantSig'), shasign_check)
_logger.warning(error_msg)
raise ValidationError(error_msg)
return tx
def _adyen_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
invalid_parameters = []
# reference at acquirer: pspReference
if tx.acquirer_reference and data.get('pspReference') != tx.acquirer_reference:
invalid_parameters.append(('pspReference', data.get('pspReference'), tx.acquirer_reference))
# seller
if data.get('skinCode') != tx.acquirer_id.adyen_skin_code:
invalid_parameters.append(('skinCode', data.get('skinCode'), tx.acquirer_id.adyen_skin_code))
# result
if not data.get('authResult'):
invalid_parameters.append(('authResult', data.get('authResult'), 'something'))
return invalid_parameters
def _adyen_form_validate(self, cr, uid, tx, data, context=None):
status = data.get('authResult', 'PENDING')
if status == 'AUTHORISED':
tx.write({
'state': 'done',
'acquirer_reference': data.get('pspReference'),
# 'date_validate': data.get('payment_date', fields.datetime.now()),
# 'paypal_txn_type': data.get('express_checkout')
})
return True
elif status == 'PENDING':
tx.write({
'state': 'pending',
'acquirer_reference': data.get('pspReference'),
})
return True
else:
error = _('Adyen: feedback error')
_logger.info(error)
tx.write({
'state': 'error',
'state_message': error
})
return False
|
minhphung171093/GreenERP
|
openerp/addons/payment_adyen/models/adyen.py
|
Python
|
gpl-3.0
| 7,511
| 0.003728
|
import json
class JSON_RPCError(Exception):
""" Base class for JSON-RPC errors. """
def to_json(self):
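        # The JSON-RPC error object reuses the subclass docstring as the human-readable
        # message and the subclass 'code' attribute as the numeric error code.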
return json.dumps({
'code': self.code,
'message': self.__doc__,
})
class ParseError(JSON_RPCError):
""" Invalid JSON was received by the server. An error occurred on the
server while parsing the JSON text.
"""
code = -32700
class InvalidRequestError(JSON_RPCError):
""" The JSON sent is not a valid Request object. """
code = -32600
class MethodNotFoundError(JSON_RPCError):
""" The method does not exist / is not available. """
code = -32601
class InvalidParamsError(JSON_RPCError):
""" Invalid methods parameter(s). """
code = -32602
class InternalError(JSON_RPCError):
""" Internal JSON-RPC error. """
code = -32603
|
AdvancedClimateSystems/Diode
|
diode/exceptions.py
|
Python
|
mpl-2.0
| 830
| 0
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='Card-Magic',
version='1.0',
description='The best card and decks ever',
author='Juan Carlos Ferrer',
author_email='juan.carlos@micronixsolutions.com',
packages=['cardmagic', 'cardmagic.tests'],
package_data = {
'cardmagic': [
'translations/en/LC_MESSAGES/*',
'translations/es/LC_MESSAGES/*'],
},
)
|
juancferrer/Card-Magic
|
setup.py
|
Python
|
bsd-3-clause
| 463
| 0.008639
|
"""
********************************************************************************
Learn Python the Hard Way Third Edition, by
Zed A. Shaw
ISBN: 978-0321884916
********************************************************************************
"""
import random
from urllib import urlopen
import sys
#debug = "DEBUG: "
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []
PHRASES = {
"class %%%(%%%):":
"Make a class named %%% that is-a %%%.",
"class %%%(object):\n\tdef __init__(self, ***)":
"class %%% has-a __init__ that takes self and *** parameters.",
"class %%%(object):\n\tdef ***(self,@@@)":
"class %%% has-a function named *** that takes self and @@@ parameters.",
"*** = %%%()":
"Set *** to an instance of class %%%.",
"***.***(@@@)":
"From *** get the *** function, and call it with parameters self, @@@.",
"***.*** = '***'":
"From *** get the *** attribute and set it to '***'."
}
# do they want to drill phrases first
PHRASE_FIRST = False
if len(sys.argv) == 2 and sys.argv[1] == "english":
PHRASE_FIRST = True
#print debug + "0"
# load up the words from the website
#for word in urlopen(WORD_URL).readlines():
# once downloaded, just open the file locally:
for word in open('words.txt').readlines():
WORDS.append(word.strip())
#print debug + word
def convert(snippet, phrase):
class_names = [w.capitalize() for w in
random.sample(WORDS, snippet.count("%%%"))]
other_names = random.sample(WORDS, snippet.count("***"))
results = []
param_names = []
for i in range(0, snippet.count("@@@")):
param_count = random.randint(1,3)
param_names.append(', '.join(random.sample(WORDS, param_count)))
for sentence in snippet, phrase:
result = sentence[:]
#fake class names
for word in class_names:
result = result.replace("%%%", word, 1)
#fake other names
for word in other_names:
result = result.replace("***", word, 1)
#fake parameter lists
for word in param_names:
result = result.replace("@@@", word, 1)
results.append(result)
return results
# keep going until EOF
try:
while True:
snippets = PHRASES.keys()
#print debug + "3"
random.shuffle(snippets)
for snippet in snippets:
#print debug + "4"
phrase = PHRASES[snippet]
question, answer = convert(snippet, phrase)
if PHRASE_FIRST:
question, answer = answer, question
print question
raw_input("> ")
print "ANSWER: %s\n\n" % answer
except EOFError:
print "\nBye"
|
msnorm/projects
|
zspy2/ex41/ex41.py
|
Python
|
mit
| 2,749
| 0.005457
|
from collections import OrderedDict
from .. import Provider as CompanyProvider
class Provider(CompanyProvider):
formats = OrderedDict(
(
("{{company_limited_prefix}}{{last_name}} {{company_limited_suffix}}", 0.2),
(
"{{company_limited_prefix}}{{last_name}}{{company_suffix}} {{company_limited_suffix}}",
0.2,
),
("{{company_limited_prefix}}{{last_name}} {{company_limited_suffix}}", 0.2),
("{{company_prefix}}{{last_name}}", 0.2),
("{{company_prefix}}{{last_name}}{{company_suffix}}", 0.2),
("{{last_name}}{{company_suffix}}", 0.1),
("{{nonprofit_prefix}}{{last_name}}", 0.1),
("{{last_name}}-{{last_name}}", 0.05),
("{{last_name}}และ{{last_name}}", 0.05),
("{{company_limited_prefix}}{{last_name}}", 0.01),
)
)
company_prefixes = OrderedDict(
(
("ห้างหุ้นส่วนจำกัด ", 0.3),
("หจก.", 0.2),
("บจก.", 0.1),
("บมจ.", 0.1),
("ห้างหุ้นส่วนสามัญ ", 0.1),
("หสน.", 0.01),
)
)
nonprofit_prefixes = OrderedDict(
(
("สมาคม", 0.4),
("มูลนิธิ", 0.3),
("ชมรม", 0.2),
("สหภาพแรงงาน", 0.1),
)
)
company_suffixes = (
"และเพื่อน",
"และบุตร",
"แอนด์ซันส์",
"กรุ๊ป",
"การช่าง",
"ก่อสร้าง",
"บริการ",
"เซอร์วิส",
"กลการ",
"ซัพพลาย",
"คอมมิวนิเคชั่น",
"พืชผล",
"เอเยนซี",
"เอ็นจิเนียริ่ง",
"คอนสตรัคชั่น",
"วิศวกรรม",
"วิศวการ",
"คอมพิวเตอร์",
"พานิช",
"ขนส่ง",
"เฟอนิชชิ่ง",
"เฟอร์นิเจอร์",
"อุตสาหกรรม",
"เอนเตอรไพรส์",
"จิวเวลรี่",
"อะไหล่ยนต์",
"ภาพยนตร์",
"ยานยนต์",
"เทรดดิ้ง",
"การค้า",
"แลบ",
"เคมิคอล",
"อิมปอร์ตเอ็กซปอร์ต",
"อินเตอร์เนชั่นแนล",
"บรรจุภัณฑ์",
"แพคกิ้ง",
"มอเตอร์",
"โอสถ",
"การบัญชี",
"สโตร์",
)
company_limited_prefixes = OrderedDict(
(
("บริษัท ", 0.95),
("ธนาคาร", 0.03),
("บริษัทหลักทรัพย์ ", 0.005),
("กองทุนรวม", 0.005),
)
)
company_limited_suffixes = OrderedDict(
(
("จำกัด", 0.85),
("จำกัด (มหาชน)", 0.15),
)
)
def company_prefix(self) -> str:
"""
:example: 'ห้างหุ้นส่วนจำกัด'
"""
return self.random_element(self.company_prefixes)
def company_limited_prefix(self) -> str:
"""
:example: 'บริษัท'
"""
return self.random_element(self.company_limited_prefixes)
def company_limited_suffix(self) -> str:
"""
:example: 'จำกัด'
"""
return self.random_element(self.company_limited_suffixes)
def nonprofit_prefix(self) -> str:
"""
:example: 'มูลนิธิ'
"""
return self.random_element(self.nonprofit_prefixes)
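# A hedged usage sketch, not part of the provider itself: the weighted ``formats``
# above are consumed by Faker's standard company() method, so a Thai-locale faker
# assembles names from the prefixes, last names and suffixes defined here. The
# locale string is standard Faker usage; the printed output is random.
#
#     from faker import Faker
#
#     fake = Faker("th_TH")
#     fake.company()           # e.g. company_limited_prefix + last_name + company_limited_suffix
#     fake.company_prefix()    # draws from the weighted company_prefixes above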
|
joke2k/faker
|
faker/providers/company/th_TH/__init__.py
|
Python
|
mit
| 4,173
| 0.000946
|
"""Support for raspihats board binary sensors."""
from __future__ import annotations
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE_CLASS,
CONF_NAME,
DEVICE_DEFAULT_NAME,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import (
CONF_BOARD,
CONF_CHANNELS,
CONF_I2C_HATS,
CONF_INDEX,
CONF_INVERT_LOGIC,
DOMAIN,
I2C_HAT_NAMES,
I2C_HATS_MANAGER,
I2CHatsException,
I2CHatsManager,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_INVERT_LOGIC = False
DEFAULT_DEVICE_CLASS = None
_CHANNELS_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_INDEX): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
vol.Optional(CONF_DEVICE_CLASS, default=DEFAULT_DEVICE_CLASS): cv.string,
}
]
)
_I2C_HATS_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_BOARD): vol.In(I2C_HAT_NAMES),
vol.Required(CONF_ADDRESS): vol.Coerce(int),
vol.Required(CONF_CHANNELS): _CHANNELS_SCHEMA,
}
]
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_I2C_HATS): _I2C_HATS_SCHEMA}
)
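# Hedged illustration (not part of the integration) of a platform config this
# schema accepts, assuming the imported CONF_* constants map to the lowercase
# keys shown; the board name and address are placeholders and must match
# I2C_HAT_NAMES and the actual wiring:
#
#     PLATFORM_SCHEMA({
#         "platform": "raspihats",
#         "i2c_hats": [
#             {
#                 "board": "DI16ac",           # placeholder, must be in I2C_HAT_NAMES
#                 "address": 0x20,
#                 "channels": [
#                     {"index": 0, "name": "door_sensor", "invert_logic": True},
#                 ],
#             }
#         ],
#     })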
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the raspihats binary_sensor devices."""
I2CHatBinarySensor.I2C_HATS_MANAGER = hass.data[DOMAIN][I2C_HATS_MANAGER]
binary_sensors = []
i2c_hat_configs = config.get(CONF_I2C_HATS, [])
for i2c_hat_config in i2c_hat_configs:
address = i2c_hat_config[CONF_ADDRESS]
board = i2c_hat_config[CONF_BOARD]
try:
assert I2CHatBinarySensor.I2C_HATS_MANAGER
I2CHatBinarySensor.I2C_HATS_MANAGER.register_board(board, address)
for channel_config in i2c_hat_config[CONF_CHANNELS]:
binary_sensors.append(
I2CHatBinarySensor(
address,
channel_config[CONF_INDEX],
channel_config[CONF_NAME],
channel_config[CONF_INVERT_LOGIC],
channel_config[CONF_DEVICE_CLASS],
)
)
except I2CHatsException as ex:
_LOGGER.error(
"Failed to register %s I2CHat@%s %s", board, hex(address), str(ex)
)
add_entities(binary_sensors)
class I2CHatBinarySensor(BinarySensorEntity):
"""Representation of a binary sensor that uses a I2C-HAT digital input."""
I2C_HATS_MANAGER: I2CHatsManager | None = None
def __init__(self, address, channel, name, invert_logic, device_class):
"""Initialize the raspihats sensor."""
self._address = address
self._channel = channel
self._name = name or DEVICE_DEFAULT_NAME
self._invert_logic = invert_logic
self._device_class = device_class
self._state = self.I2C_HATS_MANAGER.read_di(self._address, self._channel)
def online_callback():
"""Call fired when board is online."""
self.schedule_update_ha_state()
self.I2C_HATS_MANAGER.register_online_callback(
self._address, self._channel, online_callback
)
def edge_callback(state):
"""Read digital input state."""
self._state = state
self.schedule_update_ha_state()
self.I2C_HATS_MANAGER.register_di_callback(
self._address, self._channel, edge_callback
)
@property
def device_class(self):
"""Return the class of this sensor."""
return self._device_class
@property
def name(self):
"""Return the name of this sensor."""
return self._name
@property
def should_poll(self):
"""No polling needed for this sensor."""
return False
@property
def is_on(self):
"""Return the state of this sensor."""
return self._state != self._invert_logic
|
rohitranjan1991/home-assistant
|
homeassistant/components/raspihats/binary_sensor.py
|
Python
|
mit
| 4,436
| 0.001127
|
__author__ = 'alberto'
import time
from functools import wraps
from config import logger
def measure_time(func):
"""
Decorator that reports the execution time.
"""
@wraps(func)
def wrapper(*args, **kwargs):
logger.info("Running %s", func.__name__)
start = time.time()
result = func(*args, **kwargs)
end = time.time()
logger.info("Execution time: %s", end - start)
return result
return wrapper
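# Hedged usage sketch, not part of the original module: the decorator wraps any
# callable and logs its runtime through config.logger. The function name and the
# sleep duration are illustrative only.
#
#     @measure_time
#     def slow_step():
#         time.sleep(0.5)
#
#     slow_step()   # logs "Running slow_step" followed by "Execution time: ..."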
|
jresendiz27/EvolutionaryComputing
|
escom/pepo/utils.py
|
Python
|
apache-2.0
| 469
| 0.002132
|
import os
import sys
import time
import pickle
import threading
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
# required to reserve changed attributes
from pandaserver.taskbuffer import JobSpec
from pandaserver.taskbuffer import FileSpec
JobSpec.reserveChangedState = True
FileSpec.reserveChangedState = True
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper
_logger = PandaLogger().getLogger('TaskBufferInterface')
# method class
class TaskBufferMethod:
def __init__(self,methodName,commDict,childlock,comLock,resLock):
self.methodName = methodName
self.childlock = childlock
self.commDict = commDict
self.comLock = comLock
self.resLock = resLock
def __call__(self,*args,**kwargs):
log = LogWrapper(_logger, 'pid={} thr={} {}'.format(os.getpid(),
threading.current_thread().ident,
self.methodName))
log.debug('start')
# get lock among children
i = self.childlock.get()
# make dict to send it master
self.commDict[i].update({'methodName': self.methodName,
'args': pickle.dumps(args),
'kwargs': pickle.dumps(kwargs)})
# send notification to master
self.comLock[i].release()
# wait response
self.resLock[i].acquire()
res = pickle.loads(self.commDict[i]['res'])
statusCode = self.commDict[i]['stat']
# release lock to children
self.childlock.put(i)
log.debug('end')
# return
if statusCode == 0:
return res
else:
errtype,errvalue = res
raise RuntimeError("{0}: {1} {2}".format(self.methodName,errtype.__name__,errvalue))
# child class
class TaskBufferInterfaceChild:
# constructor
def __init__(self,commDict,childlock,comLock,resLock):
self.childlock = childlock
self.commDict = commDict
self.comLock = comLock
self.resLock = resLock
# method emulation
def __getattr__(self,attrName):
return TaskBufferMethod(attrName,self.commDict,self.childlock,
self.comLock,self.resLock)
# master class
class TaskBufferInterface:
# constructor
def __init__(self):
# make manager to create shared objects
self.manager = multiprocessing.Manager()
# main loop
def run(self, taskBuffer, commDict, comLock, resLock, to_stop):
with ThreadPoolExecutor(max_workers=taskBuffer.get_num_connections()) as pool:
[pool.submit(self.thread_run, taskBuffer, commDict[i], comLock[i], resLock[i], to_stop) for i in commDict.keys()]
# main loop
def thread_run(self, taskBuffer, commDict, comLock, resLock, to_stop):
# main loop
while True:
# stop sign
if to_stop.value:
break
# wait for command
if not comLock.acquire(timeout=0.25):
continue
try:
# get command from child
methodName = commDict['methodName']
args = pickle.loads(commDict['args'])
kwargs = pickle.loads(commDict['kwargs'])
# execute
method = getattr(taskBuffer,methodName)
res = method(*args, **kwargs)
commDict['stat'] = 0
# set response
commDict['res'] = pickle.dumps(res)
except Exception:
res = sys.exc_info()[:2]
commDict['stat'] = 1
commDict['res'] = pickle.dumps(res)
# send response
resLock.release()
# launcher
def launch(self, taskBuffer):
# shared objects
self.childlock = multiprocessing.Queue()
self.commDict = dict()
self.comLock = dict()
self.resLock = dict()
self.to_stop = multiprocessing.Value('i', 0)
for i in range(taskBuffer.get_num_connections()):
self.childlock.put(i)
self.commDict[i] = self.manager.dict()
self.comLock[i] = multiprocessing.Semaphore(0)
self.resLock[i] = multiprocessing.Semaphore(0)
# run
self.process = multiprocessing.Process(target=self.run,
args=(taskBuffer,
self.commDict, self.comLock,
self.resLock, self.to_stop))
self.process.start()
# get interface for child
def getInterface(self):
return TaskBufferInterfaceChild(self.commDict, self.childlock, self.comLock, self.resLock)
# stop the loop
def stop(self):
with self.to_stop.get_lock():
self.to_stop.value = 1
while self.process.is_alive():
time.sleep(1)
# kill
def terminate(self):
self.process.terminate()
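# Hedged usage sketch, not part of the original module: the master process owns
# the real taskBuffer and runs the dispatch loop, while worker processes call it
# through the pickling proxy returned by getInterface(). The taskBuffer object
# and the method name used on the proxy are assumptions for illustration.
#
#     tbIF = TaskBufferInterface()
#     tbIF.launch(taskBuffer)        # start the master loop in a subprocess
#     proxy = tbIF.getInterface()    # hand this to worker processes
#     proxy.peekJobs(...)            # any attribute becomes a TaskBufferMethod round trip
#     tbIF.stop()                    # ask the loop to finish and wait for the process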
|
PanDAWMS/panda-server
|
pandaserver/taskbuffer/TaskBufferInterface.py
|
Python
|
apache-2.0
| 5,146
| 0.008356
|
# coding=utf-8
# This file is part of SickRage.
#
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from datetime import datetime
from feedparser.util import FeedParserDict
from hachoir_parser import createParser
import sickbeard
from sickbeard import logger
from sickbeard.classes import Proper, TorrentSearchResult
from sickbeard.common import Quality
from sickbeard.db import DBConnection
from sickrage.helper.common import try_int
from sickrage.helper.exceptions import ex
from sickrage.providers.GenericProvider import GenericProvider
from sickrage.show.Show import Show
class TorrentProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.ratio = None
self.provider_type = GenericProvider.TORRENT
def find_propers(self, search_date=None):
results = []
db = DBConnection()
placeholder = ','.join([str(x) for x in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST])
sql_results = db.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate'
' FROM tv_episodes AS e'
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)'
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND e.status IN (' + placeholder + ') and e.is_proper = 0'
)
for result in sql_results or []:
show = Show.find(sickbeard.showList, int(result[b'showid']))
if show:
episode = show.getEpisode(result[b'season'], result[b'episode'])
for term in self.proper_strings:
search_strings = self._get_episode_search_strings(episode, add_string=term)
for item in self.search(search_strings[0]):
title, url = self._get_title_and_url(item)
results.append(Proper(title, url, datetime.today(), show))
return results
def is_active(self):
return bool(sickbeard.USE_TORRENTS) and self.is_enabled()
@property
def _custom_trackers(self):
if not (sickbeard.TRACKERS_LIST and self.public):
return ''
return '&tr=' + '&tr='.join({x.strip() for x in sickbeard.TRACKERS_LIST.split(',') if x.strip()})
def _get_result(self, episodes):
return TorrentSearchResult(episodes)
def _get_size(self, item):
if isinstance(item, dict):
size = item.get('size', -1)
elif isinstance(item, (list, tuple)) and len(item) > 2:
size = item[2]
else:
size = -1
# Make sure we didn't select seeds/leechers by accident
if not size or size < 1024 * 1024:
size = -1
return try_int(size, -1)
def _get_storage_dir(self):
return sickbeard.TORRENT_DIR
def _get_title_and_url(self, item):
if isinstance(item, (dict, FeedParserDict)):
download_url = item.get('url', '')
title = item.get('title', '')
if not download_url:
download_url = item.get('link', '')
elif isinstance(item, (list, tuple)) and len(item) > 1:
download_url = item[1]
title = item[0]
else:
download_url = ''
title = ''
if title.endswith('DIAMOND'):
logger.log('Skipping DIAMOND release for mass fake releases.')
download_url = title = 'FAKERELEASE'
if download_url:
download_url = download_url.replace('&', '&')
if title:
title = title.replace(' ', '.')
return title, download_url
def _verify_download(self, file_name=None):
try:
parser = createParser(file_name)
if parser:
# pylint: disable=protected-access
# Access to a protected member of a client class
mime_type = parser._getMimeType()
try:
parser.stream._input.close()
except Exception:
pass
if mime_type == 'application/x-bittorrent':
return True
except Exception as e:
logger.log('Failed to validate torrent file: {0}'.format(ex(e)), logger.DEBUG)
logger.log('Result is not a valid torrent file', logger.DEBUG)
return False
def seed_ratio(self):
return self.ratio
|
Maximilian-Reuter/SickRage-1
|
sickrage/providers/torrent/TorrentProvider.py
|
Python
|
gpl-3.0
| 5,136
| 0.001363
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common interfaces and implementation."""
import abc
import collections
import six
def _fuss(tuplified_metadata):
return tuplified_metadata + (
(
'grpc.metadata_added_by_runtime',
'gRPC is allowed to add metadata in transmission and does so.',
),
)
FUSSED_EMPTY_METADATA = _fuss(())
def fuss_with_metadata(metadata):
if metadata is None:
return FUSSED_EMPTY_METADATA
else:
return _fuss(tuple(metadata))
def rpc_names(service_descriptors):
rpc_names_to_descriptors = {}
for service_descriptor in service_descriptors:
for method_descriptor in service_descriptor.methods_by_name.values():
rpc_name = '/{}/{}'.format(
service_descriptor.full_name, method_descriptor.name)
rpc_names_to_descriptors[rpc_name] = method_descriptor
return rpc_names_to_descriptors
class ChannelRpcRead(
collections.namedtuple(
'ChannelRpcRead',
('response', 'trailing_metadata', 'code', 'details',))):
pass
class ChannelRpcHandler(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def initial_metadata(self):
raise NotImplementedError()
@abc.abstractmethod
def add_request(self, request):
raise NotImplementedError()
@abc.abstractmethod
def close_requests(self):
raise NotImplementedError()
@abc.abstractmethod
def take_response(self):
raise NotImplementedError()
@abc.abstractmethod
def cancel(self, code, details):
raise NotImplementedError()
@abc.abstractmethod
def termination(self):
raise NotImplementedError()
@abc.abstractmethod
def is_active(self):
raise NotImplementedError()
@abc.abstractmethod
def time_remaining(self):
raise NotImplementedError()
@abc.abstractmethod
def add_callback(self, callback):
raise NotImplementedError()
class ChannelHandler(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def invoke_rpc(
self, method_full_rpc_name, invocation_metadata, requests,
requests_closed, timeout):
raise NotImplementedError()
class ServerRpcRead(
collections.namedtuple('ServerRpcRead',
('request', 'requests_closed', 'terminated',))):
pass
REQUESTS_CLOSED = ServerRpcRead(None, True, False)
TERMINATED = ServerRpcRead(None, False, True)
class ServerRpcHandler(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def send_initial_metadata(self, initial_metadata):
raise NotImplementedError()
@abc.abstractmethod
def take_request(self):
raise NotImplementedError()
@abc.abstractmethod
def add_response(self, response):
raise NotImplementedError()
@abc.abstractmethod
def send_termination(self, trailing_metadata, code, details):
raise NotImplementedError()
@abc.abstractmethod
def add_termination_callback(self, callback):
raise NotImplementedError()
class Serverish(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def invoke_unary_unary(
self, method_descriptor, handler, invocation_metadata, request,
deadline):
raise NotImplementedError()
@abc.abstractmethod
def invoke_unary_stream(
self, method_descriptor, handler, invocation_metadata, request,
deadline):
raise NotImplementedError()
@abc.abstractmethod
def invoke_stream_unary(
self, method_descriptor, handler, invocation_metadata, deadline):
raise NotImplementedError()
@abc.abstractmethod
def invoke_stream_stream(
self, method_descriptor, handler, invocation_metadata, deadline):
raise NotImplementedError()
|
adelez/grpc
|
src/python/grpcio_testing/grpc_testing/_common.py
|
Python
|
apache-2.0
| 4,406
| 0.000227
|
import os
import time
import threading
import neovim
import pytest
threading.current_thread().name = "Test"
NVIM_LISTEN_ADDRESS = "/tmp/nvim-LanguageClient-IntegrationTest"
project_root = os.path.dirname(os.path.abspath(__file__))
def join_path(path: str) -> str:
"""Join path to this project tests root."""
return os.path.join(project_root, path)
PATH_MAIN_RS = join_path("data/sample-rs/src/main.rs")
PATH_LIBS_RS = join_path("data/sample-rs/src/libs.rs")
PATH_CODEACTION = join_path("data/sample-ts/src/codeAction.ts")
print(PATH_MAIN_RS)
def assertRetry(predicate, retry_max=100):
retry_delay = 0.1
retry_count = 0
while retry_count < retry_max:
if predicate():
return
else:
retry_count += 1
time.sleep(retry_delay)
assert predicate()
def getLanguageClientBuffers(nvim):
return [b for b in nvim.buffers if b.name.endswith("__LCNHover__")]
@pytest.fixture(scope="module")
def nvim() -> neovim.Nvim:
nvim = neovim.attach("socket", path=NVIM_LISTEN_ADDRESS)
time.sleep(1)
return nvim
@pytest.fixture(autouse=True)
def setup(nvim):
nvim.command("%bdelete!")
def test_textDocument_definition(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(10)
nvim.funcs.cursor(3, 22)
nvim.funcs.LanguageClient_textDocument_definition()
time.sleep(3)
assert nvim.current.window.cursor == [8, 3]
def test_textDocument_hover(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.funcs.cursor(3, 22)
nvim.funcs.LanguageClient_textDocument_hover()
time.sleep(1)
buf = getLanguageClientBuffers(nvim)[0]
expect = "fn greet() -> i32"
assert expect in "\n".join(buf)
def test_textDocument_rename(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
expect = [line.replace("greet", "hello") for line in nvim.current.buffer]
nvim.funcs.cursor(3, 22)
nvim.funcs.LanguageClient_textDocument_rename({"newName": "hello"})
time.sleep(1)
assert nvim.current.buffer[:] == expect
nvim.command("edit! {}".format(PATH_MAIN_RS))
def test_textDocument_rename_multiple_oneline(nvim):
nvim.command("edit! {}".format(PATH_LIBS_RS))
time.sleep(1)
expect = [line.replace("a", "x") for line in nvim.current.buffer]
nvim.funcs.cursor(4, 13)
# TODO: Test case where new variable length is different.
nvim.funcs.LanguageClient_textDocument_rename({"newName": "x"})
time.sleep(1)
assert nvim.current.buffer[:] == expect
nvim.command("bd!")
nvim.command("edit! {}".format(PATH_MAIN_RS))
def test_textDocument_rename_multiple_files(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.funcs.cursor(17, 5)
expect = [line.replace("yo", "hello") for line in nvim.current.buffer]
nvim.funcs.LanguageClient_textDocument_rename({"newName": "hello"})
time.sleep(1)
assert nvim.current.buffer[:] == expect
nvim.command("bd!")
nvim.command("bd!")
nvim.command("edit! {}".format(PATH_MAIN_RS))
def test_textDocument_documentSymbol(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.funcs.cursor(1, 1)
nvim.funcs.LanguageClient_textDocument_documentSymbol()
time.sleep(1)
assert nvim.funcs.getloclist(0)
nvim.command("3lnext")
assert nvim.current.window.cursor != [1, 1]
def test_workspace_symbol(nvim):
nvim.command("edit! {}".format(PATH_LIBS_RS))
time.sleep(1)
nvim.funcs.cursor(1, 1)
nvim.funcs.LanguageClient_workspace_symbol()
time.sleep(1)
assert nvim.funcs.getloclist(0)
nvim.command("1lnext")
assert nvim.current.window.cursor == [8, 0]
def test_textDocument_references(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.funcs.cursor(8, 6)
nvim.funcs.LanguageClient_textDocument_references()
time.sleep(1)
expect = ["fn greet() -> i32 {", """println!("{}", greet());"""]
assert [location["text"]
for location in nvim.funcs.getloclist(0)] == expect
nvim.command("lnext")
assert nvim.current.window.cursor == [3, 19]
def test_textDocument_references_modified_buffer(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.funcs.cursor(8, 6)
nvim.input("iabc")
time.sleep(1)
nvim.funcs.LanguageClient_textDocument_references()
time.sleep(1)
assert nvim.current.window.cursor == [8, 3]
nvim.command("edit! {}".format(PATH_MAIN_RS))
def test_languageClient_registerServerCommands(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.command('let g:responses = []')
nvim.command("call LanguageClient_registerServerCommands("
"{'bash': ['bash']}, g:responses)")
time.sleep(1)
assert nvim.vars['responses'][0]['result'] is None
def test_languageClient_registerHandlers(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.command('let g:responses = []')
nvim.command("call LanguageClient_registerHandlers("
"{'window/progress': 'HandleWindowProgress'}, g:responses)")
time.sleep(1)
assert nvim.vars['responses'][0]['result'] is None
# def test_languageClient_textDocument_codeAction(nvim):
# nvim.command("edit {}".format(PATH_CODEACTION))
# nvim.funcs.cursor(4, 14)
# assertRetry(lambda: len(nvim.funcs.getqflist()) == 1)
# nvim.funcs.LanguageClient_textDocument_codeAction()
# # Wait for fzf window showup.
# assertRetry(lambda:
# next((b for b in nvim.buffers
# if b.name.startswith('term://')), None) is not None)
# time.sleep(0.2)
# nvim.eval('feedkeys("\<CR>")')
# # Wait for fzf window dismiss.
# assertRetry(lambda:
# next((b for b in nvim.buffers
# if b.name.startswith('term://')), None) is None)
# assertRetry(lambda: len(nvim.funcs.getqflist()) == 0)
def _open_float_window(nvim):
nvim.funcs.cursor(3, 22)
pos = nvim.funcs.getpos('.')
nvim.funcs.LanguageClient_textDocument_hover()
time.sleep(1)
return pos
def test_textDocument_hover_float_window_closed_on_cursor_moved(nvim):
if not nvim.funcs.exists("*nvim_open_win"):
pytest.skip("Neovim 0.3.0 or earlier does not support floating window")
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
buf = nvim.current.buffer
pos = _open_float_window(nvim)
float_buf = getLanguageClientBuffers(nvim)[0]
# Check if float window is open
float_winnr = nvim.funcs.bufwinnr(float_buf.number)
assert float_winnr > 0
# Check if cursor is not moved
assert buf.number == nvim.current.buffer.number
assert pos == nvim.funcs.getpos(".")
# Move cursor to left
nvim.funcs.cursor(13, 17)
# Check float window buffer was closed by CursorMoved
assert len(getLanguageClientBuffers(nvim)) == 0
def test_textDocument_hover_float_window_closed_on_entering_window(nvim):
if not nvim.funcs.exists("*nvim_open_win"):
pytest.skip("Neovim 0.3.0 or earlier does not support floating window")
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
win_id = nvim.funcs.win_getid()
nvim.command("split")
try:
assert win_id != nvim.funcs.win_getid()
_open_float_window(nvim)
assert win_id != nvim.funcs.win_getid()
# Move to another window
nvim.funcs.win_gotoid(win_id)
assert win_id == nvim.funcs.win_getid()
# Check float window buffer was closed by BufEnter
assert len(getLanguageClientBuffers(nvim)) == 0
finally:
nvim.command("close!")
def test_textDocument_hover_float_window_closed_on_switching_to_buffer(nvim):
if not nvim.funcs.exists("*nvim_open_win"):
pytest.skip("Neovim 0.3.0 or earlier does not support floating window")
# Create a new buffer
nvim.command("enew!")
another_bufnr = nvim.current.buffer.number
try:
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
source_bufnr = nvim.current.buffer.number
_open_float_window(nvim)
float_buf = getLanguageClientBuffers(nvim)[0]
float_winnr = nvim.funcs.bufwinnr(float_buf.number)
assert float_winnr > 0
assert nvim.current.buffer.number == source_bufnr
# Move to another buffer within the same window
nvim.command("buffer {}".format(another_bufnr))
assert nvim.current.buffer.number == another_bufnr
# Check float window buffer was closed by BufEnter
assert len(getLanguageClientBuffers(nvim)) == 0
finally:
nvim.command("bdelete! {}".format(another_bufnr))
def test_textDocument_hover_float_window_move_cursor_into_window(nvim):
if not nvim.funcs.exists("*nvim_open_win"):
pytest.skip("Neovim 0.3.0 or earlier does not support floating window")
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
prev_bufnr = nvim.current.buffer.number
_open_float_window(nvim)
# Moves cursor into floating window
nvim.funcs.LanguageClient_textDocument_hover()
assert nvim.current.buffer.name.endswith("__LCNHover__")
# Close the window
nvim.command('close')
assert nvim.current.buffer.number == prev_bufnr
# Check float window buffer was closed by :close in the window
assert len(getLanguageClientBuffers(nvim)) == 0
|
autozimu/LanguageClient-neovim
|
tests/LanguageClient_test.py
|
Python
|
mit
| 9,546
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('applicants', '0010_auto_20151126_0525'),
]
operations = [
migrations.AddField(
model_name='applicant',
name='number_of_missed_calls',
field=models.IntegerField(default=0),
),
]
|
shailr/vms
|
applicants/migrations/0011_applicant_number_of_missed_calls.py
|
Python
|
gpl-2.0
| 421
| 0
|
"""
Author: Ali Hajimirza (ali@alihm.net)
Copyright Ali Hajimirza, free for use under MIT license.
"""
import csv
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from algorithm import EM
import argparse
def line_plot(data_arrays, xlabel, ylabel, labels, title, f):
"""
    Plots a line chart of the given data series.
Parameters
----------
data_arrays: 2d numpy array
Data to be plotted. This array consists of matrices of real values to be plotted.
Each row of this matrix will be plotted as a line on the graph.
    xlabel: string
        The label for the x axis.
ylabel: string
The label on the y axis.
labels: list of string
The labels for each category.
title: string
The title of the graph. Will be used as the name of the graph file.
    f: string or file-like object
        Filename or open file that the PNG image is written to.
Returns
-------
None:
Saves the plot to the disk.
"""
plt.suptitle(title, fontsize=14)
plots = []
for data in data_arrays:
plot, = plt.plot(data)
plots.append(plot)
plt.legend(plots, labels, loc=2)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.savefig(f, format="png")
plt.clf()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Divides data into the categories by ')
parser.add_argument('data', type=argparse.FileType("rb"), help='CSV file of data input')
args = parser.parse_args()
# reading the file
with args.data as csvfile:
reader = csv.reader(csvfile)
input_list = np.array(map(lambda line: np.array(map(lambda i: float(i), line)), reader))
x_list = input_list[:,0]
e_matrix = input_list[:,1:]
mean_matrix = EM.simulate_E_M(x_list, e_matrix, 100)
line_plot(mean_matrix, 'step', 'mean', ['Distribution 1','Distribution 2','Distribution 3'], 'E-M Learning' ,'sample_result.png' )
|
A92hm/expectation-maximization
|
demo.py
|
Python
|
mit
| 2,034
| 0.006391
|
#!/usr/bin/env python
# Copyright 2019 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# [START storage_remove_bucket_default_owner]
from google.cloud import storage
def remove_bucket_default_owner(bucket_name, user_email):
"""Removes a user from the access control list of the given bucket's
default object access control list."""
# bucket_name = "your-bucket-name"
# user_email = "name@example.com"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
# Reload fetches the current ACL from Cloud Storage.
bucket.acl.reload()
# You can also use `group`, `domain`, `all_authenticated` and `all` to
# remove access for different types of entities.
bucket.default_object_acl.user(user_email).revoke_read()
bucket.default_object_acl.user(user_email).revoke_write()
bucket.default_object_acl.user(user_email).revoke_owner()
bucket.default_object_acl.save()
print(
"Removed user {} from the default acl of bucket {}.".format(
user_email, bucket_name
)
)
# [END storage_remove_bucket_default_owner]
if __name__ == "__main__":
remove_bucket_default_owner(
bucket_name=sys.argv[1], user_email=sys.argv[2]
)
|
googleapis/python-storage
|
samples/snippets/storage_remove_bucket_default_owner.py
|
Python
|
apache-2.0
| 1,765
| 0
|
#!/usr/bin/env python
import os.path
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
description = 'Google Spreadsheets Python API'
long_description = """
{index}
License
-------
MIT
Download
========
"""
long_description = long_description.lstrip("\n").format(index=read('docs/index.txt'))
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
read('gspread/__init__.py'), re.MULTILINE).group(1)
setup(
name='gspread',
packages=['gspread'],
description=description,
long_description=long_description,
version=version,
author='Anton Burnashev',
author_email='fuss.here@gmail.com',
url='https://github.com/burnash/gspread',
keywords=['spreadsheets', 'google-spreadsheets'],
install_requires=['requests>=2.2.1'],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Science/Research",
"Topic :: Office/Business :: Financial :: Spreadsheet",
"Topic :: Software Development :: Libraries :: Python Modules"
],
license='MIT'
)
|
ShivaShinde/gspread
|
setup.py
|
Python
|
mit
| 1,845
| 0.015718
|
# Copyright 2014 Plexxi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class dataObject(object):
def __init__(self, data=None, version=0):
if data is None:
self.data = {}
else:
self.data = data
if version:
self.version = version
else:
self.version = int(bool(data))
def __str__(self):
return str(self.data)
class subData(object):
"""A piece of data that a data service is subscribed to.
Each data service in the cage can have its own instance of
this data; keep track of who published which instance.
"""
def __init__(self, key, dataindex, corrId, callback):
self.key = key
self.dataindex = dataindex
self.corrId = corrId
self.callback = callback
self.dataObjects = {}
# LOG.info(
# "*****New subdata: %s, %s, %s",
# key, dataindex, id(self.dataObjects))
def getSources(self):
return self.dataObjects.keys()
def update(self, sender, newdata):
self.dataObjects[sender] = newdata
def version(self, sender):
version = 0
if sender in self.dataObjects:
version = self.dataObjects[sender].version
return version
def getData(self, sender):
result = dataObject()
if sender in self.dataObjects:
LOG.info("subdata object: %s", self.dataObjects[sender])
result = self.dataObjects[sender]
return result
def getAllData(self):
result = {}
for sender in self.dataObjects:
result[sender] = self.dataObjects[sender]
return result
class pubData(object):
"""A piece of data that a data service is publishing.
Keep track of those data services that are subscribed.
"""
def __init__(self, dataindex, args={}):
self.dataindex = dataindex
self.dataObject = dataObject()
self.subscribers = {}
self.requesters = {}
self.args = args
def update(self, newdata):
version = self.dataObject.version + 1
self.dataObject = dataObject(newdata, version)
def get(self):
return self.dataObject
def version(self):
return self.dataObject.version
def addsubscriber(self, sender, type, corrId):
if sender not in self.subscribers:
self.subscribers[sender] = {}
self.subscribers[sender]['type'] = type
self.subscribers[sender]['correlationId'] = corrId
def removesubscriber(self, sender):
if sender in self.subscribers:
del self.subscribers[sender]
def getsubscribers(self, sender=""):
if sender:
if sender in self.subscribers:
return self.subscribers[sender]
else:
return []
else:
return self.subscribers
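# Hedged usage sketch, not part of the original module: pubData tracks what one
# service publishes and who subscribes to it, while subData tracks the copies a
# subscriber has received, keyed by sender. All names below are illustrative.
#
#     pub = pubData("routers")
#     pub.addsubscriber("policy-engine", "push", "corr-42")
#     pub.update([{"id": "r1"}])              # bumps the dataObject version to 1
#
#     sub = subData("neutron", "routers", "corr-42", callback=None)
#     sub.update("neutron-driver", pub.get()) # store the sender's dataObject
#     sub.version("neutron-driver")           # -> 1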
|
ekcs/congress
|
congress/dse/dataobj.py
|
Python
|
apache-2.0
| 3,470
| 0
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class YouJizzIE(InfoExtractor):
_VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/[^/#?]+-(?P<id>[0-9]+)\.html(?:$|[?#])'
_TEST = {
'url': 'http://www.youjizz.com/videos/zeichentrick-1-2189178.html',
'md5': '07e15fa469ba384c7693fd246905547c',
'info_dict': {
'id': '2189178',
'ext': 'flv',
'title': 'Zeichentrick 1',
'age_limit': 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
age_limit = self._rta_search(webpage)
video_title = self._html_search_regex(
r'<title>\s*(.*)\s*</title>', webpage, 'title')
embed_page_url = self._search_regex(
r'(https?://www.youjizz.com/videos/embed/[0-9]+)',
webpage, 'embed page')
webpage = self._download_webpage(
embed_page_url, video_id, note='downloading embed page')
# Get the video URL
m_playlist = re.search(r'so.addVariable\("playlist", ?"(?P<playlist>.+?)"\);', webpage)
if m_playlist is not None:
playlist_url = m_playlist.group('playlist')
playlist_page = self._download_webpage(playlist_url, video_id,
'Downloading playlist page')
m_levels = list(re.finditer(r'<level bitrate="(\d+?)" file="(.*?)"', playlist_page))
if len(m_levels) == 0:
raise ExtractorError('Unable to extract video url')
videos = [(int(m.group(1)), m.group(2)) for m in m_levels]
(_, video_url) = sorted(videos)[0]
video_url = video_url.replace('%252F', '%2F')
else:
video_url = self._search_regex(r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);',
webpage, 'video URL')
return {
'id': video_id,
'url': video_url,
'title': video_title,
'ext': 'flv',
'format': 'flv',
'player_url': embed_page_url,
'age_limit': age_limit,
}
|
nandhp/youtube-dl
|
youtube_dl/extractor/youjizz.py
|
Python
|
unlicense
| 2,297
| 0.001741
|
# -*- coding: utf8 -*-
#
# Copyright (C) 2014 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import test_purchase_group_by_period
|
odoousers2014/odoo-addons-supplier_price
|
purchase_group_by_period/tests/__init__.py
|
Python
|
agpl-3.0
| 821
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ibtokin.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
ibtokin/ibtokin
|
manage.py
|
Python
|
mit
| 805
| 0
|
# coding: latin1
## Copyright 2003-2007 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from lino.misc.tsttools import TestCase, main
from lino.apps.contacts.contacts_demo import startup
from lino.apps.contacts.contacts_tables import *
from lino.adamo.filters import NotEmpty
#from lino.apps.addrbook import demo
#from lino.apps.addrbook.tables import Partner
class Case(TestCase):
def test01(self):
db = startup()
s1 = ''
q = db.query(Contact,\
"name street city.name",
orderBy="name")
q.addColFilter('city',NotEmpty)
## for row in q:
## #print row[0]
## s1 += str(row[0]) + " "
## s1 += str(row[1]) + " "
## s1 += str(row[2]) + "\n"
## #print s1
## self.assertEqual(s1,"""\
## Arens None Eupen
## Ausdemwald None Aachen
## Bodard None Verviers
## Eesti Telefon Sõpruse pst. Tallinn
## Eierschal None Eupen
## Eierschal None Eupen
## Freitag None Eupen
## Girf OÜ Laki Tallinn
## Großmann None Eupen
## PAC Systems PGmbH Hütte Eupen
## Rumma & Ko OÜ Tartu mnt. Tallinn
## Saffre None Tallinn
## """)
s2 = ''
for row in q:
s2 += unicode(row.name) + " "
if row.street is not None:
s2 += unicode(row.street) + " "
s2 += unicode(row.city.name) + "\n"
#print s2
self.assertEquivalent(s2,u"""\
Andreas Arens Eupen
Anton Ausdemwald Aachen
Emil Eierschal Eupen
Erna Eierschal Eupen
Frédéric Freitag Eupen
Gerd Großmann Eupen
Hans Flott Bierstraße München
Henri Bodard Verviers
Kati Kask Tallinn
Kurtz & Büntig Bergstraße Eupen
Mets & puu OÜ Tartu mnt. Tallinn
Reisebüro Freitag Hütte Eupen
Tõnu Tamm Tallinn
""")
# some other cases (for example 80.py) would fail if run
# together with this case in one suite and if the following
        # lines were not present:
db.shutdown()
if __name__ == '__main__':
main()
|
MaxTyutyunnikov/lino
|
obsolete/tests/8.py
|
Python
|
gpl-3.0
| 2,702
| 0.018135
|
""" The provider bits of TileStache.
A Provider is the part of TileStache that actually renders imagery. A few default
providers are found here, but it's possible to define your own and pull them into
TileStache dynamically by class name.
Built-in providers:
- mapnik (Mapnik.ImageProvider)
- proxy (Proxy)
- vector (TileStache.Vector.Provider)
- url template (UrlTemplate)
- mbtiles (TileStache.MBTiles.Provider)
- mapnik grid (Mapnik.GridProvider)
Example built-in provider, for JSON configuration file:
"layer-name": {
"provider": {"name": "mapnik", "mapfile": "style.xml"},
...
}
Example external provider, for JSON configuration file:
"layer-name": {
"provider": {"class": "Module:Classname", "kwargs": {"frob": "yes"}},
...
}
- The "class" value is split up into module and classname, and dynamically
included. If this doesn't work for some reason, TileStache will fail loudly
to let you know.
- The "kwargs" value is fed to the class constructor as a dictionary of keyword
args. If your defined class doesn't accept any of these keyword arguments,
TileStache will throw an exception.
A provider must offer one of two methods for rendering map areas.
The renderTile() method draws a single tile at a time, and has these arguments:
- width, height: in pixels
- srs: projection as Proj4 string.
"+proj=longlat +ellps=WGS84 +datum=WGS84" is an example,
see http://spatialreference.org for more.
- coord: Coordinate object representing a single tile.
The renderArea() method draws a variably-sized area, and is used when drawing
metatiles. It has these arguments:
- width, height: in pixels
- srs: projection as Proj4 string.
"+proj=longlat +ellps=WGS84 +datum=WGS84" is an example,
see http://spatialreference.org for more.
- xmin, ymin, xmax, ymax: coordinates of bounding box in projected coordinates.
- zoom: zoom level of final map. Technically this can be derived from the other
arguments, but that's a hassle so we'll pass it in explicitly.
A provider may offer a method for custom response type, getTypeByExtension().
This method accepts a single argument, a filename extension string (e.g. "png",
"json", etc.) and returns a tuple with two strings: a mime-type and a format.
Note that for image and non-image tiles alike, renderArea() and renderTile()
methods on a provider class must return an object with a save() method that
can accept a file-like object and a format name, e.g. this should work:
provider.renderArea(...).save(fp, "TEXT")
... if "TEXT" is a valid response format according to getTypeByExtension().
Non-image providers and metatiles do not mix.
For an example of a non-image provider, see TileStache.Vector.Provider.
"""
import os
import logging
try:
from io import BytesIO
except ImportError:
# Python 2
from StringIO import StringIO as BytesIO
from string import Template
try:
import urllib.request as urllib2
except ImportError:
# Python 2
import urllib2
import urllib
try:
from PIL import Image
except ImportError:
# On some systems, PIL.Image is known as Image.
import Image
import ModestMaps
from ModestMaps.Core import Point, Coordinate
from . import Geography
# This import should happen inside getProviderByName(), but when testing
# on Mac OS X features are missing from output. Weird-ass C libraries...
try:
from . import Vector
except ImportError:
pass
# Already deprecated; provided for temporary backward-compatibility with
# old location of Mapnik provider. TODO: remove in next major version.
try:
from .Mapnik import ImageProvider as Mapnik
except ImportError:
pass
def getProviderByName(name):
""" Retrieve a provider object by name.
Raise an exception if the name doesn't work out.
"""
if name.lower() == 'mapnik':
from . import Mapnik
return Mapnik.ImageProvider
elif name.lower() == 'proxy':
return Proxy
elif name.lower() == 'url template':
return UrlTemplate
elif name.lower() == 'vector':
from . import Vector
return Vector.Provider
elif name.lower() == 'mbtiles':
from . import MBTiles
return MBTiles.Provider
elif name.lower() == 'mapnik grid':
from . import Mapnik
return Mapnik.GridProvider
elif name.lower() == 'sandwich':
from . import Sandwich
return Sandwich.Provider
raise Exception('Unknown provider name: "%s"' % name)
class Verbatim:
''' Wrapper for PIL.Image that saves raw input bytes if modes and formats match.
'''
def __init__(self, bytes):
self.buffer = BytesIO(bytes)
self.format = None
self._image = None
#
# Guess image format based on magic number, if possible.
# http://www.astro.keele.ac.uk/oldusers/rno/Computing/File_magic.html
#
magic = {
'\x89\x50\x4e\x47': 'PNG',
'\xff\xd8\xff\xe0': 'JPEG',
'\x47\x49\x46\x38': 'GIF',
'\x4d\x4d\x00\x2a': 'TIFF',
'\x49\x49\x2a\x00': 'TIFF'
}
if bytes[:4] in magic:
self.format = magic[bytes[:4]]
else:
self.format = self.image().format
def image(self):
''' Return a guaranteed instance of PIL.Image.
'''
if self._image is None:
self._image = Image.open(self.buffer)
return self._image
def convert(self, mode):
if mode == self.image().mode:
return self
else:
return self.image().convert(mode)
def crop(self, bbox):
return self.image().crop(bbox)
def save(self, output, format):
if format == self.format:
output.write(self.buffer.getvalue())
else:
self.image().save(output, format)
class Proxy:
""" Proxy provider, to pass through and cache tiles from other places.
This provider is identified by the name "proxy" in the TileStache config.
Additional arguments:
- url (optional)
URL template for remote tiles, for example:
"http://tile.openstreetmap.org/{Z}/{X}/{Y}.png"
- provider (optional)
Provider name string from Modest Maps built-ins.
See ModestMaps.builtinProviders.keys() for a list.
Example: "OPENSTREETMAP".
- timeout (optional)
Defines a timeout in seconds for the request.
If not defined, the global default timeout setting will be used.
Either url or provider is required. When both are present, url wins.
Example configuration:
{
"name": "proxy",
"url": "http://tile.openstreetmap.org/{Z}/{X}/{Y}.png"
}
"""
def __init__(self, layer, url=None, provider_name=None, timeout=None):
""" Initialize Proxy provider with layer and url.
"""
if url:
self.provider = ModestMaps.Providers.TemplatedMercatorProvider(url)
elif provider_name:
if provider_name in ModestMaps.builtinProviders:
self.provider = ModestMaps.builtinProviders[provider_name]()
else:
                raise Exception('Unknown Modest Maps provider: "%s"' % provider_name)
else:
raise Exception('Missing required url or provider parameter to Proxy provider')
self.timeout = timeout
@staticmethod
def prepareKeywordArgs(config_dict):
""" Convert configured parameters to keyword args for __init__().
"""
kwargs = dict()
if 'url' in config_dict:
kwargs['url'] = config_dict['url']
if 'provider' in config_dict:
kwargs['provider_name'] = config_dict['provider']
if 'timeout' in config_dict:
kwargs['timeout'] = config_dict['timeout']
return kwargs
def renderTile(self, width, height, srs, coord):
"""
"""
img = None
urls = self.provider.getTileUrls(coord)
        # Tell urllib2 to pick up proxies set in the <protocol>_proxy environment variables
# see: https://docs.python.org/2/library/urllib2.html#urllib2.ProxyHandler
proxy_support = urllib2.ProxyHandler()
url_opener = urllib2.build_opener(proxy_support)
for url in urls:
body = url_opener.open(url, timeout=self.timeout).read()
tile = Verbatim(body)
if len(urls) == 1:
#
# if there is only one URL, don't bother
# with PIL's non-Porter-Duff alpha channeling.
#
return tile
elif img is None:
#
# for many URLs, paste them to a new image.
#
img = Image.new('RGBA', (width, height))
img.paste(tile, (0, 0), tile)
return img
class UrlTemplate:
""" Built-in URL Template provider. Proxies map images from WMS servers.
This provider is identified by the name "url template" in the TileStache config.
Additional arguments:
- template (required)
String with substitutions suitable for use in string.Template.
- referer (optional)
String to use in the "Referer" header when making HTTP requests.
- source projection (optional)
Projection to transform coordinates into before making request
- timeout (optional)
Defines a timeout in seconds for the request.
If not defined, the global default timeout setting will be used.
More on string substitutions:
- http://docs.python.org/library/string.html#template-strings
"""
def __init__(self, layer, template, referer=None, source_projection=None,
timeout=None):
""" Initialize a UrlTemplate provider with layer and template string.
http://docs.python.org/library/string.html#template-strings
"""
self.layer = layer
self.template = Template(template)
self.referer = referer
self.source_projection = source_projection
self.timeout = timeout
@staticmethod
def prepareKeywordArgs(config_dict):
""" Convert configured parameters to keyword args for __init__().
"""
kwargs = {'template': config_dict['template']}
if 'referer' in config_dict:
kwargs['referer'] = config_dict['referer']
if 'source projection' in config_dict:
kwargs['source_projection'] = Geography.getProjectionByName(config_dict['source projection'])
if 'timeout' in config_dict:
kwargs['timeout'] = config_dict['timeout']
return kwargs
def renderArea(self, width, height, srs, xmin, ymin, xmax, ymax, zoom):
""" Return an image for an area.
Each argument (width, height, etc.) is substituted into the template.
"""
if self.source_projection is not None:
ne_location = self.layer.projection.projLocation(Point(xmax, ymax))
ne_point = self.source_projection.locationProj(ne_location)
ymax = ne_point.y
xmax = ne_point.x
sw_location = self.layer.projection.projLocation(Point(xmin, ymin))
sw_point = self.source_projection.locationProj(sw_location)
ymin = sw_point.y
xmin = sw_point.x
srs = self.source_projection.srs
mapping = {'width': width, 'height': height, 'srs': srs, 'zoom': zoom,
'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax}
href = self.template.safe_substitute(mapping)
req = urllib2.Request(href)
if self.referer:
req.add_header('Referer', self.referer)
body = urllib2.urlopen(req, timeout=self.timeout).read()
tile = Verbatim(body)
return tile
|
kartta-labs/mapwarper
|
lib/tilestache/TileStache-1.51.5/TileStache/Providers.py
|
Python
|
mit
| 12,088
| 0.002978
|
# -*- coding: utf-8 -*-
""" S3 Synchronization
@copyright: 2011-15 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import datetime
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import *
from gluon.storage import Storage
from s3datetime import s3_parse_datetime, s3_utc
from s3rest import S3Method
from s3import import S3ImportItem
from s3query import S3URLQuery
DEBUG = False
if DEBUG:
print >> sys.stderr, "S3SYNC: DEBUG MODE"
def _debug(m):
print >> sys.stderr, m
else:
_debug = lambda m: None
# =============================================================================
class S3Sync(S3Method):
""" Synchronization Handler """
def __init__(self):
""" Constructor """
S3Method.__init__(self)
self.log = S3SyncLog()
self._config = None
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
RESTful method handler (repository/sync, repository/register)
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
output = dict()
if r.method == "sync":
if r.http == "GET":
# Incoming pull
output = self.__send(r, **attr)
elif r.http in ("PUT", "POST"):
# Incoming push
output = self.__receive(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
elif r.name == "repository" and r.method == "register":
if r.http == "GET":
# Incoming registration request
output = self.__register(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
# REST Methods:
# -------------------------------------------------------------------------
def __register(self, r, **attr):
"""
Respond to an incoming registration request
@param r: the S3Request
@param attr: the controller attributes
"""
log = self.log
result = log.SUCCESS
message = "registration successful"
repository_id = None
if "repository" in r.vars:
ruid = r.vars["repository"]
db = current.db
rtable = current.s3db.sync_repository
row = db(rtable.uuid == ruid).select(limitby=(0, 1)).first()
if row:
repository_id = row.id
if not row.accept_push and current.auth.s3_has_role("ADMIN"):
row.update_record(accept_push=True)
else:
if current.auth.s3_has_role("ADMIN"):
accept_push = True
else:
accept_push = False
repository_id = rtable.insert(name=ruid,
uuid=ruid,
accept_push=accept_push)
if not repository_id:
result = log.ERROR
message = "registration failed"
else:
result = log.ERROR
message = "no repository identifier specified"
if result == log.SUCCESS:
output = current.xml.json_message(message=message,
sender="%s" % self.config.uuid)
else:
output = current.xml.json_message(False, 400,
message=message,
sender="%s" % self.config.uuid)
# Set content type header
headers = current.response.headers
headers["Content-Type"] = "application/json"
# Log the operation
log.write(repository_id=repository_id,
resource_name=log.NONE,
transmission=log.IN,
mode=log.REGISTER,
result=result,
message=message)
return output
# -------------------------------------------------------------------------
def __send(self, r, **attr):
"""
Respond to an incoming pull
@param r: the S3Request
@param attr: the controller attributes
"""
mixed = attr.get("mixed", False)
get_vars = r.get_vars
resource = r.resource
# Identify the requesting repository
repository_uuid = get_vars.get("repository")
connector = None
if repository_uuid:
rtable = current.s3db.sync_repository
query = rtable.uuid == repository_uuid
row = current.db(query).select(limitby=(0, 1)).first()
if row:
connector = S3SyncRepository(row)
if connector is None:
# Use a dummy repository with Eden API
connector = S3SyncRepository(Storage(id = None,
name = "unknown",
apitype = "eden",
))
current.log.debug("S3Sync PULL from %s (%s)" % (connector.name,
connector.apitype))
# Additional export parameters
start = get_vars.get("start", None)
if start is not None:
try:
start = int(start)
except ValueError:
start = None
limit = get_vars.get("limit", None)
if limit is not None:
try:
limit = int(limit)
except ValueError:
limit = None
msince = get_vars.get("msince", None)
if msince is not None:
msince = s3_parse_datetime(msince)
# Sync filters from peer
filters = {}
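        # e.g. a peer request with ?[org_organisation]organisation.name__like=Red*
        # becomes filters = {"org_organisation": {"organisation.name__like": "Red*"}}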
for k, v in get_vars.items():
if k[0] == "[" and "]" in k:
tablename, urlvar = k[1:].split("]", 1)
if urlvar:
if not tablename or tablename == "~":
tablename = resource.tablename
f = filters.get(tablename, {})
u = f.get(urlvar, None)
if u:
u = "%s&%s" % (u, v)
else:
u = v
f[urlvar] = u
filters[tablename] = f
if not filters:
filters = None
try:
result = connector.send(resource,
start = start,
limit = limit,
msince = msince,
filters = filters,
mixed = mixed,
)
except NotImplementedError:
r.error(405, "Synchronization method not supported for repository")
log = self.log
log.write(repository_id = connector.id,
resource_name = "mixed" if mixed else resource.tablename,
transmission = log.IN,
mode = log.PULL,
action = "send",
remote = result.get("remote", False),
result = result.get("status", log.NONE),
message = result.get("message", ""),
)
return result.get("response")
# -------------------------------------------------------------------------
def __receive(self, r, **attr):
"""
Respond to an incoming push
@param r: the S3Request
@param attr: the controller attributes
"""
mixed = attr.get("mixed", False)
get_vars = r.get_vars
s3db = current.s3db
db = current.db
# Identify the sending repository
repository_uuid = get_vars.get("repository")
connector = None
if repository_uuid:
rtable = s3db.sync_repository
query = rtable.uuid == repository_uuid
row = current.db(query).select(limitby=(0, 1)).first()
if row:
connector = S3SyncRepository(row)
# Check that the repository is registered and allowed to push
if connector is None or not connector.accept_push:
r.error(403, current.ERROR.NOT_PERMITTED)
current.log.debug("S3Sync PUSH from %s (%s)" % (connector.name,
connector.apitype))
# Get strategy and policy
default_update_policy = S3ImportItem.POLICY.NEWER
default_conflict_policy = S3ImportItem.POLICY.MASTER
# Identify the synchronization task
ttable = s3db.sync_task
if not mixed:
query = (ttable.repository_id == connector.id) & \
(ttable.resource_name == r.tablename) & \
(ttable.deleted != True)
task = db(query).select(limitby=(0, 1)).first()
else:
task = None
last_sync = None
if task:
strategy = task.strategy
update_policy = task.update_policy or default_update_policy
conflict_policy = task.conflict_policy or default_conflict_policy
if update_policy not in ("THIS", "OTHER"):
last_sync = task.last_pull
else:
policies = S3ImportItem.POLICY
p = get_vars.get("update_policy", None)
values = {"THIS": "OTHER", "OTHER": "THIS"}
switch = lambda p: p in values and values[p] or p
if p and p in policies:
p = switch(p)
update_policy = policies[p]
else:
update_policy = default_update_policy
p = get_vars.get("conflict_policy", None)
if p and p in policies:
p = switch(p)
conflict_policy = policies[p]
else:
conflict_policy = default_conflict_policy
msince = get_vars.get("msince", None)
if msince is not None:
last_sync = s3_parse_datetime(msince)
s = get_vars.get("strategy", None)
if s:
s = str(s).split(",")
methods = S3ImportItem.METHOD
strategy = [method for method in methods.values()
if method in s]
else:
strategy = ttable.strategy.default
# Get the source
source = r.read_body()
# Import resource
resource = r.resource
try:
result = connector.receive(source,
resource,
strategy = strategy,
update_policy = update_policy,
conflict_policy = conflict_policy,
last_sync = last_sync,
onconflict = self.onconflict,
mixed = mixed,
)
except IOError:
current.auth.permission.fail()
except SyntaxError:
e = sys.exc_info()[1]
r.error(400, e)
except NotImplementedError:
r.error(405, "Synchronization method not supported for repository")
log = self.log
log.write(repository_id = connector.id,
resource_name = "mixed" if mixed else resource.tablename,
transmission = log.IN,
mode = log.PUSH,
action = "receive",
remote = result.get("remote", False),
result = result.get("status", log.NONE),
message = result.get("message", ""),
)
return result.get("response")
# -------------------------------------------------------------------------
# API Methods:
# -------------------------------------------------------------------------
def synchronize(self, repository):
"""
Synchronize with a repository, called from scheduler task
@param repository: the repository Row
@return: True if successful, False if there was an error
"""
current.log.debug("S3Sync: synchronize %s" % repository.url)
log = self.log
error = None
if repository.apitype == "filesync":
if not repository.path:
error = "No path set for repository"
else:
if not repository.url:
error = "No URL set for repository"
if error:
log.write(repository_id = repository.id,
resource_name = None,
transmission = None,
mode = log.NONE,
action = "connect",
remote = False,
result = self.log.FATAL,
message = error,
)
return False
ttable = current.s3db.sync_task
query = (ttable.repository_id == repository.id) & \
(ttable.deleted != True)
tasks = current.db(query).select()
connector = S3SyncRepository(repository)
error = connector.login()
if error:
log.write(repository_id = repository.id,
resource_name = None,
transmission = log.OUT,
mode = log.LOGIN,
action = "login",
remote = True,
result = log.FATAL,
message = error,
)
return False
# Activate UUID synchronisation if required
s3 = current.response.s3
s3.synchronise_uuids = connector.synchronise_uuids
success = True
for task in tasks:
# Pull
mtime = None
if task.mode in (1, 3):
error, mtime = connector.pull(task,
onconflict=self.onconflict,
)
if error:
success = False
current.log.debug("S3Sync: %s PULL error: %s" %
(task.resource_name, error))
continue
if mtime is not None:
task.update_record(last_pull=mtime)
# Push
mtime = None
if task.mode in (2, 3):
error, mtime = connector.push(task)
if error:
success = False
current.log.debug("S3Sync: %s PUSH error: %s" %
(task.resource_name, error))
continue
if mtime is not None:
task.update_record(last_push=mtime)
current.log.debug("S3Sync.synchronize: %s done" % task.resource_name)
s3.synchronise_uuids = False
return success
# -------------------------------------------------------------------------
@classmethod
def onconflict(cls, item, repository, resource):
"""
Automatic conflict resolution
@param item: the conflicting import item
@param repository: the repository the item comes from
@param resource: the resource the item shall be imported to
"""
s3db = current.s3db
tablename = resource.tablename
resolver = s3db.get_config(tablename, "onconflict")
_debug("Resolving conflict in %s" % resource.tablename)
_debug("Repository: %s" % repository.name)
_debug("Conflicting item: %s" % item)
_debug("Method: %s" % item.method)
if resolver:
_debug("Applying custom rule")
resolver(item, repository, resource)
if item.conflict:
_debug("Do not accept")
else:
_debug("Accept per custom rule")
else:
_debug("Applying default rule")
ttable = s3db.sync_task
policies = S3ImportItem.POLICY
query = (ttable.repository_id == repository.id) & \
(ttable.resource_name == tablename) & \
(ttable.deleted != True)
task = current.db(query).select(limitby=(0, 1)).first()
if task and item.original:
original = item.original
conflict_policy = task.conflict_policy
if conflict_policy == policies.OTHER:
# Always accept
_debug("Accept by default")
item.conflict = False
elif conflict_policy == policies.NEWER:
# Accept if newer
xml = current.xml
if xml.MTIME in original and \
s3_utc(original[xml.MTIME]) <= item.mtime:
_debug("Accept because newer")
item.conflict = False
else:
_debug("Do not accept")
elif conflict_policy == policies.MASTER:
# Accept if master
if (current.xml.MCI in original and
original.mci == 0) or item.mci == 1:
_debug("Accept because master")
item.conflict = False
else:
_debug("Do not accept")
else:
# Never accept
_debug("Do not accept")
pass
else:
# No rule - accept always
_debug("Accept because no rule found")
item.conflict = False
# -------------------------------------------------------------------------
# Utility methods:
# -------------------------------------------------------------------------
@property
def config(self):
""" Lazy access to the current sync config """
if self._config is None:
table = current.s3db.sync_config
row = current.db().select(table.ALL, limitby=(0, 1)).first()
self._config = row
return self._config
# -------------------------------------------------------------------------
def get_status(self):
""" Read the current sync status """
table = current.s3db.sync_status
row = current.db().select(table.ALL, limitby=(0, 1)).first()
if not row:
row = Storage()
return row
# -------------------------------------------------------------------------
def set_status(self, **attr):
""" Update the current sync status """
table = current.s3db.sync_status
data = dict((k, attr[k]) for k in attr if k in table.fields)
data["timestmp"] = datetime.datetime.utcnow()
row = current.db().select(table._id, limitby=(0, 1)).first()
if row:
row.update_record(**data)
else:
table.insert(**data)
row = data
return row
# -------------------------------------------------------------------------
@staticmethod
def get_filters(task_id):
"""
Get all filters for a synchronization task
@param task_id: the task ID
@return: a dict of dicts like {tablename: {url_var: value}}
"""
db = current.db
s3db = current.s3db
ftable = s3db.sync_resource_filter
query = (ftable.task_id == task_id) & \
(ftable.deleted != True)
rows = db(query).select(ftable.tablename,
ftable.filter_string)
filters = {}
for row in rows:
tablename = row.tablename
if tablename in filters:
filters[tablename] = "%s&%s" % (filters[tablename],
row.filter_string)
else:
filters[tablename] = row.filter_string
parse_url = S3URLQuery.parse_url
for tablename in filters:
filters[tablename] = parse_url(filters[tablename])
return filters
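# Hedged illustration (not part of the original method): two sync_resource_filter
# rows for the same table, e.g. ("org_office", "office.name__like=Main*") and
# ("org_office", "obsolete=False"), are first joined into the single query string
# "office.name__like=Main*&obsolete=False" and then handed to
# S3URLQuery.parse_url() to produce the {url_var: value} dict described above.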
# =============================================================================
class S3SyncLog(S3Method):
""" Synchronization Logger """
TABLENAME = "sync_log"
# Outcomes
SUCCESS = "success"
WARNING = "warning"
ERROR = "error"
FATAL = "fatal"
# Transmissions
IN = "incoming"
OUT = "outgoing"
# Methods
PULL = "pull"
PUSH = "push"
LOGIN = "login"
REGISTER = "register"
# None
NONE = "none"
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
RESTful method handler
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
output = dict()
resource = r.resource
if resource.tablename == self.TABLENAME:
return resource.crud.select(r, **attr)
elif resource.tablename == "sync_repository":
# READ for sync log for this repository (currently not needed)
pass
else:
if r.interactive:
# READ for sync log for this resource
here = "%s.%s" % (r.controller, r.function)
sync_log = current.s3db[self.TABLENAME]
sync_log.resource_name.readable = False
query = (sync_log.resource_name == resource.tablename)
r = r.factory(prefix="sync", name="log", args=[])
s3 = current.response.s3
s3.filter = query
s3.prep = None
s3.postp = None
s3.actions = [
dict(label=str(current.T("Details")),
_class="action-btn",
url=URL(c="sync", f="log",
args=["[id]"],
vars={"return":here}))
]
output = r(subtitle=None,
rheader=self.rheader)
else:
r.error(415, current.ERROR.BAD_FORMAT)
return output
# -------------------------------------------------------------------------
@classmethod
def write(cls,
repository_id=None,
resource_name=None,
transmission=None,
mode=None,
action=None,
result=None,
remote=False,
message=None):
"""
Writes a new entry to the log
@param repository_id: the repository record ID
@param resource_name: the resource name
@param transmission: transmission mode (IN, OUT or None)
@param mode: synchronization mode (PULL, PUSH or None)
@param action: action that triggers the log entry (if any)
@param result: the result of the transaction
(SUCCESS, WARNING, ERROR or FATAL)
@param remote: boolean, True if this is a remote error
@param message: clear text message
"""
if result not in (cls.SUCCESS, cls.WARNING, cls.ERROR, cls.FATAL):
result = cls.SUCCESS
if result == cls.SUCCESS:
# Can't be a remote error if it's not an error at all
remote = False
if transmission not in (cls.IN, cls.OUT):
transmission = cls.NONE
if mode not in (cls.PULL, cls.PUSH, cls.LOGIN, cls.REGISTER):
mode = cls.NONE
if not action:
action = cls.NONE
entry = {"timestmp": datetime.datetime.utcnow(),
"repository_id": repository_id,
"resource_name": resource_name,
"mode": "%s/%s" % (mode, transmission),
"action": action,
"result": result,
"remote": remote,
"message": message,
}
current.s3db[cls.TABLENAME].insert(**entry)
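# Hedged usage sketch (not part of the original class), with made-up values,
# as a caller such as S3Sync might invoke it after a successful pull:
#
#   S3SyncLog.write(repository_id=4,
#                   resource_name="org_organisation",
#                   transmission=S3SyncLog.OUT,
#                   mode=S3SyncLog.PULL,
#                   action="import",
#                   result=S3SyncLog.SUCCESS,
#                   message="data received from peer")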
# -------------------------------------------------------------------------
@staticmethod
def rheader(r, **attr):
""" S3SyncLog resource header """
if r.id is None:
return DIV(current.T("Showing latest entries first"))
else:
return None
# =============================================================================
class S3SyncRepository(object):
""" Class representation a peer repository """
def __init__(self, repository):
"""
Constructor
@param repository: the repository record (Row)
"""
# Logger and Config
self.log = S3SyncLog
self._config = None
# Identifier and name
self.id = repository.id
self.name = repository.name
# API type and import/export backend
self.apitype = repository.apitype
self.backend = repository.backend
# URL / Path
self.url = repository.url
self.path = repository.path
# Authentication
self.username = repository.username
self.password = repository.password
self.client_id = repository.client_id
self.client_secret = repository.client_secret
self.site_key = repository.site_key
self.refresh_token = repository.refresh_token
# Network
self.proxy = repository.proxy
# Processing Options
self.accept_push = repository.accept_push
self.synchronise_uuids = repository.synchronise_uuids
self.keep_source = repository.keep_source
# Instantiate Adapter
import sync_adapter
api = sync_adapter.__dict__.get(self.apitype)
if api:
adapter = api.S3SyncAdapter(self)
else:
adapter = S3SyncBaseAdapter(self)
self.adapter = adapter
# -------------------------------------------------------------------------
@property
def config(self):
""" Lazy access to the current sync config """
if self._config is None:
table = current.s3db.sync_config
row = current.db().select(table.ALL, limitby=(0, 1)).first()
self._config = row
return self._config
# -------------------------------------------------------------------------
def __getattr__(self, name):
"""
Delegate other attributes and methods to the adapter
@param name: the attribute/method
"""
return object.__getattribute__(self.adapter, name)
# =============================================================================
class S3SyncBaseAdapter(object):
"""
Sync Adapter (base class) - interface providing standard
synchronization methods for the respective repository type.
This class isn't meant to be instantiated or accessed directly,
but is normally accessed through the S3SyncRepository instance.
"""
def __init__(self, repository):
"""
Constructor
@param repository: the repository (S3SyncRepository instance)
"""
self.repository = repository
self.log = repository.log
# -------------------------------------------------------------------------
# Methods to be implemented by subclasses:
# -------------------------------------------------------------------------
def register(self):
"""
Register this site at the peer repository
@return: True to indicate success, otherwise False
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def login(self):
"""
Login at the peer repository
@return: None if successful, otherwise the error
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def pull(self, task, onconflict=None):
"""
Fetch updates from the peer repository and import them
into the local database (active pull)
@param task: the synchronization task (sync_task Row)
@param onconflict: callback for automatic conflict resolution
@return: tuple (error, mtime), with error=None if successful,
else error=message, and mtime=modification timestamp
of the youngest record received
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def push(self, task):
"""
Extract new updates from the local database and send
them to the peer repository (active push)
@param task: the synchronization task (sync_task Row)
@return: tuple (error, mtime), with error=None if successful,
else error=message, and mtime=modification timestamp
of the youngest record sent
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def send(self,
resource,
start=None,
limit=None,
msince=None,
filters=None,
mixed=False):
"""
Respond to an incoming pull from the peer repository
@param resource: the resource to be synchronized
@param start: index of the first record to send
@param limit: maximum number of records to send
@param msince: minimum modification date/time for records to send
@param filters: URL filters for record extraction
@param mixed: negotiate resource with peer (disregard resource)
@return: a dict {status, remote, message, response}, with:
- status....the outcome of the operation
- remote....whether the error was remote (or local)
- message...the log message
- response..the response to send to the peer
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def receive(self,
source,
resource,
strategy=None,
update_policy=None,
conflict_policy=None,
onconflict=None,
last_sync=None,
mixed=False):
"""
Respond to an incoming push from the peer repository
@param source: the input stream (list of file-like objects)
@param resource: the target resource
@param strategy: the import strategy
@param update_policy: the update policy
@param conflict_policy: the conflict resolution policy
@param onconflict: callback for conflict resolution
@param last_sync: the last synchronization date/time for the peer
@param mixed: negotiate resource with peer (disregard resource)
@return: a dict {status, remote, message, response}, with:
- status....the outcome of the operation
- remote....whether the error was remote (or local)
- message...the log message
- response..the response to send to the peer
"""
raise NotImplementedError
# End =========================================================================
|
sahana/Turkey
|
modules/s3/s3sync.py
|
Python
|
mit
| 33,252
| 0.003248
|
"""
A test module which has a required module and a config
"""
TYPE = "Test"
NAME = "test_2"
REQUIRES = ["test_1"]
DEFAULTCONF = {'a': 1, 'b': 2}
def check(conf=DEFAULTCONF):
if None in REQUIRES:
return False
return True
def scan(filelist, conf=DEFAULTCONF):
results = []
result1, meta1 = REQUIRES[0]
result1 = dict(result1)
for fname in filelist:
if fname in result1:
results.append((fname, True))
else:
results.append((fname, fname))
metadata = {}
metadata["Name"] = NAME
metadata["Type"] = TYPE
metadata["Include"] = True
return results, metadata
|
jmlong1027/multiscanner
|
tests/modules/test_2.py
|
Python
|
mpl-2.0
| 649
| 0
|
##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Python, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import copy
import os
import re
import fileinput
import sys
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_libdir, get_software_root, get_software_version
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_shared_lib_ext
EXTS_FILTER_PYTHON_PACKAGES = ('python -c "import %(ext_name)s"', "")
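# Hedged sketch (not part of the original easyblock): the exts_filter tuple is a
# (command, stdin) template in which EasyBuild substitutes each extension's name,
# e.g. "numpy" yields the sanity-check command python -c "import numpy".
def _example_exts_filter(ext_name='numpy'):
    """Illustrative only: expand the %(ext_name)s placeholder for one extension."""
    cmd, stdin = EXTS_FILTER_PYTHON_PACKAGES
    return cmd % {'ext_name': ext_name}  # -> 'python -c "import numpy"'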
class EB_Python(ConfigureMake):
"""Support for building/installing Python
- default configure/build_step/make install works fine
To extend Python by adding extra packages there are two ways:
- list the packages in the exts_list, this will include the packages in this Python installation
- create a separate easyblock, so the packages can be loaded with module load
e.g., you can include numpy and scipy in a default Python installation
but also provide newer updated numpy and scipy versions by creating a PythonPackage-derived easyblock for it.
"""
def prepare_for_extensions(self):
"""
Set default class and filter for Python packages
"""
# build and install additional packages with PythonPackage easyblock
self.cfg['exts_defaultclass'] = "PythonPackage"
self.cfg['exts_filter'] = EXTS_FILTER_PYTHON_PACKAGES
# don't pass down any build/install options that may have been specified
# 'make' options do not make sense when building/installing Python libraries (usually via 'python setup.py')
msg = "Unsetting '%s' easyconfig parameter before building/installing extensions: %s"
for param in ['buildopts', 'installopts']:
if self.cfg[param]:
self.log.debug(msg, param, self.cfg[param])
self.cfg[param] = ''
def configure_step(self):
"""Set extra configure options."""
self.cfg.update('configopts', "--with-threads --enable-shared")
# Need to be careful to match the unicode settings to the underlying python
if sys.maxunicode == 1114111:
self.cfg.update('configopts', "--enable-unicode=ucs4")
elif sys.maxunicode == 65535:
self.cfg.update('configopts', "--enable-unicode=ucs2")
else:
raise EasyBuildError("Unknown maxunicode value for your python: %d" % sys.maxunicode)
modules_setup_dist = os.path.join(self.cfg['start_dir'], 'Modules', 'Setup.dist')
libreadline = get_software_root('libreadline')
if libreadline:
ncurses = get_software_root('ncurses')
if ncurses:
readline_libdir = get_software_libdir('libreadline')
ncurses_libdir = get_software_libdir('ncurses')
readline_static_lib = os.path.join(libreadline, readline_libdir, 'libreadline.a')
ncurses_static_lib = os.path.join(ncurses, ncurses_libdir, 'libncurses.a')
readline = "readline readline.c %s %s" % (readline_static_lib, ncurses_static_lib)
for line in fileinput.input(modules_setup_dist, inplace='1', backup='.readline'):
line = re.sub(r"^#readline readline.c.*", readline, line)
sys.stdout.write(line)
else:
raise EasyBuildError("Both libreadline and ncurses are required to ensure readline support")
openssl = get_software_root('OpenSSL')
if openssl:
for line in fileinput.input(modules_setup_dist, inplace='1', backup='.ssl'):
line = re.sub(r"^#SSL=.*", "SSL=%s" % openssl, line)
line = re.sub(r"^#(\s*-DUSE_SSL -I)", r"\1", line)
line = re.sub(r"^#(\s*-L\$\(SSL\)/lib )", r"\1 -L$(SSL)/lib64 ", line)
sys.stdout.write(line)
tcl = get_software_root('Tcl')
tk = get_software_root('Tk')
if tcl and tk:
tclver = get_software_version('Tcl')
tkver = get_software_version('Tk')
tcltk_maj_min_ver = '.'.join(tclver.split('.')[:2])
if tcltk_maj_min_ver != '.'.join(tkver.split('.')[:2]):
raise EasyBuildError("Tcl and Tk major/minor versions don't match: %s vs %s", tclver, tkver)
self.cfg.update('configopts', "--with-tcltk-includes='-I%s/include -I%s/include'" % (tcl, tk))
tcl_libdir = os.path.join(tcl, get_software_libdir('Tcl'))
tk_libdir = os.path.join(tk, get_software_libdir('Tk'))
tcltk_libs = "-L%(tcl_libdir)s -L%(tk_libdir)s -ltcl%(maj_min_ver)s -ltk%(maj_min_ver)s" % {
'tcl_libdir': tcl_libdir,
'tk_libdir': tk_libdir,
'maj_min_ver': tcltk_maj_min_ver,
}
self.cfg.update('configopts', "--with-tcltk-libs='%s'" % tcltk_libs)
super(EB_Python, self).configure_step()
def install_step(self):
"""Extend make install to make sure that the 'python' command is present."""
super(EB_Python, self).install_step()
python_binary_path = os.path.join(self.installdir, 'bin', 'python')
if not os.path.isfile(python_binary_path):
pythonver = '.'.join(self.version.split('.')[0:2])
srcbin = "%s%s" % (python_binary_path, pythonver)
try:
os.symlink(srcbin, python_binary_path)
except OSError, err:
raise EasyBuildError("Failed to symlink %s to %s: %s", srcbin, python_binary_path, err)
def sanity_check_step(self):
"""Custom sanity check for Python."""
pyver = "python%s" % '.'.join(self.version.split('.')[0:2])
try:
fake_mod_data = self.load_fake_module()
except EasyBuildError, err:
raise EasyBuildError("Loading fake module failed: %s", err)
abiflags = ''
if LooseVersion(self.version) >= LooseVersion("3"):
run_cmd("which python", log_all=True, simple=False)
cmd = 'python -c "import sysconfig; print(sysconfig.get_config_var(\'abiflags\'));"'
(abiflags, _) = run_cmd(cmd, log_all=True, simple=False)
if not abiflags:
raise EasyBuildError("Failed to determine abiflags: %s", abiflags)
else:
abiflags = abiflags.strip()
custom_paths = {
'files': ["bin/%s" % pyver, "lib/lib%s%s.%s" % (pyver, abiflags, get_shared_lib_ext())],
'dirs': ["include/%s%s" % (pyver, abiflags), "lib/%s" % pyver],
}
# cleanup
self.clean_up_fake_module(fake_mod_data)
custom_commands = [
('python', '--version'),
('python', '-c "import _ctypes"'), # make sure that foreign function interface (libffi) works
('python', '-c "import _ssl"'), # make sure SSL support is enabled one way or another
('python', '-c "import readline"'), # make sure readline support was built correctly
]
super(EB_Python, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)
|
wpoely86/easybuild-easyblocks
|
easybuild/easyblocks/p/python.py
|
Python
|
gpl-2.0
| 8,497
| 0.003884
|
import hashlib
import os
import re
import time
import uuid
import subprocess
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.staticfiles.finders import find as find_static_path
from olympia.lib.jingo_minify_helpers import ensure_path_exists
def run_command(command):
"""Run a command and correctly poll the output and write that to stdout"""
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
while True:
output = process.stdout.readline()
if output == '' and process.poll() is not None:
break
if output:
print(output.strip())
return process.poll()
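# Hedged usage sketch (not part of the original command): run_command() simply
# shells out, streams stdout line by line and returns the exit status.
def _example_run_command():
    """Illustrative only: run a trivial shell command through run_command()."""
    return run_command('echo "compressing assets..."')  # prints the line, returns 0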
class Command(BaseCommand):
help = ('Compresses css and js assets defined in settings.MINIFY_BUNDLES')
# This command must not do any system checks because Django runs db-field
# related checks since 1.10 which require a working MySQL connection.
# We don't have that during our docker builds and since `compress_assets`
# is being used while building our docker images we have to disable them.
requires_system_checks = False
checked_hash = {}
bundle_hashes = {}
missing_files = 0
minify_skipped = 0
def add_arguments(self, parser):
"""Handle command arguments."""
parser.add_argument(
'--force', action='store_true',
help='Ignores modified/created dates and forces compression.')
def generate_build_id(self):
return uuid.uuid4().hex[:8]
def update_hashes(self):
# Adds a time based hash on to the build id.
self.build_id = '%s-%s' % (
self.generate_build_id(), hex(int(time.time()))[2:])
build_id_file = os.path.realpath(
os.path.join(settings.ROOT, 'build.py'))
with open(build_id_file, 'w') as f:
f.write('BUILD_ID_CSS = "%s"\n' % self.build_id)
f.write('BUILD_ID_JS = "%s"\n' % self.build_id)
f.write('BUILD_ID_IMG = "%s"\n' % self.build_id)
f.write('BUNDLE_HASHES = %s\n' % self.bundle_hashes)
def handle(self, **options):
self.force_compress = options.get('force', False)
# This will loop through every bundle, and do the following:
# - Concat all files into one
# - Cache bust all images in CSS files
# - Minify the concatted files
for ftype, bundle in settings.MINIFY_BUNDLES.iteritems():
for name, files in bundle.iteritems():
# Set the paths to the files.
concatted_file = os.path.join(
settings.ROOT, 'static',
ftype, '%s-all.%s' % (name, ftype,))
compressed_file = os.path.join(
settings.ROOT, 'static',
ftype, '%s-min.%s' % (name, ftype,))
ensure_path_exists(concatted_file)
ensure_path_exists(compressed_file)
files_all = []
for fn in files:
processed = self._preprocess_file(fn)
# If the file can't be processed, we skip it.
if processed is not None:
files_all.append(processed)
# Concat all the files.
tmp_concatted = '%s.tmp' % concatted_file
if len(files_all) == 0:
raise CommandError(
'No input files specified in '
'MINIFY_BUNDLES["%s"]["%s"] in settings.py!' %
(ftype, name)
)
run_command('cat {files} > {tmp}'.format(
files=' '.join(files_all),
tmp=tmp_concatted
))
# Cache bust individual images in the CSS.
if ftype == 'css':
bundle_hash = self._cachebust(tmp_concatted, name)
self.bundle_hashes['%s:%s' % (ftype, name)] = bundle_hash
# Compresses the concatenations.
is_changed = self._is_changed(concatted_file)
self._clean_tmp(concatted_file)
if is_changed or not os.path.isfile(compressed_file):
self._minify(ftype, concatted_file, compressed_file)
else:
print(
'File unchanged, skipping minification of %s' % (
concatted_file))
self.minify_skipped += 1
# Write out the hashes
self.update_hashes()
if self.minify_skipped:
print(
'Unchanged files skipped for minification: %s' % (
self.minify_skipped))
def _preprocess_file(self, filename):
"""Preprocess files and return new filenames."""
css_bin = filename.endswith('.less') and settings.LESS_BIN
source = find_static_path(filename)
target = source
if css_bin:
target = '%s.css' % source
run_command('{lessc} {source} {target}'.format(
lessc=css_bin,
source=str(source),
target=str(target)))
return target
def _is_changed(self, concatted_file):
"""Check if the file has been changed."""
if self.force_compress:
return True
tmp_concatted = '%s.tmp' % concatted_file
file_exists = (
os.path.exists(concatted_file) and
os.path.getsize(concatted_file) == os.path.getsize(tmp_concatted))
if file_exists:
orig_hash = self._file_hash(concatted_file)
temp_hash = self._file_hash(tmp_concatted)
return orig_hash != temp_hash
return True # Different filesize, so it was definitely changed
def _clean_tmp(self, concatted_file):
"""Replace the old file with the temp file."""
tmp_concatted = '%s.tmp' % concatted_file
if os.path.exists(concatted_file):
os.remove(concatted_file)
os.rename(tmp_concatted, concatted_file)
def _cachebust(self, css_file, bundle_name):
"""Cache bust images. Return a new bundle hash."""
self.stdout.write(
'Cache busting images in %s\n' % re.sub('.tmp$', '', css_file))
if not os.path.exists(css_file):
return
css_content = ''
with open(css_file, 'r') as css_in:
css_content = css_in.read()
def _parse(url):
return self._cachebust_regex(url, css_file)
css_parsed = re.sub('url\(([^)]*?)\)', _parse, css_content)
with open(css_file, 'w') as css_out:
css_out.write(css_parsed)
# Return bundle hash for cachebusting JS/CSS files.
file_hash = hashlib.md5(css_parsed).hexdigest()[0:7]
self.checked_hash[css_file] = file_hash
if self.missing_files:
self.stdout.write(
' - Error finding %s images\n' % (self.missing_files,))
self.missing_files = 0
return file_hash
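# Hedged illustration (not part of the original command): after cache busting,
# a CSS rule such as url(../img/icons/go.png) is rewritten to
# url(../img/icons/go.png?3f2a1bc), where 3f2a1bc stands for the first seven
# hex characters of the referenced image's md5 (made-up hash shown here).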
def _minify(self, ftype, file_in, file_out):
"""Run the proper minifier on the file."""
if ftype == 'js' and hasattr(settings, 'UGLIFY_BIN'):
opts = {'method': 'UglifyJS', 'bin': settings.UGLIFY_BIN}
run_command('{uglify} -v -o {target} {source} -m'.format(
uglify=opts['bin'],
target=file_out,
source=file_in))
elif ftype == 'css' and hasattr(settings, 'CLEANCSS_BIN'):
opts = {'method': 'clean-css', 'bin': settings.CLEANCSS_BIN}
run_command('{cleancss} -o {target} {source}'.format(
cleancss=opts['bin'],
target=file_out,
source=file_in))
self.stdout.write(
'Minifying %s (using %s)\n' % (file_in, opts['method']))
def _file_hash(self, url):
"""Open the file and get a hash of it."""
if url in self.checked_hash:
return self.checked_hash[url]
file_hash = ''
try:
with open(url) as f:
file_hash = hashlib.md5(f.read()).hexdigest()[0:7]
except IOError:
self.missing_files += 1
self.stdout.write(' - Could not find file %s\n' % url)
self.checked_hash[url] = file_hash
return file_hash
def _cachebust_regex(self, img, parent):
"""Run over the regex; img is the structural regex object."""
url = img.group(1).strip('"\'')
if url.startswith('data:') or url.startswith('http'):
return 'url(%s)' % url
url = url.split('?')[0]
full_url = os.path.join(
settings.ROOT, os.path.dirname(parent), url)
return 'url(%s?%s)' % (url, self._file_hash(full_url))
|
atiqueahmedziad/addons-server
|
src/olympia/amo/management/commands/compress_assets.py
|
Python
|
bsd-3-clause
| 8,858
| 0.000226
|
import re
class UnknowItem(Exception):
pass
KEYWORDS = ("and", "as", "assert", "break", "class", "continue", "def", "del", "elif", "else", "except", "exec", "finally", "for", "from", "global", "if", "import", "in", "is", "lambda", "not", "or", "pass", "print", "raise", "return", "try", "while", "with", "yield")
TOKENS = (
(r'[a-zA-Z_]\w*', 'NAME'),
(r'0', 'INT'),
(r'[-+]?\d+[eE][-+]?\d+[jJ]', 'FLOAT_EXPONANT_COMPLEX'),
(r'[-+]?\d+.\d?[eE][-+]?\d+[jJ]', 'FLOAT_EXPONANT_COMPLEX'),
(r'[-+]?\d?.\d+[eE][-+]?\d+[jJ]', 'FLOAT_EXPONANT_COMPLEX'),
(r'\d+[eE][-+]?\d*', 'FLOAT_EXPONANT'),
(r'\d+\.\d*[eE][-+]?\d*', 'FLOAT_EXPONANT'),
(r'\.\d+[eE][-+]?\d*', 'FLOAT_EXPONANT'),
(r'\d*\.\d+[jJ]', 'COMPLEX'),
(r'\d+\.[jJ]', 'COMPLEX'),
(r'\d+[jJ]', 'COMPLEX'),
(r'\d+\.', 'FLOAT'),
(r'\d*\.\d+[lL]?', 'FLOAT'),
(r'\d+\.\d*[lL]?', 'FLOAT'),
(r'\.', 'DOT'),
(r'[1-9]+\d*[lL]', 'LONG'),
(r'[1-9]+\d*', 'INT'),
(r'0[xX][\da-fA-F]+[lL]?', 'HEXA'),
(r'(0[oO][0-7]+)|(0[0-7]*)[lL]?', 'OCTA'),
(r'0[bB][01]+[lL]?', 'BINARY'),
(r'\(', 'LEFT_PARENTHESIS'),
(r'\)', 'RIGHT_PARENTHESIS'),
(r':', 'COLON'),
(r',', 'COMMA'),
(r';', 'SEMICOLON'),
(r'@', 'AT'),
(r'\+', 'PLUS'),
(r'-', 'MINUS'),
(r'\*', 'STAR'),
(r'/', 'SLASH'),
(r'\|', 'VBAR'),
(r'&', 'AMPER'),
(r'<', 'LESS'),
(r'>', 'GREATER'),
(r'=', 'EQUAL'),
(r'%', 'PERCENT'),
(r'\[', 'LEFT_SQUARE_BRACKET'),
(r'\]', 'RIGHT_SQUARE_BRACKET'),
(r'\{', 'LEFT_BRACKET'),
(r'\}', 'RIGHT_BRACKET'),
(r'`', 'BACKQUOTE'),
(r'==', 'EQUAL_EQUAL'),
(r'<>', 'NOT_EQUAL'),
(r'!=', 'NOT_EQUAL'),
(r'<=', 'LESS_EQUAL'),
(r'>=', 'GREATER_EQUAL'),
(r'~', 'TILDE'),
(r'\^', 'CIRCUMFLEX'),
(r'<<', 'LEFT_SHIFT'),
(r'>>', 'RIGHT_SHIFT'),
(r'\*\*', 'DOUBLE_STAR'),
(r'\+=', 'PLUS_EQUAL'),
(r'-=', 'MINUS_EQUAL'),
(r'\*=', 'STAR_EQUAL'),
(r'/=', 'SLASH_EQUAL'),
(r'%=', 'PERCENT_EQUAL'),
(r'&=', 'AMPER_EQUAL'),
(r'\|=', 'VBAR_EQUAL'),
(r'\^=', 'CIRCUMFLEX_EQUAL'),
(r'<<=', 'LEFT_SHIFT_EQUAL'),
(r'>>=', 'RIGHT_SHIFT_EQUAL'),
(r'\*\*=', 'DOUBLE_STAR_EQUAL'),
(r'//', 'DOUBLE_SLASH'),
(r'//=', 'DOUBLE_SLASH_EQUAL'),
(r'\n', 'ENDL'),
(r'\r\n', 'ENDL'),
(r'#.*', 'COMMENT'),
(r'(\s|\\\n|\\\r\n)+', 'SPACE'),
(r'["\'](.|\n|\r)*["\']', 'STRING'),
(r'[uU]["\'](.|\n|\r)*["\']', 'UNICODE_STRING'),
(r'[rR]["\'](.|\n|\r)*["\']', 'RAW_STRING'),
(r'[bB]["\'](.|\n|\r)*["\']', 'BINARY_STRING'),
(r'[uU][rR]["\'](.|\n|\r)*["\']', 'UNICODE_RAW_STRING'),
(r'[bB][rR]["\'](.|\n|\r)*["\']', 'BINARY_RAW_STRING'),
)
TOKENS = [(re.compile('^' + x[0] + '$'), x[1]) for x in TOKENS]
def tokenize(sequence, print_function=False):
return list(tokenize_generator(sequence, print_function))
def tokenize_current_keywords(print_function=False):
if print_function is True:
return [x for x in KEYWORDS if x != "print"]
else:
return KEYWORDS
def tokenize_generator(sequence, print_function=False):
current_keywords = tokenize_current_keywords()
for item in sequence:
if item in current_keywords:
yield (item.upper(), item)
continue
for candidate, token_name in TOKENS:
if candidate.match(item):
yield (token_name, item)
break
else:
raise UnknowItem("Can't find a matching token for this item: '%s'" % item)
yield ('ENDMARKER', '')
yield
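# Hedged usage sketch (not part of the original module): tokenize() expects a
# sequence of already-split lexical items, not raw source text.
def _example_tokenize():
    """Illustrative only: token stream for the items of "x = 1\\n"."""
    items = ["x", " ", "=", " ", "1", "\n"]
    # first pair is ('NAME', 'x'); the stream ends with ('ENDMARKER', '')
    return tokenize(items)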
|
cbonoz/codehealth
|
dependencies/baron/tokenizer.py
|
Python
|
mit
| 3,585
| 0.000837
|
import os
import sys
from github import Github
from github.GithubException import GithubException
def tag_to_tag():
SRC_TAG=os.environ.get('SRC_TAG')
ORG_NAME=os.environ.get('ORG_NAME')
REPO_NAME=os.environ.get('REPO_NAME')
USERNAME=os.environ.get('USERNAME')
PASSWORD=os.environ.get('PASSWORD')
TAG=os.environ.get('TAG')
print 'Attempting to create tag %s from tag %s' % (TAG, SRC_TAG)
g = Github(USERNAME,PASSWORD)
org = g.get_organization(ORG_NAME)
repo = org.get_repo(REPO_NAME)
# Get the source tag by name, error if none found
src_tag = None
for tag in repo.get_tags():
print tag.name
if tag.name == SRC_TAG:
src_tag = tag
break
if not src_tag:
print 'No tag named %s found' % SRC_TAG
exit(1)
tag = repo.create_git_tag(TAG, 'Created from tag %s' % SRC_TAG, src_tag.commit.sha, 'commit')
print 'Tag Created:'
print tag._rawData
# Could not figure out how to look up the existing ref, but decided against it
# anyhow as Jenkins shouldn't be rewriting git tags automatically. If a tag
# needs to be overwritten, it must first be manually deleted
# Delete the existing ref
#existing_ref = repo.get_git_ref('tag/%s' % TAG)
#if existing_ref:
# print 'Existing ref found, deleting it to set new one'
# existing_ref.delete()
ref = repo.create_git_ref('refs/tags/%s' % TAG, tag.sha)
print 'Ref Created:'
print ref._rawData
print 'SUCCESS'
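# Hedged usage sketch (not part of the original script), with made-up values for
# the environment variables the function reads:
#
#   ORG_NAME=SomeOrg REPO_NAME=SomeRepo USERNAME=ci-bot PASSWORD=secret \
#   SRC_TAG=beta/1.2 TAG=release/1.2 python tag_to_tag.py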
if __name__ == '__main__':
try:
tag_to_tag()
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
print '-'*60
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
print '-'*60
sys.exit(1)
|
jorrit-steporange/CumulusCI
|
ci/github/tag_to_tag.py
|
Python
|
bsd-3-clause
| 1,855
| 0.012399
|
#!/usr/bin/python
################
# The MIT License (MIT)
#
# Copyright (c) <2013> <Martin de Bruyn>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
############################################################
#----------------------------------------------------------------------#
"""@ package Input
Keep all inputs here.
"""
# System imports
import logging as log
import sys
# Panda imports
from direct.showbase.InputStateGlobal import inputState
from direct.showbase.DirectObject import DirectObject
# MeoTech imports
#----------------------------------------------------------------------#
class InputHandler(DirectObject):
"""InputHandler.
Keyboard stuff
"""
def __init__(self, _game):
"""InputHandler INIT"""
# Game
self.game = _game
# Keyboard
inputState.watchWithModifiers('forward', 'w')
inputState.watchWithModifiers('left', 'a')
inputState.watchWithModifiers('reverse', 's')
inputState.watchWithModifiers('right', 'd')
inputState.watchWithModifiers('turnLeft', 'q')
inputState.watchWithModifiers('turnRight', 'e')
inputState.watchWithModifiers('space', 'space')
#inputState.watchWithModifiers('ctrl', 'lcontrol_down')
self.accept("mouse1", self.shootLight)
# App exit temp
base.accept("escape", sys.exit)
# mouse
self.winXhalf = base.win.getXSize()/2
self.winYhalf = base.win.getYSize()/2
# Should move the camera stuff to the baseCamera.py
base.camera.reparentTo(self.game.meotech.engine.GameObjects["player"].bulletBody)
base.camLens.setFov(90)
base.camLens.setNear(0.5)
self.mouseSpeedX = 15
self.mouseSpeedY = 0.2
self.camP = 10
def shootLight(self):
print "shoot"
cone = self.game.player.flashlightConeBody
base.messenger.send("shootLight", [cone])
def getMouse(self, dt):
player = self.game.meotech.engine.GameObjects["player"]
flashlight = self.game.player.flashlightConeBody
flashlight_lamp = self.game.player.flashlight
flashlight_light = self.game.player.flashlightLight
# Handle mouse
md = base.win.getPointer(0)
x = md.getX()
y = md.getY()
if base.win.movePointer(0, self.winXhalf, self.winYhalf):
omega = (x - self.winXhalf)*-self.mouseSpeedX
player.bulletBody.node().setAngularMovement(omega)
#flashlight.setH(flashlight, base.camera.getH())
cam = base.cam.getP() - (y - self.winYhalf) * self.mouseSpeedY
flashlight.setHpr(base.cam.getHpr())
if cam <-80:
cam = -80
elif cam > 90:
cam = 90
base.cam.setP(cam)
flashlight.setP(cam + 90)
flashlight_lamp.setZ(flashlight.getZ() - 0.6)
flashlight_lamp.setY(flashlight.getY() - 0.55)
flashlight_light.setHpr(flashlight_lamp.find("LightPos").getHpr() + 90)
|
grimfang/quickShadows
|
src/game/input.py
|
Python
|
mit
| 4,080
| 0.002206
|
from rest_framework import generics
from rest_framework import permissions as drf_permissions
from rest_framework.exceptions import NotFound
from framework.auth.oauth_scopes import CoreScopes
from osf.models import (
Guid,
BaseFileNode,
FileVersion,
QuickFilesNode
)
from api.base.exceptions import Gone
from api.base.permissions import PermissionWithGetter
from api.base.throttling import CreateGuidThrottle, NonCookieAuthThrottle, UserRateThrottle
from api.base import utils
from api.base.views import JSONAPIBaseView
from api.base import permissions as base_permissions
from api.nodes.permissions import ContributorOrPublic
from api.nodes.permissions import ReadOnlyIfRegistration
from api.files.permissions import IsPreprintFile
from api.files.permissions import CheckedOutOrAdmin
from api.files.serializers import FileSerializer
from api.files.serializers import FileDetailSerializer, QuickFilesDetailSerializer
from api.files.serializers import FileVersionSerializer
class FileMixin(object):
"""Mixin with convenience methods for retrieving the current file based on the
current URL. By default, fetches the file based on the file_id kwarg.
"""
serializer_class = FileSerializer
file_lookup_url_kwarg = 'file_id'
def get_file(self, check_permissions=True):
try:
obj = utils.get_object_or_error(BaseFileNode, self.kwargs[self.file_lookup_url_kwarg], self.request, display_name='file')
except NotFound:
obj = utils.get_object_or_error(Guid, self.kwargs[self.file_lookup_url_kwarg], self.request).referent
if obj.is_deleted:
raise Gone(detail='The requested file is no longer available.')
if not isinstance(obj, BaseFileNode):
raise NotFound
if check_permissions:
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj
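# Hedged usage sketch (not part of the original module): a view only needs to
# mix FileMixin in and call get_file(); the class name below is hypothetical.
#
#   class FileExampleView(JSONAPIBaseView, generics.RetrieveAPIView, FileMixin):
#       def get_object(self):
#           return self.get_file()  # resolves file_id or Guid, checks permissions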
class FileDetail(JSONAPIBaseView, generics.RetrieveUpdateAPIView, FileMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/files_detail).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
IsPreprintFile,
CheckedOutOrAdmin,
base_permissions.TokenHasScope,
PermissionWithGetter(ContributorOrPublic, 'node'),
PermissionWithGetter(ReadOnlyIfRegistration, 'node'),
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = FileDetailSerializer
throttle_classes = (CreateGuidThrottle, NonCookieAuthThrottle, UserRateThrottle, )
view_category = 'files'
view_name = 'file-detail'
def get_serializer_class(self):
try:
node = self.get_node()
except (NotFound, Gone):
return FileDetailSerializer
else:
if isinstance(node, QuickFilesNode):
return QuickFilesDetailSerializer
return FileDetailSerializer
def get_node(self):
return self.get_file().node
# overrides RetrieveAPIView
def get_object(self):
user = utils.get_user_auth(self.request).user
file = self.get_file()
if self.request.GET.get('create_guid', False):
# allows quickfiles to be given guids when another user wants a permanent link to it
if (self.get_node().has_permission(user, 'admin') and utils.has_admin_scope(self.request)) or file.node.is_quickfiles:
file.get_guid(create=True)
return file
class FileVersionsList(JSONAPIBaseView, generics.ListAPIView, FileMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/files_versions).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PermissionWithGetter(ContributorOrPublic, 'node'),
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = FileVersionSerializer
view_category = 'files'
view_name = 'file-versions'
ordering = ('-modified',)
def get_queryset(self):
self.file = self.get_file()
return self.file.versions.all()
def get_serializer_context(self):
context = JSONAPIBaseView.get_serializer_context(self)
context['file'] = self.file
return context
def node_from_version(request, view, obj):
return view.get_file(check_permissions=False).node
class FileVersionDetail(JSONAPIBaseView, generics.RetrieveAPIView, FileMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/files_version_detail).
"""
version_lookup_url_kwarg = 'version_id'
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PermissionWithGetter(ContributorOrPublic, node_from_version)
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = FileVersionSerializer
view_category = 'files'
view_name = 'version-detail'
# overrides RetrieveAPIView
def get_object(self):
self.file = self.get_file()
maybe_version = self.file.get_version(self.kwargs[self.version_lookup_url_kwarg])
# May raise a permission denied
# Kinda hacky but versions have no reference to node or file
self.check_object_permissions(self.request, self.file)
return utils.get_object_or_error(FileVersion, getattr(maybe_version, '_id', ''), self.request)
def get_serializer_context(self):
context = JSONAPIBaseView.get_serializer_context(self)
context['file'] = self.file
return context
|
chennan47/osf.io
|
api/files/views.py
|
Python
|
apache-2.0
| 5,890
| 0.002207
|
#!/usr/bin/env python
import os
import cv2
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
disp_n = 200
s_time = 3
radius = 3
thickness = 3
cls_color = (23, 119, 188)
colors = [
(0, 0, 255),
(0, 255, 0),
(255, 0, 0),
(23, 119, 188),
(222, 12, 39),
(122, 212, 139),
(20, 198, 68),
(111, 12, 139),
(131, 112, 179),
(31, 211, 79),
(131, 121, 179),
(31, 121, 192),
(192, 21, 92),
(192, 21, 192),
(216, 121, 92),
(16, 11, 62),
(16, 111, 162),
(96, 46, 12),
]
n_colors = len(colors)
def _mkdirs(path):
if not os.path.isdir(path):
os.makedirs(path)
# only one ground-truth box per image
def _read_gt(filepath):
'''format: imgidx objidx bbox cls'''
pd_dt = {}
pd_c = 0
fh = open(filepath)
for line in fh.readlines():
pd_c = pd_c + 1
line = line.strip()
info = line.split()
assert len(info) >= 1
imgidx, info = info[0], info[1:]
assert len(info) == 6
imgidx = imgidx.strip()
objidx = info[0].strip()
x1 = info[1].strip()
y1 = info[2].strip()
x2 = info[3].strip()
y2 = info[4].strip()
cls = info[5].strip()
objidx = int(objidx)
assert objidx == 0
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
pd_dt[imgidx] = [x1, y1, x2, y2]
fh.close()
assert pd_c == len(pd_dt.keys())
return pd_dt
# one or more predictions per image
def _read_pd(filepath, in_dire, is_in_dire=False):
'''format: imgidx score bbox cls'''
gt_dt = {}
gt_c = 0
fh = open(filepath)
imgidxs = []
for line in fh.readlines():
gt_c = gt_c + 1
line = line.strip()
info = line.split()
assert len(info) >= 1
im_path, info = info[0], info[1:]
assert len(info) == 6
im_path = im_path.strip()
score = info[0].strip()
x1 = info[1].strip()
y1 = info[2].strip()
x2 = info[3].strip()
y2 = info[4].strip()
cls = info[5].strip()
if is_in_dire:
im_name = im_path[len(in_dire):]
else:
im_name = os.path.basename(im_path)
imgidx = im_name.strip().rsplit(".", 1)[0]
imgidx = imgidx.strip()
if imgidx in imgidxs:
print imgidx, line
imgidxs.append(imgidx)
score = float(score)
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
gt_dt[imgidx] = [x1, y1, x2, y2]
fh.close()
print len(imgidxs)
print len(set(imgidxs))
assert gt_c == len(gt_dt.keys()), "gt_c: %s, n_keys: %s" \
% (gt_c, len(gt_dt.keys()))
return gt_dt
def _area(box):
assert len(box) == 4
w = box[2] - box[0] + 1
h = box[3] - box[1] + 1
a = w * h
assert a >= 0
return a
def _overlap(pd_box, gt_box):
pa = _area(pd_box)
ga = _area(gt_box)
x1 = max(pd_box[0], gt_box[0])
y1 = max(pd_box[1], gt_box[1])
x2 = min(pd_box[2], gt_box[2])
y2 = min(pd_box[3], gt_box[3])
if x1 > x2 or y1 > y2:
oa = 0
else:
oa = _area([x1, y1, x2, y2])
return oa / (pa + ga - oa + 0.0)
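# Hedged sketch (not part of the original script): a tiny sanity check for
# _overlap() with made-up [x1, y1, x2, y2] boxes.
def _example_overlap():
    assert _overlap([0, 0, 9, 9], [0, 0, 9, 9]) == 1.0      # identical boxes
    assert _overlap([0, 0, 4, 4], [10, 10, 14, 14]) == 0.0  # disjoint boxes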
def _iou(pd_file, gt_file, in_dire, is_in_dire=False):
''''''
pd_dt = _read_pd(pd_file, in_dire, is_in_dire=is_in_dire)
gt_dt = _read_gt(gt_file)
assert len(pd_dt.keys()) == len(gt_dt.keys())
imgidxs = pd_dt.keys()
imgidxs.sort()
disp_c = 0
ovs = []
for imgidx in imgidxs:
disp_c += 1
if disp_c % disp_n == 0:
print "disp_c:", disp_c
pd_box = pd_dt[imgidx]
gt_box = gt_dt[imgidx]
ov = _overlap(pd_box, gt_box)
ovs.append(ov)
if disp_c % disp_n != 0:
print "disp_c:", disp_c
print "\n\nDone.\n\n"
return ovs
def _recall(ovs, thresolds):
n_ovs = len(ovs) # n_examples
n_thres = len(thresolds)
precision = np.zeros(n_thres) # np.zeros((n_thres,), dtype=np.int)
recall = np.zeros(n_thres) # np.zeros((n_thres,), dtype=np.int)
print recall.shape
for j in xrange(n_thres):
acc_c = 0
thres = thresolds[j]
for j2 in xrange(n_ovs):
ov = ovs[j2]
if ov > thres:
acc_c += 1
acc_c = acc_c / (n_ovs + 0.)
precision[j] = acc_c
recall[j] = acc_c
return recall
def _all_recall_pics(ovs_list, type_names, title, out_path=None, legend_loc="upper right"):
'''Plot recall vs. IoU threshold curves'''
plt.clf()
plt.grid(True)
plt.xlabel('IoU')
plt.ylabel('Recall')
# plt.ylim([0.0, 1.0])
# plt.xlim([0.5, 1.0])
n_dataset = len(ovs_list)
assert n_dataset == len(type_names)
thresolds = [j / 100.0 for j in xrange(50, 101, 1)]
for j in xrange(n_dataset):
ovs = ovs_list[j]
name = type_names[j]
recall = _recall(ovs, thresolds)
plt.plot(thresolds, recall, label=name)
plt.xticks(np.arange(0.50, 1.01, 0.05))
plt.yticks(np.arange(0.0, 1.01, 0.1))
plt.title(title)
plt.legend(loc=legend_loc)
plt.savefig(out_path)
if out_path is None:
plt.show()
else:
plt.savefig(out_path)
def torso_run():
''''''
ovs_list = []
type_names = []
out_path = "/pathTo/../res.pics/torso.recall.png"
## flic test
pd_file = "/pathTo/../dataset/FLIC/vision/flic_torso_test.txt"
gt_file = "/pathTo/../dataset/FLIC/labels/crop_test_torso_labels2.txt"
in_dire = "/pathTo/../dataset/FLIC/crop.images2/test/"
is_in_dire = False
type_names.append("FLIC Dataset")
ovs = _iou(pd_file, gt_file, in_dire, is_in_dire=is_in_dire)
ovs_list.append(ovs)
## bbc pose -> test & val
pd_file = "/pathTo/../dataset/bbc_pose/torso_masks/test_torso_results.txt"
gt_file = "/pathTo/../dataset/bbc_pose/labels/crop_test_torso.label"
in_dire = "/pathTo/../dataset/bbc_pose/crop.data/"
is_in_dire = True
type_names.append("BBC Pose Dataset")
ovs = _iou(pd_file, gt_file, in_dire, is_in_dire=is_in_dire)
ovs_list.append(ovs)
## kinect2
pd_file = "/pathTo/../dataset/Kinect2/torso_masks/test_torso_results.txt"
gt_file = "/pathTo/../dataset/Kinect2/labels/up.crop.color2_test_torso_l7.log"
in_dire = "/pathTo/../dataset/Kinect2/up.crop.color/"
is_in_dire = False
type_names.append("Kinect2 Dataset")
ovs = _iou(pd_file, gt_file, in_dire, is_in_dire=is_in_dire)
ovs_list.append(ovs)
# pic -> viz
title = 'Recall for Torso Detection'
_all_recall_pics(ovs_list, type_names, title, out_path=out_path)
def person_run():
''''''
ovs_list = []
type_names = []
out_path = "/pathTo/../res.pics/person.recall.png"
## bbc pose -> test & val
pd_file = "/pathTo/../dataset/bbc_pose/test_person_results.txt"
gt_file = "/pathTo/../dataset/bbc_pose/labels/pbbox_test_cls.txt"
in_dire = "/pathTo/../dataset/bbc_pose/data/"
is_in_dire = True
type_names.append("BBC Pose Dataset")
ovs = _iou(pd_file, gt_file, in_dire, is_in_dire=is_in_dire)
ovs_list.append(ovs)
## kinect2
pd_file = "/pathTo/../dataset/Kinect2/test_person_results.txt"
gt_file = "/pathTo/../dataset/Kinect2/labels/up.color2.pbbox.test.log"
in_dire = "/pathTo/../dataset/Kinect2/up.color/"
is_in_dire = False
type_names.append("Kinect2 Dataset")
ovs = _iou(pd_file, gt_file, in_dire, is_in_dire=is_in_dire)
ovs_list.append(ovs)
# pic -> viz
title = 'Recall for Person Detection'
_all_recall_pics(ovs_list, type_names, title, out_path=out_path, legend_loc="lower left")
if __name__ == '__main__':
''''''
# torso_run()
person_run()
|
zimenglan-sysu-512/pose_action_caffe
|
results/pic_iou_curve.py
|
Python
|
mit
| 7,311
| 0.045137
|
# based on https://github.com/pypa/sampleproject/blob/master/setup.py
# see http://packaging.python.org/en/latest/tutorial.html#creating-your-own-project
from setuptools import setup, find_packages
from setuptools.command.install import install as stdinstall
import codecs
import os
import re
import sys
def find_version(*file_paths):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f:
version_file = f.read()
# The version line must have the form
# __version__ = 'ver'
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def get_file_contents(filename):
with codecs.open(filename, encoding='utf-8') as f:
contents = f.read()
return contents
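# Hedged sketch (not part of the original setup script): find_version() relies on
# the module containing a line of the form __version__ = '<ver>'.
def _example_version_regex():
    """Illustrative only: the pattern find_version() greps for."""
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                      '__version__ = "1.3"', re.M)
    return match.group(1)  # -> '1.3'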
package_name = "typecheck-decorator"
class install_with_test(stdinstall):
def run(self):
stdinstall.run(self) # normal install
##pip/setuptools makes this unbuffering unhelpful:
#sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 1) # make line-buffered
#sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 1) # make line-buffered
#import typecheck.test_typecheck_decorator # execute post-install test (during beta only)
setup(
# setup customization:
cmdclass={'install': install_with_test},
# basic information:
name=package_name,
version=find_version('typecheck', '__init__.py'),
description="flexible explicit run-time type checking of function arguments (Python3-only)",
long_description=get_file_contents("README.rst"),
# The project URL:
url='http://github.com/prechelt/' + package_name,
# Author details:
author='Dmitry Dvoinikov, Lutz Prechelt',
author_email='prechelt@inf.fu-berlin.de',
# Classification:
license='BSD License',
classifiers=[
'License :: OSI Approved :: BSD License',
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Documentation',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='type-checking',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=find_packages(exclude=["contrib", "docs", "tests*"]),
# List run-time dependencies here. These will be installed by pip when your
# project is installed.
install_requires = ['typing;python_version<"3.5"'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
# 'typecheck': ['package_data.dat'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
###data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
### entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
|
prechelt/typecheck-decorator
|
setup.py
|
Python
|
bsd-2-clause
| 3,969
| 0.007055
|
"""Helpers to execute scripts."""
import logging
from contextlib import suppress
from itertools import islice
from typing import Optional, Sequence
import voluptuous as vol
from homeassistant.core import HomeAssistant, Context, callback
from homeassistant.const import CONF_CONDITION, CONF_TIMEOUT
from homeassistant import exceptions
from homeassistant.helpers import (
service, condition, template as template,
config_validation as cv)
from homeassistant.helpers.event import (
async_track_point_in_utc_time, async_track_template)
from homeassistant.helpers.typing import ConfigType
import homeassistant.util.dt as date_util
from homeassistant.util.async_ import (
run_coroutine_threadsafe, run_callback_threadsafe)
_LOGGER = logging.getLogger(__name__)
CONF_ALIAS = 'alias'
CONF_SERVICE = 'service'
CONF_SERVICE_DATA = 'data'
CONF_SEQUENCE = 'sequence'
CONF_EVENT = 'event'
CONF_EVENT_DATA = 'event_data'
CONF_EVENT_DATA_TEMPLATE = 'event_data_template'
CONF_DELAY = 'delay'
CONF_WAIT_TEMPLATE = 'wait_template'
CONF_CONTINUE = 'continue_on_timeout'
ACTION_DELAY = 'delay'
ACTION_WAIT_TEMPLATE = 'wait_template'
ACTION_CHECK_CONDITION = 'condition'
ACTION_FIRE_EVENT = 'event'
ACTION_CALL_SERVICE = 'call_service'
def _determine_action(action):
"""Determine action type."""
if CONF_DELAY in action:
return ACTION_DELAY
if CONF_WAIT_TEMPLATE in action:
return ACTION_WAIT_TEMPLATE
if CONF_CONDITION in action:
return ACTION_CHECK_CONDITION
if CONF_EVENT in action:
return ACTION_FIRE_EVENT
return ACTION_CALL_SERVICE
def call_from_config(hass: HomeAssistant, config: ConfigType,
variables: Optional[Sequence] = None,
context: Optional[Context] = None) -> None:
"""Call a script based on a config entry."""
Script(hass, cv.SCRIPT_SCHEMA(config)).run(variables, context)
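# A minimal usage sketch (the service and entity names below are hypothetical):
#   call_from_config(hass, [
#       {'service': 'light.turn_on', 'entity_id': 'light.kitchen'},
#       {'delay': {'seconds': 5}},
#       {'service': 'light.turn_off', 'entity_id': 'light.kitchen'},
#   ])
# cv.SCRIPT_SCHEMA accepts a single action or a list of actions; the resulting
# Script executes them in order and can suspend on the delay step.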
class _StopScript(Exception):
"""Throw if script needs to stop."""
class _SuspendScript(Exception):
"""Throw if script needs to suspend."""
class Script():
"""Representation of a script."""
def __init__(self, hass: HomeAssistant, sequence, name: str = None,
change_listener=None) -> None:
"""Initialize the script."""
self.hass = hass
self.sequence = sequence
template.attach(hass, self.sequence)
self.name = name
self._change_listener = change_listener
self._cur = -1
self._exception_step = None
self.last_action = None
self.last_triggered = None
self.can_cancel = any(CONF_DELAY in action or CONF_WAIT_TEMPLATE
in action for action in self.sequence)
self._async_listener = []
self._template_cache = {}
self._config_cache = {}
self._actions = {
ACTION_DELAY: self._async_delay,
ACTION_WAIT_TEMPLATE: self._async_wait_template,
ACTION_CHECK_CONDITION: self._async_check_condition,
ACTION_FIRE_EVENT: self._async_fire_event,
ACTION_CALL_SERVICE: self._async_call_service,
}
@property
    def is_running(self) -> bool:
        """Return True if the script is currently running."""
        return self._cur != -1
def run(self, variables=None, context=None):
"""Run script."""
run_coroutine_threadsafe(
self.async_run(variables, context), self.hass.loop).result()
async def async_run(self, variables: Optional[Sequence] = None,
context: Optional[Context] = None) -> None:
"""Run script.
This method is a coroutine.
"""
self.last_triggered = date_util.utcnow()
if self._cur == -1:
self._log('Running script')
self._cur = 0
# Unregister callback if we were in a delay or wait but turn on is
# called again. In that case we just continue execution.
self._async_remove_listener()
for cur, action in islice(enumerate(self.sequence), self._cur, None):
try:
await self._handle_action(action, variables, context)
except _SuspendScript:
# Store next step to take and notify change listeners
self._cur = cur + 1
if self._change_listener:
self.hass.async_add_job(self._change_listener)
return
except _StopScript:
break
except Exception:
# Store the step that had an exception
self._exception_step = cur
# Set script to not running
self._cur = -1
self.last_action = None
# Pass exception on.
raise
# Set script to not-running.
self._cur = -1
self.last_action = None
if self._change_listener:
self.hass.async_add_job(self._change_listener)
def stop(self) -> None:
"""Stop running script."""
run_callback_threadsafe(self.hass.loop, self.async_stop).result()
def async_stop(self) -> None:
"""Stop running script."""
if self._cur == -1:
return
self._cur = -1
self._async_remove_listener()
if self._change_listener:
self.hass.async_add_job(self._change_listener)
@callback
def async_log_exception(self, logger, message_base, exception):
"""Log an exception for this script.
        Should only be called on exceptions raised by this script's async_run.
"""
# pylint: disable=protected-access
step = self._exception_step
action = self.sequence[step]
action_type = _determine_action(action)
error = None
meth = logger.error
if isinstance(exception, vol.Invalid):
error_desc = "Invalid data"
elif isinstance(exception, exceptions.TemplateError):
error_desc = "Error rendering template"
elif isinstance(exception, exceptions.Unauthorized):
error_desc = "Unauthorized"
elif isinstance(exception, exceptions.ServiceNotFound):
error_desc = "Service not found"
else:
# Print the full stack trace, unknown error
error_desc = 'Unknown error'
meth = logger.exception
error = ""
if error is None:
error = str(exception)
meth("%s. %s for %s at pos %s: %s",
message_base, error_desc, action_type, step + 1, error)
async def _handle_action(self, action, variables, context):
"""Handle an action."""
await self._actions[_determine_action(action)](
action, variables, context)
async def _async_delay(self, action, variables, context):
"""Handle delay."""
# Call ourselves in the future to continue work
unsub = None
@callback
def async_script_delay(now):
"""Handle delay."""
# pylint: disable=cell-var-from-loop
with suppress(ValueError):
self._async_listener.remove(unsub)
self.hass.async_create_task(
self.async_run(variables, context))
delay = action[CONF_DELAY]
try:
if isinstance(delay, template.Template):
delay = vol.All(
cv.time_period,
cv.positive_timedelta)(
delay.async_render(variables))
elif isinstance(delay, dict):
delay_data = {}
delay_data.update(
template.render_complex(delay, variables))
delay = cv.time_period(delay_data)
except (exceptions.TemplateError, vol.Invalid) as ex:
_LOGGER.error("Error rendering '%s' delay template: %s",
self.name, ex)
raise _StopScript
self.last_action = action.get(
CONF_ALIAS, 'delay {}'.format(delay))
self._log("Executing step %s" % self.last_action)
unsub = async_track_point_in_utc_time(
self.hass, async_script_delay,
date_util.utcnow() + delay
)
self._async_listener.append(unsub)
raise _SuspendScript
async def _async_wait_template(self, action, variables, context):
"""Handle a wait template."""
# Call ourselves in the future to continue work
wait_template = action[CONF_WAIT_TEMPLATE]
wait_template.hass = self.hass
self.last_action = action.get(CONF_ALIAS, 'wait template')
self._log("Executing step %s" % self.last_action)
# check if condition already okay
if condition.async_template(
self.hass, wait_template, variables):
return
@callback
def async_script_wait(entity_id, from_s, to_s):
"""Handle script after template condition is true."""
self._async_remove_listener()
self.hass.async_create_task(
self.async_run(variables, context))
self._async_listener.append(async_track_template(
self.hass, wait_template, async_script_wait, variables))
if CONF_TIMEOUT in action:
self._async_set_timeout(
action, variables, context,
action.get(CONF_CONTINUE, True))
raise _SuspendScript
async def _async_call_service(self, action, variables, context):
"""Call the service specified in the action.
This method is a coroutine.
"""
self.last_action = action.get(CONF_ALIAS, 'call service')
self._log("Executing step %s" % self.last_action)
await service.async_call_from_config(
self.hass, action,
blocking=True,
variables=variables,
validate_config=False,
context=context
)
async def _async_fire_event(self, action, variables, context):
"""Fire an event."""
self.last_action = action.get(CONF_ALIAS, action[CONF_EVENT])
self._log("Executing step %s" % self.last_action)
event_data = dict(action.get(CONF_EVENT_DATA, {}))
if CONF_EVENT_DATA_TEMPLATE in action:
try:
event_data.update(template.render_complex(
action[CONF_EVENT_DATA_TEMPLATE], variables))
except exceptions.TemplateError as ex:
_LOGGER.error('Error rendering event data template: %s', ex)
self.hass.bus.async_fire(action[CONF_EVENT],
event_data, context=context)
async def _async_check_condition(self, action, variables, context):
"""Test if condition is matching."""
config_cache_key = frozenset((k, str(v)) for k, v in action.items())
config = self._config_cache.get(config_cache_key)
if not config:
config = condition.async_from_config(action, False)
self._config_cache[config_cache_key] = config
self.last_action = action.get(CONF_ALIAS, action[CONF_CONDITION])
check = config(self.hass, variables)
self._log("Test condition {}: {}".format(self.last_action, check))
if not check:
raise _StopScript
def _async_set_timeout(self, action, variables, context,
continue_on_timeout):
"""Schedule a timeout to abort or continue script."""
timeout = action[CONF_TIMEOUT]
unsub = None
@callback
        def async_script_timeout(now):
            """Handle the timeout once it is reached."""
with suppress(ValueError):
self._async_listener.remove(unsub)
# Check if we want to continue to execute
# the script after the timeout
if continue_on_timeout:
self.hass.async_create_task(
self.async_run(variables, context))
else:
self._log("Timeout reached, abort script.")
self.async_stop()
unsub = async_track_point_in_utc_time(
self.hass, async_script_timeout,
date_util.utcnow() + timeout
)
self._async_listener.append(unsub)
def _async_remove_listener(self):
"""Remove point in time listener, if any."""
for unsub in self._async_listener:
unsub()
self._async_listener.clear()
def _log(self, msg):
"""Logger helper."""
if self.name is not None:
msg = "Script {}: {}".format(self.name, msg)
_LOGGER.info(msg)
|
MartinHjelmare/home-assistant
|
homeassistant/helpers/script.py
|
Python
|
apache-2.0
| 12,633
| 0
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRvcheck(RPackage):
"""Check latest release version of R and R package (both in 'CRAN',
'Bioconductor' or 'Github')."""
homepage = "https://cloud.r-project.org/package=rvcheck"
url = "https://cloud.r-project.org/src/contrib/rvcheck_0.0.9.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/rvcheck"
version('0.1.3', sha256='0b59986c1ccc5b89f8aca8fa7cf62d0b875719addb40e08dbda1791cfd334fc4')
version('0.0.9', sha256='6e7be7b029d28181a1b57ebd4d25978f3459722ffdb45a3698157a7f943bea92')
depends_on('r@3.3.0:', when='@:0.1.1', type=('build', 'run'))
depends_on('r@3.4.0:', when='@0.1.3:', type=('build', 'run'))
depends_on('r-rlang', when='@0.1.1:', type=('build', 'run'))
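# A usage sketch (assuming a working Spack installation): `spack install r-rvcheck@0.1.3`
# builds the package from the URL above and pulls in the version-gated R and r-rlang
# dependencies declared with depends_on.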
|
iulian787/spack
|
var/spack/repos/builtin/packages/r-rvcheck/package.py
|
Python
|
lgpl-2.1
| 958
| 0.003132
|
import numpy as np
import tensorflow as tf
import dists
from misc import *
|
davmre/bayesflow
|
elbow/util/__init__.py
|
Python
|
bsd-3-clause
| 77
| 0
|
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._roberts.
Roberts similarity
"""
from typing import Any, Optional
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['Roberts']
class Roberts(_TokenDistance):
r"""Roberts similarity.
For two multisets X and Y drawn from an alphabet S, Roberts similarity
:cite:`Roberts:1986` is
.. math::
sim_{Roberts}(X, Y) =
\frac{\Big[\sum_{i \in S} (X_i + Y_i) \cdot
\frac{min(X_i, Y_i)}{max(X_i, Y_i)}\Big]}
{\sum_{i \in S} (X_i + Y_i)}
.. versionadded:: 0.4.0
"""
def __init__(
self, tokenizer: Optional[_Tokenizer] = None, **kwargs: Any
) -> None:
"""Initialize Roberts instance.
Parameters
----------
tokenizer : _Tokenizer
A tokenizer instance from the :py:mod:`abydos.tokenizer` package
**kwargs
Arbitrary keyword arguments
Other Parameters
----------------
qval : int
The length of each q-gram. Using this parameter and tokenizer=None
will cause the instance to use the QGram tokenizer with this
q value.
.. versionadded:: 0.4.0
"""
super(Roberts, self).__init__(tokenizer=tokenizer, **kwargs)
def sim(self, src: str, tar: str) -> float:
"""Return the Roberts similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
Roberts similarity
Examples
--------
>>> cmp = Roberts()
>>> cmp.sim('cat', 'hat')
0.5
>>> cmp.sim('Niall', 'Neil')
0.36363636363636365
>>> cmp.sim('aluminum', 'Catalan')
0.11764705882352941
>>> cmp.sim('ATCG', 'TAGC')
0.0
.. versionadded:: 0.4.0
"""
if src == tar:
return 1.0
self._tokenize(src, tar)
alphabet = self._total().keys()
return sum(
(self._src_tokens[i] + self._tar_tokens[i])
* min(self._src_tokens[i], self._tar_tokens[i])
/ max(self._src_tokens[i], self._tar_tokens[i])
for i in alphabet
) / sum((self._src_tokens[i] + self._tar_tokens[i]) for i in alphabet)
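# A worked example of the formula above, assuming the default bigram (q=2)
# tokenizer with start/stop symbols: 'cat' -> {$c, ca, at, t#} and
# 'hat' -> {$h, ha, at, t#}. Only 'at' and 't#' are shared, each contributing
# (1+1)*1/1 = 2 to the numerator; unshared bigrams contribute 0 because
# min(X_i, Y_i) = 0. The denominator is the total token count 8, so
# sim('cat', 'hat') = 4/8 = 0.5, matching the doctest above.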
if __name__ == '__main__':
import doctest
doctest.testmod()
|
chrislit/abydos
|
abydos/distance/_roberts.py
|
Python
|
gpl-3.0
| 3,233
| 0
|
# subSystemBonusGallenteElectronic2TractorBeamVelocity
#
# Used by:
# Subsystem: Proteus Electronics - Emergent Locus Analyzer
type = "passive"
def handler(fit, module, context):
fit.modules.filteredItemBoost(lambda mod: mod.item.group.name == "Tractor Beam",
"maxTractorVelocity", module.getModifiedItemAttr("subsystemBonusGallenteElectronic2"),
skill="Gallente Electronic Systems")
|
Ebag333/Pyfa
|
eos/effects/subsystembonusgallenteelectronic2tractorbeamvelocity.py
|
Python
|
gpl-3.0
| 458
| 0.004367
|
from django.db import models
from django.db.models.fields.files import FieldFile
from django.core.files import File
def get_video_dimensions(path):
from ffvideo import VideoStream
vs = VideoStream(path)
return (vs.frame_width, vs.frame_height)
class VideoFile(File):
"""
A mixin for use alongside django.core.files.base.File, which provides
    additional features for dealing with videos.
"""
def _get_width(self):
return self._get_video_dimensions()[0]
width = property(_get_width)
def _get_height(self):
return self._get_video_dimensions()[1]
height = property(_get_height)
    def _get_video_dimensions(self):
        if not hasattr(self, '_dimensions_cache'):
            close = self.closed
            self.open()
            self._dimensions_cache = get_video_dimensions(self.path)
            if close:
                # Restore the file's original closed state after probing it.
                self.close()
        return self._dimensions_cache
# A video field is exactly a file field with a different signature
class VideoFieldFile(VideoFile, FieldFile):
pass
class VideoField(models.FileField):
attr_class = VideoFieldFile
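# A minimal usage sketch (model and field names are hypothetical):
#   class Lecture(models.Model):
#       clip = VideoField(upload_to='clips/')
# Accessing lecture.clip.width or lecture.clip.height then lazily probes the
# stored file through ffvideo and caches the result on the field file.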
|
akhilari7/pa-dude
|
lib/python2.7/site-packages/image/video_field.py
|
Python
|
mit
| 1,087
| 0
|
#
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import pygame, pygame.draw, pygame.event
from pygame.locals import *
import random, math, sys
# galaxy specification
sectorSize = [10, 10]
sectorsOffset = [0, 0]
galaxyID = 'Center90'
sectorsSpec = [
[ (0,0), (9,1), (0,0)],
[ (9,1), (9,0), (9,1)],
[ (0,0), (9,1), (0,0)],
]
if 0: # small galaxy
galaxyID = 'Circle4P'
galaxyCenter = (20.0, 20.0)
galaxyRadius = 20.0
galaxyStartR = (14.0, 16.0)
galaxyPlayers = 16
galaxyPlayerGroup = 2
galaxyGroupDist = 1.0
galaxyMinR = 5
galaxyDensity = {5: 3, 10: 3, 15: 3}
galaxyResources = {
# format resourceID : (minDist, maxDist, number of resources)
1 : (12, 15, 0), # TL 1 + 2
2 : (12, 15, 0), # TL 1 + 2
3 : (8, 11, 0), # TL 3 + 4
4 : (8, 11, 0), # TL 3 + 4
5 : (8, 11, 0), # TL 3 + 4
6 : (5, 6, 0), # TL 5
7 : (5, 6, 0), # TL 5
8 : (5, 6, 0), # TL 5
}
galaxyDiseases = {
# format diseaseID : (minDist, maxDist, number of diseases)
1 : (12, 15, 0), # TL 1 + 2
2 : (12, 15, 0), # TL 1 + 2
3 : (8, 11, 0), # TL 3 + 4
4 : (8, 11, 0), # TL 3 + 4
5 : (8, 11, 0), # TL 3 + 4
6 : (5, 6, 0), # TL 5
7 : (5, 6, 0), # TL 5
8 : (5, 6, 0), # TL 5
}
if 0: # THIS IS THE RECOMMENDED MEDIUM GALAXY
galaxyID = 'Circle42P'
galaxyCenter = (50.0, 50.0)
galaxyRadius = 50.0
galaxyStartR = (32.0, 36.0)
#galaxyPlayers = 30
#galaxyPlayerGroup = 2
galaxyPlayers = 42
galaxyPlayerGroup = 3
galaxyGroupDist = 4.0
galaxyMinR = 7.5
galaxyDensity = {7.5: 3, 10: 4, 20: 5, 30: 5.5, 40: 6, 50: 6}
galaxyResources = {
# format resourceID : (minDist, maxDist, number of resources)
1 : (20, 45, 15), # TL 1 + 2
2 : (20, 45, 15), # TL 1 + 2
3 : (8, 15, 7), # TL 3 + 4
4 : (8, 15, 7), # TL 3 + 4
5 : (8, 15, 7), # TL 3 + 4
6 : (7.5, 9, 1), # TL 5
7 : (7.5, 9, 1), # TL 5
8 : (7.5, 9, 1), # TL 5
}
galaxyDiseases = {
# format diseaseID : (minDist, maxDist, number of diseases)
1 : (20, 45, 8), # TL 1 + 2
2 : (20, 45, 8), # TL 1 + 2
3 : (5, 15, 4), # TL 3 + 4
4 : (5, 15, 4), # TL 3 + 4
5 : (5, 15, 4), # TL 3 + 4
6 : (0, 5, 1), # TL 5
7 : (0, 5, 1), # TL 5
8 : (0, 5, 1), # TL 5
}
if 0: # Large Galaxy
galaxyID = 'Circle65P'
galaxyCenter = (75.0, 75.0)
galaxyRadius = 75.0
galaxyStartR = (45.0, 52.5)
galaxyPlayers = 65
galaxyPlayerGroup = 5
#galaxyPlayers = 48
#galaxyPlayerGroup = 4
galaxyGroupDist = 8
galaxyMinR = 7.5
galaxyDensity = {7.5: 3, 10: 4, 20: 5, 30: 5.5, 60: 6, 75: 6}
galaxyResources = {
# format resourceID : (minDist, maxDist, number of resources)
1 : (20, 67.5, 45), # TL 1 + 2
2 : (20, 67.5, 45), # TL 1 + 2
3 : (10, 20, 10), # TL 3 + 4
4 : (10, 20, 10), # TL 3 + 4
5 : (10, 20, 10), # TL 3 + 4
6 : (7.5, 9, 1), # TL 5
7 : (7.5, 9, 1), # TL 5
8 : (7.5, 9, 1), # TL 5
}
galaxyDiseases = {
# format diseaseID : (minDist, maxDist, number of diseases)
1 : (20, 67.5, 16), # TL 1 + 2
2 : (20, 67.5, 16), # TL 1 + 2
3 : (5, 15, 4), # TL 3 + 4
4 : (5, 15, 4), # TL 3 + 4
5 : (5, 15, 4), # TL 3 + 4
6 : (0, 5, 1), # TL 5
7 : (0, 5, 1), # TL 5
8 : (0, 5, 1), # TL 5
}
class Galaxy:
def __init__(self):
self.systems = []
self.centerX = 0.0
self.centerY = 0.0
self.radius = 0.0
class System:
def __init__(self):
self.x = 0.0
self.y = 0.0
self.name = '?'
self.compOf = None
self.starClass = '?'
self.starSubclass = 0
self.planets = []
self._closest = []
self.hasSR = 0
self.hasDisease = 0
self._moveable = 1
class Planet:
def __init__(self):
self.compOf = None
self.type = '?'
self.diameter = 0
self.minerals = 0
self.environ = 0
self.energy = 0
self.slots = 0
self.maxSlots = 0
self.starting = 0
self.strategicRes = 0
self.disease = 0
def generateGalaxy(galaxy):
secX = 0
for sectors in sectorsSpec:
secY = 0
for sector, starting in sectors:
minX = secX * sectorSize[0] + sectorsOffset[0]
maxX = minX + sectorSize[0]
minY = secY * sectorSize[1] + sectorsOffset[1]
maxY = minY + sectorSize[1]
for i in xrange(0, sector):
system = System()
galaxy.systems.append(system)
system.x = random.uniform(minX, maxX)
system.y = random.uniform(minY, maxY)
system.compOf = galaxy
generateSystem(system)
for i in xrange(0, starting):
x = random.uniform(minX, maxX)
y = random.uniform(minY, maxY)
galaxy.systems.append(generateStartingSystem(galaxy, x, y))
secY += 1
secX += 1
def generateStartingSystem(galaxy, x, y):
while 1:
system = System()
system.x = x
system.y = y
system.compOf = galaxy
generateSystem(system)
# check system properties
e = 0
h = 0
d = 0
ok = 1
for planet in system.planets:
if planet.type == 'E': e += 1; planet.starting = 1
elif planet.type in ('D', 'R', 'C'):
if planet.slots > 5: d += 1
else: ok = 0; break
elif planet.type == 'H': h += 1
elif planet.type == 'M': ok = 0; break
if ok and e == 1 and h == 1 and d == 1:
break
return system
def generateGalaxy2(galaxy):
galaxy.centerX = galaxyCenter[0]
galaxy.centerY = galaxyCenter[1]
galaxy.radius = galaxyRadius
r = galaxyMinR + random.uniform(0, 0.5)
dkeys = galaxyDensity.keys()
dkeys.sort()
dkeys.reverse()
prevR = 5
while r <= galaxyRadius:
for key in dkeys:
if key <= r:
density = galaxyDensity[key]
break
print r, density
d = 2 * math.pi * r
aoff = random.uniform(0, math.pi * 2)
dangle = density / d * math.pi * 0.9
        for i in range(0, int(d / density)):
angle = aoff + i * density / d * math.pi * 2
angle += random.uniform(-dangle, dangle)
tr = random.uniform(prevR + 0.1, r)
while 1:
acceptable = 0
system = System()
generateSystem(system)
# check requirements
for planet in system.planets:
if planet.type in ('D', 'R', 'C', 'H', 'M', 'E') and \
planet.slots > 0:
acceptable = 1
break
if acceptable:
break
galaxy.systems.append(system)
system.x = math.cos(angle) * tr + galaxyCenter[0]
system.y = math.sin(angle) * tr + galaxyCenter[1]
system.compOf = galaxy
system.dist = tr
system.angle = angle
prevR = r
r += random.uniform(2, 4)
# generate central black hole
system = System()
system.x = galaxyCenter[0]
system.y = galaxyCenter[1]
system.starClass = "b-"
system.starSubclass = 7
system.compOf = galaxy
system._moveable = 0
galaxy.systems.append(system)
# generate starting systems
if galaxyPlayers:
r = (galaxyStartR[0] + galaxyStartR[1]) / 2
d = 2 * math.pi * r
print "Player distance:", d / galaxyPlayers
gaoff = random.uniform(0, math.pi * 2)
for i in range(0, galaxyPlayers / galaxyPlayerGroup):
print "Placing group:", i + 1, "of", galaxyPlayers / galaxyPlayerGroup
angle = gaoff + i * math.pi * 2 / (galaxyPlayers / galaxyPlayerGroup)
tr = random.uniform(galaxyStartR[0], galaxyStartR[1])
gx = math.cos(angle) * tr + galaxyCenter[0]
gy = math.sin(angle) * tr + galaxyCenter[1]
aoff = random.uniform(0, math.pi * 2)
for j in range(0, galaxyPlayerGroup):
angle = aoff + j * math.pi * 2 / galaxyPlayerGroup
x = math.cos(angle) * galaxyGroupDist + gx
y = math.sin(angle) * galaxyGroupDist + gy
system = generateStartingSystem(galaxy, x, y)
galaxy.systems.append(system)
# strategic resources
keys = galaxyResources.keys()
keys.sort()
keys.reverse()
for key in keys:
print "Placing resource", key
minR, maxR, count = galaxyResources[key]
aoff = random.uniform(0, math.pi * 2)
for i in range(0, count):
angle = aoff + i * math.pi * 2 / count
tr = random.uniform(minR, maxR)
x = math.cos(angle) * tr + galaxyCenter[0]
y = math.sin(angle) * tr + galaxyCenter[1]
# find closest system
closest = galaxy.systems[0]
minDist = 99999 #(closest.x - x) ** 2 + (closest.y - y) ** 2
for system in galaxy.systems:
dist = (system.x - x) ** 2 + (system.y - y) ** 2
if dist < minDist and system.hasSR == 0:
hasDRC = 0
starting = 0
# find suitable planet
for planet in system.planets:
if planet.starting:
starting = 1
if planet.type in ("D", "R", "C"):
hasDRC = 1
if not starting and hasDRC:
minDist = dist
closest = system
print " System", closest.x, closest.y, math.sqrt(minDist)
# find planet on the closest system
planets = []
for planet in closest.planets:
if planet.type in ("D", "R", "C"):
planets.append(planet)
planet = random.choice(planets)
planet.strategicRes = key
system = planet.compOf
system.hasSR = 1
print " Planet", planet.type
# diseases
keys = galaxyDiseases.keys()
keys.sort()
keys.reverse()
for key in keys:
print "Placing disease", key
minR, maxR, count = galaxyDiseases[key]
aoff = random.uniform(0, math.pi * 2)
for i in range(0, count):
angle = aoff + i * math.pi * 2 / count
tr = random.uniform(minR, maxR)
x = math.cos(angle) * tr + galaxyCenter[0]
y = math.sin(angle) * tr + galaxyCenter[1]
# find closest system
closest = galaxy.systems[0]
minDist = 99999 #(closest.x - x) ** 2 + (closest.y - y) ** 2
for system in galaxy.systems:
dist = (system.x - x) ** 2 + (system.y - y) ** 2
if dist < minDist and system.hasDisease == 0:
hasHME = 0
starting = 0
# find suitable planet
for planet in system.planets:
if planet.starting:
starting = 1
if planet.type in ("M", "E"):
hasHME = 1
if not starting and hasHME:
minDist = dist
closest = system
print " System", closest.x, closest.y, math.sqrt(minDist)
# find planet on the closest system
planets = []
for planet in closest.planets:
if planet.type in ("M", "E"):
planets.append(planet)
planet = random.choice(planets)
planet.disease = key
system = planet.compOf
system.hasDisease = 1
print " Planet", planet.type
def generateSystem(system, ranges = None):
# system class and subclass
# c -> supergiant
# g -> giant
# D -> dwarf
# NS -> neutron star
# BH -> black hole
num = random.randrange(1, 1000000 + 1)
system.starSubclass = random.randrange(0, 10)
if num < 10: system.starClass = 'cB'
elif num < 20: system.starClass = 'cA'
elif num < 40: system.starClass = 'cF'
elif num < 60: system.starClass = 'cG'
elif num < 80: system.starClass = 'cK'
elif num < 100: system.starClass = 'cM'
elif num < 500: system.starClass = 'gF'
elif num < 1000: system.starClass = 'gG'
elif num < 5500: system.starClass = 'gK'
elif num < 10000: system.starClass = 'gM'
elif num < 20000: system.starClass = 'mO'; system.starSubclass = random.randrange(5, 10)
elif num < 30000: system.starClass = 'mB'
elif num < 40000: system.starClass = 'mA'
elif num < 120000: system.starClass = 'mF'
elif num < 225000: system.starClass = 'mG'
elif num < 465000: system.starClass = 'mK'
elif num < 930000: system.starClass = 'mM'
elif num < 940000: system.starClass = 'dB'
elif num < 960000: system.starClass = 'dA'
elif num < 980000: system.starClass = 'dF'
elif num < 990000: system.starClass = 'dG'
elif num < 999500: system.starClass = 'dK'
elif num < 999995: system.starClass = 'n-' # 00.0495%
elif num < 1000000: system.starClass = 'b-' # 00.0005%
else: system.starClass = 'b-' # 00.0001%
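    # the thresholds above act as a cumulative distribution over 1..1,000,000:
    # e.g. main-sequence M stars ('mM') cover 465,000-929,999, i.e. roughly 46.5%
    # of systems, while neutron stars and black holes are vanishingly rare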
# planets
num = random.randrange(0, 100)
planets = (0, 0, 0)
mod = 1.0 / 2.0 # was 2 / 3
if system.starClass[0] in ('c', 'g'):
if num < 25:
planets = distributePlanets(mod * random.randrange(1, 7))
elif system.starClass[1] in ('O', 'B'):
if num < 25:
planets = distributePlanets(mod * random.randrange(1, 11))
elif system.starClass[1] == 'A':
if num < 75:
planets = distributePlanets(mod * random.randrange(1, 11))
elif system.starClass[1] == 'F' or system.starClass[1] == 'G':
if num < 95:
num = random.randrange(1, 7) + random.randrange(1, 7) + 3
planets = distributePlanets(mod * num)
elif system.starClass[1] == 'K':
if num < 95:
num = random.randrange(1, 7) + random.randrange(1, 7)
planets = distributePlanets(mod * num)
elif system.starClass[1] == 'M':
if num < 95:
num = random.randrange(1, 7)
planets = distributePlanets(mod * num)
elif system.starClass[0] == 'd':
if num < 10:
num = int(mod * random.randrange(1, 7) / 2)
planets = (0, 0, num)
elif system.starClass[0] == 'n' or system.starClass[0] == 'b':
if num < 5:
num = int(mod * random.randrange(1, 7) / 2)
planets = (0, 0, num)
# planets
zone = 0
for num in planets:
for i in xrange(0, num):
planet = Planet()
planet.compOf = system
system.planets.append(planet)
generatePlanet(zone, planet)
zone += 1
# sort planets by energy
system.planets.sort(lambda a, b: cmp(b.energy, a.energy))
def distributePlanets(num):
num = int(num)
if num <= 3: return (0, 1, num - 1)
elif num <= 5: return (1, 1, num - 2)
    elif num <= 7: return (1, 2, num - 3)
    elif num <= 11: return (2, 2, num - 4)
    elif num <= 15: return (2, 3, num - 5)
def generatePlanet(zone, planet):
sc = planet.compOf.starClass
if sc == 'mF' or sc == 'mG' or sc == 'mK': isFGK = 1
else: isFGK = 0
if sc[0] == 'd' or sc == 'n-' or sc == 'b-': isDNB = 1
else: isDNB = 0
# diameter and type of planet
num = random.randrange(0, 100)
if zone == 0: # Zone A
if num < 5: planet.type = 'A' # 5%
elif num < 10: planet.type = 'G'; planet.diameter = dice(3, 6, 0) * 10000 # 5%
elif num < 60: planet.type = 'R'; planet.diameter = dice(1, 10, 0) * 1000 # 50% - rock
elif num < 70: planet.type = 'D'; planet.diameter = dice(2, 6, 2) * 1000 # 10% - desert
elif num < 100: planet.type = 'H'; planet.diameter = dice(3, 6, 1) * 1000 # 30% - hostile
elif zone == 1: # Zone B
if num < 10: planet.type = 'A' # 10%
elif num < 15: planet.type = 'G'; planet.diameter = dice(3, 6, 0) * 10000 # 5%
elif num < 25: planet.type = 'R'; planet.diameter = dice(1, 10, 0) * 1000 # 10% - rock
elif num < 45: planet.type = 'D'; planet.diameter = dice(2, 6, 2) * 1000 # 20% - desert
elif num < 70: planet.type = 'H'; planet.diameter = dice(3, 6, 1) * 1000 # 25% - hostile
elif num < 90:
if isFGK:
planet.type = 'M'; planet.diameter = dice(2, 6, 5) * 1000 # FGK / 20% - marginal
else:
planet.type = 'H'; planet.diameter = dice(3, 6, 1) * 1000 # Else / 20% - hostile
elif num < 100:
if isFGK:
# planet.type = 'E'; planet.diameter = dice(2, 6, 5) * 1000
planet.type = 'E'; planet.diameter = dice(1, 4, 13) * 1000 # FGK / 10% - terran
else:
planet.type = 'H'; planet.diameter = dice(3, 6, 1) * 1000 # Else / 10% - hostile
elif zone == 2: # Zone C
if num < 15: planet.type = 'A' # 15%
elif num < 75: planet.type = 'G'; planet.diameter = dice(3, 6, 0) * 10000 # 60%
elif num < 80: planet.type = 'R'; planet.diameter = dice(1, 10, 0) * 1000 # 5% - rock
elif num < 90: planet.type = 'C'; planet.diameter = dice(1, 10, 0) * 1000 # 10% - cold
elif num < 95: planet.type = 'D'; planet.diameter = dice(2, 6, 2) * 1000 # 5% - desert
elif num < 100:
if isDNB:
planet.type = 'C'; planet.diameter = dice(1, 10, 0) * 1000 # DNB / 5% - cold
else:
planet.type = 'H'; planet.diameter = dice(3, 6, 1) * 1000 # Else / 5% - hostile
# energy
planet.energy = random.randrange(100 - zone * 50, 150 - zone * 50)
# minerals
if planet.type[0] in ('R', 'D', 'H', 'M'):
density = dice(1, 6, 0) / 2.0 + 3
planet.minerals = int(((planet.diameter / 500.0) + density * 10.0 + random.randrange(1, 101) / 2.0 - 45) * 2)
elif planet.type[0] == 'A':
diameter = dice(1, 10, 0) * 1000 # rock planet
density = dice(1, 6, 0) / 2.0 + 3
planet.minerals = int(((diameter / 500.0) + density * 10.0 + random.randrange(1, 101) / 2.0 - 45) * 2)
elif planet.type[0] == 'G':
diameter = dice(3, 6, 1) * 1000 # earth like planet
density = dice(1, 6, 0) / 2.0 + 3
planet.minerals = int(((diameter / 500.0) + density * 10.0 + random.randrange(1, 101) / 2.0 - 45) * 2)
elif planet.type == 'E':
planet.minerals = 100
else:
planet.minerals = 0
if planet.minerals < 0:
planet.minerals = 0
# environment
if planet.type == 'E': planet.environ = 100
elif planet.type == 'M': planet.environ = random.randrange(25, 51)
elif planet.type == 'H': planet.environ = random.randrange(12, 26)
elif planet.type == 'D': planet.environ = random.randrange(6, 13)
elif planet.type == 'C': planet.environ = random.randrange(0, 7)
elif planet.type == 'R': planet.environ = random.randrange(0, 7)
else: planet.environ = 0
# slots
slotsMod = 0.67
planet.maxSlots = int((planet.diameter / 1000) * 1.5 * slotsMod)
if planet.type == 'E': planet.slots = 9 # planet.slots = int(planet.maxSlots * 0.50)
elif planet.type == 'M': planet.slots = int(planet.maxSlots * 0.50)
elif planet.type == 'H': planet.slots = int(planet.maxSlots * 0.50)
elif planet.type == 'D': planet.slots = int(planet.maxSlots * 0.75)
elif planet.type == 'C': planet.slots = int(planet.maxSlots * 0.75)
elif planet.type == 'R': planet.slots = int(planet.maxSlots * 0.75)
else: planet.slots = 0
    # make sure that all planets except A and G have at least one slot
if planet.type in "EMHDCR" and planet.slots == 0:
#@print "Fixing slots", planet.type, planet.slots, planet.maxSlots
planet.maxSlots = max(1, planet.maxSlots)
planet.slots = max(1, planet.slots)
#print planet.type, planet.environ, planet.minerals
def dice(num, range, offset):
result = offset
for i in xrange(0, num):
result += random.randrange(1, range + 1)
return result
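# dice(num, range, offset) returns offset plus the sum of `num` rolls of a
# `range`-sided die; e.g. dice(3, 6, 1), used for hostile planet diameters above,
# yields 4..19 before the callers scale it by 1000.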
def shiftSystems(galaxy, min, max, delta):
print 'Shifting...'
min = min * min
max = max * max
minMinDist = 1000000
maxMinDist = 0
for system1 in galaxy.systems:
if not system1._moveable:
continue
minDist = [1000000, 100000, 100000]
closestSystems = [None, None, None]
for system2 in galaxy.systems:
if system1 == system2 or not system2._moveable:
continue
dist = (system1.x - system2.x) ** 2 + (system1.y - system2.y) ** 2
if dist < minDist[0]:
minDist.pop()
minDist.insert(0, dist)
closestSystems.pop()
closestSystems.insert(0, system2)
elif dist < minDist[1]:
minDist.pop()
minDist.insert(1, dist)
closestSystems.pop()
closestSystems.insert(1, system2)
elif dist < minDist[2]:
minDist.pop()
minDist.insert(2, dist)
closestSystems.pop()
closestSystems.insert(2, system2)
system1._closest = closestSystems
for closestSystem in closestSystems:
if not closestSystem:
continue
dist = (system1.x - closestSystem.x) ** 2 + (system1.y - closestSystem.y) ** 2
if dist < min and closestSystem:
# move system away
if system1.x > closestSystem.x:
system1.x += random.uniform(0, delta)
closestSystem.x -= random.uniform(0, delta)
else:
system1.x -= random.uniform(0, delta)
closestSystem.x += random.uniform(0, delta)
if system1.y > closestSystem.y:
system1.y += random.uniform(0, delta)
closestSystem.y -= random.uniform(0, delta)
else:
system1.y -= random.uniform(0, delta)
closestSystem.y += random.uniform(0, delta)
elif dist > max and closestSystem:
# move systems closer
if system1.x < closestSystem.x:
system1.x += random.uniform(0, delta)
closestSystem.x -= random.uniform(0, delta)
else:
system1.x -= random.uniform(0, delta)
closestSystem.x += random.uniform(0, delta)
if system1.y < closestSystem.y:
system1.y += random.uniform(0, delta)
closestSystem.y -= random.uniform(0, delta)
else:
system1.y -= random.uniform(0, delta)
closestSystem.y += random.uniform(0, delta)
if dist < minMinDist: minMinDist = dist
if dist > maxMinDist: maxMinDist = dist
print 'Finished [min. dist = <%.2f; %.2f>]' % (math.sqrt(minMinDist), math.sqrt(maxMinDist))
return math.sqrt(minMinDist), math.sqrt(maxMinDist)
## info
def getInfo(galaxy):
starTypes = {}
planetTypes = {}
planets = 0
maxPlanets = 0
minPlanets = 999
planetDist = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for system in galaxy.systems:
starTypes[system.starClass] = starTypes.get(system.starClass, 0) + 1
for planet in system.planets:
planetTypes[planet.type] = planetTypes.get(planet.type, 0) + 1
planets += 1
sysPlanets = len(system.planets)
maxPlanets = max(maxPlanets, sysPlanets)
minPlanets = min(minPlanets, sysPlanets)
planetDist[sysPlanets] += 1
stars = len(galaxy.systems)
print 'Systems:', stars
print starTypes
print 'Planets per system:', planetDist
print 'Planets:', planets
print 'min %d, max %d, avg %.2f' % (minPlanets, maxPlanets, float(planets) / stars)
print 'Types:', planetTypes
return stars, starTypes, planets, planetTypes
## saving
def saveGalaxy(id, galaxy):
print 'Saving...'
# names
loadSystemNames()
# save
fh = open('galaxy-%s.xml' % id, 'w')
print >>fh, '<?xml version="1.0" encoding="UTF-8"?>'
print >>fh, '<universe>'
print >>fh, '\t<galaxy id="%s" x="%.2f" y="%.2f">' % (
id, galaxy.centerX, galaxy.centerY
)
print >>fh, '\t\t<properties radius="%.2f"/>' % galaxy.radius
for system in galaxy.systems:
saveSystem(fh, system)
print >>fh, '\t</galaxy>'
print >>fh, '</universe>'
fh.close()
print 'Saved.'
def saveSystem(fh, system):
print >>fh, '\t\t<system x="%.2f" y="%.2f">' % (system.x, system.y)
# name = 'SCN-%04d%04d' % (system.x * 10, system.y * 10)
global systemNames
name = random.choice(systemNames)
systemNames.remove(name)
print >>fh, '\t\t\t<properties starClass="%s%d" name="%s"/>' % \
(system.starClass, system.starSubclass, name)
for planet in system.planets:
savePlanet(fh, planet)
print >>fh, '\t\t</system>'
def savePlanet(fh, planet):
print >>fh, '\t\t\t<planet>'
print >>fh, '\t\t\t\t<properties plType="%s" plMin="%d" plBio="%d" plEn="%d" plDiameter="%d" plSlots="%d" plMaxSlots="%d" plStratRes="%d" plDisease="%d" plStarting="%d"/>' % \
(planet.type, planet.minerals, planet.environ, planet.energy, planet.diameter, planet.slots, planet.maxSlots, planet.strategicRes, planet.disease, planet.starting)
if planet.starting:
print >>fh, '\t\t\t\t<startingpoint/>'
print >>fh, '\t\t\t</planet>'
## drawing
stars = {
'cB': (0x99, 0xff, 0xff, 8),
'cA': (0xff, 0xff, 0xff, 8),
'cF': (0xff, 0xff, 0x99, 8),
'cG': (0xff, 0xff, 0x00, 8),
'cK': (0xff, 0x99, 0x00, 8),
'cM': (0xff, 0x00, 0x00, 8),
'gF': (0xff, 0xff, 0x99, 4),
'gG': (0xff, 0xff, 0x00, 4),
'gK': (0xff, 0x99, 0x00, 4),
'gM': (0xff, 0x00, 0x00, 4),
'mO': (0x00, 0xff, 0xff, 2),
'mB': (0x99, 0xff, 0xff, 2),
'mA': (0xff, 0xff, 0xff, 2),
'mF': (0xff, 0xff, 0x99, 2),
'mG': (0xff, 0xff, 0x00, 2),
'mK': (0xff, 0x99, 0x00, 2),
'mM': (0xff, 0x00, 0x00, 2),
'dB': (0x99, 0xff, 0xff, 1),
'dA': (0xff, 0xff, 0xff, 1),
'dF': (0xff, 0xff, 0x99, 1),
'dG': (0xff, 0xff, 0x00, 1),
'dK': (0xff, 0x99, 0x00, 1),
'n-': (0xff, 0x00, 0xff, 1),
'b-': (0xff, 0x00, 0x00, 1),
}
screen = None
scale = 5
def drawGalaxy(galaxy, showStarting, showSRes, showDiseases):
screen.fill((0x00, 0x00, 0x00))
for system in galaxy.systems:
x = int(system.x * scale)
y = int(system.y * scale)
r, g, b, radius = stars[system.starClass]
for planet in system.planets:
if planet.starting and showStarting:
screen.fill((0xff, 0xff, 0xff), (x - 2 , y - 2, radius + 4, radius + 4))
if planet.strategicRes in showSRes:
screen.fill((0xff, 0xff, 0x00), (x - 2 , y - 2, radius + 4, radius + 4))
if planet.disease in showDiseases:
screen.fill((0x00, 0xff, 0xff), (x - 2 , y - 2, radius + 4, radius + 4))
if planet.type == 'E':
screen.fill((0x00, 0x00, 0xff), (x - 1 , y - 1, radius + 2, radius + 2))
if planet.type == 'M':
screen.fill((0x00, 0xc0, 0x00), (x - 1 , y - 1, radius + 2, radius + 2))
screen.fill((r, g, b), (x, y, radius, radius))
pygame.display.flip()
pygame.event.pump()
def drawDistances(galaxy, min, max):
min = min * min
max = max * max
screen.fill((0x00, 0x00, 0x00))
for system in galaxy.systems:
x = int(system.x * scale)
y = int(system.y * scale)
for tmp in system._closest:
x2 = int(tmp.x * scale)
y2 = int(tmp.y * scale)
dist = (system.x - tmp.x) ** 2 + (system.y - tmp.y) ** 2
if dist < min: color = (0xff, 0xff, 0x00)
elif dist > max: color = (0x00, 0xff, 0x00)
else: color = (0x80, 0x80, 0x80)
pygame.draw.line(screen, color, (x, y), (x2, y2), 1)
pygame.display.flip()
pygame.event.pump()
def main():
global screen
flags = SWSURFACE
pygame.init()
bestdepth = pygame.display.mode_ok((1000, 720), flags, 32)
screen = pygame.display.set_mode((1000, 720), flags, bestdepth)
pygame.mouse.set_visible(1)
pygame.display.set_caption('OSGen')
galaxy = None
showStarting = 1
showSRes = [1, 2, 3, 4, 5, 6, 7, 8]
showDiseases = [1, 2, 3, 4, 5, 6, 7, 8]
while 1:
evt = pygame.event.wait()
if evt.type == QUIT:
break
elif evt.type == KEYUP and evt.key == K_ESCAPE:
break
elif evt.type == KEYUP and evt.key == K_g:
galaxy = Galaxy()
generateGalaxy2(galaxy)
drawGalaxy(galaxy, showStarting, showSRes, showDiseases)
stars, starTypes, planet, planetTypes = getInfo(galaxy)
elif evt.type == KEYUP and evt.key == K_a:
galaxy = Galaxy()
generateGalaxy(galaxy)
drawGalaxy(galaxy, showStarting, showSRes, showDiseases)
stars, starTypes, planet, planetTypes = getInfo(galaxy)
elif evt.type == KEYUP and evt.key == K_h:
step = 0
while step < 25:
min, max = shiftSystems(galaxy, 1.5, 5.0, 0.25)
drawDistances(galaxy, 1.5, 5.0)
if min >= 1.0 and max <= 5.0:
break
step += 1
drawGalaxy(galaxy, showStarting, showSRes, showDiseases)
elif evt.type == KEYUP and evt.key == K_s:
saveGalaxy(galaxyID, galaxy)
elif evt.type == KEYUP and evt.key == K_i:
getInfo(galaxy)
elif evt.type == KEYUP and evt.key == K_0:
showStarting = not showStarting
drawGalaxy(galaxy, showStarting, showSRes, showDiseases)
elif evt.type == KEYUP and evt.key >= K_1 and evt.key <= K_8:
sr = evt.key - ord("0")
if evt.mod & KMOD_CTRL:
if sr in showDiseases:
showDiseases.remove(sr)
else:
showDiseases.append(sr)
else:
if sr in showSRes:
showSRes.remove(sr)
else:
showSRes.append(sr)
#@print "Showing resources", showSRes
drawGalaxy(galaxy, showStarting, showSRes, showDiseases)
## load names
systemNames = []
def loadSystemNames():
global systemNames
names = {}
for line in file(sys.argv[1]):
names[line.strip()] = None
systemNames = names.keys()
# speedup
try:
import psyco
psyco.full()
except ImportError:
pass
main()
|
OuterDeepSpace/OuterDeepSpace
|
generators/osgen2.py
|
Python
|
gpl-2.0
| 27,347
| 0.037774
|
from data.database.sourceGroupAssignmentTable import getSourceIdToAssignedGroups
from data.database.sourceGroupTable import getAllSourceGroupNames
from data.database.sourceTable import getAllSources
(categoryToSourceObjects, sourceCategoryNames, sourceIdToAssignments, sourceIdToSourceObject,
unCategorizedSource) = ({}, None, None, None, None)
def getSourceById(sourceId):
return sourceIdToSourceObject[sourceId]
def getSourceCategoryNames():
return sourceCategoryNames
def getSources(categoryName):
return categoryToSourceObjects[categoryName]
def getUncategorizedSource():
return unCategorizedSource
def _addToCategoryLookup(source):
global categoryToSourceObjects
for c in source.categories:
if c in categoryToSourceObjects:
categoryToSourceObjects[c].append(source)
else:
categoryToSourceObjects[c] = [source]
def __sourceToCategorys(source):
source_id = source.lookupId
if (source_id in sourceIdToAssignments):
return sourceIdToAssignments[source_id]
else:
return []
def initSourceManager():
global sourceCategoryNames, sourceIdToAssignments, sourceIdToSourceObject, unCategorizedSource
unCategorizedSource = []
sourceCategoryNames = getAllSourceGroupNames()
sourceIdToAssignments = getSourceIdToAssignedGroups()
sourceIdToSourceObject = {}
for s in getAllSources():
s.categories = __sourceToCategorys(s)
sourceIdToSourceObject[s.lookupId] = s
if(len(s.categories) != 0):
_addToCategoryLookup(s)
else:
unCategorizedSource.append(s)
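# A minimal usage sketch: initSourceManager() must run first to build the
# module-level lookup tables; afterwards, for example:
#   for name in getSourceCategoryNames():
#       sources_in_group = getSources(name)
#   leftovers = getUncategorizedSource()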
|
TobyRoseman/PS4M
|
engine/sourceManager.py
|
Python
|
mit
| 1,630
| 0.007362
|
import logging
import math
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models import Q
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import View
from experiments_manager.models import Experiment
from marketplace.models import (ExternalPackage, InternalPackage, Package,
PackageResource, PackageVersion)
from .forms import RegisterForm, WorkbenchUserForm
from .models import WorkbenchUser, get_workbench_user
logger = logging.getLogger(__name__)
@login_required
def index(request):
workbench_user = WorkbenchUser.objects.get(user=request.user)
experiments = Experiment.objects.filter(owner=workbench_user).order_by('-created')[:5]
packages = InternalPackage.objects.filter(owner=workbench_user).order_by('-created')[:5]
logger.info('%s accessed index', workbench_user)
recent_versions = list(PackageVersion.objects.all().order_by('-created')[:5])
recent_resources = list(PackageResource.objects.all().order_by('-created')[:5])
recent_internal = list(InternalPackage.objects.all().order_by('-created')[:5])
recent_external = list(ExternalPackage.objects.all().order_by('-created')[:5])
recent_experiments = list(Experiment.objects.filter(public=True).order_by('created')[:5])
total_list = recent_versions + recent_resources + recent_internal + recent_external + recent_experiments
total_list = reversed(sorted(total_list, key=lambda x: x.created))
return render(request, 'index.html', {'experiments': experiments,
'packages': packages,
'activities': total_list})
class DetailProfileView(View):
def get(self, request):
workbench_user = get_workbench_user(request.user)
return render(request, "user_manager/workbenchuser_detail.html", {'workbench_user': workbench_user})
class EditProfileView(View):
def get(self, request):
workbench_user = get_workbench_user(request.user)
form = WorkbenchUserForm(instance=workbench_user)
logger.info('%s edit get profile view', workbench_user)
return render(request, "user_manager/workbenchuser_edit.html", {'form': form})
def post(self, request):
workbench_user = get_workbench_user(request.user)
form = WorkbenchUserForm(request.POST, instance=workbench_user)
if form.is_valid():
current_password = form.cleaned_data['current_password']
user = workbench_user.user
if current_password:
if user.check_password(current_password) and change_password_of_user(workbench_user, form):
messages.add_message(request, messages.SUCCESS, 'Your password has been changed.')
else:
messages.add_message(request, messages.ERROR, 'Passwords did not match '
'or incorrect current password.')
return render(request, "user_manager/workbenchuser_edit.html", {'form': form})
form.save()
logger.info('%s edited profile successfully', workbench_user)
return redirect(to='/')
else:
return render(request, "user_manager/workbenchuser_edit.html", {'form': form})
def change_password_of_user(w_user, form):
new_password = form.cleaned_data['new_password']
new_password_again = form.cleaned_data['new_password_again']
if new_password == new_password_again:
user = w_user.user
user.set_password(new_password)
user.save()
return True
return False
class RegisterView(View):
def get(self, request):
form = RegisterForm()
return render(request, 'user_manager/register.html', {'form': form})
def post(self, request):
form = RegisterForm(self.request.POST)
if form.is_valid():
new_email = form.cleaned_data['email']
if not existing_user_check(new_email):
user = User.objects.create_user(form.cleaned_data['username'],
new_email,
form.cleaned_data['password'])
workbench_user = WorkbenchUser.objects.get(user=user)
workbench_user.netid = form.cleaned_data['netid']
workbench_user.save()
logger.info('new user created: %s', workbench_user)
return redirect(to='/')
else:
return render(request, 'user_manager/register.html', {'form': form})
else:
return render(request, 'user_manager/register.html', {'form': form})
def existing_user_check(email_address):
return User.objects.filter(email=email_address)
class WorkbenchUserDetailView(View):
def get(self, request, username):
workbench_user = get_object_or_404(WorkbenchUser, user__username=username)
recent_experiments = Experiment.objects.filter(owner=workbench_user, completed=True).order_by('-created')[:5]
recent_packages = Package.objects.filter(owner=workbench_user).order_by('-created')[:5]
return render(request, "user_manager/user_profile.html", {'w_user': workbench_user,
'experiments': recent_experiments,
'packages': recent_packages})
@login_required
def search(request):
if 'q' in request.GET:
q = request.GET.get('q')
page = request.GET.get('page')
page = int(page) if page is not None else 1
results, nr_of_pages = get_search_results(request.user, q, page)
return render(request, 'search.html', {'results': results, 'query': q, 'page': page,
'next_page': page + 1,
'previous_page': page - 1,
'nr_of_pages': nr_of_pages,
'nr_of_pages_range': range(1, nr_of_pages+1)})
return render(request, 'search.html', {})
def get_search_results(user, q, page_nr=1, page_size=25):
start_value = (page_nr - 1) * page_size
end_value = start_value + page_size
search_query_list = build_search_queries(q, user)
total_count = sum([x.count() for x in search_query_list])
nr_of_pages = int(math.ceil(total_count / page_size))
total_list = [list(x.order_by('-created')[start_value:end_value]) for x in search_query_list]
total_flat_list = [item for sublist in total_list for item in sublist]
total_flat_list = sorted(total_flat_list, key=lambda x: x.created)
return total_flat_list, nr_of_pages
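# Pagination arithmetic sketch: with the default page_size of 25, page_nr=3 slices
# each query set with [50:75]; nr_of_pages is ceil(total_count / page_size), so a
# total of 51 matches across all queries yields 3 pages, the last holding one result.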
def build_search_queries(q, user):
package_version_query = PackageVersion.objects.filter(version_nr__contains=q)
package_resource_query = PackageResource.objects.filter(title__contains=q)
internal_package_query = InternalPackage.objects.filter(name__contains=q)
external_package_query = ExternalPackage.objects.filter(name__contains=q)
users_query = WorkbenchUser.objects.filter(user__username=q)
experiment_query = Experiment.objects.filter(Q(owner__user=user, title__contains=q) |
Q(completed=True, title__contains=q))
return package_version_query, package_resource_query, internal_package_query, external_package_query, \
experiment_query, users_query
|
MOOCworkbench/MOOCworkbench
|
user_manager/views.py
|
Python
|
mit
| 7,680
| 0.004297
|
#!/usr/bin/env python
#Demo code
#
# simple demonstration script showing real-time thermal imaging
# using the MLX90620 16x4 thermopile array and the mlxd daemon
#
# Copyright (C) 2015 Chuck Werbick
#
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import time
import picamera
import numpy as np
import subprocess
import os, sys
import datetime
import skimage
from skimage import io, exposure, transform, img_as_float, img_as_ubyte
from time import sleep
import matplotlib
import matplotlib.pyplot as plt
# IR registration parameters
ROT = np.deg2rad(90)
SCALE = (36.2, 36.4)
OFFSET = (530, 170)
def getImage():
fn = r'/home/pi/tmp.jpg';
proc = subprocess.Popen('raspistill -o %s -w 640 -h 480 -n -t 3' %(fn),
shell=True, stderr=subprocess.STDOUT)
proc.wait()
im = io.imread(fn, as_grey=True)
im = exposure.equalize_hist(im)
return skimage.img_as_ubyte(im)
im = getImage()
with picamera.PiCamera() as camera:
camera.resolution = (640, 480)
camera.framerate = 20
camera.start_preview()
# get the temperature array, and align with the image
fifo = open('/var/run/mlx90620.sock', 'r')
# get the whole FIFO
ir_raw = fifo.read()
# trim to 128 bytes
ir_trimmed = ir_raw[0:128]
# go all numpy on it
ir = np.frombuffer(ir_trimmed, np.uint16)
# set the array shape to the sensor shape (16x4)
ir = ir.reshape((16, 4))[::-1, ::-1]
ir = img_as_float(ir)
# stretch contrast on our heat map
p2, p98 = np.percentile(ir, (2, 98))
ir = exposure.rescale_intensity(ir, in_range=(p2, p98))
# increase even further? (optional)
# ir = exposure.equalize_hist(ir)
# turn our array into pretty colors
cmap = plt.get_cmap('spectral')
rgba_img = cmap(ir)
rgb_img = np.delete(rgba_img, 3, 2)
# align the IR array with the camera
tform = transform.AffineTransform(scale=SCALE, rotation=ROT, translation=OFFSET)
ir_aligned = transform.warp(rgb_img, tform.inverse, mode='constant', output_shape=im.shape)
# turn it back into a ubyte so it'll display on the preview overlay
ir_byte = img_as_ubyte(ir_aligned)
#add the overlay
o = camera.add_overlay(np.getbuffer(ir_byte), layer=3, alpha=90)
    # update loop; wrapped in try/finally so the cleanup below actually runs
    # when the loop is interrupted or the FIFO read fails
    try:
        while True:
            sleep(0.25)
            ir_raw = fifo.read()
            ir_trimmed = ir_raw[0:128]
            ir = np.frombuffer(ir_trimmed, np.uint16)
            ir = ir.reshape((16, 4))[::-1, ::-1]
            ir = img_as_float(ir)
            p2, p98 = np.percentile(ir, (2, 98))
            ir = exposure.rescale_intensity(ir, in_range=(p2, p98))
            ir = exposure.equalize_hist(ir)
            cmap = plt.get_cmap('spectral')
            rgba_img = cmap(ir)
            rgb_img = np.delete(rgba_img, 3, 2)
            # align the IR array with the image
            tform = transform.AffineTransform(scale=SCALE, rotation=ROT, translation=OFFSET)
            ir_aligned = transform.warp(rgb_img, tform.inverse, mode='constant', output_shape=im.shape)
            ir_byte = img_as_ubyte(ir_aligned)
            o.update(np.getbuffer(ir_byte))
    finally:
        print('Error! Closing...')
        camera.remove_overlay(o)
        fifo.close()
|
alphacharlie/mlxd
|
mlxview.py
|
Python
|
gpl-2.0
| 3,859
| 0.007256
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipes', '0002_recipecollection_title'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('title', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='recipe',
name='tags',
field=models.ManyToManyField(to='recipes.Tag', related_name='recipes'),
preserve_default=True,
),
]
|
agnethesoraa/recipemaster
|
recipemaster/recipes/migrations/0003_auto_20150325_2130.py
|
Python
|
mit
| 820
| 0.002439
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test-appropriate entry points into the gRPC Python Beta API."""
import grpc
from grpc.beta import implementations
def not_really_secure_channel(
host, port, channel_credentials, server_host_override):
"""Creates an insecure Channel to a remote host.
Args:
host: The name of the remote host to which to connect.
port: The port of the remote host to which to connect.
channel_credentials: The implementations.ChannelCredentials with which to
connect.
server_host_override: The target name used for SSL host name checking.
Returns:
An implementations.Channel to the remote host through which RPCs may be
conducted.
"""
target = '%s:%d' % (host, port)
channel = grpc.secure_channel(
target, channel_credentials._credentials,
((b'grpc.ssl_target_name_override', server_host_override,),))
return implementations.Channel(channel)
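# A minimal usage sketch (host, port, certificate path and override name are
# hypothetical):
#   creds = implementations.ssl_channel_credentials(open('ca.pem', 'rb').read())
#   channel = not_really_secure_channel(
#       'localhost', 50051, creds, 'foo.test.google.fr')
# The override makes SSL host-name checking accept the test certificate even
# though the channel actually targets a different host.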
|
leifurhauks/grpc
|
src/python/grpcio/tests/unit/beta/test_utilities.py
|
Python
|
bsd-3-clause
| 2,422
| 0.002064
|
from django.conf.urls.defaults import patterns, include, url
from rest_framework.urlpatterns import format_suffix_patterns
from startups import views
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('startups.views',
# Examples:
# url(r'^$', 'angellist_demo.views.home', name='home'),
# url(r'^angellist_demo/', include('angellist_demo.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
url(r'^startups$', 'startup_list'),
url(r'^startups/(?P<pk>[0-9]+)$', 'startup_detail'),
)
urlpatterns = format_suffix_patterns(urlpatterns)
|
PredictionIO/Demo-AngelList
|
backend/angellist_demo/urls.py
|
Python
|
apache-2.0
| 840
| 0.009524
|
import copy
import warnings
import collections
import tempfile
import sys
import os
import sqlite3
import six
from textwrap import dedent
from gffutils import constants
from gffutils import version
from gffutils import bins
from gffutils import helpers
from gffutils import feature
from gffutils import interface
from gffutils import iterators
import logging
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
def deprecation_handler(kwargs):
"""
As things change from version to version, deal with them here.
"""
# After reconsidering, let's leave `infer_gene_extent` for another release.
# But when it's time to deprecate it, use this code:
if 0:
if 'infer_gene_extent' in kwargs:
raise ValueError(
"'infer_gene_extent' is deprecated as of version 0.8.4 in "
"favor of more granular control over inferring genes and/or "
"transcripts. The previous default was "
"'infer_gene_extent=True`, which corresponds to the new "
"defaults "
"'disable_infer_genes=False' and "
"'disable_infer_transcripts=False'. Please see the docstring "
"for gffutils.create_db for details.")
if len(kwargs) > 0:
raise TypeError("unhandled kwarg in %s" % kwargs)
class _DBCreator(object):
def __init__(self, data, dbfn, force=False, verbose=False, id_spec=None,
merge_strategy='merge', checklines=10, transform=None,
force_dialect_check=False, from_string=False, dialect=None,
default_encoding='utf-8',
disable_infer_genes=False,
disable_infer_transcripts=False,
infer_gene_extent=True,
force_merge_fields=None,
text_factory=sqlite3.OptimizedUnicode,
pragmas=constants.default_pragmas, _keep_tempfiles=False,
**kwargs):
"""
Base class for _GFFDBCreator and _GTFDBCreator; see create_db()
function for docs
"""
self._keep_tempfiles = _keep_tempfiles
if force_merge_fields is None:
force_merge_fields = []
if merge_strategy == 'merge':
if set(['start', 'end']).intersection(force_merge_fields):
raise ValueError("Can't merge start/end fields since "
"they must be integers")
warn = set(force_merge_fields)\
.intersection(['frame', 'strand'])
for w in warn:
warnings.warn(
"%s field will be merged for features with the same ID; "
"this may result in unusable features." % w)
self.force_merge_fields = force_merge_fields
self.pragmas = pragmas
self.merge_strategy = merge_strategy
self.default_encoding = default_encoding
if not infer_gene_extent:
warnings.warn("'infer_gene_extent' will be deprecated. For now, "
"the following equivalent values were automatically "
"set: 'disable_infer_genes=True', "
"'disable_infer_transcripts=True'. Please use these "
"instead in the future.")
disable_infer_genes = True
disable_infer_transcripts = True
self.disable_infer_genes = disable_infer_genes
self.disable_infer_transcripts = disable_infer_transcripts
self._autoincrements = collections.defaultdict(int)
if force:
if os.path.exists(dbfn):
os.unlink(dbfn)
self.dbfn = dbfn
self.id_spec = id_spec
if isinstance(dbfn, six.string_types):
conn = sqlite3.connect(dbfn)
else:
conn = dbfn
self.conn = conn
self.conn.row_factory = sqlite3.Row
self.set_verbose(verbose)
if text_factory is not None:
if self.verbose == 'debug':
logger.debug('setting text factory to %s' % text_factory)
self.conn.text_factory = text_factory
self._data = data
self._orig_logger_level = logger.level
self.iterator = iterators.DataIterator(
data=data, checklines=checklines, transform=transform,
force_dialect_check=force_dialect_check, from_string=from_string,
dialect=dialect
)
def set_verbose(self, verbose=None):
if verbose == 'debug':
logger.setLevel(logging.DEBUG)
elif verbose:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.ERROR)
self.verbose = verbose
def _increment_featuretype_autoid(self, key):
self._autoincrements[key] += 1
return '%s_%s' % (key, self._autoincrements[key])
def _id_handler(self, f):
"""
Given a Feature from self.iterator, figure out what the ID should be.
        This uses `self.id_spec` to identify the ID.
"""
# If id_spec is a string, convert to iterable for later
if isinstance(self.id_spec, six.string_types):
id_key = [self.id_spec]
elif hasattr(self.id_spec, '__call__'):
id_key = [self.id_spec]
# If dict, then assume it's a feature -> attribute mapping, e.g.,
# {'gene': 'gene_id'} for GTF
elif isinstance(self.id_spec, dict):
try:
id_key = self.id_spec[f.featuretype]
if isinstance(id_key, six.string_types):
id_key = [id_key]
# Otherwise, use default auto-increment.
except KeyError:
return self._increment_featuretype_autoid(f.featuretype)
# Otherwise assume it's an iterable.
else:
id_key = self.id_spec
# Then try them in order, returning the first one that works:
for k in id_key:
if hasattr(k, '__call__'):
_id = k(f)
if _id:
if _id.startswith('autoincrement:'):
return self._increment_featuretype_autoid(_id[14:])
return _id
else:
# use GFF fields rather than attributes for cases like :seqid:
# or :strand:
if (len(k) > 3) and (k[0] == ':') and (k[-1] == ':'):
# No [0] here -- only attributes key/vals are forced into
# lists, not standard GFF fields.
return getattr(f, k[1:-1])
else:
try:
return f.attributes[k][0]
except (KeyError, IndexError):
pass
# If we get here, then default autoincrement
return self._increment_featuretype_autoid(f.featuretype)
def _get_feature(self, ID):
c = self.conn.cursor()
results = c.execute(
constants._SELECT + ' WHERE id = ?', (ID,)).fetchone()
return feature.Feature(dialect=self.iterator.dialect, **results)
def _do_merge(self, f, merge_strategy, add_duplicate=False):
"""
Different merge strategies upon name conflicts.
"error":
Raise error
"warning"
Log a warning
"merge":
Combine old and new attributes -- but only if everything else
matches; otherwise error. This can be slow, but is thorough.
"create_unique":
Autoincrement based on the ID, always creating a new ID.
"replace":
Replaces existing database feature with `f`.
"""
if merge_strategy == 'error':
raise ValueError("Duplicate ID {0.id}".format(f))
if merge_strategy == 'warning':
logger.warning(
"Duplicate lines in file for id '{0.id}'; "
"ignoring all but the first".format(f))
return None, merge_strategy
elif merge_strategy == 'replace':
return f, merge_strategy
# This is by far the most complicated strategy.
elif merge_strategy == 'merge':
# Recall that if we made it to this method, there was at least one
# ID collision.
# This will eventually contain the features that match ID AND that
# match non-attribute fields like start, stop, strand, etc.
features_to_merge = []
# Iterate through all features that have the same ID according to
# the id_spec provided.
if self.verbose == "debug":
logger.debug('candidates with same idspec: %s'
% ([i.id for i in self._candidate_merges(f)]))
# If force_merge_fields was provided, don't pay attention to these
# fields if they're different. We are assuming attributes will be
# different, hence the [:-1]
_gffkeys_to_check = list(
set(constants._gffkeys[:-1])
.difference(self.force_merge_fields))
for existing_feature in self._candidate_merges(f):
# Check other GFF fields (if not specified in
# self.force_merge_fields) to make sure they match.
other_attributes_same = True
for k in _gffkeys_to_check:
if getattr(existing_feature, k) != getattr(f, k):
other_attributes_same = False
break
if other_attributes_same:
# All the other GFF fields match. So this existing feature
# should be merged.
features_to_merge.append(existing_feature)
if self.verbose == 'debug':
logger.debug(
'same attributes between:\nexisting: %s'
'\nthis : %s'
% (existing_feature, f))
else:
# The existing feature's GFF fields don't match, so don't
# append anything.
if self.verbose == 'debug':
logger.debug(
'different attributes between:\nexisting: %s\n'
'this : %s'
% (existing_feature, f))
if (len(features_to_merge) == 0):
# No merge candidates found, so we should make a new ID for
# this feature. This can happen when idspecs match, but other
# fields (like start/stop) are different. Call this method
# again, but using the "create_unique" strategy, and then
# record the newly-created ID in the duplicates table.
orig_id = f.id
uniqued_feature, merge_strategy = self._do_merge(
f, merge_strategy='create_unique')
self._add_duplicate(orig_id, uniqued_feature.id)
return uniqued_feature, merge_strategy
# Whoo! Found some candidates to merge.
else:
if self.verbose == 'debug':
logger.debug('num candidates: %s' % len(features_to_merge))
# This is the attributes dictionary we'll be modifying.
merged_attributes = copy.deepcopy(f.attributes)
# Keep track of non-attribute fields (this will be an empty
# dict if no force_merge_fields)
final_fields = dict(
[(field, set([getattr(f, field)]))
for field in self.force_merge_fields])
# Update the attributes
for existing_feature in features_to_merge:
if self.verbose == 'debug':
logger.debug(
'\nmerging\n\n%s\n%s\n' % (f, existing_feature))
for k in existing_feature.attributes.keys():
v = merged_attributes.setdefault(k, [])
v.extend(existing_feature[k])
merged_attributes[k] = v
# Update the set of non-attribute fields found so far
for field in self.force_merge_fields:
final_fields[field].update(
[getattr(existing_feature, field)])
# Set the merged attributes
for k, v in merged_attributes.items():
merged_attributes[k] = list(set(v))
existing_feature.attributes = merged_attributes
# Set the final merged non-attributes
for k, v in final_fields.items():
setattr(existing_feature, k, ','.join(sorted(map(str, v))))
if self.verbose == 'debug':
logger.debug('\nMERGED:\n%s' % existing_feature)
return existing_feature, merge_strategy
elif merge_strategy == 'create_unique':
f.id = self._increment_featuretype_autoid(f.id)
return f, merge_strategy
else:
raise ValueError("Invalid merge strategy '%s'"
% (merge_strategy))
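    # Illustrative sketch of the 'merge' strategy above (the example lines are
    # hypothetical, not taken from the original source). Two features sharing
    # an ID and all non-attribute fields, e.g.
    #   chr1 . exon 1 100 . + . ID=exon1;Parent=tx1
    #   chr1 . exon 1 100 . + . ID=exon1;note=dup
    # end up as a single database row whose attributes are the union
    # (ID=exon1;Parent=tx1;note=dup), with duplicate attribute values removed
    # via list(set(v)) above.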
def _add_duplicate(self, idspecid, newid):
"""
Adds a duplicate ID (as identified by id_spec) and its new ID to the
duplicates table so that they can be later searched for merging.
Parameters
----------
        idspecid : str
            The ID identified by id_spec
        newid : str
            The primary key used in the features table
"""
c = self.conn.cursor()
try:
c.execute(
'''
INSERT INTO duplicates
(idspecid, newid)
VALUES (?, ?)''',
(idspecid, newid))
except sqlite3.ProgrammingError:
c.execute(
'''
INSERT INTO duplicates
(idspecid, newid)
VALUES (?, ?)''',
(idspecid.decode(self.default_encoding),
newid.decode(self.default_encoding))
)
if self.verbose == 'debug':
logger.debug('added id=%s; new=%s' % (idspecid, newid))
self.conn.commit()
def _candidate_merges(self, f):
"""
Identifies those features that originally had the same ID as `f`
(according to the id_spec), but were modified because of duplicate
IDs.
"""
candidates = [self._get_feature(f.id)]
c = self.conn.cursor()
results = c.execute(
constants._SELECT + '''
JOIN duplicates ON
duplicates.newid = features.id WHERE duplicates.idspecid = ?''',
(f.id,)
)
for i in results:
candidates.append(
feature.Feature(dialect=self.iterator.dialect, **i))
return list(set(candidates))
def _populate_from_lines(self, lines):
raise NotImplementedError
def _update_relations(self):
raise NotImplementedError
def _drop_indexes(self):
c = self.conn.cursor()
for index in constants.INDEXES:
c.execute("DROP INDEX IF EXISTS ?", (index,))
self.conn.commit()
def set_pragmas(self, pragmas):
"""
Set pragmas for the current database connection.
Parameters
----------
pragmas : dict
Dictionary of pragmas; see constants.default_pragmas for a template
and http://www.sqlite.org/pragma.html for a full list.
"""
self.pragmas = pragmas
c = self.conn.cursor()
c.executescript(
';\n'.join(
['PRAGMA %s=%s' % i for i in self.pragmas.items()]
)
)
self.conn.commit()
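    # Illustrative call (assumption; the pragma names and values below are
    # ordinary SQLite pragmas, not values required by gffutils):
    #   creator.set_pragmas({'synchronous': 'NORMAL', 'journal_mode': 'MEMORY'})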
def _init_tables(self):
"""
Table creation
"""
c = self.conn.cursor()
v = sqlite3.sqlite_version_info
self.set_pragmas(self.pragmas)
c.executescript(constants.SCHEMA)
self.conn.commit()
def _finalize(self):
"""
Various last-minute stuff to perform after file has been parsed and
imported.
In general, if you'll be adding stuff to the meta table, do it here.
"""
c = self.conn.cursor()
c.executemany('''
INSERT INTO directives VALUES (?)
''', ((i,) for i in self.iterator.directives))
c.execute(
'''
INSERT INTO meta (version, dialect)
VALUES (:version, :dialect)''',
dict(version=version.version,
dialect=helpers._jsonify(self.iterator.dialect))
)
c.executemany(
'''
INSERT OR REPLACE INTO autoincrements VALUES (?, ?)
''', list(self._autoincrements.items()))
# These indexes are *well* worth the effort and extra storage: over
# 500x speedup on code like this:
#
# genes = []
# for i in db.features_of_type('snoRNA'):
# for k in db.parents(i, level=1, featuretype='gene'):
# genes.append(k.id)
#
logger.info("Creating relations(parent) index")
c.execute('DROP INDEX IF EXISTS relationsparent')
c.execute('CREATE INDEX relationsparent ON relations (parent)')
logger.info("Creating relations(child) index")
c.execute('DROP INDEX IF EXISTS relationschild')
c.execute('CREATE INDEX relationschild ON relations (child)')
logger.info("Creating features(featuretype) index")
c.execute('DROP INDEX IF EXISTS featuretype')
c.execute('CREATE INDEX featuretype ON features (featuretype)')
self.conn.commit()
self.warnings = self.iterator.warnings
def create(self):
"""
Calls various methods sequentially in order to fully build the
database.
"""
# Calls each of these methods in order. _populate_from_lines and
# _update_relations must be implemented in subclasses.
self._init_tables()
self._populate_from_lines(self.iterator)
self._update_relations()
self._finalize()
def update(self, iterator):
self._populate_from_lines(iterator)
self._update_relations()
def execute(self, query):
"""
Execute a query directly on the database.
"""
c = self.conn.cursor()
result = c.execute(query)
for i in result:
yield i
def _insert(self, feature, cursor):
"""
Insert a feature into the database.
"""
try:
cursor.execute(constants._INSERT, feature.astuple())
except sqlite3.ProgrammingError:
cursor.execute(
constants._INSERT, feature.astuple(self.default_encoding))
def _replace(self, feature, cursor):
"""
Insert a feature into the database.
"""
try:
cursor.execute(
constants._UPDATE,
list(feature.astuple()) + [feature.id])
except sqlite3.ProgrammingError:
cursor.execute(
constants._INSERT,
list(feature.astuple(self.default_encoding)) + [feature.id])
class _GFFDBCreator(_DBCreator):
def __init__(self, *args, **kwargs):
"""
_DBCreator subclass specifically for working with GFF files.
create_db() delegates to this class -- see that function for docs
"""
super(_GFFDBCreator, self).__init__(*args, **kwargs)
def _populate_from_lines(self, lines):
c = self.conn.cursor()
self._drop_indexes()
last_perc = 0
logger.info("Populating features")
msg = ("Populating features table and first-order relations: "
"%d features\r")
# c.executemany() was not as much of an improvement as I had expected.
#
# Compared to a benchmark of doing each insert separately:
# executemany using a list of dicts to iterate over is ~15% slower
# executemany using a list of tuples to iterate over is ~8% faster
features_seen = None
_features, _relations = [], []
for i, f in enumerate(lines):
features_seen = i
# Percent complete
if self.verbose:
if i % 1000 == 0:
sys.stderr.write(msg % i)
sys.stderr.flush()
# TODO: handle ID creation here...should be combined with the
# INSERT below (that is, don't IGNORE below but catch the error and
# re-try with a new ID). However, is this doable with an
# execute-many?
f.id = self._id_handler(f)
try:
self._insert(f, c)
except sqlite3.IntegrityError:
fixed, final_strategy = self._do_merge(f, self.merge_strategy)
if final_strategy == 'merge':
c.execute(
'''
UPDATE features SET attributes = ?
WHERE id = ?
''', (helpers._jsonify(fixed.attributes),
fixed.id))
# For any additional fields we're merging, update those as
# well.
if self.force_merge_fields:
_set_clause = ', '.join(
['%s = ?' % field
for field in self.force_merge_fields])
values = [
getattr(fixed, field)
for field in self.force_merge_fields] + [fixed.id]
c.execute(
'''
UPDATE features SET %s
WHERE id = ?
''' % _set_clause, tuple(values))
elif final_strategy == 'replace':
self._replace(f, c)
elif final_strategy == 'create_unique':
self._insert(f, c)
if 'Parent' in f.attributes:
for parent in f.attributes['Parent']:
c.execute(
'''
INSERT OR IGNORE INTO relations VALUES
(?, ?, 1)
''', (parent, f.id))
if features_seen is None:
raise ValueError("No lines parsed -- was an empty file provided?")
self.conn.commit()
if self.verbose:
logger.info(msg % i)
def _update_relations(self):
logger.info("Updating relations")
c = self.conn.cursor()
c2 = self.conn.cursor()
c3 = self.conn.cursor()
# TODO: pre-compute indexes?
# c.execute('CREATE INDEX ids ON features (id)')
# c.execute('CREATE INDEX parentindex ON relations (parent)')
# c.execute('CREATE INDEX childindex ON relations (child)')
# self.conn.commit()
if isinstance(self._keep_tempfiles, six.string_types):
suffix = self._keep_tempfiles
else:
suffix = '.gffutils'
tmp = tempfile.NamedTemporaryFile(delete=False, suffix=suffix).name
fout = open(tmp, 'w')
# Here we look for "grandchildren" -- for each ID, get the child
# (parenthetical subquery below); then for each of those get *its*
# child (main query below).
#
# Results are written to temp file so that we don't read and write at
# the same time, which would slow things down considerably.
c.execute('SELECT id FROM features')
for parent in c:
c2.execute('''
SELECT child FROM relations WHERE parent IN
(SELECT child FROM relations WHERE parent = ?)
''', tuple(parent))
for grandchild in c2:
fout.write('\t'.join((parent[0], grandchild[0])) + '\n')
fout.close()
def relations_generator():
for line in open(fout.name):
parent, child = line.strip().split('\t')
yield dict(parent=parent, child=child, level=2)
c.executemany(
'''
INSERT OR IGNORE INTO relations VALUES
(:parent, :child, :level)
''', relations_generator())
# TODO: Index creation. Which ones affect performance?
c.execute("DROP INDEX IF EXISTS binindex")
c.execute("CREATE INDEX binindex ON features (bin)")
self.conn.commit()
if not self._keep_tempfiles:
os.unlink(fout.name)
class _GTFDBCreator(_DBCreator):
def __init__(self, *args, **kwargs):
"""
create_db() delegates to this class -- see that function for docs
"""
self.transcript_key = kwargs.pop('transcript_key', 'transcript_id')
self.gene_key = kwargs.pop('gene_key', 'gene_id')
self.subfeature = kwargs.pop('subfeature', 'exon')
super(_GTFDBCreator, self).__init__(*args, **kwargs)
def _populate_from_lines(self, lines):
msg = (
"Populating features table and first-order relations: %d "
"features\r"
)
c = self.conn.cursor()
# Only check this many features to see if it's a gene or transcript and
# issue the appropriate warning.
gene_and_transcript_check_limit = 1000
last_perc = 0
lines_seen = 0
for i, f in enumerate(lines):
# See issues #48 and #20.
if lines_seen < gene_and_transcript_check_limit:
if (
f.featuretype == 'transcript' and
not self.disable_infer_transcripts
):
warnings.warn(
"It appears you have a transcript feature in your GTF "
"file. You may want to use the "
"`disable_infer_transcripts` "
"option to speed up database creation")
elif (
f.featuretype == 'gene' and
not self.disable_infer_genes
):
warnings.warn(
"It appears you have a gene feature in your GTF "
"file. You may want to use the "
"`disable_infer_genes` "
"option to speed up database creation")
lines_seen = i + 1
# Percent complete
if self.verbose:
if i % 1000 == 0:
sys.stderr.write(msg % i)
sys.stderr.flush()
f.id = self._id_handler(f)
# Insert the feature itself...
try:
self._insert(f, c)
except sqlite3.IntegrityError:
fixed, final_strategy = self._do_merge(f, self.merge_strategy)
if final_strategy == 'merge':
c.execute(
'''
UPDATE features SET attributes = ?
WHERE id = ?
''', (helpers._jsonify(fixed.attributes),
fixed.id))
# For any additional fields we're merging, update those as
# well.
if self.force_merge_fields:
_set_clause = ', '.join(
['%s = ?' % field
for field in self.force_merge_fields])
values = [getattr(fixed, field)
for field in self.force_merge_fields]\
+ [fixed.id]
c.execute(
'''
UPDATE features SET %s
WHERE id = ?
''' % _set_clause, values)
elif final_strategy == 'replace':
self._replace(f, c)
elif final_strategy == 'create_unique':
self._insert(f, c)
# For an on-spec GTF file,
# self.transcript_key = "transcript_id"
# self.gene_key = "gene_id"
relations = []
parent = None
grandparent = None
if self.transcript_key in f.attributes:
parent = f.attributes[self.transcript_key][0]
relations.append((parent, f.id, 1))
if self.gene_key in f.attributes:
grandparent = f.attributes[self.gene_key]
if len(grandparent) > 0:
grandparent = grandparent[0]
relations.append((grandparent, f.id, 2))
if parent is not None:
relations.append((grandparent, parent, 1))
# Note the IGNORE, so relationships defined many times in the file
# (e.g., the transcript-gene relation on pretty much every line in
# a GTF) will only be included once.
c.executemany(
'''
INSERT OR IGNORE INTO relations (parent, child, level)
VALUES (?, ?, ?)
''', relations
)
if lines_seen == 0:
raise ValueError("No lines parsed -- was an empty file provided?")
logger.info('Committing changes')
self.conn.commit()
if self.verbose:
logger.info(msg % i)
def _update_relations(self):
if self.disable_infer_genes and self.disable_infer_transcripts:
return
# TODO: do any indexes speed this up?
c = self.conn.cursor()
c2 = self.conn.cursor()
logger.info("Creating relations(parent) index")
c.execute('DROP INDEX IF EXISTS relationsparent')
c.execute('CREATE INDEX relationsparent ON relations (parent)')
logger.info("Creating relations(child) index")
c.execute('DROP INDEX IF EXISTS relationschild')
c.execute('CREATE INDEX relationschild ON relations (child)')
if not (self.disable_infer_genes or self.disable_infer_transcripts):
msg = 'gene and transcript'
elif self.disable_infer_transcripts:
msg = 'gene'
elif self.disable_infer_genes:
msg = 'transcript'
logger.info('Inferring %s extents '
'and writing to tempfile' % msg)
if isinstance(self._keep_tempfiles, six.string_types):
suffix = self._keep_tempfiles
else:
suffix = '.gffutils'
tmp = tempfile.NamedTemporaryFile(delete=False, suffix=suffix).name
fout = open(tmp, 'w')
self._tmpfile = tmp
# This takes some explanation...
#
# First, the nested subquery gets the level-1 parents of
# self.subfeature featuretypes. For an on-spec GTF file,
# self.subfeature = "exon". So this subquery translates to getting the
# distinct level-1 parents of exons -- which are transcripts.
#
# OK, so this first subquery is now a list of transcripts; call it
# "firstlevel".
#
# Then join firstlevel on relations, but the trick is to now consider
# each transcript a *child* -- so that relations.parent (on the first
# line of the query) will be the first-level parent of the transcript
# (the gene).
#
#
# The result is something like:
#
# transcript1 gene1
# transcript2 gene1
# transcript3 gene2
#
# Note that genes are repeated; below we need to ensure that only one
# is added. To ensure this, the results are ordered by the gene ID.
#
# By the way, we do this even if we're only looking for transcripts or
# only looking for genes.
c.execute(
'''
SELECT DISTINCT firstlevel.parent, relations.parent
FROM (
SELECT DISTINCT parent
FROM relations
JOIN features ON features.id = relations.child
WHERE features.featuretype = ?
AND relations.level = 1
)
AS firstlevel
JOIN relations ON firstlevel.parent = child
WHERE relations.level = 1
ORDER BY relations.parent
''', (self.subfeature,))
# Now we iterate through those results (using a new cursor) to infer
# the extent of transcripts and/or genes.
last_gene_id = None
n_features = 0
for transcript_id, gene_id in c:
if not self.disable_infer_transcripts:
# transcript extent
c2.execute(
'''
SELECT MIN(start), MAX(end), strand, seqid
FROM features
JOIN relations ON
features.id = relations.child
WHERE parent = ? AND featuretype == ?
''', (transcript_id, self.subfeature))
transcript_start, transcript_end, strand, seqid = c2.fetchone()
transcript_attributes = {
self.transcript_key: [transcript_id],
self.gene_key: [gene_id]
}
transcript_bin = bins.bins(
transcript_start, transcript_end, one=True)
# Write out to file; we'll be reading it back in shortly. Omit
# score, frame, source, and extra since they will always have
# the same default values (".", ".", "gffutils_derived", and []
# respectively)
fout.write('\t'.join(map(str, [
transcript_id,
seqid,
transcript_start,
transcript_end,
strand,
'transcript',
transcript_bin,
helpers._jsonify(transcript_attributes)
])) + '\n')
n_features += 1
if not self.disable_infer_genes:
# Infer gene extent, but only if we haven't done so already
if gene_id != last_gene_id:
c2.execute(
'''
SELECT MIN(start), MAX(end), strand, seqid
FROM features
JOIN relations ON
features.id = relations.child
WHERE parent = ? AND featuretype == ?
''', (gene_id, self.subfeature))
gene_start, gene_end, strand, seqid = c2.fetchone()
gene_attributes = {self.gene_key: [gene_id]}
gene_bin = bins.bins(gene_start, gene_end, one=True)
fout.write('\t'.join(map(str, [
gene_id,
seqid,
gene_start,
gene_end,
strand,
'gene',
gene_bin,
helpers._jsonify(gene_attributes)
])) + '\n')
last_gene_id = gene_id
n_features += 1
fout.close()
def derived_feature_generator():
"""
Generator of items from the file that was just created...
"""
keys = ['parent', 'seqid', 'start', 'end', 'strand',
'featuretype', 'bin', 'attributes']
for line in open(fout.name):
d = dict(list(zip(keys, line.strip().split('\t'))))
d.pop('parent')
d['score'] = '.'
d['source'] = 'gffutils_derived'
d['frame'] = '.'
d['extra'] = []
d['attributes'] = helpers._unjsonify(d['attributes'])
f = feature.Feature(**d)
f.id = self._id_handler(f)
yield f
# Drop the indexes so the inserts are faster
c.execute('DROP INDEX IF EXISTS relationsparent')
c.execute('DROP INDEX IF EXISTS relationschild')
# Insert the just-inferred transcripts and genes. TODO: should we
# *always* use "merge" here for the merge_strategy?
logger.info("Importing inferred features into db")
last_perc = None
for i, f in enumerate(derived_feature_generator()):
perc = int(i / float(n_features) * 100)
if perc != last_perc:
sys.stderr.write('%s of %s (%s%%)\r' % (i, n_features, perc))
sys.stderr.flush()
last_perc = perc
try:
self._insert(f, c)
except sqlite3.IntegrityError:
fixed, final_strategy = self._do_merge(f, 'merge')
c.execute(
'''
UPDATE features SET attributes = ?
WHERE id = ?
''', (helpers._jsonify(fixed.attributes),
fixed.id))
logger.info("Committing changes")
self.conn.commit()
if not self._keep_tempfiles:
os.unlink(fout.name)
# TODO: recreate indexes?
def create_db(data, dbfn, id_spec=None, force=False, verbose=False,
checklines=10, merge_strategy='error', transform=None,
gtf_transcript_key='transcript_id', gtf_gene_key='gene_id',
gtf_subfeature='exon', force_gff=False,
force_dialect_check=False, from_string=False, keep_order=False,
text_factory=sqlite3.OptimizedUnicode, force_merge_fields=None,
pragmas=constants.default_pragmas, sort_attribute_values=False,
dialect=None, _keep_tempfiles=False, infer_gene_extent=True,
disable_infer_genes=False, disable_infer_transcripts=False,
**kwargs):
"""
Create a database from a GFF or GTF file.
For more details on when and how to use the kwargs below, see the examples
in the online documentation (:ref:`examples`).
Parameters
----------
data : string or iterable
If a string (and `from_string` is False), then `data` is the path to
the original GFF or GTF file.
If a string and `from_string` is True, then assume `data` is the actual
data to use.
Otherwise, it's an iterable of Feature objects.
dbfn : string
Path to the database that will be created. Can be the special string
":memory:" to create an in-memory database.
id_spec : string, list, dict, callable, or None
This parameter guides what will be used as the primary key for the
database, which in turn determines how you will access individual
features by name from the database.
If `id_spec=None`, then auto-increment primary keys based on the
feature type (e.g., "gene_1", "gene_2"). This is also the fallback
behavior for the other values below.
If `id_spec` is a string, then look for this key in the attributes. If
it exists, then use its value as the primary key, otherwise
autoincrement based on the feature type. For many GFF3 files, "ID"
usually works well.
If `id_spec` is a list or tuple of keys, then check for each one in
order, using the first one found. For GFF3, this might be ["ID",
"Name"], which would use the ID if it exists, otherwise the Name,
otherwise autoincrement based on the feature type.
If `id_spec` is a dictionary, then it is a mapping of feature types to
what should be used as the ID. For example, for GTF files, `{'gene':
'gene_id', 'transcript': 'transcript_id'}` may be useful. The values
of this dictionary can also be a list, e.g., `{'gene': ['gene_id',
'geneID']}`
If `id_spec` is a callable object, then it accepts a dictionary from
the iterator and returns one of the following:
* None (in which case the feature type will be auto-incremented)
* string (which will be used as the primary key)
* special string starting with "autoincrement:X", where "X" is
a string that will be used for auto-incrementing. For example,
if "autoincrement:chr10", then the first feature will be
"chr10_1", the second "chr10_2", and so on.
force : bool
If `False` (default), then raise an exception if `dbfn` already exists.
Use `force=True` to overwrite any existing databases.
verbose : bool
Report percent complete and other feedback on how the db creation is
progressing.
In order to report percent complete, the entire file needs to be read
once to see how many items there are; for large files you may want to
use `verbose=False` to avoid this.
checklines : int
        Number of lines of the input to inspect when detecting the dialect.
merge_strategy : str
One of {merge, create_unique, error, warning, replace}.
This parameter specifies the behavior when two items have an identical
primary key.
Using `merge_strategy="merge"`, then there will be a single entry in
the database, but the attributes of all features with the same primary
key will be merged.
Using `merge_strategy="create_unique"`, then the first entry will use
the original primary key, but the second entry will have a unique,
        autoincremented primary key assigned to it.
Using `merge_strategy="error"`, a :class:`gffutils.DuplicateID`
exception will be raised. This means you will have to edit the file
yourself to fix the duplicated IDs.
Using `merge_strategy="warning"`, a warning will be printed to the
logger, and the duplicate feature will be skipped.
        Using `merge_strategy="replace"` will replace the entire existing
feature with the new feature.
transform : callable
Function (or other callable object) that accepts a `Feature` object and
returns a (possibly modified) `Feature` object.
gtf_transcript_key, gtf_gene_key : string
Which attribute to use as the transcript ID and gene ID respectively
for GTF files. Default is `transcript_id` and `gene_id` according to
the GTF spec.
gtf_subfeature : string
Feature type to use as a "gene component" when inferring gene and
transcript extents for GTF files. Default is `exon` according to the
GTF spec.
force_gff : bool
If True, do not do automatic format detection -- only use GFF.
force_dialect_check : bool
        If True, the dialect will be checked for every feature (instead of just
`checklines` features). This can be slow, but may be necessary for
inconsistently-formatted input files.
from_string : bool
If True, then treat `data` as actual data (rather than the path to
a file).
keep_order : bool
If True, all features returned from this instance will have the
order of their attributes maintained. This can be turned on or off
database-wide by setting the `keep_order` attribute or with this
kwarg, or on a feature-by-feature basis by setting the `keep_order`
attribute of an individual feature.
Note that a single order of attributes will be used for all features.
Specifically, the order will be determined by the order of attribute
keys in the first `checklines` of the input data. See
helpers._choose_dialect for more information on this.
Default is False, since this includes a sorting step that can get
time-consuming for many features.
infer_gene_extent : bool
DEPRECATED in version 0.8.4. See `disable_infer_transcripts` and
`disable_infer_genes` for more granular control.
disable_infer_transcripts, disable_infer_genes : bool
Only used for GTF files. By default -- and according to the GTF spec --
we assume that there are no transcript or gene features in the file.
gffutils then infers the extent of each transcript based on its
        constituent exons and infers the extent of each gene based on its
constituent transcripts.
This default behavior is problematic if the input file already contains
transcript or gene features (like recent GENCODE GTF files for human),
since 1) the work to infer extents is unnecessary, and 2)
trying to insert an inferred feature back into the database triggers
gffutils' feature-merging routines, which can get time consuming.
The solution is to use `disable_infer_transcripts=True` if your GTF
already has transcripts in it, and/or `disable_infer_genes=True` if it
already has genes in it. This can result in dramatic (100x) speedup.
        Prior to version 0.8.4, setting `infer_gene_extent=False` would
        disable both transcript and gene inference simultaneously. As of
        version 0.8.4, these arguments allow more granular control.
force_merge_fields : list
If merge_strategy="merge", then features will only be merged if their
non-attribute values are identical (same chrom, source, start, stop,
score, strand, phase). Using `force_merge_fields`, you can override
this behavior to allow merges even when fields are different. This
list can contain one or more of ['seqid', 'source', 'featuretype',
'score', 'strand', 'frame']. The resulting merged fields will be
strings of comma-separated values. Note that 'start' and 'end' are not
available, since these fields need to be integers.
text_factory : callable
Text factory to use for the sqlite3 database. See
https://docs.python.org/2/library/\
sqlite3.html#sqlite3.Connection.text_factory
for details. The default sqlite3.OptimizedUnicode will return Unicode
objects only for non-ASCII data, and bytestrings otherwise.
pragmas : dict
Dictionary of pragmas used when creating the sqlite3 database. See
http://www.sqlite.org/pragma.html for a list of available pragmas. The
defaults are stored in constants.default_pragmas, which can be used as
a template for supplying a custom dictionary.
sort_attribute_values : bool
All features returned from the database will have their attribute
values sorted. Typically this is only useful for testing, since this
can get time-consuming for large numbers of features.
_keep_tempfiles : bool or string
False by default to clean up intermediate tempfiles created during GTF
        import. If True, then keep these tempfiles for testing or debugging.
        If string, then keep the tempfile for testing, but also use the string
        as the suffix of the tempfile. This can be useful for testing in
parallel environments.
Returns
-------
New :class:`FeatureDB` object.
"""
_locals = locals()
# Check if any older kwargs made it in
deprecation_handler(kwargs)
kwargs = dict((i, _locals[i]) for i in constants._iterator_kwargs)
# First construct an iterator so that we can identify the file format.
# DataIterator figures out what kind of data was provided (string of lines,
# filename, or iterable of Features) and checks `checklines` lines to
# identify the dialect.
iterator = iterators.DataIterator(**kwargs)
kwargs.update(**_locals)
if dialect is None:
dialect = iterator.dialect
if isinstance(iterator, iterators._FeatureIterator):
# However, a side-effect of this is that if `data` was a generator,
# then we've just consumed `checklines` items (see
# iterators.BaseIterator.__init__, which calls iterators.peek).
#
# But it also chains those consumed items back onto the beginning, and
    # the result is available as iterator._iter.
#
    # That's what we should be using now for `data`:
kwargs['data'] = iterator._iter
# Since we've already checked lines, we don't want to do it again
kwargs['checklines'] = 0
if force_gff or (dialect['fmt'] == 'gff3'):
cls = _GFFDBCreator
id_spec = id_spec or 'ID'
add_kwargs = dict(
id_spec=id_spec,
)
elif dialect['fmt'] == 'gtf':
cls = _GTFDBCreator
id_spec = id_spec or {'gene': 'gene_id', 'transcript': 'transcript_id'}
add_kwargs = dict(
transcript_key=gtf_transcript_key,
gene_key=gtf_gene_key,
subfeature=gtf_subfeature,
id_spec=id_spec,
)
kwargs.update(**add_kwargs)
kwargs['dialect'] = dialect
c = cls(**kwargs)
c.create()
if dbfn == ':memory:':
db = interface.FeatureDB(c.conn,
keep_order=keep_order,
pragmas=pragmas,
sort_attribute_values=sort_attribute_values,
text_factory=text_factory)
else:
db = interface.FeatureDB(c,
keep_order=keep_order,
pragmas=pragmas,
sort_attribute_values=sort_attribute_values,
text_factory=text_factory)
return db
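# Illustrative usage sketch (not part of the original module; the file names
# are hypothetical):
#   db = create_db('annotation.gtf', dbfn='annotation.db', force=True,
#                  merge_strategy='merge', keep_order=True,
#                  disable_infer_genes=True, disable_infer_transcripts=True)
#   # `db` is the FeatureDB returned above, ready for queries.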
|
hjanime/gffutils
|
gffutils/create.py
|
Python
|
mit
| 50,151
| 0
|
"""
WSGI config for cv project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cv.settings")
application = get_wsgi_application()
|
cthtuf/django-cv
|
cv/wsgi.py
|
Python
|
mit
| 381
| 0
|
"""Support for Spider thermostats."""
import logging
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from . import DOMAIN as SPIDER_DOMAIN
SUPPORT_FAN = ["Auto", "Low", "Medium", "High", "Boost 10", "Boost 20", "Boost 30"]
SUPPORT_HVAC = [HVAC_MODE_HEAT, HVAC_MODE_COOL]
HA_STATE_TO_SPIDER = {
HVAC_MODE_COOL: "Cool",
HVAC_MODE_HEAT: "Heat",
HVAC_MODE_OFF: "Idle",
}
SPIDER_STATE_TO_HA = {value: key for key, value in HA_STATE_TO_SPIDER.items()}
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Spider thermostat."""
if discovery_info is None:
return
devices = [
SpiderThermostat(hass.data[SPIDER_DOMAIN]["controller"], device)
for device in hass.data[SPIDER_DOMAIN]["thermostats"]
]
add_entities(devices, True)
class SpiderThermostat(ClimateEntity):
"""Representation of a thermostat."""
def __init__(self, api, thermostat):
"""Initialize the thermostat."""
self.api = api
self.thermostat = thermostat
@property
def supported_features(self):
"""Return the list of supported features."""
supports = SUPPORT_TARGET_TEMPERATURE
if self.thermostat.has_fan_mode:
supports |= SUPPORT_FAN_MODE
return supports
@property
def unique_id(self):
"""Return the id of the thermostat, if any."""
return self.thermostat.id
@property
def name(self):
"""Return the name of the thermostat, if any."""
return self.thermostat.name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self.thermostat.current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.thermostat.target_temperature
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self.thermostat.temperature_steps
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.thermostat.minimum_temperature
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.thermostat.maximum_temperature
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
return SPIDER_STATE_TO_HA[self.thermostat.operation_mode]
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return SUPPORT_HVAC
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
self.thermostat.set_temperature(temperature)
def set_hvac_mode(self, hvac_mode):
"""Set new target operation mode."""
self.thermostat.set_operation_mode(HA_STATE_TO_SPIDER.get(hvac_mode))
@property
def fan_mode(self):
"""Return the fan setting."""
return self.thermostat.current_fan_speed
def set_fan_mode(self, fan_mode):
"""Set fan mode."""
self.thermostat.set_fan_speed(fan_mode)
@property
def fan_modes(self):
"""List of available fan modes."""
return SUPPORT_FAN
def update(self):
"""Get the latest data."""
self.thermostat = self.api.get_thermostat(self.unique_id)
|
nkgilley/home-assistant
|
homeassistant/components/spider/climate.py
|
Python
|
apache-2.0
| 3,866
| 0.000259
|
#!/usr/bin/env python3
# Copyright (c) 2014-present, The osquery authors
#
# This source code is licensed as defined by the LICENSE file found in the
# root directory of this source tree.
#
# SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only)
import glob
import os
import signal
import shutil
import time
import unittest
# osquery-specific testing utils
import test_base
class DaemonTests(test_base.ProcessGenerator, unittest.TestCase):
@test_base.flaky
def test_1_daemon_without_watchdog(self):
daemon = self._run_daemon({
"disable_watchdog": True,
"disable_extensions": True,
})
self.assertTrue(daemon.isAlive())
daemon.kill()
@test_base.flaky
def test_2_daemon_with_option(self):
logger_path = test_base.getTestDirectory(test_base.TEMP_DIR)
daemon = self._run_daemon(
{
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
},
options_only={
"logger_path": logger_path,
"verbose": True,
})
self.assertTrue(daemon.isAlive())
info_path = os.path.join(logger_path, "osqueryd.INFO*")
def info_exists():
return len(glob.glob(info_path)) > 0
# Wait for the daemon to flush to GLOG.
test_base.expectTrue(info_exists)
# Assign the variable after we have assurances it exists
self.assertTrue(info_exists())
# Lastly, verify that we have permission to read the file
data = ''
with open(glob.glob(info_path)[0], 'r') as fh:
try:
data = fh.read()
except:
pass
self.assertTrue(len(data) > 0)
daemon.kill()
@test_base.flaky
def test_3_daemon_with_watchdog(self):
# This test does not join the service threads properly (waits for int).
if os.environ.get('SANITIZE') is not None:
return
daemon = self._run_daemon({
"allow_unsafe": True,
"disable_watchdog": False,
"ephemeral": True,
"disable_database": True,
"disable_logging": True,
})
self.assertTrue(daemon.isAlive())
# Check that the daemon spawned a child process
children = daemon.getChildren()
self.assertTrue(len(children) > 0)
daemon.kill()
# This will take a few moments to make sure the client process
# dies when the watcher goes away
self.assertTrue(daemon.isDead(children[0]))
@test_base.flaky
def test_3_daemon_lost_worker(self):
# Test that killed workers are respawned by the watcher
if os.environ.get('SANITIZE') is not None:
return
daemon = self._run_daemon({
"allow_unsafe": True,
"disable_watchdog": False,
"ephemeral": True,
"disable_database": True,
"disable_logging": True,
})
self.assertTrue(daemon.isAlive())
# Check that the daemon spawned a child process
children = daemon.getChildren()
self.assertTrue(len(children) > 0)
# Kill only the child worker
os.kill(children[0], signal.SIGINT)
self.assertTrue(daemon.isDead(children[0]))
self.assertTrue(daemon.isAlive())
# Expect the children of the daemon to be respawned
def waitDaemonChildren():
children = daemon.getChildren()
return len(children) > 0
test_base.expectTrue(waitDaemonChildren)
children = daemon.getChildren()
self.assertTrue(len(children) > 0)
@test_base.flaky
def test_4_daemon_sighup(self):
# A hangup signal should not do anything to the daemon.
daemon = self._run_daemon({
"disable_watchdog": True,
})
self.assertTrue(daemon.isAlive())
# Send SIGHUP on posix. Windows does not have SIGHUP so we use SIGTERM
sig = signal.SIGHUP if os.name != "nt" else signal.SIGTERM
os.kill(daemon.proc.pid, sig)
self.assertTrue(daemon.isAlive())
@test_base.flaky
def test_5_daemon_sigint(self):
# An interrupt signal will cause the daemon to stop.
daemon = self._run_daemon({
"disable_watchdog": True,
"ephemeral": True,
"disable_database": True,
"disable_logging": True,
})
self.assertTrue(daemon.isAlive())
# Send a SIGINT
os.kill(daemon.pid, signal.SIGINT)
self.assertTrue(daemon.isDead(daemon.pid, 10))
if os.name != "nt":
self.assertEqual(daemon.retcode, 0)
@test_base.flaky
def test_6_logger_mode(self):
logger_path = test_base.getTestDirectory(test_base.TEMP_DIR)
test_mode = 0o754 # Strange mode that should never exist
daemon = self._run_daemon(
{
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
},
options_only={
"logger_path": logger_path,
"logger_mode": test_mode,
"verbose": True,
})
self.assertTrue(daemon.isAlive())
# Wait for the daemon to write the info log to disk before continuing
info_path = os.path.join(logger_path, "osqueryd.INFO*")
def info_exists():
return len(glob.glob(info_path)) > 0
results_path = os.path.join(logger_path, "osqueryd.results.log")
def results_exists():
return os.path.exists(results_path)
# Wait for the daemon to flush to GLOG.
test_base.expectTrue(info_exists)
test_base.expectTrue(results_exists)
info_path = glob.glob(info_path)[0]
# Both log files should exist, the results should have the given mode.
for pth in [info_path, results_path]:
self.assertTrue(os.path.exists(pth))
# Only apply the mode checks to .log files.
# TODO: Add ACL checks for Windows logs
if pth.find('.log') > 0 and os.name != "nt":
rpath = os.path.realpath(pth)
mode = os.stat(rpath).st_mode & 0o777
self.assertEqual(mode, test_mode)
daemon.kill()
def test_7_logger_stdout(self):
logger_path = test_base.getTestDirectory(test_base.TEMP_DIR)
daemon = self._run_daemon({
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
"logger_plugin": "stdout",
"logger_path": logger_path,
"verbose": True,
})
info_path = os.path.join(logger_path, "osqueryd.INFO")
def pathDoesntExist():
if os.path.exists(info_path):
return False
return True
self.assertTrue(daemon.isAlive())
self.assertTrue(pathDoesntExist())
daemon.kill()
def test_8_hostid_uuid(self):
# Test added to test using UUID as hostname ident for issue #3195
daemon = self._run_daemon({
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
"logger_plugin": "stdout",
"host_identifier": "uuid",
"verbose": True,
})
self.assertTrue(daemon.isAlive())
daemon.kill()
def test_9_hostid_instance(self):
daemon = self._run_daemon({
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
"logger_plugin": "stdout",
"host_identifier": "instance",
"verbose": True,
})
self.assertTrue(daemon.isAlive())
daemon.kill()
def test_config_check_exits(self):
daemon = self._run_daemon({
"config_check": True,
"disable_extensions": True,
"disable_logging": False,
"disable_database": True,
"logger_plugin": "stdout",
"verbose": True,
})
self.assertTrue(daemon.isDead(daemon.pid, 10))
if os.name != "nt":
self.assertEqual(daemon.retcode, 0)
def test_config_dump_exits(self):
daemon = self._run_daemon({
"config_dump": True,
"disable_extensions": True,
"disable_logging": False,
"disable_database": True,
"logger_plugin": "stdout",
"verbose": True,
})
self.assertTrue(daemon.isDead(daemon.pid, 10))
if os.name != "nt":
self.assertEqual(daemon.retcode, 0)
def test_database_dump_exits(self):
daemon = self._run_daemon({
"database_dump": True,
"disable_extensions": True,
"disable_logging": False,
"disable_database": True,
"logger_plugin": "stdout",
"verbose": True,
})
self.assertTrue(daemon.isDead(daemon.pid, 10))
if os.name != "nt":
self.assertEqual(daemon.retcode, 0)
if __name__ == '__main__':
with test_base.CleanChildProcesses():
test_base.Tester().run()
|
hackgnar/osquery
|
tools/tests/test_osqueryd.py
|
Python
|
bsd-3-clause
| 9,307
| 0.000537
|
# -*- coding: utf-8 -*-
actions = {
"up": _(u"Go up in the current buffer"),
"down": _(u"Go down in the current buffer"),
"left": _(u"Go to the previous buffer"),
"right": _(u"Go to the next buffer"),
"next_account": _(u"Focus the next session"),
"previous_account": _(u"Focus the previous session"),
"show_hide": _(u"Show or hide the GUI"),
"post_tweet": _(u"New tweet"),
"post_reply": _(u"Reply"),
"post_retweet": _(u"Retweet"),
"send_dm": _(u"Send direct message"),
"add_to_favourites": _(u"Mark as favourite"),
"remove_from_favourites": _(u"Remove from favourites"),
"follow": _(u"Open the user actions dialogue"),
"user_details": _(u"See user details"),
"view_item": _(u"Show tweet"),
"exit": _(u"Quit"),
"open_timeline": _(u"Open user timeline"),
"remove_buffer": _(u"Destroy buffer"),
"interact": _(u"Interact with the currently focused tweet."),
"url": _(u"Open URL"),
"volume_up": _(u"Increase volume by 5%"),
"volume_down": _(u"Decrease volume by 5%"),
"go_home": _(u"Jump to the first element of a buffer"),
"go_end": _(u"Jump to the last element of the current buffer"),
"go_page_up": _(u"Jump 20 elements up in the current buffer"),
"go_page_down": _(u"Jump 20 elements down in the current buffer"),
"update_profile": _(u"Edit profile"),
"delete": _(u"Delete a tweet or direct message"),
"clear_buffer": _(u"Empty the current buffer"),
"repeat_item": _(u"Repeat last item"),
"copy_to_clipboard": _(u"Copy to clipboard"),
"add_to_list": _(u"Add to list"),
"remove_from_list": _(u"Remove from list"),
"toggle_buffer_mute": _(u"Mute/unmute the active buffer"),
"toggle_session_mute": _(u"Mute/unmute the current session"),
"toggle_autoread": _(u"toggle the automatic reading of incoming tweets in the active buffer"),
"search": _(u"Search on twitter"),
"find": _(u"Find a string in the currently focused buffer"),
"edit_keystrokes": _(u"Show the keystroke editor"),
"view_user_lists": _(u"Show lists for a specified user"),
"get_more_items": _(u"load previous items"),
"reverse_geocode": _(u"Get geolocation"),
"view_reverse_geocode": _(u"Display the tweet's geolocation in a dialog"),
"get_trending_topics": _(u"Create a trending topics buffer"),
"open_conversation": _(u"View conversation"),
}
|
codeofdusk/ProjectMagenta
|
src/keystrokeEditor/constants.py
|
Python
|
gpl-2.0
| 2,205
| 0.021769
|
# -*- coding: utf-8 -*-
#
#
# Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)
# Author: Nicola Malcontenti <nicola.malcontenti@agilebg.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import sale
|
Eficent/sale-workflow
|
product_customer_code_sale/__init__.py
|
Python
|
agpl-3.0
| 880
| 0
|
class Robot:
def __init__(self):
self.__name = ""
@property
def name(self):
return self.__name
@name.setter
def name(self, x):
self.__name = x
class Car:
def __init__(self, model=None):
self.__set_model(model)
def __set_model(self, model):
self.__model = model
def __get_model(self):
return self.__model
model = property(__get_model, __set_model)
x = Robot()
x.name = "apo"
print(x.name)
c = Car()
c.model = "Mercedes"
print(c.model)
|
abdullahcaliskan/Python
|
OOP/props.py
|
Python
|
gpl-2.0
| 467
| 0.038544
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.test import TestCase
from models import Student, StudyGroup, Task, Lab, Subject, GroupSubject
class PortalTest(TestCase):
def setUp(self):
self.study_group1 = StudyGroup.objects.create(name="10А")
self.study_group2 = StudyGroup.objects.create(name="11Б")
self.subject1 = Subject.objects.create(name="Оптика")
self.subject2 = Subject.objects.create(name="Механика")
self.group_subject11 = GroupSubject.objects.create(
study_group=self.study_group1, subject=self.subject1
)
self.group_subject22 = GroupSubject.objects.create(
study_group=self.study_group2, subject=self.subject2
)
self.student1 = Student.objects.create_user(
username="ivan", email=None, password="123456", study_group=self.study_group1
)
self.student2 = Student.objects.create_user(
username="pavel", email=None, password="123456", study_group=self.study_group2
)
self.lab1 = Lab.objects.create(name="Кольца ньютона", subject=self.subject1)
self.lab2 = Lab.objects.create(name="Атвуд", subject=self.subject2)
def test_task_create(self):
has_error = False
try:
task = Task(student=self.student1, lab=self.lab1)
task.clean()
task.save()
except ValidationError:
has_error = True
self.assertFalse(has_error)
def test_task_create_double(self):
"""
        A validation error should be raised - we are trying to create two identical tasks
:return:
"""
has_error = False
try:
task = Task(student=self.student1, lab=self.lab1)
task.clean()
task.save()
task = Task(student=self.student1, lab=self.lab1)
task.clean()
task.save()
except ValidationError:
has_error = True
self.assertTrue(has_error)
        # Check that there is only one task for this study group
subject = self.group_subject11.subject
study_group = self.group_subject11.study_group
task_count = Task.objects.filter(
lab__subject__pk=subject.id, student__study_group__pk=study_group.id
).count()
self.assertTrue(task_count, 1)
|
vinneyto/lab-portal
|
portal/test_models.py
|
Python
|
bsd-3-clause
| 2,587
| 0.002052
|
#
# Copyright (C) 2008-2013 by Nicolas Piganeau
# npi@m4x.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
import ts2.routing
class RoutesModel(QtCore.QAbstractTableModel):
"""The RoutesModel is a table model for routes that is used in the editor
"""
def __init__(self, editor):
"""Constructor for the RoutesModel class"""
super().__init__()
self._editor = editor
def rowCount(self, parent = QtCore.QModelIndex()):
"""Returns the number of rows of the model, corresponding to the
number of routes."""
return len(self._editor.routes)
def columnCount(self, parent = QtCore.QModelIndex()):
"""Returns the number of columns of the model"""
return 4
def data(self, index, role = Qt.DisplayRole):
"""Returns the data at the given index"""
if role == Qt.DisplayRole or role == Qt.EditRole:
routes = list(sorted(self._editor.routes.values()))
if index.column() == 0:
return routes[index.row()].routeNum
elif index.column() == 1:
return routes[index.row()].beginSignal.name
elif index.column() == 2:
return routes[index.row()].endSignal.name
elif index.column() == 3:
return routes[index.row()].initialState
return None
def setData(self, index, value, role):
"""Updates data when modified in the view"""
if role == Qt.EditRole:
if index.column() == 3:
routeNum = int(index.sibling(index.row(), 0).data())
self._editor.routes[routeNum].initialState = value
self.dataChanged.emit(index, index)
return True
return False
def headerData(self, section, orientation, role = Qt.DisplayRole):
"""Returns the header labels"""
if role == Qt.DisplayRole and orientation == Qt.Horizontal:
if section == 0:
return self.tr("Route no.")
elif section == 1:
return self.tr("Begin Signal")
elif section == 2:
return self.tr("End Signal")
elif section == 3:
return self.tr("Initial State")
return None
def flags(self, index):
"""Returns the flags of the model"""
retFlag = Qt.ItemIsEnabled | Qt.ItemIsSelectable
if index.column() == 3:
retFlag |= Qt.ItemIsEditable
return retFlag
class Route(QtCore.QObject):
"""@brief Path between two signals
A route is a path between two signals. If a route is activated, the path
is selected, and the signals at the beginning and the end of the route are
    changed and any other conflicting routes are inhibited. Routes
are static and defined in the game file. The player can only activate or
deactivate them.
"""
def __init__(self, simulation, routeNum, beginSignal, endSignal,
initialState = 0):
"""Constructor of the Route class. After construction, the directions
dictionary must be filled and then the _positions list must be
populated by calling createPositionsList().
@param routeNum The route number (id)
@param beginSignal Pointer to the SignalItem at which the route starts
@param endSignal Pointer to the SignalItem at which the route ends"""
super().__init__(simulation)
self.simulation = simulation
self._routeNum = routeNum
bsp = ts2.routing.Position(beginSignal, beginSignal.previousItem, 0)
esp = ts2.routing.Position(endSignal, endSignal.previousItem, 0)
self._positions = [bsp, esp]
self._directions = {}
self._initialState = initialState
self._persistent = False
routeSelected = QtCore.pyqtSignal()
routeUnselected = QtCore.pyqtSignal()
@property
def positions(self):
"""Returns the positions list of this route."""
return self._positions
@property
def routeNum(self):
"""Returns this route number"""
return self._routeNum
@property
def beginSignal(self):
""" Returns the SignalItem where this route starts."""
return self._positions[0].trackItem
@property
def endSignal(self):
"""Returns the SignalItem where this route ends."""
return self._positions[-1].trackItem
@property
def initialState(self):
"""Returns the state of the route at the beginning of the simulation.
0 => Not activated
1 => Activated, non persistent
2 => Activated, persistent"""
return self._initialState
@initialState.setter
def initialState(self, value):
"""Setter function for the initialState property"""
value = int(value)
if value < 0 or value > 2:
value = 0
self._initialState = value
def getRouteState(self):
"""Returns the current route state:
0 => Not activated
1 => Activated, non persistent
2 => Activated, persistent."""
if self.beginSignal.nextActiveRoute is not None and \
self.beginSignal.nextActiveRoute == self:
if self._persistent:
return 2
else:
return 1
else:
return 0
@property
def directions(self):
"""Returns the directions dictionary"""
return self._directions
def direction(self, tiId):
"""Returns the direction of this route at the trackItem with id tiId
"""
return self._directions[tiId]
def appendDirection(self, tiId, direction):
""" Appends a direction to a TrackItem on the Route.
@param tiId The trackItem number to which we add direction
@param direction The direction to append.
        For points, 0 means normal and any other value means reverse"""
self._directions[tiId] = direction
def createPositionsList(self):
""" Populates the _positions list.
        Returns True if the route is valid; if not, it logs an error, returns
        False and leaves the _positions list incomplete.
Also completes the _directions map, with obvious directions."""
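        # The loop below walks the track from the begin signal towards the end
        # signal: each reached position is inserted into _positions, point
        # directions are recorded (or inferred when obvious), and the walk
        # ends with True when the end signal is reached, or with False when
        # the track runs out before reaching it.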
cur = self._positions[0].next()
it = 1
while not cur.isOut():
if cur == self._positions[-1]:
return True
self._positions.insert(it, cur)
it += 1
if cur.trackItem.tiType.startswith("P"):
if cur.previousTI == cur.trackItem.normalItem:
self._directions[cur.trackItem.tiId] = 0
elif cur.previousTI == cur.trackItem.reverseItem:
self._directions[cur.trackItem.tiId] = 1
elif cur.previousTI == cur.trackItem.commonItem \
and cur.trackItem.tiId not in self._directions:
self._directions[cur.trackItem.tiId] = 0
cur = cur.next(0, self._directions.get(cur.trackItem.tiId, -1))
QtCore.qCritical(self.tr("Invalid route %i. "
"Impossible to link beginSignal with endSignal"
% self.routeNum))
return False
def links(self, si1, si2):
""" Returns true if the route links SignalItem si1 to SignalItem si2.
@param si1 First SignalItem
@param si2 Last SignalItem"""
if self.beginSignal == si1 and self.endSignal == si2:
return True
else:
return False
def activate(self, persistent = False):
""" This function is called by the simulation when the route is
activated."""
for pos in self._positions:
pos.trackItem.setActiveRoute(self, pos.previousTI)
self.endSignal.previousActiveRoute = self
self.beginSignal.nextActiveRoute = self
self.persistent = persistent
self.routeSelected.emit()
def desactivate(self):
"""This function is called by the simulation when the route is
desactivated."""
self.beginSignal.resetNextActiveRoute(self)
self.endSignal.resetPreviousActiveRoute()
for pos in self._positions:
if pos.trackItem.activeRoute is None or \
pos.trackItem.activeRoute == self:
pos.trackItem.resetActiveRoute()
self.routeUnselected.emit()
def isActivable(self):
"""Returns true if this route can be activated, i.e. that no other
active route is conflicting with this route."""
flag = False
for pos in self._positions:
if pos.trackItem != self.beginSignal and \
pos.trackItem != self.endSignal:
if pos.trackItem.conflictTI is not None \
and pos.trackItem.conflictTI.activeRoute is not None:
# The trackItem has a conflict item and this conflict item
# has an active route
return False
if pos.trackItem.activeRoute is not None:
# The trackItem already has an active route
                if pos.trackItem.tiType.startswith("P") and not flag:
# The trackItem is a pointsItem and it is the first
# trackItem with active route that we meet
return False
if pos.previousTI!=pos.trackItem.activeRoutePreviousItem:
# The direction of this route is different from that
# of the active route of the TI
return False
if pos.trackItem.activeRoute == self:
                    # Always allow setting up the same route again
return True
else:
# We set flag to true to remember we have come across
# a TI with activeRoute with same dir. This enables
# the user to set a route ending with the same end
# signal when it is cleared by a train still
# on the route
flag = True
elif flag:
                # We crossed a route with the same direction, but it does not
                # end with the same signal
return False
return True
@property
def persistent(self):
"""Returns True if this route is persistent"""
return self._persistent
@persistent.setter
def persistent(self, p = True):
"""Setter function for the persistent property"""
self._persistent = p
def __eq__(self, other):
"""Two routes are equal if they have the save routeNum or if both
beginSignal and endSignal are equal"""
if (self.routeNum == other.routeNum or
(self.beginSignal == other.beginSignal and
self.endSignal == other.endSignal)):
return True
else:
return False
def __ne__(self, other):
"""Two routes are not equal if they have different routeNum and if
at least one of beginSignal or endSignal is different"""
if (self.routeNum != other.routeNum and
(self.beginSignal != other.beginSignal or
self.endSignal != other.endSignal)):
return True
else:
return False
def __lt__(self, other):
"""Route is lower than other when its routeNum is lower"""
return self.routeNum < other.routeNum
def __gt__(self, other):
"""Route is greater than other when its routeNum is greater"""
return self.routeNum > other.routeNum
|
Sharpe49/ts2
|
ts2/routing/route.py
|
Python
|
gpl-2.0
| 12,444
| 0.001848
|
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
# Contributing Authors:
# - Ansible Core Team
# - Eduard Snesarev (@verm666)
# - Berend De Schouwer (@berenddeschouwer)
# - Abhijeet Kasurde (@Akasurde)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import os
import time
import glob
import tempfile
from abc import ABCMeta, abstractmethod
from ansible.module_utils._text import to_native
from ansible.module_utils.six import with_metaclass
yumdnf_argument_spec = dict(
argument_spec=dict(
allow_downgrade=dict(type='bool', default=False),
autoremove=dict(type='bool', default=False),
bugfix=dict(required=False, type='bool', default=False),
conf_file=dict(type='str'),
disable_excludes=dict(type='str', default=None),
disable_gpg_check=dict(type='bool', default=False),
disable_plugin=dict(type='list', default=[]),
disablerepo=dict(type='list', default=[]),
download_only=dict(type='bool', default=False),
enable_plugin=dict(type='list', default=[]),
enablerepo=dict(type='list', default=[]),
exclude=dict(type='list', default=[]),
installroot=dict(type='str', default="/"),
install_repoquery=dict(type='bool', default=True),
list=dict(type='str'),
name=dict(type='list', aliases=['pkg'], default=[]),
releasever=dict(default=None),
security=dict(type='bool', default=False),
skip_broken=dict(type='bool', default=False),
# removed==absent, installed==present, these are accepted as aliases
state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
update_cache=dict(type='bool', default=False, aliases=['expire-cache']),
update_only=dict(required=False, default="no", type='bool'),
validate_certs=dict(type='bool', default=True),
lock_timeout=dict(type='int', default=0),
),
required_one_of=[['name', 'list', 'update_cache']],
mutually_exclusive=[['name', 'list']],
supports_check_mode=True,
)
class YumDnf(with_metaclass(ABCMeta, object)):
"""
Abstract class that handles the population of instance variables that should
be identical between both YUM and DNF modules because of the feature parity
and shared argument spec
"""
def __init__(self, module):
self.module = module
self.allow_downgrade = self.module.params['allow_downgrade']
self.autoremove = self.module.params['autoremove']
self.bugfix = self.module.params['bugfix']
self.conf_file = self.module.params['conf_file']
self.disable_excludes = self.module.params['disable_excludes']
self.disable_gpg_check = self.module.params['disable_gpg_check']
self.disable_plugin = self.module.params['disable_plugin']
self.disablerepo = self.module.params.get('disablerepo', [])
self.download_only = self.module.params['download_only']
self.enable_plugin = self.module.params['enable_plugin']
self.enablerepo = self.module.params.get('enablerepo', [])
self.exclude = self.module.params['exclude']
self.installroot = self.module.params['installroot']
self.install_repoquery = self.module.params['install_repoquery']
self.list = self.module.params['list']
self.names = [p.strip() for p in self.module.params['name']]
self.releasever = self.module.params['releasever']
self.security = self.module.params['security']
self.skip_broken = self.module.params['skip_broken']
self.state = self.module.params['state']
self.update_only = self.module.params['update_only']
self.update_cache = self.module.params['update_cache']
self.validate_certs = self.module.params['validate_certs']
self.lock_timeout = self.module.params['lock_timeout']
# It's possible someone passed a comma separated string since it used
# to be a string type, so we should handle that
self.names = self.listify_comma_sep_strings_in_list(self.names)
self.disablerepo = self.listify_comma_sep_strings_in_list(self.disablerepo)
self.enablerepo = self.listify_comma_sep_strings_in_list(self.enablerepo)
self.exclude = self.listify_comma_sep_strings_in_list(self.exclude)
# Fail if someone passed a space separated string
# https://github.com/ansible/ansible/issues/46301
if any((' ' in name and '@' not in name and '==' not in name for name in self.names)):
module.fail_json(
msg='It appears that a space separated string of packages was passed in '
'as an argument. To operate on several packages, pass a comma separated '
'string of packages or a list of packages.'
)
# This should really be redefined by both the yum and dnf module but a
# default isn't a bad idea
self.lockfile = '/var/run/yum.pid'
def wait_for_lock(self):
'''Poll until the lock is removed if timeout is a positive number'''
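        # The lock file is re-checked once per second, at most self.lock_timeout
        # times; if it is still held after that (or if lock_timeout is not
        # positive), the module fails.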
if (os.path.isfile(self.lockfile) or glob.glob(self.lockfile)):
if self.lock_timeout > 0:
for iteration in range(0, self.lock_timeout):
time.sleep(1)
if not os.path.isfile(self.lockfile) and not glob.glob(self.lockfile):
return
self.module.fail_json(msg='{0} lockfile is held by another process'.format(self.pkg_mgr_name))
def listify_comma_sep_strings_in_list(self, some_list):
"""
        Accept a list of strings, find any strings in that list that are comma
        separated, remove them from the list, and append their comma-separated
        elements to the original list.
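        Example (illustrative, not from the original sources):
            ["a,b", "c"]  ->  ["c", "a", "b"]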
"""
new_list = []
remove_from_original_list = []
for element in some_list:
if ',' in element:
remove_from_original_list.append(element)
new_list.extend([e.strip() for e in element.split(',')])
for element in remove_from_original_list:
some_list.remove(element)
some_list.extend(new_list)
if some_list == [""]:
return []
return some_list
@abstractmethod
def run(self):
raise NotImplementedError
|
alexlo03/ansible
|
lib/ansible/module_utils/yumdnf.py
|
Python
|
gpl-3.0
| 6,502
| 0.001846
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserSettings'
db.create_table(u'cms_usersettings', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('language', self.gf('django.db.models.fields.CharField')(max_length=10)),
))
db.send_create_signal('cms', ['UserSettings'])
def backwards(self, orm):
# Deleting model 'UserSettings'
db.delete_table(u'cms_usersettings')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
|
SinnerSchraderMobileMirrors/django-cms
|
cms/migrations/0041_auto__add_usersettings.py
|
Python
|
bsd-3-clause
| 15,866
| 0.00832
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._query import query
|
kubeflow/kfp-tekton-backend
|
components/gcp/container/component_sdk/python/kfp_component/google/bigquery/__init__.py
|
Python
|
apache-2.0
| 601
| 0.001664
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Dimitrios Tydeas Mengidis <tydeas.dr@gmail.com>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: composer
author: '"Dimitrios Tydeas Mengidis (@dmtrs)" <tydeas.dr@gmail.com>'
short_description: Dependency Manager for PHP
version_added: "1.6"
description:
    - Composer is a tool for dependency management in PHP. It allows you to declare the libraries your project depends on and installs them in your project for you.
options:
command:
version_added: "1.8"
description:
- Composer command like "install", "update" and so on
required: false
default: install
working_dir:
description:
- Directory of your project ( see --working-dir )
required: true
default: null
aliases: [ "working-dir" ]
prefer_source:
description:
- Forces installation from package sources when possible ( see --prefer-source )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "prefer-source" ]
prefer_dist:
description:
- Forces installation from package dist even for dev versions ( see --prefer-dist )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "prefer-dist" ]
no_dev:
description:
- Disables installation of require-dev packages ( see --no-dev )
required: false
default: "yes"
choices: [ "yes", "no" ]
aliases: [ "no-dev" ]
no_scripts:
description:
- Skips the execution of all scripts defined in composer.json ( see --no-scripts )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "no-scripts" ]
no_plugins:
description:
- Disables all plugins ( see --no-plugins )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "no-plugins" ]
optimize_autoloader:
description:
- Optimize autoloader during autoloader dump ( see --optimize-autoloader ). Convert PSR-0/4 autoloading to classmap to get a faster autoloader. This is recommended especially for production, but can take a bit of time to run so it is currently not done by default.
required: false
default: "yes"
choices: [ "yes", "no" ]
aliases: [ "optimize-autoloader" ]
requirements:
- php
- composer installed in bin path (recommended /usr/local/bin)
notes:
- Default options that are always appended in each execution are --no-ansi, --no-progress, and --no-interaction
'''
EXAMPLES = '''
# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock
- composer: command=install working_dir=/path/to/project
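# Update the project dependencies, preferring dist packages
# (illustrative example using the options documented above)
- composer: command=update working_dir=/path/to/project prefer_dist=yes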
'''
import os
import re
def parse_out(string):
    return re.sub(r"\s+", " ", string).strip()
def has_changed(string):
if "Nothing to install or update" in string:
return False
else:
return True
def composer_install(module, command, options):
php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
cmd = "%s %s %s %s" % (php_path, composer_path, command, " ".join(options))
return module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec = dict(
command = dict(default="install", type="str", required=False),
working_dir = dict(aliases=["working-dir"], required=True),
prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]),
prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]),
no_dev = dict(default="yes", type="bool", aliases=["no-dev"]),
no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]),
no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]),
optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]),
),
supports_check_mode=True
)
options = []
# Default options
options.append('--no-ansi')
options.append('--no-progress')
options.append('--no-interaction')
options.extend(['--working-dir', os.path.abspath(module.params['working_dir'])])
# Get composer command with fallback to default
command = module.params['command']
# Prepare options
if module.params['prefer_source']:
options.append('--prefer-source')
if module.params['prefer_dist']:
options.append('--prefer-dist')
if module.params['no_dev']:
options.append('--no-dev')
if module.params['no_scripts']:
options.append('--no-scripts')
if module.params['no_plugins']:
options.append('--no-plugins')
if module.params['optimize_autoloader']:
options.append('--optimize-autoloader')
if module.check_mode:
options.append('--dry-run')
rc, out, err = composer_install(module, command, options)
if rc != 0:
output = parse_out(err)
module.fail_json(msg=output)
else:
# Composer version > 1.0.0-alpha9 now use stderr for standard notification messages
output = parse_out(out + err)
module.exit_json(changed=has_changed(output), msg=output)
# import module snippets
from ansible.module_utils.basic import *
main()
|
gaqzi/ansible-modules-extras
|
packaging/language/composer.py
|
Python
|
gpl-3.0
| 6,200
| 0.008548
|
# jsb/plugs/core/rc.py
#
#
""" jsonbot resource files .. files with the .jsb extension which consists of commands to be executed. """
## jsb imports
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.utils.url import geturl2
from jsb.utils.exception import handle_exception
from jsb.utils.generic import waitforqueue, waitevents
## basic imports
import copy
## defines
cpy = copy.deepcopy
## rc command
def handle_rc(bot, event):
""" import aliases by url. assumes a .RC file. 1 alias per line """
if not event.rest: event.missing("<file>|<url>") ; return
teller = 0
t = event.rest
waiting = []
try:
try:
if t.startswith("http"): data = geturl2(t)
else: data = open(t, 'r').read()
except IOError, ex: event.reply("I/O error: %s" % str(ex)) ; return
if not data: event.reply("can't get data from %s" % event.rest) ; return
for d in data.split("\n"):
i = d.strip()
if not i: continue
if i.startswith("#"): continue
e = cpy(event)
e.txt = "%s" % i.strip()
e.direct = True
bot.put(e)
waiting.append(e)
#result = bot.docmnd(event.userhost, event.channel, i, wait=1, event=event)
#if result: result.waitall()
teller += 1
#waitevents(waiting)
event.reply("%s commands executed" % teller)
    except Exception, ex: event.reply("an error occurred: %s" % str(ex)) ; handle_exception()
cmnds.add("rc", handle_rc, ["OPER"], threaded=True)
examples.add("rc", "execute a file of jsonbot commands .. from file or url", "1) rc resource.jsb 2) rc http://jsonbot.org/resource.jsb")
|
melmothx/jsonbot
|
jsb/plugs/core/rc.py
|
Python
|
mit
| 1,732
| 0.017321
|
from __future__ import print_function
import unittest
import numpy as np
import pydrake
import os.path
class TestRBTCoM(unittest.TestCase):
def testCoM0(self):
r = pydrake.rbtree.RigidBodyTree(os.path.join(pydrake.getDrakePath(),
"examples/Pendulum/Pendulum.urdf"))
kinsol = r.doKinematics(np.zeros((7, 1)), np.zeros((7, 1)))
c = r.centerOfMass(kinsol)
self.assertTrue(np.allclose(c.flat, [0.0, 0.0, -0.2425], atol=1e-4))
def testCoMJacobian(self):
r = pydrake.rbtree.RigidBodyTree(os.path.join(pydrake.getDrakePath(),
"examples/Pendulum/Pendulum.urdf"))
q = r.getRandomConfiguration()
kinsol = r.doKinematics(q, np.zeros((7, 1)))
J = r.centerOfMassJacobian(kinsol)
self.assertTrue(np.shape(J) == (3, 7))
q = r.getZeroConfiguration()
kinsol = r.doKinematics(q, np.zeros((7, 1)))
J = r.centerOfMassJacobian(kinsol)
self.assertTrue(np.allclose(J.flat, [1., 0., 0., 0., -0.2425, 0., -0.25,
0., 1., 0., 0.2425, 0., 0., 0.,
0., 0., 1., 0., 0., 0., 0.], atol=1e-4))
if __name__ == '__main__':
unittest.main()
|
billhoffman/drake
|
drake/bindings/python/pydrake/test/testRBTCoM.py
|
Python
|
bsd-3-clause
| 1,228
| 0.002443
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Brian Cherinka, José Sánchez-Gallego, and Brett Andrews
# @Date: 2018-07-24
# @Filename: test_rss.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: José Sánchez-Gallego (gallegoj@uw.edu)
# @Last modified time: 2018-08-04 13:35:39
import astropy.io.fits
import astropy.table
import numpy
import pytest
import marvin
from ..conftest import Galaxy, set_the_config
@pytest.fixture(scope='session')
def galaxy(get_params, plateifu):
"""Yield an instance of a Galaxy object for use in tests."""
release, bintype, template = get_params
set_the_config(release)
gal = Galaxy(plateifu=plateifu)
gal.set_params(bintype=bintype, template=template, release=release)
gal.set_filepaths()
gal.set_galaxy_data()
yield gal
@pytest.fixture(scope='session')
def rss_session(galaxy, mode):
# These get created only once per session.
if mode == 'auto' or str(galaxy.bintype) != 'SPX':
pytest.skip()
if mode == 'local':
rss = marvin.tools.RSS(filename=galaxy.rsspath, release=galaxy.release, mode='local')
else:
rss = marvin.tools.RSS(plateifu=galaxy.plateifu, release=galaxy.release, mode='remote')
rss.expdata = galaxy.rss
yield rss
@pytest.fixture(scope='function')
def rss(rss_session):
# In some of the tests we modify the RSS objects. Here we implement
# a setup procedure that "unloads" the RSSFiber objects and resets the
# autoload attribute.
for rssfiber in rss_session:
rssfiber.loaded = False
rss_session.autoload = True
yield rss_session
@pytest.fixture(scope='session')
def rssfiber(rss_session):
fiberid = 0
if rss_session[fiberid].loaded is False:
rss_session[fiberid].load()
yield rss_session[fiberid]
@pytest.mark.usefixtures('monkeyauth')
class TestRSS(object):
def test_rss_init(self, rss):
assert isinstance(rss, marvin.tools.RSS)
assert isinstance(rss, marvin.tools.mixins.NSAMixIn)
assert isinstance(rss, list)
assert isinstance(rss.obsinfo, astropy.table.Table)
if rss.mode == 'file':
assert isinstance(rss.data, astropy.io.fits.HDUList)
assert rss._wavelength is not None
assert len(rss) == rss._nfibers
rss.autoload = False # To make things faster for this test
assert all([isinstance(rss_fiber, marvin.tools.rss.RSSFiber) for rss_fiber in rss])
@pytest.mark.parametrize('autoload', [True, False])
def test_rss_autoload(self, rss, autoload):
rss.autoload = autoload
assert rss[0].loaded is autoload
def test_load(self, rss):
rss.autoload = False
assert rss[0].loaded is False
rss[0].load()
assert rss[0].loaded is True
def test_load_all(self, rss):
if rss.mode == 'remote':
pytest.skip()
rss.load_all()
assert all([rss_fiber.loaded is True for rss_fiber in rss])
def test_obsinfo_to_rssfiber(self, rss):
# We get it in this complicated way so that it is a different way of
# obtianing it than in the _populate_fibres method.
ifusize = int(str(rss.ifu)[0:-2])
exp_idx = 0
n_fiber = 1
for rssfiber in rss:
assert numpy.all(rss.obsinfo[exp_idx] == rssfiber.obsinfo)
n_fiber += 1
if n_fiber > ifusize:
n_fiber = 1
exp_idx += 1
def test_getcube(self, rss):
cube = rss.getCube()
assert isinstance(cube, marvin.tools.Cube)
assert cube.mode == rss.mode
assert cube.plateifu == rss.plateifu
assert cube.mangaid == rss.mangaid
assert cube.release == rss.release
def test_select_fibers(self, rss):
# Skipping for API or it will take forever. Should not matter since
# we have already tested slicing for API.
if rss.data_origin == 'api':
pytest.skip()
fibers_expnum = rss.select_fibers(exposure_no=rss.expdata['expnum'])
assert len(fibers_expnum) == rss.expdata['nfiber']
assert fibers_expnum[0].obsinfo['EXPNUM'][0] == rss.expdata['expnum']
fibers_mjd = rss.select_fibers(mjd=1234)
assert len(fibers_mjd) == 0
fibers_mjd = rss.select_fibers(mjd=rss.expdata['mjd'])
assert len(fibers_mjd) == (rss.expdata['nexp'] * rss.expdata['nfiber'])
assert fibers_mjd[0].obsinfo['MJD'][0] == rss.expdata['mjd']
@pytest.mark.usefixtures('monkeyauth')
class TestRSSFiber(object):
def test_rssfiber_spectra(self, rssfiber):
assert isinstance(rssfiber, marvin.tools.RSSFiber)
assert isinstance(rssfiber.rss, marvin.tools.RSS)
assert isinstance(rssfiber.obsinfo, astropy.table.Table)
assert hasattr(rssfiber, 'ivar')
assert isinstance(rssfiber.ivar, numpy.ndarray)
assert len(rssfiber.ivar) == len(rssfiber.wavelength)
assert hasattr(rssfiber, 'mask')
assert isinstance(rssfiber.mask, numpy.ndarray)
assert len(rssfiber.mask) == len(rssfiber.wavelength)
for dm_element in rssfiber.rss.datamodel.rss + rssfiber.rss.datamodel.spectra:
if dm_element.name == 'flux':
continue
spectrum = getattr(rssfiber, dm_element.name, None)
assert spectrum is not None
assert isinstance(spectrum, numpy.ndarray)
assert len(spectrum) == len(rssfiber.wavelength)
def test_rssfiber_data(self, rssfiber):
rss_filename = rssfiber.rss._getFullPath()
rss_hdu = astropy.io.fits.open(rss_filename)
numpy.testing.assert_allclose(rss_hdu['FLUX'].data[rssfiber.fiberid, :], rssfiber.value)
numpy.testing.assert_allclose(rss_hdu['IVAR'].data[rssfiber.fiberid, :], rssfiber.ivar)
numpy.testing.assert_array_equal(rss_hdu['MASK'].data[rssfiber.fiberid, :], rssfiber.mask)
for dm_element in rssfiber.rss.datamodel.rss:
if dm_element.name == 'flux':
continue
fits_data = rss_hdu[dm_element.fits_extension()].data[rssfiber.fiberid, :]
numpy.testing.assert_allclose(fits_data, getattr(rssfiber, dm_element.name).value)
for dm_element in rssfiber.rss.datamodel.spectra:
fits_data = rss_hdu[dm_element.fits_extension()].data
numpy.testing.assert_allclose(fits_data, getattr(rssfiber, dm_element.name).value)
def test_rssfiber_slice(self, rssfiber):
n_elements = 10
sliced = rssfiber[0:n_elements]
assert len(sliced.value) == n_elements
numpy.testing.assert_allclose(sliced.value, rssfiber.value[0:n_elements])
assert len(sliced.ivar) == n_elements
assert len(sliced.mask) == n_elements
for dm_element in rssfiber.rss.datamodel.rss + rssfiber.rss.datamodel.spectra:
if dm_element.name == 'flux':
continue
spectrum_sliced = getattr(sliced, dm_element.name, None)
assert len(spectrum_sliced) == n_elements
assert sliced.obsinfo is not None
def test_rssfiber_masked(self, rssfiber):
assert numpy.sum(rssfiber.masked.mask) > 0
def test_rssfiber_descale(self, rssfiber):
descaled = rssfiber.descale()
numpy.testing.assert_allclose(descaled.value, rssfiber.value * rssfiber.unit.scale)
assert descaled.obsinfo is not None
class TestPickling(object):
def test_pickling_file(self, temp_scratch, rss):
if rss.data_origin == 'file':
assert rss.data is not None
rss_file = temp_scratch.join('test_rss.mpf')
rss.save(str(rss_file))
assert rss_file.check() is True
rss_restored = marvin.tools.RSS.restore(str(rss_file))
assert rss_restored.data_origin == rss.data_origin
assert isinstance(rss_restored, marvin.tools.RSS)
assert len(rss_restored) > 0
assert isinstance(rss_restored[0], marvin.tools.RSSFiber)
assert numpy.sum(rss_restored[0].value) > 0
if rss.data_origin == 'file':
assert rss_restored.data is not None
else:
assert rss_restored.data is None
|
sdss/marvin
|
tests/tools/test_rss.py
|
Python
|
bsd-3-clause
| 8,275
| 0.001693
|
'''This module contains utilities for following up search triggers'''
# JavaScript for searching the aLOG
redirect_javascript = """<script type="text/javascript">
function redirect(form,way)
{
// Set location to form and submit.
if(form != '')
{
document.forms[form].action=way;
document.forms[form].submit();
}
else
{
window.top.location = way;
}
}
</script>"""
search_form_string="""<form name="%s_alog_search" id="%s_alog_search" method="post">
<input type="hidden" name="srcDateFrom" id="srcDateFrom" value="%s" size="20"/>
<input type="hidden" name="srcDateTo" id="srcDateTo" value="%s" size="20"/>
</form>"""
data_h1_string = """H1
<a href=https://ldas-jobs.ligo-wa.caltech.edu/~detchar/summary/day/%s>
Summary</a>
<a onclick="redirect('h1_alog_search',
'https://alog.ligo-wa.caltech.edu/aLOG/includes/search.php?adminType=search');
return true;">aLOG</a>"""
data_l1_string="""L1
<a href=https://ldas-jobs.ligo-la.caltech.edu/~detchar/summary/day/%s>
Summary</a>
<a onclick="redirect('l1_alog_search',
'https://alog.ligo-la.caltech.edu/aLOG/includes/search.php?adminType=search');
return true;">aLOG</a>"""
def get_summary_page_link(ifo, utc_time):
"""Return a string that links to the summary page and aLOG for this ifo
Parameters
----------
ifo : string
The detector name
utc_time : sequence
First three elements must be strings giving year, month, day resp.
Returns
-------
return_string : string
String containing HTML for links to summary page and aLOG search
"""
search_form = search_form_string
data = {'H1': data_h1_string, 'L1': data_l1_string}
if ifo not in data:
return ifo
else:
# alog format is day-month-year
alog_utc = '%02d-%02d-%4d' % (utc_time[2], utc_time[1], utc_time[0])
# summary page is exactly the reverse
ext = '%4d%02d%02d' % (utc_time[0], utc_time[1], utc_time[2])
return_string = search_form % (ifo.lower(), ifo.lower(), alog_utc, alog_utc)
return return_string + data[ifo] % ext
|
ligo-cbc/pycbc
|
pycbc/results/dq.py
|
Python
|
gpl-3.0
| 2,187
| 0.002286
|
from copy import deepcopy
from sqlalchemy import inspect
from sqlalchemy.orm.base import DEFAULT_STATE_ATTR
from sqlalchemy.orm.state import InstanceState
from mongosql.bag import ModelPropertyBags
class ModelHistoryProxy:
""" Proxy object to gain access to historical model attributes.
This leverages SqlAlchemy attribute history to provide access to the previous value of an
attribute. The only reason why this object exists is because keeping two instances in memory may
be expensive. But because normally you'll only need a field or two, the decision was to use
this magic proxy object that will load model history on demand.
Why would you need to access model history at all?
Because CrudHelper's update method (i.e., changing model fields) gives you two objects: the
current instance, and the old instance, so that your custom code in the update handler can
compare those fields.
For instance, when a certain object is being moved from one User to another, you might want
to notify both of them. In that case, you'll need access to the historical user.
The initial solution was to *copy* the instance, apply the modifications from JSON to a copy,
and then feed both of them to the save handler... but copying was expensive.
That's why we have this proxy: it does not load all the fields of the historical model,
but acts as a proxy object (__getattr__()) that will get those properties on demand.
"""
def __init__(self, instance):
# Save the information that we'll definitely need
self.__instance = instance
self.__model = self.__instance.__class__
self.__bags = ModelPropertyBags.for_model(self.__model) # type: ModelPropertyBags
self.__inspect = inspect(instance) # type: InstanceState
# Copy every field onto ourselves
self.__copy_from_instance(self.__instance)
# Enable accessing relationships through our proxy
self.__install_instance_state(instance)
def __copy_from_instance(self, instance):
""" Copy all attributes of `instance` to `self`
Alright, this code renders the whole point of having ModelHistoryProxy void.
There is an issue with model history:
"Each time the Session is flushed, the history of each attribute is reset to empty.
The Session by default autoflushes each time a Query is invoked"
https://docs.sqlalchemy.org/en/latest/orm/internals.html#sqlalchemy.orm.state.AttributeState.history
This means that as soon as you load a relationship, model history is reset.
To solve this, we have to make a copy of this model.
All attributes are set on `self`, so accessing `self.attr` will not trigger `__getattr__()`
"""
""" Copy the given list of columns from the instance onto self """
insp = self.__inspect # type: InstanceState
# Copy all values onto `self`
for column_name in self.__bags.columns.names:
# Skip unloaded columns (because that would emit sql queries)
# Also skip the columns that were already copied (perhaps, mutable columns?)
if column_name not in insp.unloaded:
# The state
attr_state = insp.attrs[column_name] # type: AttributeState
# Get the historical value
# deepcopy() ensures JSON and ARRAY values are copied in full
hist_val = deepcopy(_get_historical_value(attr_state))
                # Store the value on `self`: it now holds the historical value
setattr(self, column_name, hist_val)
def __install_instance_state(self, instance):
""" Install an InstanceState, so that relationship descriptors can work properly """
# These lines install the internal SqlAlchemy's property on our proxy
# This property mimics the original object.
# This ensures that we can access relationship attributes through a ModelHistoryProxy object
# Example:
# hist = ModelHistoryProxy(comment)
# hist.user.id # wow!
instance_state = getattr(instance, DEFAULT_STATE_ATTR)
my_state = InstanceState(self, instance_state.manager)
my_state.key = instance_state.key
my_state.session_id = instance_state.session_id
setattr(self, DEFAULT_STATE_ATTR, my_state)
def __getattr__(self, key):
# Get a relationship:
if key in self.__bags.relations:
relationship = getattr(self.__model, key)
return relationship.__get__(self, self.__model)
# Get a property (@property)
if key in self.__bags.properties:
# Because properties may use other columns,
            # we have to run it against our `self`, because only then will it be able to get the original values.
return getattr(self.__model, key).fget(self)
# Every column attribute is accessed through history
attr = self.__inspect.attrs[key]
return _get_historical_value(attr)
def _get_historical_value(attr):
""" Get the previous value of an attribute
This is where the magic happens: this method goes into the SqlAlchemy instance and
obtains the historical value of an attribute called `key`
"""
# Examine attribute history
# If a value was deleted (e.g. replaced) -- we return it as the previous version.
history = attr.history
if not history.deleted:
# No previous value, return the current value instead
return attr.value
else:
# Return the previous value
# It's a tuple, since History supports collections, but we do not support these,
# so just get the first element
return history.deleted[0]
|
kolypto/py-mongosql
|
mongosql/util/history_proxy.py
|
Python
|
bsd-2-clause
| 5,787
| 0.003974
|
#!/usr/bin/env python
# Base Server - Chapter three - basicserver.py
import socket, traceback
host=''
port=8080
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
print "Waiting for connections..."
s.listen(1)
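# Illustrative note (not part of the original book example): once running, the
# server can be exercised with e.g. `telnet localhost 8080` or `nc localhost 8080`;
# it only logs the peer address and then closes the connection.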
while True:
try:
clientsock, clientaddr=s.accept()
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
continue
try:
print "Got connection from", clientsock.getpeername()
except (KeyboardInterrupt, SystemExit):
raise
except:
traceback.print_exc()
try:
clientsock.close()
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
|
jac2130/BayesGame
|
foundations-of-python-network-programming/python2/01/basicserver.py
|
Python
|
mit
| 745
| 0.013423
|
from textwrap import dedent
from sympy import (
symbols, Integral, Tuple, Dummy, Basic, default_sort_key, Matrix,
factorial, true)
from sympy.combinatorics import RGS_enum, RGS_unrank, Permutation
from sympy.utilities.iterables import (
_partition, _set_partitions, binary_partitions, bracelets, capture,
cartes, common_prefix, common_suffix, dict_merge, flatten,
generate_bell, generate_derangements, generate_involutions,
generate_oriented_forest, group, has_dups, kbins, minlex, multiset,
multiset_combinations, multiset_partitions, multiset_permutations,
necklaces, numbered_symbols, ordered, partitions, permutations,
postfixes, postorder_traversal, prefixes, reshape, rotate_left,
rotate_right, runs, sift, subsets, take, topological_sort, unflatten,
uniq, variations)
from sympy.core.singleton import S
from sympy.functions.elementary.piecewise import Piecewise, ExprCondPair
from sympy.utilities.pytest import raises
w, x, y, z = symbols('w,x,y,z')
def test_postorder_traversal():
expr = z + w*(x + y)
expected = [z, w, x, y, x + y, w*(x + y), w*(x + y) + z]
assert list(postorder_traversal(expr, keys=default_sort_key)) == expected
assert list(postorder_traversal(expr, keys=True)) == expected
expr = Piecewise((x, x < 1), (x**2, True))
expected = [
x, 1, x, x < 1, ExprCondPair(x, x < 1),
2, x, x**2, true,
ExprCondPair(x**2, True), Piecewise((x, x < 1), (x**2, True))
]
assert list(postorder_traversal(expr, keys=default_sort_key)) == expected
assert list(postorder_traversal(
[expr], keys=default_sort_key)) == expected + [[expr]]
assert list(postorder_traversal(Integral(x**2, (x, 0, 1)),
keys=default_sort_key)) == [
2, x, x**2, 0, 1, x, Tuple(x, 0, 1),
Integral(x**2, Tuple(x, 0, 1))
]
assert list(postorder_traversal(('abc', ('d', 'ef')))) == [
'abc', 'd', 'ef', ('d', 'ef'), ('abc', ('d', 'ef'))]
def test_flatten():
assert flatten((1, (1,))) == [1, 1]
assert flatten((x, (x,))) == [x, x]
ls = [[(-2, -1), (1, 2)], [(0, 0)]]
assert flatten(ls, levels=0) == ls
assert flatten(ls, levels=1) == [(-2, -1), (1, 2), (0, 0)]
assert flatten(ls, levels=2) == [-2, -1, 1, 2, 0, 0]
assert flatten(ls, levels=3) == [-2, -1, 1, 2, 0, 0]
raises(ValueError, lambda: flatten(ls, levels=-1))
class MyOp(Basic):
pass
assert flatten([MyOp(x, y), z]) == [MyOp(x, y), z]
assert flatten([MyOp(x, y), z], cls=MyOp) == [x, y, z]
assert flatten(set([1, 11, 2])) == list(set([1, 11, 2]))
def test_group():
assert group([]) == []
assert group([], multiple=False) == []
assert group([1]) == [[1]]
assert group([1], multiple=False) == [(1, 1)]
assert group([1, 1]) == [[1, 1]]
assert group([1, 1], multiple=False) == [(1, 2)]
assert group([1, 1, 1]) == [[1, 1, 1]]
assert group([1, 1, 1], multiple=False) == [(1, 3)]
assert group([1, 2, 1]) == [[1], [2], [1]]
assert group([1, 2, 1], multiple=False) == [(1, 1), (2, 1), (1, 1)]
assert group([1, 1, 2, 2, 2, 1, 3, 3]) == [[1, 1], [2, 2, 2], [1], [3, 3]]
assert group([1, 1, 2, 2, 2, 1, 3, 3], multiple=False) == [(1, 2),
(2, 3), (1, 1), (3, 2)]
def test_subsets():
# combinations
assert list(subsets([1, 2, 3], 0)) == [()]
assert list(subsets([1, 2, 3], 1)) == [(1,), (2,), (3,)]
assert list(subsets([1, 2, 3], 2)) == [(1, 2), (1, 3), (2, 3)]
assert list(subsets([1, 2, 3], 3)) == [(1, 2, 3)]
l = list(range(4))
assert list(subsets(l, 0, repetition=True)) == [()]
assert list(subsets(l, 1, repetition=True)) == [(0,), (1,), (2,), (3,)]
assert list(subsets(l, 2, repetition=True)) == [(0, 0), (0, 1), (0, 2),
(0, 3), (1, 1), (1, 2),
(1, 3), (2, 2), (2, 3),
(3, 3)]
assert list(subsets(l, 3, repetition=True)) == [(0, 0, 0), (0, 0, 1),
(0, 0, 2), (0, 0, 3),
(0, 1, 1), (0, 1, 2),
(0, 1, 3), (0, 2, 2),
(0, 2, 3), (0, 3, 3),
(1, 1, 1), (1, 1, 2),
(1, 1, 3), (1, 2, 2),
(1, 2, 3), (1, 3, 3),
(2, 2, 2), (2, 2, 3),
(2, 3, 3), (3, 3, 3)]
assert len(list(subsets(l, 4, repetition=True))) == 35
assert list(subsets(l[:2], 3, repetition=False)) == []
assert list(subsets(l[:2], 3, repetition=True)) == [(0, 0, 0),
(0, 0, 1),
(0, 1, 1),
(1, 1, 1)]
assert list(subsets([1, 2], repetition=True)) == \
[(), (1,), (2,), (1, 1), (1, 2), (2, 2)]
assert list(subsets([1, 2], repetition=False)) == \
[(), (1,), (2,), (1, 2)]
assert list(subsets([1, 2, 3], 2)) == \
[(1, 2), (1, 3), (2, 3)]
assert list(subsets([1, 2, 3], 2, repetition=True)) == \
[(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
def test_variations():
# permutations
l = list(range(4))
assert list(variations(l, 0, repetition=False)) == [()]
assert list(variations(l, 1, repetition=False)) == [(0,), (1,), (2,), (3,)]
assert list(variations(l, 2, repetition=False)) == [(0, 1), (0, 2), (0, 3), (1, 0), (1, 2), (1, 3), (2, 0), (2, 1), (2, 3), (3, 0), (3, 1), (3, 2)]
assert list(variations(l, 3, repetition=False)) == [(0, 1, 2), (0, 1, 3), (0, 2, 1), (0, 2, 3), (0, 3, 1), (0, 3, 2), (1, 0, 2), (1, 0, 3), (1, 2, 0), (1, 2, 3), (1, 3, 0), (1, 3, 2), (2, 0, 1), (2, 0, 3), (2, 1, 0), (2, 1, 3), (2, 3, 0), (2, 3, 1), (3, 0, 1), (3, 0, 2), (3, 1, 0), (3, 1, 2), (3, 2, 0), (3, 2, 1)]
assert list(variations(l, 0, repetition=True)) == [()]
assert list(variations(l, 1, repetition=True)) == [(0,), (1,), (2,), (3,)]
assert list(variations(l, 2, repetition=True)) == [(0, 0), (0, 1), (0, 2),
(0, 3), (1, 0), (1, 1),
(1, 2), (1, 3), (2, 0),
(2, 1), (2, 2), (2, 3),
(3, 0), (3, 1), (3, 2),
(3, 3)]
assert len(list(variations(l, 3, repetition=True))) == 64
assert len(list(variations(l, 4, repetition=True))) == 256
assert list(variations(l[:2], 3, repetition=False)) == []
assert list(variations(l[:2], 3, repetition=True)) == [
(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)
]
def test_cartes():
assert list(cartes([1, 2], [3, 4, 5])) == \
[(1, 3), (1, 4), (1, 5), (2, 3), (2, 4), (2, 5)]
assert list(cartes()) == [()]
assert list(cartes('a')) == [('a',)]
assert list(cartes('a', repeat=2)) == [('a', 'a')]
assert list(cartes(list(range(2)))) == [(0,), (1,)]
def test_numbered_symbols():
s = numbered_symbols(cls=Dummy)
assert isinstance(next(s), Dummy)
def test_sift():
assert sift(list(range(5)), lambda _: _ % 2) == {1: [1, 3], 0: [0, 2, 4]}
assert sift([x, y], lambda _: _.has(x)) == {False: [y], True: [x]}
assert sift([S.One], lambda _: _.has(x)) == {False: [1]}
def test_take():
X = numbered_symbols()
assert take(X, 5) == list(symbols('x0:5'))
assert take(X, 5) == list(symbols('x5:10'))
assert take([1, 2, 3, 4, 5], 5) == [1, 2, 3, 4, 5]
def test_dict_merge():
assert dict_merge({}, {1: x, y: z}) == {1: x, y: z}
assert dict_merge({1: x, y: z}, {}) == {1: x, y: z}
assert dict_merge({2: z}, {1: x, y: z}) == {1: x, 2: z, y: z}
assert dict_merge({1: x, y: z}, {2: z}) == {1: x, 2: z, y: z}
assert dict_merge({1: y, 2: z}, {1: x, y: z}) == {1: x, 2: z, y: z}
assert dict_merge({1: x, y: z}, {1: y, 2: z}) == {1: y, 2: z, y: z}
def test_prefixes():
assert list(prefixes([])) == []
assert list(prefixes([1])) == [[1]]
assert list(prefixes([1, 2])) == [[1], [1, 2]]
assert list(prefixes([1, 2, 3, 4, 5])) == \
[[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [1, 2, 3, 4, 5]]
def test_postfixes():
assert list(postfixes([])) == []
assert list(postfixes([1])) == [[1]]
assert list(postfixes([1, 2])) == [[2], [1, 2]]
assert list(postfixes([1, 2, 3, 4, 5])) == \
[[5], [4, 5], [3, 4, 5], [2, 3, 4, 5], [1, 2, 3, 4, 5]]
def test_topological_sort():
V = [2, 3, 5, 7, 8, 9, 10, 11]
E = [(7, 11), (7, 8), (5, 11),
(3, 8), (3, 10), (11, 2),
(11, 9), (11, 10), (8, 9)]
assert topological_sort((V, E)) == [3, 5, 7, 8, 11, 2, 9, 10]
assert topological_sort((V, E), key=lambda v: -v) == \
[7, 5, 11, 3, 10, 8, 9, 2]
raises(ValueError, lambda: topological_sort((V, E + [(10, 7)])))
def test_rotate():
A = [0, 1, 2, 3, 4]
assert rotate_left(A, 2) == [2, 3, 4, 0, 1]
assert rotate_right(A, 1) == [4, 0, 1, 2, 3]
A = []
B = rotate_right(A, 1)
assert B == []
B.append(1)
assert A == []
B = rotate_left(A, 1)
assert B == []
B.append(1)
assert A == []
def test_multiset_partitions():
A = [0, 1, 2, 3, 4]
assert list(multiset_partitions(A, 5)) == [[[0], [1], [2], [3], [4]]]
assert len(list(multiset_partitions(A, 4))) == 10
assert len(list(multiset_partitions(A, 3))) == 25
assert list(multiset_partitions([1, 1, 1, 2, 2], 2)) == [
[[1, 1, 1, 2], [2]], [[1, 1, 1], [2, 2]], [[1, 1, 2, 2], [1]],
[[1, 1, 2], [1, 2]], [[1, 1], [1, 2, 2]]]
assert list(multiset_partitions([1, 1, 2, 2], 2)) == [
[[1, 1, 2], [2]], [[1, 1], [2, 2]], [[1, 2, 2], [1]],
[[1, 2], [1, 2]]]
assert list(multiset_partitions([1, 2, 3, 4], 2)) == [
[[1, 2, 3], [4]], [[1, 2, 4], [3]], [[1, 2], [3, 4]],
[[1, 3, 4], [2]], [[1, 3], [2, 4]], [[1, 4], [2, 3]],
[[1], [2, 3, 4]]]
assert list(multiset_partitions([1, 2, 2], 2)) == [
[[1, 2], [2]], [[1], [2, 2]]]
assert list(multiset_partitions(3)) == [
[[0, 1, 2]], [[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]],
[[0], [1], [2]]]
assert list(multiset_partitions(3, 2)) == [
[[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]]]
assert list(multiset_partitions([1] * 3, 2)) == [[[1], [1, 1]]]
assert list(multiset_partitions([1] * 3)) == [
[[1, 1, 1]], [[1], [1, 1]], [[1], [1], [1]]]
a = [3, 2, 1]
assert list(multiset_partitions(a)) == \
list(multiset_partitions(sorted(a)))
assert list(multiset_partitions(a, 5)) == []
assert list(multiset_partitions(a, 1)) == [[[1, 2, 3]]]
assert list(multiset_partitions(a + [4], 5)) == []
assert list(multiset_partitions(a + [4], 1)) == [[[1, 2, 3, 4]]]
assert list(multiset_partitions(2, 5)) == []
assert list(multiset_partitions(2, 1)) == [[[0, 1]]]
assert list(multiset_partitions('a')) == [[['a']]]
assert list(multiset_partitions('a', 2)) == []
assert list(multiset_partitions('ab')) == [[['a', 'b']], [['a'], ['b']]]
assert list(multiset_partitions('ab', 1)) == [[['a', 'b']]]
assert list(multiset_partitions('aaa', 1)) == [['aaa']]
assert list(multiset_partitions([1, 1], 1)) == [[[1, 1]]]
def test_multiset_combinations():
ans = ['iii', 'iim', 'iip', 'iis', 'imp', 'ims', 'ipp', 'ips',
'iss', 'mpp', 'mps', 'mss', 'pps', 'pss', 'sss']
assert [''.join(i) for i in
list(multiset_combinations('mississippi', 3))] == ans
M = multiset('mississippi')
assert [''.join(i) for i in
list(multiset_combinations(M, 3))] == ans
assert [''.join(i) for i in multiset_combinations(M, 30)] == []
assert list(multiset_combinations([[1], [2, 3]], 2)) == [[[1], [2, 3]]]
assert len(list(multiset_combinations('a', 3))) == 0
assert len(list(multiset_combinations('a', 0))) == 1
assert list(multiset_combinations('abc', 1)) == [['a'], ['b'], ['c']]
def test_multiset_permutations():
ans = ['abby', 'abyb', 'aybb', 'baby', 'bayb', 'bbay', 'bbya', 'byab',
'byba', 'yabb', 'ybab', 'ybba']
assert [''.join(i) for i in multiset_permutations('baby')] == ans
assert [''.join(i) for i in multiset_permutations(multiset('baby'))] == ans
assert list(multiset_permutations([0, 0, 0], 2)) == [[0, 0]]
assert list(multiset_permutations([0, 2, 1], 2)) == [
[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]]
assert len(list(multiset_permutations('a', 0))) == 1
assert len(list(multiset_permutations('a', 3))) == 0
def test():
for i in range(1, 7):
print(i)
for p in multiset_permutations([0, 0, 1, 0, 1], i):
print(p)
assert capture(lambda: test()) == dedent('''\
1
[0]
[1]
2
[0, 0]
[0, 1]
[1, 0]
[1, 1]
3
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
4
[0, 0, 0, 1]
[0, 0, 1, 0]
[0, 0, 1, 1]
[0, 1, 0, 0]
[0, 1, 0, 1]
[0, 1, 1, 0]
[1, 0, 0, 0]
[1, 0, 0, 1]
[1, 0, 1, 0]
[1, 1, 0, 0]
5
[0, 0, 0, 1, 1]
[0, 0, 1, 0, 1]
[0, 0, 1, 1, 0]
[0, 1, 0, 0, 1]
[0, 1, 0, 1, 0]
[0, 1, 1, 0, 0]
[1, 0, 0, 0, 1]
[1, 0, 0, 1, 0]
[1, 0, 1, 0, 0]
[1, 1, 0, 0, 0]
6\n''')
def test_partitions():
assert [p.copy() for p in partitions(6, k=2)] == [
{2: 3}, {1: 2, 2: 2}, {1: 4, 2: 1}, {1: 6}]
assert [p.copy() for p in partitions(6, k=3)] == [
{3: 2}, {1: 1, 2: 1, 3: 1}, {1: 3, 3: 1}, {2: 3}, {1: 2, 2: 2},
{1: 4, 2: 1}, {1: 6}]
assert [p.copy() for p in partitions(6, k=2, m=2)] == []
assert [p.copy() for p in partitions(8, k=4, m=3)] == [
{4: 2}, {1: 1, 3: 1, 4: 1}, {2: 2, 4: 1}, {2: 1, 3: 2}] == [
i.copy() for i in partitions(8, k=4, m=3) if all(k <= 4 for k in i)
and sum(i.values()) <=3]
assert [p.copy() for p in partitions(S(3), m=2)] == [
{3: 1}, {1: 1, 2: 1}]
assert [i.copy() for i in partitions(4, k=3)] == [
{1: 1, 3: 1}, {2: 2}, {1: 2, 2: 1}, {1: 4}] == [
i.copy() for i in partitions(4) if all(k <= 3 for k in i)]
raises(ValueError, lambda: list(partitions(3, 0)))
# Consistency check on output of _partitions and RGS_unrank.
# This provides a sanity test on both routines. Also verifies that
# the total number of partitions is the same in each case.
# (from pkrathmann2)
for n in range(2, 6):
i = 0
for m, q in _set_partitions(n):
assert q == RGS_unrank(i, n)
i = i+1
assert i == RGS_enum(n)
def test_binary_partitions():
assert [i[:] for i in binary_partitions(10)] == [[8, 2], [8, 1, 1],
[4, 4, 2], [4, 4, 1, 1], [4, 2, 2, 2], [4, 2, 2, 1, 1],
[4, 2, 1, 1, 1, 1], [4, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2],
[2, 2, 2, 2, 1, 1], [2, 2, 2, 1, 1, 1, 1], [2, 2, 1, 1, 1, 1, 1, 1],
[2, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
assert len([j[:] for j in binary_partitions(16)]) == 36
def test_bell_perm():
assert [len(list(generate_bell(i))) for i in range(1, 7)] == [
factorial(i) for i in range(1, 7)]
assert list(generate_bell(3)) == [
(0, 1, 2), (1, 0, 2), (1, 2, 0), (2, 1, 0), (2, 0, 1), (0, 2, 1)]
def test_involutions():
lengths = [1, 2, 4, 10, 26, 76]
for n, N in enumerate(lengths):
i = list(generate_involutions(n + 1))
assert len(i) == N
assert len(set([Permutation(j)**2 for j in i])) == 1
def test_derangements():
assert len(list(generate_derangements(list(range(6))))) == 265
assert ''.join(''.join(i) for i in generate_derangements('abcde')) == (
'badecbaecdbcaedbcdeabceadbdaecbdeacbdecabeacdbedacbedcacabedcadebcaebd'
'cdaebcdbeacdeabcdebaceabdcebadcedabcedbadabecdaebcdaecbdcaebdcbeadceab'
'dcebadeabcdeacbdebacdebcaeabcdeadbceadcbecabdecbadecdabecdbaedabcedacb'
'edbacedbca')
assert list(generate_derangements([0, 1, 2, 3])) == [
[1, 0, 3, 2], [1, 2, 3, 0], [1, 3, 0, 2], [2, 0, 3, 1],
[2, 3, 0, 1], [2, 3, 1, 0], [3, 0, 1, 2], [3, 2, 0, 1], [3, 2, 1, 0]]
assert list(generate_derangements([0, 1, 2, 2])) == [
[2, 2, 0, 1], [2, 2, 1, 0]]
def test_necklaces():
def count(n, k, f):
return len(list(necklaces(n, k, f)))
m = []
for i in range(1, 8):
m.append((
i, count(i, 2, 0), count(i, 2, 1), count(i, 3, 1)))
assert Matrix(m) == Matrix([
[1, 2, 2, 3],
[2, 3, 3, 6],
[3, 4, 4, 10],
[4, 6, 6, 21],
[5, 8, 8, 39],
[6, 14, 13, 92],
[7, 20, 18, 198]])
def test_generate_oriented_forest():
assert list(generate_oriented_forest(5)) == [[0, 1, 2, 3, 4],
[0, 1, 2, 3, 3], [0, 1, 2, 3, 2], [0, 1, 2, 3, 1], [0, 1, 2, 3, 0],
[0, 1, 2, 2, 2], [0, 1, 2, 2, 1], [0, 1, 2, 2, 0], [0, 1, 2, 1, 2],
[0, 1, 2, 1, 1], [0, 1, 2, 1, 0], [0, 1, 2, 0, 1], [0, 1, 2, 0, 0],
[0, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 1, 1, 0, 1], [0, 1, 1, 0, 0],
[0, 1, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0]]
assert len(list(generate_oriented_forest(10))) == 1842
def test_unflatten():
r = list(range(10))
assert unflatten(r) == list(zip(r[::2], r[1::2]))
assert unflatten(r, 5) == [tuple(r[:5]), tuple(r[5:])]
raises(ValueError, lambda: unflatten(list(range(10)), 3))
raises(ValueError, lambda: unflatten(list(range(10)), -2))
def test_common_prefix_suffix():
assert common_prefix([], [1]) == []
assert common_prefix(list(range(3))) == [0, 1, 2]
assert common_prefix(list(range(3)), list(range(4))) == [0, 1, 2]
assert common_prefix([1, 2, 3], [1, 2, 5]) == [1, 2]
assert common_prefix([1, 2, 3], [1, 3, 5]) == [1]
assert common_suffix([], [1]) == []
assert common_suffix(list(range(3))) == [0, 1, 2]
assert common_suffix(list(range(3)), list(range(3))) == [0, 1, 2]
assert common_suffix(list(range(3)), list(range(4))) == []
assert common_suffix([1, 2, 3], [9, 2, 3]) == [2, 3]
assert common_suffix([1, 2, 3], [9, 7, 3]) == [3]
def test_minlex():
assert minlex([1, 2, 0]) == (0, 1, 2)
assert minlex((1, 2, 0)) == (0, 1, 2)
assert minlex((1, 0, 2)) == (0, 2, 1)
assert minlex((1, 0, 2), directed=False) == (0, 1, 2)
assert minlex('aba') == 'aab'
def test_ordered():
assert list(ordered((x, y), hash, default=False)) in [[x, y], [y, x]]
assert list(ordered((x, y), hash, default=False)) == \
list(ordered((y, x), hash, default=False))
assert list(ordered((x, y))) == [x, y]
seq, keys = [[[1, 2, 1], [0, 3, 1], [1, 1, 3], [2], [1]],
(lambda x: len(x), lambda x: sum(x))]
assert list(ordered(seq, keys, default=False, warn=False)) == \
[[1], [2], [1, 2, 1], [0, 3, 1], [1, 1, 3]]
raises(ValueError, lambda:
list(ordered(seq, keys, default=False, warn=True)))
def test_runs():
assert runs([]) == []
assert runs([1]) == [[1]]
assert runs([1, 1]) == [[1], [1]]
assert runs([1, 1, 2]) == [[1], [1, 2]]
assert runs([1, 2, 1]) == [[1, 2], [1]]
assert runs([2, 1, 1]) == [[2], [1], [1]]
from operator import lt
assert runs([2, 1, 1], lt) == [[2, 1], [1]]
def test_reshape():
seq = list(range(1, 9))
assert reshape(seq, [4]) == \
[[1, 2, 3, 4], [5, 6, 7, 8]]
assert reshape(seq, (4,)) == \
[(1, 2, 3, 4), (5, 6, 7, 8)]
assert reshape(seq, (2, 2)) == \
[(1, 2, 3, 4), (5, 6, 7, 8)]
assert reshape(seq, (2, [2])) == \
[(1, 2, [3, 4]), (5, 6, [7, 8])]
assert reshape(seq, ((2,), [2])) == \
[((1, 2), [3, 4]), ((5, 6), [7, 8])]
assert reshape(seq, (1, [2], 1)) == \
[(1, [2, 3], 4), (5, [6, 7], 8)]
assert reshape(tuple(seq), ([[1], 1, (2,)],)) == \
(([[1], 2, (3, 4)],), ([[5], 6, (7, 8)],))
assert reshape(tuple(seq), ([1], 1, (2,))) == \
(([1], 2, (3, 4)), ([5], 6, (7, 8)))
assert reshape(list(range(12)), [2, [3], set([2]), (1, (3,), 1)]) == \
[[0, 1, [2, 3, 4], set([5, 6]), (7, (8, 9, 10), 11)]]
def test_uniq():
assert list(uniq(p.copy() for p in partitions(4))) == \
[{4: 1}, {1: 1, 3: 1}, {2: 2}, {1: 2, 2: 1}, {1: 4}]
assert list(uniq(x % 2 for x in range(5))) == [0, 1]
assert list(uniq('a')) == ['a']
assert list(uniq('ababc')) == list('abc')
assert list(uniq([[1], [2, 1], [1]])) == [[1], [2, 1]]
assert list(uniq(permutations(i for i in [[1], 2, 2]))) == \
[([1], 2, 2), (2, [1], 2), (2, 2, [1])]
assert list(uniq([2, 3, 2, 4, [2], [1], [2], [3], [1]])) == \
[2, 3, 4, [2], [1], [3]]
def test_kbins():
assert len(list(kbins('1123', 2, ordered=1))) == 24
assert len(list(kbins('1123', 2, ordered=11))) == 36
assert len(list(kbins('1123', 2, ordered=10))) == 10
assert len(list(kbins('1123', 2, ordered=0))) == 5
assert len(list(kbins('1123', 2, ordered=None))) == 3
def test():
for ordered in [None, 0, 1, 10, 11]:
print('ordered =', ordered)
for p in kbins([0, 0, 1], 2, ordered=ordered):
print(' ', p)
assert capture(lambda : test()) == dedent('''\
ordered = None
[[0], [0, 1]]
[[0, 0], [1]]
ordered = 0
[[0, 0], [1]]
[[0, 1], [0]]
ordered = 1
[[0], [0, 1]]
[[0], [1, 0]]
[[1], [0, 0]]
ordered = 10
[[0, 0], [1]]
[[1], [0, 0]]
[[0, 1], [0]]
[[0], [0, 1]]
ordered = 11
[[0], [0, 1]]
[[0, 0], [1]]
[[0], [1, 0]]
[[0, 1], [0]]
[[1], [0, 0]]
[[1, 0], [0]]\n''')
def test():
for ordered in [None, 0, 1, 10, 11]:
print('ordered =', ordered)
for p in kbins(list(range(3)), 2, ordered=ordered):
print(' ', p)
assert capture(lambda : test()) == dedent('''\
ordered = None
[[0], [1, 2]]
[[0, 1], [2]]
ordered = 0
[[0, 1], [2]]
[[0, 2], [1]]
[[0], [1, 2]]
ordered = 1
[[0], [1, 2]]
[[0], [2, 1]]
[[1], [0, 2]]
[[1], [2, 0]]
[[2], [0, 1]]
[[2], [1, 0]]
ordered = 10
[[0, 1], [2]]
[[2], [0, 1]]
[[0, 2], [1]]
[[1], [0, 2]]
[[0], [1, 2]]
[[1, 2], [0]]
ordered = 11
[[0], [1, 2]]
[[0, 1], [2]]
[[0], [2, 1]]
[[0, 2], [1]]
[[1], [0, 2]]
[[1, 0], [2]]
[[1], [2, 0]]
[[1, 2], [0]]
[[2], [0, 1]]
[[2, 0], [1]]
[[2], [1, 0]]
[[2, 1], [0]]\n''')
def test_has_dups():
assert has_dups(set()) is False
assert has_dups(list(range(3))) is False
assert has_dups([1, 2, 1]) is True
def test__partition():
assert _partition('abcde', [1, 0, 1, 2, 0]) == [
['b', 'e'], ['a', 'c'], ['d']]
assert _partition('abcde', [1, 0, 1, 2, 0], 3) == [
['b', 'e'], ['a', 'c'], ['d']]
output = (3, [1, 0, 1, 2, 0])
assert _partition('abcde', *output) == [['b', 'e'], ['a', 'c'], ['d']]
|
hrashk/sympy
|
sympy/utilities/tests/test_iterables.py
|
Python
|
bsd-3-clause
| 24,090
| 0.00083
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Daniel Campos (danielcampos@avanzosc.es) Date: 29/09/2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import models
|
oihane/odoomrp-wip
|
mrp_byproduct_operations/__init__.py
|
Python
|
agpl-3.0
| 945
| 0
|
#!/usr/bin/env python
"""
make_a_star_cluster.py creates a model star cluster,
which can then be used in N-body simulations or for other purposes.
It requires AMUSE, which can be downloaded from http://amusecode.org or
https://github.com/amusecode/amuse.
It is not yet feature-complete, and function/argument names are
subject to change.
-- Steven Rieder steven at rieder punt nl
"""
import logging
import numpy
from amuse.units import (
units,
nbody_system,
generic_unit_converter,
)
from amuse.units.trigo import sin, cos
from amuse.datamodel.particles import Particles
from amuse.ic.plummer import new_plummer_sphere
from amuse.ic.kingmodel import new_king_model
try:
from amuse.ic.fractalcluster import new_fractal_cluster_model
except ImportError:
new_fractal_cluster_model = None
def new_masses(
stellar_mass=False,
initial_mass_function="salpeter",
upper_mass_limit=125. | units.MSun,
lower_mass_limit=0.1 | units.MSun,
number_of_stars=1024,
exceed_mass=True,
):
imf_name = initial_mass_function.lower()
if imf_name == "salpeter":
from amuse.ic.salpeter import new_salpeter_mass_distribution
initial_mass_function = new_salpeter_mass_distribution
elif imf_name == "kroupa":
from amuse.ic.brokenimf import new_kroupa_mass_distribution
initial_mass_function = new_kroupa_mass_distribution
elif imf_name == "flat":
from amuse.ic.flatimf import new_flat_mass_distribution
initial_mass_function = new_flat_mass_distribution
elif imf_name == "fixed":
from amuse.ic.flatimf import new_flat_mass_distribution
def new_fixed_mass_distribution(
number_of_particles, *list_arguments, **keyword_arguments
):
return new_flat_mass_distribution(
number_of_particles,
mass_min=stellar_mass/number_of_stars,
mass_max=stellar_mass/number_of_stars,
)
initial_mass_function = new_fixed_mass_distribution
if stellar_mass:
        # deliberately underestimate mean_mass a bit for faster results (fewer top-up draws below)
mean_mass = 0.25 | units.MSun
mass = initial_mass_function(
int(stellar_mass / mean_mass),
mass_min=lower_mass_limit,
mass_max=upper_mass_limit,
)
previous_number_of_stars = len(mass)
if exceed_mass:
# Allow one final star to exceed stellar_mass
final_star = 1+numpy.argmax(mass.cumsum() > stellar_mass)
if (final_star > 1 and final_star < len(mass)):
mass = mass[:final_star]
else:
# Limit to stars not exceeding stellar_mass
mass = mass[mass.cumsum() < stellar_mass]
additional_mass = [] | units.MSun
while True:
if previous_number_of_stars + len(additional_mass) > len(mass):
break
            # Not enough stars yet (or not verified yet); draw an additional batch of masses
additional_mass = initial_mass_function(
int(stellar_mass / mean_mass),
mass_min=lower_mass_limit,
mass_max=upper_mass_limit,
)
if exceed_mass:
# Allow one final star to exceed stellar_mass
final_star = 1+numpy.argmax(
mass.sum() + additional_mass.cumsum() > stellar_mass
)
if (final_star > 1 and final_star < len(mass)):
additional_mass = additional_mass[:final_star]
else:
# Limit to stars not exceeding stellar_mass
additional_mass = additional_mass[
mass.sum() + additional_mass.cumsum() < stellar_mass
]
mass.append(additional_mass)
number_of_stars = len(mass)
else:
# Give stars their mass
mass = initial_mass_function(
number_of_stars,
mass_min=lower_mass_limit,
mass_max=upper_mass_limit,
)
return mass
def new_star_cluster(
stellar_mass=False,
initial_mass_function="salpeter",
upper_mass_limit=125. | units.MSun,
lower_mass_limit=0.1 | units.MSun,
number_of_stars=1024,
effective_radius=3.0 | units.parsec,
star_distribution="plummer",
star_distribution_w0=7.0,
star_distribution_fd=2.0,
star_metallicity=0.01,
# initial_binary_fraction=0,
**kwargs
):
"""
Create stars.
When using an IMF, either the stellar mass is fixed (within
stochastic error) or the number of stars is fixed. When using
equal-mass stars, both are fixed.
"""
mass = new_masses(
stellar_mass=stellar_mass,
initial_mass_function=initial_mass_function,
upper_mass_limit=upper_mass_limit,
lower_mass_limit=lower_mass_limit,
number_of_stars=number_of_stars,
)
total_mass = mass.sum()
number_of_stars = len(mass)
print(number_of_stars, total_mass, effective_radius)
converter = generic_unit_converter.ConvertBetweenGenericAndSiUnits(
total_mass,
1. | units.kms,
effective_radius,
)
# Give stars a position and velocity, based on the distribution model.
if star_distribution == "plummer":
stars = new_plummer_sphere(
number_of_stars,
convert_nbody=converter,
)
elif star_distribution == "king":
stars = new_king_model(
number_of_stars,
star_distribution_w0,
convert_nbody=converter,
)
elif star_distribution == "fractal":
stars = new_fractal_cluster_model(
number_of_stars,
fractal_dimension=star_distribution_fd,
convert_nbody=converter,
)
else:
return -1, "No stellar distribution"
# set the stellar mass.
stars.mass = mass
# set other stellar parameters.
stars.metallicity = star_metallicity
# Virialize the star cluster if > 1 star
if len(stars) > 1:
stars.move_to_center()
stars.scale_to_standard(
convert_nbody=converter,
# virial_ratio=virial_ratio,
# smoothing_length_squared= ...,
)
# Record the cluster's initial parameters to the particle distribution
stars.collection_attributes.initial_mass_function = \
initial_mass_function.lower()
stars.collection_attributes.upper_mass_limit = upper_mass_limit
stars.collection_attributes.lower_mass_limit = lower_mass_limit
stars.collection_attributes.number_of_stars = number_of_stars
stars.collection_attributes.effective_radius = effective_radius
stars.collection_attributes.star_distribution = star_distribution
stars.collection_attributes.star_distribution_w0 = star_distribution_w0
stars.collection_attributes.star_distribution_fd = star_distribution_fd
stars.collection_attributes.star_metallicity = star_metallicity
# Derived/legacy values
stars.collection_attributes.converter_mass = \
converter.to_si(1 | nbody_system.mass)
stars.collection_attributes.converter_length =\
converter.to_si(1 | nbody_system.length)
stars.collection_attributes.converter_speed =\
converter.to_si(1 | nbody_system.speed)
return stars
def new_stars_from_sink(
origin,
upper_mass_limit=125 | units.MSun,
lower_mass_limit=0.1 | units.MSun,
default_radius=0.25 | units.pc,
velocity_dispersion=1 | units.kms,
logger=None,
initial_mass_function="kroupa",
distribution="random",
randomseed=None,
**keyword_arguments
):
"""
Form stars from an origin particle that keeps track of the properties of
this region.
"""
logger = logger or logging.getLogger(__name__)
if randomseed is not None:
logger.info("setting random seed to %i", randomseed)
numpy.random.seed(randomseed)
try:
initialised = origin.initialised
except AttributeError:
initialised = False
if not initialised:
logger.debug(
"Initialising origin particle %i for star formation",
origin.key
)
next_mass = new_star_cluster(
initial_mass_function=initial_mass_function,
upper_mass_limit=upper_mass_limit,
lower_mass_limit=lower_mass_limit,
number_of_stars=1,
**keyword_arguments
)
origin.next_primary_mass = next_mass[0].mass
origin.initialised = True
if origin.mass < origin.next_primary_mass:
logger.debug(
"Not enough in star forming region %i to form the next star",
origin.key
)
return Particles()
mass_reservoir = origin.mass - origin.next_primary_mass
stellar_masses = new_star_cluster(
stellar_mass=mass_reservoir,
upper_mass_limit=upper_mass_limit,
lower_mass_limit=lower_mass_limit,
        initial_mass_function=initial_mass_function,
).mass
number_of_stars = len(stellar_masses)
new_stars = Particles(number_of_stars)
new_stars.age = 0 | units.yr
new_stars[0].mass = origin.next_primary_mass
new_stars[1:].mass = stellar_masses[:-1]
origin.next_primary_mass = stellar_masses[-1]
new_stars.position = origin.position
new_stars.velocity = origin.velocity
try:
radius = origin.radius
except AttributeError:
radius = default_radius
rho = numpy.random.random(number_of_stars) * radius
theta = (
numpy.random.random(number_of_stars)
* (2 * numpy.pi | units.rad)
)
phi = (
numpy.random.random(number_of_stars) * numpy.pi | units.rad
)
x = rho * sin(phi) * cos(theta)
y = rho * sin(phi) * sin(theta)
z = rho * cos(phi)
new_stars.x += x
new_stars.y += y
new_stars.z += z
velocity_magnitude = numpy.random.normal(
scale=velocity_dispersion.value_in(units.kms),
size=number_of_stars,
) | units.kms
velocity_theta = (
numpy.random.random(number_of_stars)
* (2 * numpy.pi | units.rad)
)
velocity_phi = (
numpy.random.random(number_of_stars)
* (numpy.pi | units.rad)
)
vx = velocity_magnitude * sin(velocity_phi) * cos(velocity_theta)
vy = velocity_magnitude * sin(velocity_phi) * sin(velocity_theta)
vz = velocity_magnitude * cos(velocity_phi)
new_stars.vx += vx
new_stars.vy += vy
new_stars.vz += vz
new_stars.origin = origin.key
origin.mass -= new_stars.total_mass()
return new_stars
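if __name__ == "__main__":
    # Illustrative usage sketch added for clarity; it is not part of the original
    # module. It builds a small Plummer cluster with the default (Salpeter) IMF,
    # mirroring the "fixed number of stars" case described in the docstring of
    # new_star_cluster above, and assumes a working AMUSE installation.
    # new_stars_from_sink is not demonstrated here, since it also needs an origin
    # particle carrying mass, position and velocity attributes.
    example_stars = new_star_cluster(
        number_of_stars=128,
        effective_radius=1.0 | units.parsec,
        star_distribution="plummer",
    )
    print(len(example_stars), example_stars.total_mass().in_(units.MSun))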
|
rieder/MASC
|
src/amuse/ext/masc/cluster.py
|
Python
|
mit
| 10,655
| 0
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import sys
from gl_rep.data_loaders import airq_data_loader, simulation_loader, physionet_data_loader, har_data_loader
from gl_rep.glr import GLR
from gl_rep.models import EncoderGlobal, EncoderLocal, WindowDecoder
from gl_rep.utils import plot_reps, train_glr
import tensorflow as tf
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
gpus = tf.config.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
def main(args):
"""
    Train and validate our local and global representation learning framework on the different datasets.
"""
is_continue = False
# Load the data and experiment configurations
with open('configs.json') as config_file:
configs = json.load(config_file)[args.data]
if args.data=='air_quality':
n_epochs = 250
lr = 1e-3
trainset, validset, testset, _ = airq_data_loader(normalize="mean_zero")
elif args.data=='simulation':
n_epochs = 100
lr = 1e-2
trainset, validset, testset, _, _ = simulation_loader(normalize="none", mask_threshold=0.0)
elif args.data == 'physionet':
n_epochs = 200
lr = 1e-3
trainset, validset, testset, _ = physionet_data_loader(normalize="mean_zero")
elif args.data=='har':
n_epochs = 150
lr = 1e-3
trainset, validset, testset, normalization_specs = har_data_loader(normalize='none')
# Create the representation learning models
zt_encoder = EncoderLocal(zl_size=configs["zl_size"], hidden_sizes=configs["glr_local_encoder_size"])
zg_encoder = EncoderGlobal(zg_size=configs["zg_size"], hidden_sizes=configs["glr_global_encoder_size"])
dec = WindowDecoder(output_size=configs["feature_size"], output_length=configs["window_size"],
hidden_sizes=configs["glr_decoder_size"])
rep_model = GLR(global_encoder=zg_encoder, local_encoder=zt_encoder, decoder=dec,
window_size=configs["window_size"], time_length=configs["t_len"],
data_dim=configs["feature_size"], kernel_scales=configs["kernel_scales"],
kernel=configs["kernels"], beta=configs["beta"], M=configs["mc_samples"], sigma=.5,
lamda=args.lamda, length_scale=configs["length_scale"], p=15)
# Train the decoupled local and global representation learning modules
if args.train:
if is_continue:
rep_model.load_weights('./ckpt/glr_%s_lambda%.1f' %(args.data, args.lamda))
train_glr(rep_model, trainset, validset, lr=lr, n_epochs=n_epochs, data=args.data)
# Plot summary performance graphs for the learning framework,
# including the representation distribution and signal reconstruction plots
rep_model.load_weights('./ckpt/glr_%s_lambda%.1f' %(args.data, args.lamda))
plot_reps(testset, rep_model, args.data)
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default='air_quality', help="dataset to use")
parser.add_argument('--lamda', type=float, default=1., help="regularization weight")
parser.add_argument('--train', action='store_true')
args = parser.parse_args()
main(args)
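# Example invocation (illustrative only; the flags match the argparse options above):
#   python main.py --data air_quality --lamda 1.0 --train
# This trains the GLR model on the air-quality dataset with regularization weight 1.0,
# then reloads the saved checkpoint and plots the learned representations.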
|
googleinterns/local_global_ts_representation
|
main.py
|
Python
|
apache-2.0
| 3,904
| 0.00666
|
filenames = ['firstNames', 'secondNames', 'famousWrestlers', 'categories', 'jobs']
for filename in filenames:
with open('%s.csv' % filename, 'r') as f:
namelist = []
for name in f.read().split('\n'):
if len(name)>1: namelist.append(name)
with open('../js/%s.js' % filename, 'w') as dest_f:
dest_f.write('%s = %s;' % (filename, namelist))
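# Illustrative example of the transformation (the file contents below are assumptions):
# given a sibling firstNames.csv containing one name per line, e.g.
#   Alice
#   Bob
# the script writes ../js/firstNames.js with the content
#   firstNames = ['Alice', 'Bob'];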
|
staudt/gimmick-generator
|
lists/generate.py
|
Python
|
mit
| 352
| 0.028409
|
#!/usr/bin/env python
# coding=utf-8
from scrapy.spiders import Spider
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor as link
from scrapy.http import Request, FormRequest
from scrapy.selector import Selector
from illustrious.items import ProblemItem, SolutionItem, AccountItem
from datetime import datetime
import time
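# Mapping from human-readable language names to the numeric codes expected by POJ's submit form.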
LANGUAGE = {
'g++': '0',
'gcc': '1',
'java': '2',
'pascal': '3',
'c++': '4',
'c': '5',
'fortran': '6'
}
class PojInitSpider(CrawlSpider):
name = 'poj_init'
allowed_domains = ['poj.org']
start_urls = [
'http://poj.org/problemlist'
]
download_delay = 5
rules = [
Rule(
link(
allow=('problemlist\?volume=[0-9]+'),
unique=True
)
),
Rule(
link(
allow=('problem\?id=[0-9]+')
), callback='problem_item'
)
]
def problem_item(self, response):
html = response.body.\
replace('<=', ' ≤ ').\
            replace(' < ', ' &lt; ').\
            replace(' > ', ' &gt; ').\
replace('>=', ' ≥ ')
sel = Selector(text=html)
item = ProblemItem()
print response
item['oj'] = 'poj'
item['problem_id'] = response.url[-4:]
item['problem_url'] = response.url
item['title'] = sel.css('.ptt').xpath('./text()').extract()[0]
item['description'] = sel.css('.ptx').extract()[0]
item['input'] = sel.css('.ptx').extract()[1]
item['output'] = sel.css('.ptx').extract()[2]
try:
item['time_limit'] = sel.css('.plm').re('Case\sT[\S*\s]*MS')[0][21:]
except:
item['time_limit'] = sel.css('.plm').re('T[\S*\s]*MS')[0][16:]
item['memory_limit'] = sel.css('.plm').re('Me[\S*\s]*K')[0]
item['sample_input'] = sel.css('.sio').extract()[0]
item['sample_output'] = sel.css('.sio').extract()[1]
item['update_time'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
return item
class PojProblemSpider(Spider):
name = 'poj_problem'
allowed_domains = ['poj.org']
def __init__(self, problem_id='1000', *args, **kwargs):
self.problem_id = problem_id
super(PojProblemSpider, self).__init__(*args, **kwargs)
self.start_urls = [
'http://poj.org/problem?id=%s' % problem_id
]
def parse(self, response):
html = response.body.\
replace('<=', ' ≤ ').\
            replace(' < ', ' &lt; ').\
            replace(' > ', ' &gt; ').\
replace('>=', ' ≥ ')
sel = Selector(text=html)
item = ProblemItem()
item['oj'] = 'poj'
item['problem_id'] = self.problem_id
item['problem_url'] = response.url
item['title'] = sel.css('.ptt').xpath('./text()').extract()[0]
item['description'] = sel.css('.ptx').extract()[0]
item['input'] = sel.css('.ptx').extract()[1]
item['output'] = sel.css('.ptx').extract()[2]
try:
item['time_limit'] = sel.css('.plm').re('Case\sT[\S*\s]*MS')[0][21:]
except:
item['time_limit'] = sel.css('.plm').re('T[\S*\s]*MS')[0][16:]
item['memory_limit'] = sel.css('.plm').re('Me[\S*\s]*K')[0][18:]
item['sample_input'] = sel.css('.sio').extract()[0]
item['sample_output'] = sel.css('.sio').extract()[1]
item['update_time'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
return item
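# PojSubmitSpider flow: log in through the POJ login form, confirm the session by
# fetching /loginlog, POST the (base64-encoded) source to /submit, then crawl the
# status pages and read the verdict from the matching row for this username.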
class PojSubmitSpider(CrawlSpider):
name = 'poj_submit'
allowed_domains = ['poj.org']
login_url = 'http://poj.org/login'
submit_url = 'http://poj.org/submit'
login_verify_url = 'http://poj.org/loginlog'
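    # Default payload: a base64-encoded C solution to the classic A+B problem
    # (a short scanf/printf program); it is submitted with encoded='1' below.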
source = \
'I2luY2x1ZGUgPHN0ZGlvLmg+CgppbnQgbWFpbigpCnsKICAgIGludCBhLGI7CiAgICBzY2FuZigiJWQgJWQiLCZhLCAmYik7CiAgICBwcmludGYoIiVkXG4iLGErYik7CiAgICByZXR1cm4gMDsKfQ=='
start_urls = [
"http://poj.org/status"
]
download_delay = 0.5
rules = [
Rule(link(allow=('/status\?top=[0-9]+'), deny=('status\?bottom=[0-9]+')), follow=True, callback='parse_start_url')
]
is_login = False
def __init__(self,
solution_id='None',
problem_id='1000',
language='g++',
source=None,
username='sdutacm1',
password='sdutacm', *args, **kwargs):
super(PojSubmitSpider, self).__init__(*args, **kwargs)
self.solution_id = solution_id
self.username = username
self.password = password
self.problem_id = problem_id
self.language = language
if source is not None:
self.source = source
def start_requests(self):
return [FormRequest(self.login_url,
formdata = {
'user_id1': self.username,
'password1': self.password,
'B1': 'login',
},
callback = self.after_login,
)]
def after_login(self, response):
return [Request(self.login_verify_url,
callback = self.login_verify
)]
def login_verify(self, response):
if response.url == self.login_verify_url:
self.is_login = True
self.login_time = time.mktime(time.strptime(\
response.headers['Date'], \
'%a, %d %b %Y %H:%M:%S %Z')) + (8 * 60 * 60)
time.sleep(1)
return [FormRequest(self.submit_url,
formdata = {
'problem_id': self.problem_id,
'language': LANGUAGE.get(self.language, '0'),
'source': self.source,
'submit': 'Submit',
'encoded': '1'
},
callback = self.after_submit,
dont_filter = True
)]
else:
return Request(self.start_urls[0], callback=self.parse_start_url)
def after_submit(self, response):
time.sleep(3)
for url in self.start_urls:
yield self.make_requests_from_url(url)
def parse_start_url(self, response):
sel = Selector(response)
item = SolutionItem()
item['oj'] = 'poj'
item['problem_id'] = self.problem_id
item['language'] = self.language
item['solution_id'] = self.solution_id
if self.is_login:
for tr in sel.xpath('//table')[-1].xpath('.//tr')[1:]:
user = tr.xpath('.//td/a/text()').extract()[0]
_submit_time = tr.xpath('.//td/text()').extract()[-1]
if user == self.username:
item['submit_time'] = _submit_time
item['run_id'] = tr.xpath('.//td/text()').extract()[0]
try:
item['memory'] = \
tr.xpath('.//td')[4].xpath('./text()').extract()[0]
item['time'] = \
tr.xpath('.//td')[5].xpath('./text()').extract()[0]
except:
pass
item['code_length'] = tr.xpath('.//td/text()').extract()[-2]
item['result'] = tr.xpath('.//td').xpath('.//font/text()').extract()[0]
self._rules = []
return item
else:
item['result'] = 'Submit Error'
self._rules = []
return item
class PojStatusSpider(Spider):
name = 'poj_status'
allowed_domains = ['poj.org']
def __init__(self, run_id=13881167, *args, **kwargs):
super(PojStatusSpider, self).__init__(*args, **kwargs)
self.run_id = str(run_id)
self.start_urls = [
'http://poj.org/status?top=%s' % (int(run_id) + 1)
]
def parse(self, response):
sel = Selector(response)
item = SolutionItem()
item['oj'] = 'poj'
item['run_id'] = self.run_id
for tr in sel.xpath('//table')[-1].xpath('.//tr')[1:]:
runid = tr.xpath('.//td/text()').extract()[0]
_submit_time = tr.xpath('.//td/text()').extract()[-1]
if runid == self.run_id:
item['submit_time'] = _submit_time
item['problem_id'] = tr.xpath('.//td/a/text()').extract()[1]
item['language'] = tr.xpath('.//td')[6].xpath('.//text()').extract()[0]
try:
item['memory'] = \
tr.xpath('.//td')[4].xpath('./text()').extract()[0]
item['time'] = \
tr.xpath('.//td')[5].xpath('./text()').extract()[0]
except:
pass
item['code_length'] = tr.xpath('.//td/text()').extract()[-2]
item['result'] = tr.xpath('.//td').xpath('.//font/text()').extract()[0]
self._rules = []
return item
else:
item['result'] = 'wait'
self._rules = []
class PojAccountSpider(Spider):
name = 'poj_user'
allowed_domains = ['poj.org']
login_url = 'http://poj.org/login'
login_verify_url = 'http://poj.org/loginlog'
accepted_url = \
'http://poj.org/status?problem_id=&user_id=%s&result=0&language='
download_delay = 1
is_login = False
solved = {}
def __init__(self,
username='sdutacm1',
password='sdutacm', *args, **kwargs):
super(PojAccountSpider, self).__init__(*args, **kwargs)
self.username = username
self.password = password
self.start_urls = [
"http://poj.org/userstatus?user_id=%s" % username
]
def start_requests(self):
return [FormRequest(self.login_url,
formdata = {
'user_id1': self.username,
'password1': self.password,
'B1': 'login',
},
callback = self.after_login,
)]
def after_login(self, response):
return [Request(self.login_verify_url,
callback = self.login_verify
)]
def login_verify(self, response):
if response.url == self.login_verify_url:
self.is_login = True
for url in self.start_urls:
yield self.make_requests_from_url(url)
def parse(self, response):
sel = Selector(response)
self.item = AccountItem()
self.item['oj'] = 'poj'
self.item['username'] = self.username
if self.is_login:
try:
self.item['rank'] = sel.xpath('//center/table/tr')[1].\
xpath('.//td/font/text()').extract()[0]
self.item['accept'] = sel.xpath('//center/table/tr')[2].\
xpath('.//td/a/text()').extract()[0]
self.item['submit'] = sel.xpath('//center/table/tr')[3].\
xpath('.//td/a/text()').extract()[0]
yield Request(self.accepted_url % self.username,
callback = self.accepted
)
self.item['status'] = 'Authentication Success'
except:
self.item['status'] = 'Unknown Error'
else:
self.item['status'] = 'Authentication Failed'
yield self.item
def accepted(self, response):
sel = Selector(response)
next_url = sel.xpath('//p/a/@href')[2].extract()
table_tr = sel.xpath('//table')[-1].xpath('.//tr')[1:]
for tr in table_tr:
name = tr.xpath('.//td/a/text()').extract()[0]
problem_id = tr.xpath('.//td[3]/a/text()').extract()[0].strip()
submit_time = tr.xpath('.//td/text()').extract()[-1]
self.solved[problem_id] = submit_time
self.item['solved'] = self.solved
if table_tr:
yield Request('http://' + self.allowed_domains[0] + '/' + next_url,
callback = self.accepted
)
yield self.item
|
Coderhypo/makinami
|
illustrious/spiders/poj.py
|
Python
|
mit
| 12,789
| 0.005943
|
##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from _IECoreArnold import *
from UniverseBlock import UniverseBlock
|
code-google-com/cortex-vfx
|
contrib/IECoreArnold/python/IECoreArnold/__init__.py
|
Python
|
bsd-3-clause
| 1,852
| 0
|