| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
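Each row below pairs a code `prefix` and `suffix` with the held-out `middle` span, i.e. what looks like a fill-in-the-middle (FIM) sample for code completion. A minimal sketch of assembling one row into a single FIM training string, assuming a dict-like row with the fields named in the header above (the sentinel tokens and the helper name are illustrative, not part of the dataset):

def build_fim_example(row,
                      prefix_tok="<fim_prefix>",
                      suffix_tok="<fim_suffix>",
                      middle_tok="<fim_middle>"):
    # Concatenate in prefix-suffix-middle (PSM) order so a model sees both
    # surrounding contexts before emitting the held-out middle span.
    return (prefix_tok + row["prefix"]
            + suffix_tok + row["suffix"]
            + middle_tok + row["middle"])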
siosio/intellij-community
|
python/testData/mover/multiLineSelectionDifferentIndentLevelsMoveToEmptyLine_afterDown.py
|
Python
|
apache-2.0
| 74
| 0.081081
|
pass
<caret
|
><selection>n = 0
while n:
print("sp
|
am")</selection>
pass
|
CloudBoltSoftware/cloudbolt-forge
|
blueprints/azure_functions/create.py
|
Python
|
apache-2.0
| 3,475
| 0.007194
|
"""
Creates an Azure serverless function.
"""
from common.methods import set_progress
from infrastructure.models import CustomField
from common.methods import generate_string_from_template
import os, json
from django.conf import settings
def create_custom_fields_as_needed():
CustomField.objects.get_or_create(
name='azure_function_name', type='STR',
defaults={'label': 'Azure function name', 'description': 'Name of a deployed azure function', 'show_as_attribute': True}
)
CustomField.objects.get_or_create(
name='resource_group_name', type='STR',
defaults={'label': 'Azure Resource Group', 'description': 'Used by the Azure blueprints',
'show_as_attribute': True}
)
def run(job, **kwargs):
resource = kwargs.get('resource')
function_name = '{{ function_name }}'
storage_account_name = function_name + "storageaccount"
file_location = "{{ file_location }}"
if file_location.startswith(settings.MEDIA_URL):
set_progress("Converting relative URL to filesystem path")
file_location = file_location.replace(settings.MEDIA_URL, settings.MEDIA_ROOT)
create_custom_fields_as_needed()
#check if function name is already in use
function_name_check = "az functionapp list"
val = os.popen(function_name_check).read()
function_name_check_response = json.loads(val)
used_names = []
for function in function_name_check_response:
used_names.append(function['name'])
if function_name in used_names:
response = "{0} function name is already in use. Please use a different one.".format(function_name)
return "failure", response, ""
#create a resource group for the function
resource_group_name = function_name + "-resource-group"
resource_group_create = 'az group create --name ' + resource_group_name + ' --location westeurope'
os.system(resource_group_create)
#check if storage name is already in use, create a function storage
name_check = "az storage account check-name --name {0}".format(storage_account_name)
name_check_response = json.loads(os.popen(name_check).read())
if name_check_response['nameAvailable']:
create_storage_command = "az storage account create --name {0} --location westeurope --resource-group {1} --sku Standard_LRS".fo
|
rmat(storage_account_name, resource_group_name)
os.system(create_storage_command)
|
else:
return "failure", '{0}'.format(name_check_response['reason']), ""
#create the azure function
create_function_command = "az functionapp create --name " + function_name + " --storage-account " + storage_account_name + " --consumption-plan-location westeurope --resource-group " + resource_group_name
try:
create_function_check = json.loads(os.popen(create_function_command).read())
except Exception as e:
return 'failure', 'the function app could not be created', '{0}'.format(e)
if create_function_check['name'] == function_name:
set_progress('The function app has been successfully created')
else:
return 'failure', 'The app could not be created', ''
resource.name = function_name
resource.resource_group_name = resource_group_name
resource.save()
fxn = "az functionapp deployment source config-zip -g {0} -n {1} --src {2}".format(resource_group_name, function_name, file_location)
json.loads(os.popen(fxn).read())
return 'success', 'The function has successfully been created.' , ''
|
nijel/weblate
|
weblate/addons/apps.py
|
Python
|
gpl-3.0
| 905
| 0
|
#
# Copyright © 2012–2022 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later ver
|
sion.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.apps import AppConfig
class AddonsConfig(AppConfig):
|
name = "weblate.addons"
label = "addons"
verbose_name = "Add-ons"
|
gregbdunn/aws-ec2rescue-linux
|
tools/moduletests/unit/test_arpcache.py
|
Python
|
apache-2.0
| 12,661
| 0.003001
|
# Copyright 2016-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
Unit tests for the arpcache module
"""
import os
import subprocess
import sys
import unittest
import mock
import moduletests.src.arpcache
try:
# Python 2.x
from cStringIO import StringIO
except ImportError:
# Python 3.x
from io import StringIO
if sys.hexversion >= 0x3040000:
# contextlib.redirect_stdout was introduced in Python 3.4
import contextlib
else:
# contextlib2 is a backport of contextlib from Python 3.5 and is compatible with Python2/3
import contextlib2 as contextlib
class TestArpcache(unittest.TestCase):
config_file_path = "/etc/sysctl.d/55-arp-gc_thresh1.conf"
def setUp(self):
self.output = StringIO()
def tearDown(self):
self.output.close()
@mock.patch("subprocess.check_output")
def test_detect_noproblem(self, check_output_mock):
check_output_mock.return_value = "net.ipv4.neigh.default.gc_thresh1 = 0"
self.assertFalse(moduletests.src.arpcache.detect())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output")
def test_detect_problem(self, check_output_mock):
check_output_mock.return_value = "net.ipv4.neigh.default.gc_thresh1 = 1"
self.assertTrue(moduletests.src.arpcache.detect())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=subprocess.CalledProcessError(
"1", "test", "/etc/sysctl.d/55-arp-gc_thresh1.conf: no such file or directory"))
def test_fix_cpe(self, check_output_mock):
with contextlib.redirect_stdout(self.output):
self.assertRaises(subprocess.CalledProcessError, moduletests.src.arpcache.fix, self.config_file_path)
self.assertTrue(self.output.getvalue().endswith(
"[UNFIXED] 'sysctl -w net.ipv4.neigh.default.gc_thresh1=0' failed for running system\n"))
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[False])
@mock.patch("moduletests.src.arpcache.open", mock.mock_open(read_data="stuff"))
def test_fix_exists_sudo_true(self, check_output_mock, exists_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith(
"[FIXED] set net.ipv4.neigh.default.gc_thresh1=0 for running system\n"
"[FIXED] net.ipv4.neigh.default.gc_thresh1=0 in /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[True])
@mock.patch("moduletests.src.arpcache.open", mock.mock_open(read_data="net.ipv4.neigh.default.gc_thresh1 = 0\n"
"something else\n"))
def test_fix_sudo_true(self, check_output_mock, exists_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith(
"[FIXED] set net.ipv4.neigh.default.gc_thresh1=0 for running system\n"
"[FIXED] net.ipv4.neigh.default.gc_thresh1=0 in /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[True])
@mock.patch("moduletests.src.arp
|
cache.open", mock.mock_open(read_data="net.ipv4.neigh.default.gc_thresh1 =
|
0\n"
"net.ipv4.neigh.default.gc_thresh1 = 0\n"))
def test_fix_sudo_true_found_twice(self, check_output_mock, exists_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith(
"[FIXED] set net.ipv4.neigh.default.gc_thresh1=0 for running system\n"
"[FIXED] net.ipv4.neigh.default.gc_thresh1=0 in /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[False])
@mock.patch("moduletests.src.arpcache.open", side_effect=IOError)
def test_fix_writefail(self, open_mock, exists_mock, check_output_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertRaises(IOError, moduletests.src.arpcache.fix, self.config_file_path)
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
self.assertTrue(open_mock.called)
self.assertTrue(self.output.getvalue().endswith(
"[UNFIXED] Failed to write config to /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
@mock.patch("moduletests.src.arpcache.detect", return_value=False)
def test_run_success(self, detect_mock):
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.run())
self.assertTrue(self.output.getvalue().endswith("Determining if aggressive ARP caching is enabled\n"
"[SUCCESS] Aggressive arp caching is disabled.\n"))
self.assertTrue(detect_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", return_value=True)
def test_run_no_remediate(self, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": False,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
moduletests.src.arpcache.run()
self.assertTrue("[UNFIXED] Remediation impossible without sudo and --remediate.\n"
"-- Running as root/sudo: True\n"
"-- Required --remediate flag specified: False\n"
"[FAILURE] Aggressive arp caching is enabled."
in self.output.getvalue())
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", return_value=True)
@mock.patch("moduletests.src.arpcache.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.arpcache.backup", return_value=True)
@mock.patch("moduletests.src.arpcache.fix", return_value=True)
@mock.patch("moduletests.src.arpcache.restore", return_value=True)
def test_run_failure_isfile(self, restore_mock, fix_mock, backup_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": {self.conf
|
demianw/tract_querier
|
tract_querier/tensor/tests/test_scalar_measures.py
|
Python
|
bsd-3-clause
| 2,117
| 0.000945
|
from .. import scalar_measures
import numpy
from numpy.testing import assert_array_almost_equal
def test_fractional_anisotropy(N=10, random=numpy.random.RandomState(0)):
tensors = random.randn(N, 3, 3)
fa = numpy.empty(N)
for i, t in enumerate(tensors):
tt = numpy.dot(t, t.T)
|
tensors[i] = tt
ev = numpy.linalg.eigvalsh(tt)
mn = ev.mean()
fa[i] = numpy.sqrt(1.5 * ((ev - mn) ** 2).sum() / (ev ** 2).sum())
assert_array_almost_equal(fa, scalar_measures.fractional_anisotropy(tensors))
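# For reference: the loop above computes the standard fractional anisotropy of
# the eigenvalues lambda_i of the symmetrized tensor numpy.dot(t, t.T),
#   FA = sqrt(1.5 * sum_i (lambda_i - mean(lambda))**2 / sum_i lambda_i**2),
# which is what scalar_measures.fractional_anisotropy is asserted to match.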
def test_volume_fraction(N=10, random=numpy.random.RandomState(0)):
tensors = random.randn(N, 3, 3)
vf = numpy.empty(N)
for i, t in enumerate(
|
tensors):
tt = numpy.dot(t, t.T)
tensors[i] = tt
ev = numpy.linalg.eigvalsh(tt)
mn = ev.mean()
vf[i] = 1 - ev.prod() / (mn ** 3)
assert_array_almost_equal(vf, scalar_measures.volume_fraction(tensors))
def test_tensor_determinant(N=10, random=numpy.random.RandomState(0)):
tensors = random.randn(N, 3, 3)
dt = numpy.empty(N)
for i, t in enumerate(tensors):
tt = numpy.dot(t, t.T)
tensors[i] = tt
dt[i] = numpy.linalg.det(tt)
assert_array_almost_equal(dt, scalar_measures.tensor_det(tensors))
def test_tensor_traces(N=10, random=numpy.random.RandomState(0)):
tensors = random.randn(N, 3, 3)
res = numpy.empty(N)
for i, t in enumerate(tensors):
tt = numpy.dot(t, t.T)
tensors[i] = tt
res[i] = numpy.trace(tt)
assert_array_almost_equal(res, scalar_measures.tensor_trace(tensors))
def test_tensor_contraction(N=10, random=numpy.random.RandomState(0)):
tensors1 = random.randn(N, 3, 3)
tensors2 = random.randn(N, 3, 3)
res = numpy.empty(N)
for i in range(N):
t1 = tensors1[i]
t2 = tensors2[i]
tt1 = numpy.dot(t1, t1.T)
tt2 = numpy.dot(t2, t2.T)
tensors1[i] = tt1
tensors2[i] = tt2
res[i] = numpy.trace(numpy.dot(tt1, tt2.T))
assert_array_almost_equal(res, scalar_measures.tensor_contraction(tensors1, tensors2))
|
jokajak/itweb
|
data/env/lib/python2.6/site-packages/SQLAlchemy-0.6.7-py2.6.egg/sqlalchemy/dialects/oracle/base.py
|
Python
|
gpl-3.0
| 43,930
| 0.005304
|
# oracle/base.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the Oracle database.
Oracle versions 8 through current (11g at the time of this writing) are supported.
For information on connecting via specific drivers, see the documentation
for that driver.
Connect Arguments
-----------------
The dialect supports several :func:`~sqlalchemy.create_engine()` arguments which
affect the behavior of the dialect regardless of driver in use.
* *use_ansi* - Use ANSI JOIN constructs (see the section on Oracle 8). Defaults
to ``True``. If ``False``, Oracle-8 compatible constructs are used for joins.
* *optimize_limits* - defaults to ``False``. see the section on LIMIT/OFFSET.
* *use_binds_for_limits* - defaults to ``True``. see the section on LIMIT/OFFSET.
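For example, a minimal sketch passing these flags (the connection URL is only a
placeholder)::

    from sqlalchemy import create_engine

    engine = create_engine('oracle://scott:tiger@localhost:1521/orcl',
                           use_ansi=True,
                           optimize_limits=False,
                           use_binds_for_limits=True)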
Auto Increment Behavior
-----------------------
SQLAlchemy Table objects which include integer primary keys are usually assumed to have
"autoincrementing" behavior, meaning they can generate their own primary key values upon
INSERT. Since Oracle has no "autoincrement" feature, SQLAlchemy relies upon sequences
to produce these values. With the Oracle dialect, *a sequence must always be explicitly
specified to enable autoincrement*. This is divergent from the majority of documentation
examples which assume the usage of an autoincrement-capable database. To specify sequences,
use the sqlalchemy.schema.Sequence object which is passed to a Column construct::
t = Table('mytable', metadata,
Column('id', Integer, Sequence('id_seq'), primary_key=True),
Column(...), ...
)
This step is also required when using table reflection, i.e. autoload=True::
t = Table('mytable', metadata,
Column('id', Integer, Sequence('id_seq'), primary_key=True),
autoload=True
)
Identifier Casing
-----------------
In Oracle, the data dictionary represents all case insensitive identifier names
using UPPERCASE text. SQLAlchemy on the other hand considers an all-lower case identifier
name to be case insensitive. The Oracle dialect converts all case insensitive identifiers
to and from those two formats during schema level communication, such as reflection of
tables and indexes. Using an UPPERCASE name on the SQLAlchemy side indicates a
case sensitive identifier, and SQLAlchemy will quote the name - this will cause mismatches
against data dictionary data received from Oracle, so unless identifier names have been
truly created as case sensitive (i.e. using quoted names), all lowercase names should be
used on the SQLAlchemy side.
Unicode
-------
SQLAlchemy 0.6 uses the "native unicode" mode provided as of cx_oracle 5. cx_oracle 5.0.2
or greater is recommended for support of NCLOB. If not using cx_oracle 5, the NLS_LANG
environment variable needs to be set in order for the oracle client library to use
proper encoding, such as "AMERICAN_AMERICA.UTF8".
Also note that Oracle supports unicode data through the NVARCHAR and NCLOB data types.
When using the SQLAlchemy Unicode and UnicodeText types, these DDL types will be used
within CREATE TABLE statements. Usage of VARCHAR2 and CLOB with unicode text still
requires NLS_LANG to be set.
LIMIT/OFFSET Support
--------------------
Oracle has no support for the LIMIT or OFFSET keywords. SQLAlchemy uses
a wrapped subquery approach in conjunction with ROWNUM. The exact methodology
is taken from
http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html .
There are two options which affect its behavior:
* the "FIRST ROWS()" optimization keyword is not used by default. To enable the usage of this
optimization directive, specify ``optimize_limits=True`` to :func:`.create_engine`.
* the values passed for the limit/offset are sent as bound parameters. Some users have observed
that Oracle produces a poor query plan when the values are sent as binds and not
rendered literally. To render the limit/offset values literally within the SQL
statement, specify ``use_binds_for_limits=False`` to :func:`.create_engine`.
Some users have reported better performance when the entirely different approach of a
window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to provide LIMIT/OFFSET (note
that the majority of users don't observe this). To suit this case the
method used for LIMIT/OFFSET can be replaced entirely. See the recipe at
http://www.sqlalchemy.org/trac/wiki/UsageRecipes/WindowFunctionsByDefault
which installs a select compiler that overrides the generation of limit/offset with
a window function.
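As a small illustration, a limited query is written the same way as for any other
backend; only the rendered SQL differs (``t`` is the table from the example above)::

    from sqlalchemy import select

    # On Oracle this compiles to nested subqueries filtered on ROWNUM rather
    # than a LIMIT/OFFSET clause.
    stmt = select([t]).limit(10).offset(20)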
ON UPDATE CASCADE
-----------------
Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger based solution
is available at http://asktom.oracle.com/tkyte/update_cascade/index.html .
When using the SQLAlchemy ORM, the ORM has limited ability to manually issue
cascading updates - specify ForeignKey objects using the
"deferrable=True, initially='deferred'" keyword arguments,
and specify "passive_updates=False" on each relationship().
Oracle 8 Compatibility
----------------------
When Oracle 8 is detected, the dialect internally configures itself to the following
behaviors:
* the use_ansi flag is set to False. This has the effect of converting all
JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN
makes use of Oracle's (+) operator.
* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when
the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are issued
instead. This is because these types don't seem to work correctly on Oracle 8
even though they are available. The :class:`~sqlalchemy.types.NVARCHAR`
and :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate NVARCHAR2 and NCLOB.
* the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy
encodes all Python unicode objects to "string" before passing in as bind parameters.
Synonym/DBLINK Reflection
-------------------------
When using reflection with Table objects, the dialect can optionally search for tables
indicated by synonyms that reference DBLINK-ed tables by passing the flag
oracle_resolve_synonyms=True as a keyword argument to the Table construct. If DBLINK
is not in use this
|
flag should be left off.
"""
import random, re
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, log
from sqlalchemy.engine import de
|
fault, base, reflection
from sqlalchemy.sql import compiler, visitors, expression
from sqlalchemy.sql import operators as sql_operators, functions as sql_functions
from sqlalchemy import types as sqltypes
from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, DATE, DATETIME, \
BLOB, CLOB, TIMESTAMP, FLOAT
RESERVED_WORDS = set('SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN '
'DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED '
'ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE ANY '
'TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE BY ASC '
'REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES AS IN VIEW '
'EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS NOT TRIGGER '
'ELSE CREATE INTERSECT PCTFREE DISTINCT USER CONNECT SET MODE '
'OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR DECIMAL UNION PUBLIC '
'AND START UID COMMENT'.split())
class RAW(sqltypes.LargeBinary):
pass
OracleRaw = RAW
class NCLOB(sqltypes.Text):
__visit_name__ = 'NCLOB'
VARCHAR2 = VARCHAR
NVARCHAR2 = NVARCHAR
class NUMBER(sqltypes.Numeric, sqltypes.Integer):
__visit_name__ = 'NUMBER'
def __init__(self, precision=None, scale=None, asdecimal=None):
if asdecimal is None:
asdecimal = bool(scale and scale > 0)
super(NUMBER, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal)
def adapt(self, impltype):
ret = super(NUMBER, self).adapt(implt
|
impactlab/eemeter
|
eemeter/modeling/split.py
|
Python
|
mit
| 7,118
| 0.000281
|
import logging
import traceback
import numpy as np
from eemeter.structures import EnergyTrace
logger = logging.getLogger(__name__)
class SplitModeledEnergyTrace(object):
''' Light wrapper around models applicable to a single trace which
fits and predicts multiple models for different segments.
Parameters
----------
trace : eemeter.structures.EnergyTrace
Trace to be modeled.
formatter : eemeter.modeling.formatter.Formatter
Formatter to prep trace data for modeling.
model_mapping : dict
Items of this dictionary map `modeling_period_label` s to models
modeling_period_set : eemeter.structures.ModelingPeriodSet
The set of modeling periods over which models should be applicable.
'''
def __init__(self, trace, formatter, model_mapping, modeling_period_set):
self.trace = trace
self.formatter = formatter
self.model_mapping = model_mapping
self.modeling_period_set = modeling_period_set
self.fit_outputs = {}
def __repr__(self):
return (
"SplitModeledEnergyTrace(trace={}, formatter={},"
" model_mapping={}, modeling_period_set={})"
.format(self.trace, self.formatter, self.model_mapping,
self.modeling_period_set)
)
def fit(self, weather_source):
''' Fit all models associated with this trace.
Parameters
----------
weather_source : eemeter.weather.ISDWeatherSource
Weather source to use in creating covariate data.
'''
for modeling_period_label, modeling_period in \
self.modeling_period_set.iter_modeling_periods():
filtered_data = self._filter_by_modeling_period(
self.trace, modeling_period)
filtered_trace = EnergyTrace(
self.trace.interpretation, data=filtered_data,
unit=self.trace.unit)
model = self.model_mapping[modeling_period_label]
try:
input_data = self.formatter.create_input(
filtered_trace, weather_source)
except:
logger.warn(
'For trace "{}" and modeling_period "{}", was not'
' able to format input data for {}.'
.format(self.trace.interpretation, modeling_period_label,
model)
)
self.fit_outputs[modeling_period_label] = {
"status": "FAILURE",
"traceback": traceback.format_exc(),
"start_date": None,
"end_date": None,
"rows": None,
}
continue
else:
input_description = self.formatter.describe_input(input_data)
outputs = {
"start_date": input_description.get('start_date'),
"end_date": input_description.get('end_date'),
"n_rows": input_description.get('n_rows'),
}
try:
outputs.update(model.fit(input_data))
except:
logger.warn(
'For trace "{}" and modeling_period "{}", {} was not'
' able to fit using input data: {}'
.format(self.trace.interpretation, modeling_period_label,
model, input_data)
)
outputs.update({
"status": "FAILURE",
"traceback": traceback.format_exc(),
})
else:
logger.info(
'Successfully fitted {} to formatted input data for'
' trace "{}" and modeling_period "{}".'
.format(model, self.trace.interpretation,
modeling_period_label)
)
outputs.update({"status": "SUCCESS"})
self.fit_outputs[modeling_period_label] = outputs
return self.fit_outputs
def predict(self, modeling_period_label, demand_fixture_data,
params=None):
''' Predict for any one of the modeling_periods associated with this
trace. Light wrapper around :code:`model.predict(` method.
Parameters
----------
modeling_period_label : str
Modeling period indicating which model to use in making the
prediction.
demand_fixture_data : object
Data (formatted by :code:`self.formatter`) over which prediction
should be made.
params : object, default None
Fitted parameters for the model. If :code:`None`, use parameters
found when :code:`.fit(` method was called.
'''
outputs = self.fit_outputs[modeling_period_label]
if outputs["status"] == "FAILURE":
logger.warn(
'Skipping prediction for modeling_period "{}" because'
' model fit failed.'.format(modeling_period_label)
)
return None
if params is None:
params = outputs["model_params"]
return self.model_mapping[modeling_period_label].predict(
demand_fixture_data, params)
def compute_derivative(self, modeling_period_label, derivative_callable,
**kwargs):
''' Compute a modeling derivative for this modeling period.
Parameters
|
----------
|
modeling_period_label : str
Label for modeling period for which derivative should be computed.
derivative_callable : callable
Callable which can be used as follows:
.. code-block:: python
>>> derivative_callable(formatter, model, **kwargs)
**kwargs
Arbitrary keyword arguments to be passed to the derivative callable
'''
outputs = self.fit_outputs[modeling_period_label]
if outputs["status"] == "FAILURE":
return None
model = self.model_mapping[modeling_period_label]
try:
derivative = derivative_callable(self.formatter, model, **kwargs)
except Exception:
logger.exception("Derivative computation failed.")
return None
return derivative
@staticmethod
def _filter_by_modeling_period(trace, modeling_period):
start = modeling_period.start_date
end = modeling_period.end_date
if start is None:
if end is None:
filtered_df = trace.data.copy()
else:
filtered_df = trace.data[:end].copy()
else:
if end is None:
filtered_df = trace.data[start:].copy()
else:
filtered_df = trace.data[start:end].copy()
# require NaN last data point as cap
if filtered_df.shape[0] > 0:
filtered_df.value.iloc[-1] = np.nan
filtered_df.estimated.iloc[-1] = False
return filtered_df
|
arunhotra/tensorflow
|
tensorflow/python/framework/random_seed.py
|
Python
|
apache-2.0
| 4,427
| 0.002259
|
"""For seeding individual ops based on a graph-level seed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
_DEFAULT_GRAPH_SEED = 87654321
def get_seed(op_seed):
"""Returns the local seeds an operation should use given an op-specific seed.
Given operation-specific seed, `op_seed`, this helper function returns two
seeds derived from graph-level and op-level seeds. Many random operations
internally use the two seeds to allow the user to change the seed globally for a
graph, or for only specific operations.
For details on how the graph-level seed interacts with op seeds, see
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed).
Args:
op_seed: integer.
Returns:
A tuple of two integers that should be used for the local seed of this
operation.
"""
graph_seed = ops.get_default_graph().seed
if graph_seed is not None:
if op_seed is not None:
return graph_seed, op_seed
else:
return graph_seed, ops.get_default_graph()._last_id
else:
if op_seed is not None:
return _DEFAULT_GRAPH_SEED, op_seed
else:
return None, None
def set_random_seed(seed):
"""Sets the graph-level random seed.
Operations that rely on a random seed actually derive it from two seeds:
the graph-level and operation-level seeds. This sets the graph-level seed.
Its interactions with operation-level seeds is as follows:
1. If neither the graph-level nor the operation seed is set:
A random seed is used for this op.
2. If the graph-level seed is set, but the operation seed is not:
The system deterministically picks an operation seed in conjunction
with the graph-level seed so that it gets a unique random sequence.
3. If the graph-level seed is not set, but the operation seed is set:
A default graph-level seed and the specified operation seed are used to
determine the random sequence.
4. If both the graph-level and the operation seed are set:
Both seeds are used in conjunction to determine the random sequence.
To illustrate the user-visible effects, consider these examples:
To generate different sequences across sessions, set neither
graph-level nor op-level seeds:
```python
a = tf.random_uniform([1])
b = tf.random_normal([1])
print "Session 1"
with tf.Session() as sess1:
print sess1.run(a) # generates 'A1'
print sess1.run(a) # generates 'A2'
print sess1.run(b) # generates 'B1'
print sess1.run(b) # generates 'B2'
print "Session 2"
with tf.Session() as sess2:
print sess2.run(a) # generates 'A3'
print sess2.run(a) # generates 'A4'
print sess2.run(b) # generates 'B3'
print sess2.run(b) # generates 'B4'
```
To generate the same repeatable sequence for an op across sessions, set the
seed for the op:
```python
a = tf.random_uniform([1], seed=1)
b = tf.random_normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequence of values for 'a', but different sequences of values for 'b'.
print "Session 1"
with tf.Session() as sess1:
print sess1.run(a) # generates 'A1'
print sess1.run(a) # generates 'A2'
print sess1.run(b) # generates 'B1'
print sess1.run(b) # generates 'B2'
print "Session 2"
with tf.Session() as sess2:
print sess2.run(a) # generates 'A1'
print sess2.run(a) # generates 'A2'
print sess2.run(b) # generates 'B3'
print sess2.run(b) # generates 'B4'
```
To make the random sequences generated by all ops be repeatable across
sessions, set a graph-level seed:
```python
tf.set_random_seed(1234)
a = tf.random_uniform([1])
b = tf.random_normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequences of 'a' and 'b'.
print "Session 1"
with tf.Session() as sess1:
|
print sess1.run(a) # generates 'A1'
print sess1.run(a) # generates 'A2'
print sess1.run(b) # generates 'B1'
print sess1.run(b) # generates 'B2'
print "Session 2"
with tf.Session() as sess2:
print sess2.run(a) # generates 'A1'
print sess2.run(a) # generates 'A2'
print sess2.run(b) # generates 'B1'
print sess2.run(b) # generates 'B2'
```
|
Args:
seed: integer.
"""
ops.get_default_graph().seed = seed
|
Unallocated/UAS_IRC_Bot
|
modules/address.py
|
Python
|
gpl-3.0
| 195
| 0.010256
|
def address(self, data):
|
self.irc.send(self.privmsg("512 Sha
|
w Court #105, Severn, MD 21144"))
|
rackerlabs/django-DefectDojo
|
dojo/api_v2/prefetch/utils.py
|
Python
|
bsd-3-clause
| 1,916
| 0.003132
|
from django.db.models.fields import related
def _is_many_to_many_relation(field):
"""Check if a field specified a many-to-many relationship as defined by django.
This is the case if the field is an instance of the ManyToManyDescriptor as generated
by the django framework
Args:
field (django.db.models.fields): The field to check
Returns:
bool: true if the field is a many-to-many relationship
"""
return isinstance(field, related.ManyToManyDescriptor)
def _is_one_to_one_relation(field):
"""Check if a field specified a one-to-one relationship as defined by django.
This is the case if the field is an instance of the ForwardManyToOne as generated
by the django framework
Args:
field (django.db.models.fields): The field to check
Returns:
bool: true if the field is a one-to-one relationship
"""
return isinstance(field, related.ForwardManyToOneDescriptor)
def _get_prefetchable_fields(serializer):
"""Get the fields that are prefetchable according to the serializer description.
Method mainly used for automatic schema generation.
Args:
serializer (Serializer): [description]
"""
def _is_field_prefetchable(field):
return _is_one_to_one_relation(field) or _is_many_to_many_relation(field)
meta = getattr(seria
|
lizer, "Meta", None)
if meta is None:
return []
model = getattr(meta, "model", None)
if model is None:
return []
fields = []
for field_name in dir(model):
field = getattr(model, field_name)
if _is_field_prefetchable(field):
# ManyToMany relationship can be reverse
if hasattr(field, 'reverse') and field.
|
reverse:
fields.append((field_name, field.field.model))
else:
fields.append((field_name, field.field.related_model))
return fields
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/jedi/evaluate/__init__.py
|
Python
|
bsd-2-clause
| 26,873
| 0.001191
|
"""
Evaluation of Python code in |jedi| is based on three assumptions:
* The code uses as few side effects as possible. Jedi understands certain
list/tuple/set modifications, but there's no guarantee that Jedi detects
everything (list.append in different modules for example).
* No magic is being used:
- metaclasses
- ``setattr()`` / ``__import__()``
- writing to ``globals()``, ``locals()``, ``object.__dict__``
* The programmer is not a total dick, e.g. like `this
<https://github.com/davidhalter/jedi/issues/24>`_ :-)
The actual algorithm is based on a principle called lazy evaluation. If you
don't know about it, google it. That said, the typical entry point for static
analysis is calling ``eval_statement``. There's separate logic for
autocompletion in the API, the evaluator is all about evaluating an expression.
Now you need to understand what follows after ``eval_statement``. Let's
make an example::
import datetime
datetime.date.toda# <-- cursor here
First of all, this module doesn't care about completion. It really just cares
about ``datetime.date``. At the end of the procedure ``eval_statement`` will
return the ``date`` class.
To *visualize* this (simplified):
- ``Evaluator.eval_statement`` doesn't do much, because there's no assignment.
- ``Evaluator.eval_element`` cares for resolving the dotted path
- ``Evaluator.find_types`` searches for global definitions of datetime, which
it finds in the definition of an import, by scanning the syntax tree.
- Using the import logic, the datetime module is found.
- Now ``find_types`` is called again by ``eval_element`` to find ``date``
inside the datetime module.
Now what would happen if we wanted ``datetime.date.foo.bar``? Two more
calls to ``find_types``. However the second call would be ignored, because the
first one would return nothing (there's no foo attribute in ``date``).
What if the import would contain another ``ExprStmt`` like this::
from foo import bar
Date = bar.baz
Well... You get it. Just another ``eval_statement`` recursion. It's really
easy. Python can obviously get way more complicated than this. To understand
tuple assignments, list comprehensions and everything else, a lot more code had
to be written.
Jedi has been tested very well, so you can just start modifying code. It's best
to write your own test first for your "new" feature. Don't be scared of
breaking stuff. As long as the tests pass, you're most likely to be fine.
I need to mention now that lazy evaluation is really good because it
only *evaluates* what needs to be *evaluated*. All the statements and modules
that are not used are just being ignored.
"""
import copy
import sys
from jedi.parser.python import tree
from jedi import debug
from jedi.common import unite
from jedi.evaluate import representation as er
from jedi.evaluate import imports
from jedi.evaluate import recursion
from jedi.evaluate import iterable
from jedi.evaluate.cache import memoize_default
from jedi.evaluate import stdlib
from jedi.evaluate import finder
from jedi.evaluate import compiled
from jedi.evaluate import precedence
from jedi.evaluate import param
from jedi.evaluate import helpers
from jedi.evaluate import pep0484
from jedi.evaluate.filters import TreeNameDefinition, ParamName
from jedi.evaluate.instance import AnonymousInstance, BoundMethod
from jedi.evaluate.context import ContextualizedName, ContextualizedNode
class Evaluator(object):
def __init__(self, grammar, sys_path=None):
self.grammar = grammar
self.memoize_cache = {} # for memoize decorators
# To memorize modules -> equals `sys.modules`.
self.modules = {} # like `sys.modules`.
self.compiled_cache = {} # see `evaluate.compiled.create()`
self.mixed_cache = {} # see `evaluate.compiled.mixed.create()`
self.analysis = []
self.dynamic_params_depth = 0
self.is_analysis = False
self.python_version = sys.version_info[:2]
if sys_path is None:
sys_path = sys.path
self.sys_path = copy.copy(sys_path)
try:
self.sys_path.remove('')
except ValueError:
pass
self.reset_recursion_limitations()
# Constants
self.BUILTINS = compiled.get_special_object(self, 'BUILTINS')
def reset_recursion_limitations(self):
self.recursion_detector = recursion.RecursionDetector()
self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self)
def find_types(self, context, name_or_str, name_context, position=None,
search_global=False, is_goto=False):
"""
This is the search function. The most important part to debug.
`remove_statements` and `filter_statements` really are the core part of
this completion.
:param position: Position of the last statement -> tuple of line, column
:return: List of Names. Their parents are the types.
"""
f = finder.NameFinder(self, context, name_context, name_or_str, position)
filters = f.get_filters(search_global)
if is_goto:
return f.filter_name(filters)
return f.find(filters, attribute_lookup=not search_global)
def eval_statement(self, context, stmt, seek_name=None):
with recursion.execution_allowed(self, stmt) as allowed:
if allowed or context.get_root_context() == self.BUILTINS:
return self._eval_stmt(context, stmt, seek_name)
return set()
#@memoize_default(default=[], evaluator_is_first_arg=True)
@debug.increase_indent
def _eval_stmt(self, context, stmt, seek_name=None):
"""
The starting point of the completion. A statement always owns a call
list, which are the calls, that a statement does. In case multiple
names are defined in the statement, `seek_name` returns the result for
this name.
:param stmt: A `tree.ExprStmt`.
"""
debug.dbg('eval_statement %s (%s)', stmt, seek_name)
rhs = stmt.get_rhs()
types = self.eval_element(context, rhs)
if seek_name:
c_node = ContextualizedName(context, seek_name)
types = finder.check_tuple_assignments(self, c_node, types)
first_operation = stmt.first_operation()
if first_operation not in ('=', None) and first_operation.type == 'operator':
# `=` is always the last character in aug assignments -> -1
operator = copy.copy(first_operation)
operator.value = operator.value[:-1]
name = str(stmt.get_defined_names()[0])
left = context.py__getattribute__(
name, position=stmt.start_pos, search_global=True)
for_stmt = tree.search_ancestor(stmt, 'for_stmt')
if for_stmt is not None and for_stmt.type == 'for_stmt' and types \
and for_stmt.defines_one_name():
# Iterate through result and add the values, that's possible
# only in for loops without clutter, because they are
# predictable. Also only do it, if the variable is not a tuple.
node = for_stmt.get_input_node()
cn = ContextualizedNode(context, node)
ordered = list(iterable.py__iter__(self, cn.infer(), cn))
for lazy_context in ordered:
dct = {str(for_stmt.children[1]): lazy_context.infer()}
with helpers.predefine_names(context, for_stmt, dct):
t = self.eval_element(context, rhs)
|
left = precedence.calculate(self, context, left, operator, t)
types = left
else:
|
types = precedence.calculate(self, context, left, operator, types)
debug.dbg('eval_statement result %s', types)
return types
def eval_element(self, context, element):
if isinstance(context, iterable.CompForContext):
return self._eval_element_not_cached(context, element)
if_stmt = element
while if_stmt is not None:
if_stmt = if_stmt.parent
|
satriaphd/bgc-learn
|
bgc-learn.py
|
Python
|
gpl-3.0
| 18,044
| 0.004434
|
import os
import sys
import shutil
import straight.plugin
import numpy as np
import pkg_resources
from os import path
from core import utils
from core import argparser
from core import log
from core import parser
def main():
## Parse arguments
ap = argparser.init_arg_parser()
options = ap.parse_args()
## Collect input gbks from folder
input_files = []
if not path.isdir(options["input_folder"]):
log.error("Specified folder didn't exist '%s'" % (options["input_folder"]))
sys.exit(1)
else:
for filename in os.listdir(options["input_folder"]):
filepath = path.join(options["input_folder"], filename)
if not path.isdir(filepath):
ext = path.splitext(filepath)[1][1:]
if ext in ["gbk"]:
input_files.append(filename)
## Initial check parameters
metadata = {}
if options["mode"] == "train":
## check and load metadata file
if not path.exists(options["training_metadata"]):
log.error("Specified file didn't exist '%s'" % (options["training_metadata"]))
sys.exit(1)
else:
metadata = parser.parse_training_metadata(options["training_metadata"])
options["single_values"] = [[]] * len(input_files)
options["train_set"] = []
options["test_set"] = []
# remove GBKs not listed in metadata
input_files[:] = [bgc for bgc in input_files if utils.get_bgc_name(bgc) in metadata["bgc"]]
# features
if "features" not in options:
if "features" not in metadata:
options["features"] = [{"name": plugin.name, "params": [], "subs": [sub for sub in plugin.features]} for plugin in utils.load_plugins("feature_extraction")]
else:
options["features"] = metadata["features"]
# algorithm mode (classification / regression)
if metadata["mode"] == "CLASSIFICATION":
options["algo_mode"] = "classification"
if "algorithm" not in options:
if "algorithm" not in metadata:
options["algorithm"] = {"name": "svm", "params": []}
else:
options["algorithm"] = metadata["algorithm"]
elif metadata["mode"] == "REGRESSION":
options["algo_mode"] = "regression"
if "algorithm" not in options:
if "algorithm" not in metadata:
options["algorithm"] = {"name": "linear_regression", "params": []}
else:
options["algorithm"] = metadata["algorithm"]
else:
log.error("Incorrect metadata file format '%s'" % (options["training_metadata"]))
sys.exit(1)
# single values (from right hand side of data column) & train/test set distribution
for i, fp in enumerate(input_files):
bgc_id = utils.get_bgc_name(fp)
if bgc_id in metadata["bgc"]:
idx_meta = metadata["bgc"].index(bgc_id)
options["single_values"][i] = metadata["single_values"][idx_meta]
if idx_meta in metadata["train_set"]:
options["train_set"].append(i)
if idx_meta in metadata["test_set"]:
options["test_set"].append(i)
else:
log.error("'%s' is not included in your metadata" % (bgc_id))
sys.exit(1)
# pair values for training set (from its own table from the metadata)
options["train_pair_values"] = [[None] * len(options["train_set"]) for _ in range(len(options["train_set"]))]
for i, idx1 in enumerate(options["train_set"]):
for j, idx2 in enumerate(options["train_set"]):
if len(metadata["train_pair_values"]) > i and len(metadata["train_pair_values"][i]) > j:
options["train_pair_values"][i][j] = metadata["train_pair_values"][i][j]
# pair values for test set (from its own table from the metadata)
options["test_pair_values"] = [[None] * len(options["test_set"]) for _ in range(len(options["test_set"]))]
for i, idx1 in enumerate(options["test_set"]):
for j, idx2 in enumerate(options["test_set"]):
if len(metadata["test_pair_values"]) > i and len(metadata["test_pair_values"][i]) > j:
options["test_pair_values"][i][j] = metadata["test_pair_values"][i][j]
if options["mode"] == "predict":
## check and load model file
print "..."
## further checks..
algo_type = utils.get_algo_type(options["algorithm"]["name"])
if algo_type not in ["classification", "regression"]:
log.error("Selected algorithm '%s' did not exist" % (algo["name"]))
sys.exit(1)
if options["algo_mode"] != algo_type:
log.error("Selected algorithm '%s' is for %s, but the provided data is for %s." % (options["algorithm"]["name"], algo_type, options["algo_mode"]))
sys.exit(1)
options["features_scope"] = ""
for idx, feature in enumerate(options["features"]):
for plugin in utils.load_plugins("feature_extraction"):
|
if plugin.name == feature["name"]:
if len(options["features_scope"]) > 0 and plugin.scope != options["features_scope"]:
log.error("You selected fe
|
atures of different scope ('%s:%s', '%s:%s'). Please select only combination of features with the same scope." % (feature["name"], plugin.scope, options["features"][idx - 1]["name"], options["features_scope"]))
sys.exit(1)
options["features_scope"] = plugin.scope
break
if len(feature["subs"]) < 1:
for plugin in utils.load_plugins("feature_extraction"):
if plugin.name == feature["name"]:
feature["subs"].extend(plugin.features)
break
for sub in feature["subs"]:
for plugin in utils.load_plugins("feature_extraction"):
if plugin.name == feature["name"]:
if sub not in plugin.features:
log.error("Feature unknown: '%s'" % sub)
sys.exit(1)
## Check output folder
if not options["output_folder"]:
options["output_folder"] = path.join(os.getcwd(), path.basename(options["input_folder"]))
if path.isdir(options["output_folder"]):
# output folder exist, probable disrupted job
if not options["continue"] and not options["overwrite"]:
log.error("Output folder '%s' exist. Previous run? use --continue to continue, or --overwrite to start over." % options["output_folder"])
sys.exit(1)
elif options["overwrite"]:
shutil.rmtree(options["output_folder"])
os.makedirs(options["output_folder"])
elif options["reset_preprocesses"]:
bgcjsonpath = path.join(options["output_folder"], "bgcjson")
if path.exists(bgcjsonpath):
shutil.rmtree(bgcjsonpath)
else:
os.makedirs(options["output_folder"])
## Parse gbks
## TODO: multi-threading?
log.info("Started preprocessing input files..")
utils.print_progress(0, len(input_files), prefix='Preprocessing input GBKs..', suffix='', decimals=1)
for i, filename in enumerate(input_files):
filepath = path.join(options["input_folder"], filename)
if not (path.exists(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(filepath)))):
bgc = parser.parse_gbk(filepath)
if bgc is not None:
utils.save_bgcjson(bgc, options["output_folder"])
utils.print_progress(i + 1, len(input_files), prefix='Preprocessing input GBKs..', suffix='', decimals=1, bar_length=100)
log.info("Finished preprocessing input files..")
## Do feature extraction
# step 1: make folder structu
|
pupeng/hone
|
Controller/hone_lib.py
|
Python
|
bsd-3-clause
| 9,045
| 0.015478
|
# Copyright (c) 2011-2013 Peng Sun. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYRIGHT file.
# hone_lib.py
# provide library for mgmt program to create dataflow
import inspect
from cStringIO import StringIO
import hone_rts
from hone_util import LogUtil
from hone_message import *
globalFlowId = 0
''' class for data flow '''
def getNextFlowId():
global globalFlowId
globalFlowId += 1
return globalFlowId
class HoneDataFlow:
def __init__(self, q, operator):
self.flow = []
self.subFlows = [] #list of class HoneDataFlow items. Merged flows
self.flowId = getNextFlowId()
if (q != None):
self.flow.append(q)
if (operator != None):
self.flow.append(operator)
#debugLog('lib', 'new HoneDataFlow', self.flow)
def __rshift__(self, other):
#debugLog('lib', 'In rshift of HoneDataFlow', 'self', self.flow, 'other', \
# other.flow)
self.flow = self.flow + other.flow
return self
def addSubFlow(self, x):
self.subFlows.append(x)
def printDataFlow(self):
buf = StringIO()
print >>buf, 'flow id: ',self.flowId
if (isinstance(self.flow[0], HoneQuerySerialized)):
print >>buf, 'Select:',self.flow[0].se
print >>buf, 'From:',self.flow[0].ft
print >>buf, 'Where:',self.flow[0].wh
print >>buf, 'Groupby:',self.flow[0].gp
print >>buf, 'Every:',self.flow[0].ev
print >>buf, 'Aggregate:',self.flow[0].agg
print >>buf, self.flow[1:]
else:
print >>buf, self.flow
print >>buf, '\n'
ret = buf.getvalue()
buf.close()
for subFlow in self.subFlows:
|
ret += subFlow.printDataFlow()
return ret
def getFlowCriterion(self):
return self.flow[0].wh
''' query part '''
class HoneQuery:
def __init__(self,var,ft,wh,gp,every,agg,compose):
self.complete = False
self.var = var
self.ft = ft
self.wh = wh
self.gp = gp
self.every = every
self.agg = agg
self.compose = compose
def __
|
rshift__(self, other):
HoneQuerySyntaxCheck(self)
#debugLog('lib', 'new HoneQuery instance created', self.printQuery())
return self.convertToHoneDataFlow() >> other
def __mul__(self, other):
otherName = other.__class__.__name__
if otherName=='HoneQuery':
return other.compose(self)
else:
raise Exception('HoneQuery cannot compose with %s' % otherName)
def printQuery(self):
ret = StringIO()
print >>ret, 'HoneQuery Select:',self.var
print >>ret, 'HoneQuery From:',self.ft
print >>ret, 'HoneQuery Where:',self.wh
print >>ret, 'HoneQuery Groupby:',self.gp
print >>ret, 'HoneQuery Every:',self.every
print >>ret, 'HoneQuery Aggregate:',self.agg
return ret.getvalue()
def convertToHoneDataFlow(self):
query = HoneQuerySerialized()
query.se = self.var
query.ft = self.ft
query.wh = self.wh
query.gp = self.gp
query.ev = self.every
query.agg = self.agg
return HoneDataFlow(query, None)
def Select(x):
def compose(q):
if q.var == None:
q.var = []
q.var = q.var+x
return q
agg = None
for i in range(0,len(x)):
if (type(x[i]) == type(tuple())):
if (agg == None):
agg = []
agg.append(x[i])
x[i] = x[i][0]
return HoneQuery(x,None,None,None,1000,agg,compose)
def From(ft):
def compose(q):
q.ft = ft
return q
return HoneQuery(None,ft,None,None,None,None,compose)
def Where(wh):
def compose(q):
if q.wh == None:
q.wh = []
q.wh = q.wh + wh
return q
return HoneQuery(None,None,wh,None,None,None,compose)
def Groupby(gp):
def compose(q):
if q.gp == None:
q.gp = []
q.gp = q.gp + gp
return q
return HoneQuery(None,None,None,gp,None,None,compose)
def Every(every):
def compose(q):
q.every = every
return q
return HoneQuery(None,None,None,None,every,None,compose)
def HoneQuerySyntaxCheck(q):
#debugLog('lib', 'syntax check of query', q.printQuery())
varOnlySupportEqualInWhere = ['app', 'srcIP', 'dstIP', 'srcPort', 'dstPort']
if q.var is None:
raise Exception('HoneQuery must at least have a Select')
if q.ft is None:
raise Exception('HoneQuery must have a From table')
if not hone_rts.HoneTableTypes.has_key(q.ft):
raise Exception('HoneQuery: No such From Table {}'.format(q.ft))
varName = []
for typ in q.var:
varName.append(typ)
if not (q.wh is None):
for (typ, op, value) in q.wh:
if not typ in varName:
raise Exception('HoneQuery: Where of not-Selected columns')
if (typ in varOnlySupportEqualInWhere) and (not (op == '==')):
raise Exception('Var {} only support == in Where clause'.format(typ))
if not (q.gp is None):
for typ in q.gp:
if not typ in varName:
raise Exception('HoneQuery: Groupby of not-Selected columns')
for typ in varName:
if not (typ in hone_rts.HoneTableTypes[q.ft]):
raise Exception('HoneQuery No type {} in Table {}'.format(typ, q.ft))
if q.agg is not None:
for (typ, op) in q.agg:
if not op in ['max', 'min', 'sum', 'avg']:
raise Exception('Only max, min, sum, avg are supported in Select {}'.format(typ))
if (q.ft == 'AppStatus'):
if 'app' not in varName:
#debugLog('lib', 'syntax check', q.printQuery())
raise Exception('Must Select \'app\' in AppStatus table')
''' operator part '''
def MapStreamSet(f):
if (isinstance(f,HoneDataFlow)):
return HoneDataFlow(None,['MapStreamSet'] + f.flow[0])
else:
return HoneDataFlow(None,['MapStreamSet', f.__name__])
def MapStream(f):
if (isinstance(f,HoneDataFlow)):
return HoneDataFlow(None,['MapStream'] + f.flow[0])
else:
return HoneDataFlow(None,['MapStream', f.__name__])
def MapList(f):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['MapList'] + f.flow[0])
else:
return HoneDataFlow(None,['MapList', f.__name__])
def FilterStreamSet(f):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['FilterStreamSet'] + f.flow[0])
else:
return HoneDataFlow(None,['FilterStreamSet', f.__name__])
def FilterStream(f):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['FilterStream'] + f.flow[0])
else:
return HoneDataFlow(None,['FilterStream', f.__name__])
def FilterList(f):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['FilterList'] + f.flow[0])
else:
return HoneDataFlow(None,['FilterList', f.__name__])
def ReduceStreamSet(f, init):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['ReduceStreamSet', init] + f.flow[0])
else:
return HoneDataFlow(None,['ReduceStreamSet', init, f.__name__])
def ReduceStream(f, init):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['ReduceStream', init] + f.flow[0])
else:
return HoneDataFlow(None,['ReduceStream', init, f.__name__])
def ReduceList(f, init):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['ReduceList', init] + f.flow[0])
else:
return HoneDataFlow(None,['ReduceList', init, f.__name__])
def MergeHosts():
return HoneDataFlow(None,['MergeHosts'])
def MergeStreams(stream1, stream2):
if isinstance(stream1, HoneQuery):
stream1 = stream1.convertToHoneDataFlow()
if isinstance(stream2, HoneQuery):
stream2 = stream2.convertToHoneDataFlow()
operator = ['MergeStreams']
stream1.addSubFlow(stream2)
operator.append(stream2.flowId)
stream1
|
tommy-u/enable
|
kiva/agg/tests/clip_to_rect_test_case.py
|
Python
|
bsd-3-clause
| 12,045
| 0.003653
|
""" Needed Tests
clip_to_rect() tests
--------------------
DONE *. clip_to_rect is inclusive on lower end and exclusive on upper end.
DONE *. clip_to_rect behaves intelligently under scaled ctm.
DONE *. clip_to_rect intersects input rect with the existing clipping rect.
DONE *. current rectangular clipping path is saved/restored to the stack when
save_state/restore_state are called.
DONE *. clip_to_rect clears current path.
DONE *. clip_to_rect raises NotImplementedError under a rotated ctm.
clip_to_rects() tests
---------------------
DONE *. Test that clip_to_rects raises not implemented, or whatever.
"""
import unittest
from numpy import array, transpose
import nose
from kiva.agg import GraphicsContextArray
import kiva
from test_utils import Utils
class ClipToRectTestCase(unittest.TestCase, Utils):
#------------------------------------------------------------------------
# Simple Clipping to a single rectangle.
#------------------------------------------------------------------------
def clip_to_rect_helper(self, desired, scale, clip_rects):
""" desired -- 2D array with a single channels expected byte pattern.
scale -- used in scale_ctm() to change the ctm.
clip_args -- passed in as *clip_args to clip_to_rect.
"""
shp = tuple(transpose(desired.shape))
gc = GraphicsContextArray(shp, pix_format="rgb24")
gc.scale_ctm(scale, scale)
# clear background to white values (255, 255, 255)
gc.clear((1.0, 1.0, 1.0))
if isinstance(clip_rects, tuple):
gc.clip_to_rect(*clip_rects)
else:
for rect in clip_rects:
gc.clip_to_rect(*rect)
gc.rect(0, 0, 4, 4)
# These settings allow the fastest path.
gc.set_fill_color((0.0, 0.0, 0.0)) # black
gc.fill_path()
# test a single color channel
actual = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired, actual)
def test_clip_to_rect_simple(self):
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rect = (1, 1, 2, 2)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple2(self):
desired = array([[255, 255, 255, 255],
[255, 255, 255, 255],
[255, 0, 255, 255],
[255, 255, 255, 255]])
clip_rect = (1, 1, 1, 1)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_negative(self):
desired = array([[255, 255, 255, 255],
[ 0, 0, 0, 255],
[ 0, 0, 0, 255],
[ 0, 0, 0, 255]])
clip_rect = (-1, -1, 4, 4)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple3(self):
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rect = (1, 1, 2.49, 2.49)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple4(self):
desired = array([[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 255, 255, 255]])
clip_rect = (1, 1, 2.5, 2.5)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple5(self):
# This tests clipping with a larger rectangle
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rects = [(1, 1, 2, 2), (0, 0, 4, 4)]
self.clip_to_rect_helper(desired, 1, clip_rects)
def test_empty_clip_region(self):
# This tests when the clipping region is clipped down to nothing.
desired = array([[255, 255, 255, 255],
[255, 255, 255, 255],
[255, 255, 255, 255],
[255, 255, 255, 255]])
clip_rects = [(1,1,4,4), (3,3,1,1), (1,1,1,1)]
self.clip_to_rect_helper(desired, 1, clip_rects)
def test_clip_to_rect_scaled(self):
desired = array([[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255]])
clip_rect = (1, 1, 2, 2)
self.clip_to_rect_helper(desired, 2.0, clip_rect)
def test_clip_to_rect_scaled2(self):
desired = array([[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255]])
clip_rect = (1, 1, 2.25, 2.25)
self.clip_to_rect_helper(desired, 2.0, clip_rect)
def test_save_restore_clip_state(self):
desired1 = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
desired2 = array([[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 255, 255, 255]])
gc = GraphicsContextArray((4,4), pix_format="rgb24")
gc.clear((1.0, 1.0, 1.0))
gc.set_fill_color((0.0, 0.0, 0.0))
gc.clip_to_rect(1, 1, 3, 3)
gc.save_state()
gc.clip_to_rect(1, 1, 2, 2)
gc.rect(0, 0, 4, 4)
gc.fill_path()
actual1 = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired1, actual1)
gc.restore_state()
gc.rect(0, 0, 4, 4)
gc.fill_path()
actual2 = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired2, actual2)
def test_clip_to_rect_rotated(self):
# FIXME: test skipped
# This test raises an exception currently because the
# underlying library doesn't handle clipping to a rotated
# rectangle. For now, we catch the the case with an
# exception, so that people can't screw up. In the future,
# we should actually support this functionality.
raise nose.SkipTest
gc = GraphicsContextArray((1,1), pix_format="rgb24")
gc.rotate_ctm(1.0)
self.failUnlessRaises(NotImplementedError,
gc.clip_to_rect, 0, 0, 1, 1)
    #------------------------------------------------------------------------
# Successive Clipping of multiple rectangles.
#------------------------------------------------------------------------
def successive_clip_helper(self, desired, scale,
clip_rect1, clip_rect2):
""" desired -- 2D array with a single channels expected byte pattern.
scale -- used in scale_ctm() to change the ctm.
clip_rect1 -- 1st clipping path.
clip_rect2 -- 2nd clipping path.
"""
shp = tuple(transpose(desired.shape))
gc = GraphicsContextArray(shp, pix_format="rgb24")
gc.scale_ctm(scale, scale)
        # clear background to white values (255, 255, 255)
|
MungoRae/home-assistant
|
tests/components/test_splunk.py
|
Python
|
apache-2.0
| 3,879
| 0
|
"""The tests for the Splunk component."""
import unittest
from unittest import mock
from homeassistant.setup import setup_component
import homeassistant.components.splunk as splunk
from homeassistant.const import STATE_ON, STATE_OFF, EVENT_STATE_CHANGED
from tests.common import get_test_home_assistant
class TestSplunk(unittest.TestCase):
"""Test the Splunk component."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are start
|
ed."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_setup_config_full(self):
"""Test setup with all data."""
config = {
'splunk': {
'host': 'host',
'port': 123,
'token': 'secret',
'ssl': 'False',
'name': 'hostname',
}
}
self.hass.bus.listen = mock.MagicMock()
self.assertTrue(setup_component(self.hass, splunk.DOMAIN, config))
self.assertTrue(self.hass.bus.listen.called)
self.assertEqual(EVENT_STATE_CHANGED,
self.hass.bus.listen.call_args_list[0][0][0])
def test_setup_config_defaults(self):
"""Test setup with defaults."""
config = {
'splunk': {
'host': 'host',
'token': 'secret',
}
}
self.hass.bus.listen = mock.MagicMock()
self.assertTrue(setup_component(self.hass, splunk.DOMAIN, config))
self.assertTrue(self.hass.bus.listen.called)
self.assertEqual(EVENT_STATE_CHANGED,
self.hass.bus.listen.call_args_list[0][0][0])
def _setup(self, mock_requests):
"""Test the setup."""
self.mock_post = mock_requests.post
self.mock_request_exception = Exception
mock_requests.exceptions.RequestException = self.mock_request_exception
config = {
'splunk': {
'host': 'host',
'token': 'secret',
'port': 8088,
}
}
self.hass.bus.listen = mock.MagicMock()
setup_component(self.hass, splunk.DOMAIN, config)
self.handler_method = self.hass.bus.listen.call_args_list[0][0][1]
@mock.patch.object(splunk, 'requests')
@mock.patch('json.dumps')
def test_event_listener(self, mock_dump, mock_requests):
"""Test event listener."""
mock_dump.side_effect = lambda x: x
self._setup(mock_requests)
valid = {'1': 1,
'1.0': 1.0,
STATE_ON: 1,
STATE_OFF: 0,
'foo': 'foo',
}
for in_, out in valid.items():
state = mock.MagicMock(state=in_,
domain='fake',
object_id='entity',
attributes={})
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'domain': 'fake',
'entity_id': 'entity',
'attributes': {},
'time': '12345',
'value': out,
'host': 'HASS',
}]
payload = {'host': 'http://host:8088/services/collector/event',
'event': body}
self.handler_method(event)
self.assertEqual(self.mock_post.call_count, 1)
self.assertEqual(
self.mock_post.call_args,
mock.call(
payload['host'], data=payload,
headers={'Authorization': 'Splunk secret'},
timeout=10
)
)
self.mock_post.reset_mock()
|
darcyliu/storyboard
|
boto/rds/parametergroup.py
|
Python
|
mit
| 7,126
| 0.002666
|
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
|
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class ParameterGroup(dict):
def __init__(self, connection=None):
dict.__init__(self)
self.connection = connection
self.name = None
self.description = None
self.engine = None
self._current_param = None
def __repr__(self):
return 'ParameterGroup:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'Parameter':
if self._current_param:
self[self._current_param.name] = self._current_param
self._current_param = Parameter(self)
return self._current_param
def endElement(self, name, value, connection):
if name == 'DBParameterGroupName':
self.name = value
elif name == 'Description':
self.description = value
elif name == 'Engine':
self.engine = value
else:
setattr(self, name, value)
def modifiable(self):
mod = []
for key in self:
p = self[key]
if p.is_modifiable:
mod.append(p)
return mod
def get_params(self):
pg = self.connection.get_all_dbparameters(self.name)
self.update(pg)
def add_param(self, name, value, apply_method):
param = Parameter()
param.name = name
param.value = value
param.apply_method = apply_method
        self[name] = param
class Parameter(object):
"""
Represents a RDS Parameter
"""
ValidTypes = {'integer' : int,
'string' : str,
'boolean' : bool}
ValidSources = ['user', 'system', 'engine-default']
ValidApplyTypes = ['static', 'dynamic']
ValidApplyMethods = ['immediate', 'pending-reboot']
def __init__(self, group=None, name=None):
self.group = group
self.name = name
self._value = None
self.type = str
self.source = None
self.is_modifiable = True
self.description = None
self.apply_method = None
self.allowed_values = None
def __repr__(self):
return 'Parameter:%s' % self.name
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'ParameterName':
self.name = value
elif name == 'ParameterValue':
self._value = value
elif name == 'DataType':
if value in self.ValidTypes:
self.type = value
elif name == 'Source':
if value in self.ValidSources:
self.source = value
elif name == 'IsModifiable':
if value.lower() == 'true':
self.is_modifiable = True
else:
self.is_modifiable = False
elif name == 'Description':
self.description = value
elif name == 'ApplyType':
if value in self.ValidApplyTypes:
self.apply_type = value
elif name == 'AllowedValues':
self.allowed_values = value
else:
setattr(self, name, value)
def merge(self, d, i):
prefix = 'Parameters.member.%d.' % i
if self.name:
d[prefix+'ParameterName'] = self.name
if self._value is not None:
d[prefix+'ParameterValue'] = self._value
        if self.apply_method:
d[prefix+'ApplyMethod'] = self.apply_method
def _set_string_value(self, value):
        if not isinstance(value, (str, unicode)):
raise ValueError, 'value must be of type str'
if self.allowed_values:
choices = self.allowed_values.split(',')
if value not in choices:
raise ValueError, 'value must be in %s' % self.allowed_values
self._value = value
def _set_integer_value(self, value):
if isinstance(value, str) or isinstance(value, unicode):
value = int(value)
if isinstance(value, int) or isinstance(value, long):
if self.allowed_values:
min, max = self.allowed_values.split('-')
if value < int(min) or value > int(max):
raise ValueError, 'range is %s' % self.allowed_values
self._value = value
else:
raise ValueError, 'value must be integer'
def _set_boolean_value(self, value):
if isinstance(value, bool):
self._value = value
elif isinstance(value, str) or isinstance(value, unicode):
if value.lower() == 'true':
self._value = True
else:
self._value = False
else:
raise ValueError, 'value must be boolean'
def set_value(self, value):
if self.type == 'string':
self._set_string_value(value)
elif self.type == 'integer':
self._set_integer_value(value)
elif self.type == 'boolean':
self._set_boolean_value(value)
else:
raise TypeError, 'unknown type (%s)' % self.type
def get_value(self):
if self._value == None:
return self._value
if self.type == 'string':
return self._value
elif self.type == 'integer':
if not isinstance(self._value, int) and not isinstance(self._value, long):
self._set_integer_value(self._value)
return self._value
elif self.type == 'boolean':
if not isinstance(self._value, bool):
self._set_boolean_value(self._value)
return self._value
else:
raise TypeError, 'unknown type (%s)' % self.type
value = property(get_value, set_value, 'The value of the parameter')
def apply(self, immediate=False):
if immediate:
self.apply_method = 'immediate'
else:
self.apply_method = 'pending-reboot'
self.group.connection.modify_parameter_group(self.group.name, [self])
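# Illustrative sketch (not part of the original boto module): shows how the
# Parameter value coercion defined above behaves. The _demo_parameter_coercion
# name is made up for this example.
def _demo_parameter_coercion():
    p = Parameter(name='max_connections')
    p.type = 'integer'
    p.allowed_values = '1-1000'
    p.value = '250'            # string input is cast to int by _set_integer_value
    assert p.value == 250
    b = Parameter(name='autocommit')
    b.type = 'boolean'
    b.value = 'TRUE'           # case-insensitive strings map to a bool
    assert b.value is True
    return p, b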
|
lewissbaker/cake
|
src/cake/task.py
|
Python
|
mit
| 15,540
| 0.013964
|
"""Task Utilities.
@see: Cake Build System (http://sourceforge.net/projects/cake-build)
@copyright: Copyright (c) 2010 Lewis Baker, Stuart McMahon.
@license: Licensed under the MIT license.
"""
import sys
import threading
_threadPool = None
_threadPoolLock = threading.Lock()
def setThreadPool(threadPool):
"""Set the default thread pool to use for executing new tasks.
@param threadPool: The new default thread pool.
@return: The previous default thread pool. This is intially None.
"""
global _threadPool, _threadPoolLock
_threadPoolLock.acquire()
try:
oldThreadPool = _threadPool
_threadPool = threadPool
finally:
_threadPoolLock.release()
return oldThreadPool
def getDefaultThreadPool():
"""Get the current default thread pool for new tasks.
If no default thread pool exists then one will be created automatically.
"""
global _threadPool, _threadPoolLock
if _threadPool is None:
import cake.threadpool
processorCount = cake.threadpool.getProcessorCount()
_threadPoolLock.acquire()
try:
if _threadPool is None:
_threadPool = cake.threadpool.ThreadPool(numWorkers=processorCount)
finally:
_threadPoolLock.release()
return _threadPool
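# Illustrative sketch (not part of the original cake module): one way a caller
# might install a custom pool around a block of work and restore the previous
# default afterwards. The _withThreadPool name is made up for this example.
def _withThreadPool(customPool, func):
  previous = setThreadPool(customPool)
  try:
    return func()
  finally:
    setThreadPool(previous)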
class TaskError(Exception):
"""An exception type raised by the L{Task} class.
"""
pass
def _makeTasks(value):
if value is None:
return []
elif isinstance(value, Task):
return [value]
else:
return list(value)
class Task(object):
"""An operation that is performed on a background thread.
"""
class State(object):
"""A class that represents the state of a L{Task}.
"""
NEW = "new"
"""The task is in an uninitialised state."""
WAITING_FOR_START = "waiting for start"
"""The task is waiting to be started."""
RUNNING = "running"
"""The task is running."""
WAITING_FOR_COMPLETE = "waiting for complete"
"""The task is waiting to complete."""
SUCCEEDED = "succeeded"
"""The task has succeeded."""
FAILED = "failed"
"""The task has failed."""
_current = threading.local()
def __init__(self, func=None):
"""Construct a task given a function.
@param func: The function this task should run.
@type func: any callable
"""
self._func = func
self._immediate = None
self._threadPool = None
self._required = False
self._parent = Task.getCurrent()
self._state = Task.State.NEW
self._lock = threading.Lock()
self._startAfterCount = 0
self._startAfterFailures = False
self._startAfterDependencies = None
self._completeAfterCount = 0
self._completeAfterFailures = False
self._completeAfterDependencies = None
self._callbacks = []
@staticmethod
def getCurrent():
"""Get the currently executing task.
@return: The currently executing Task or None if no current task.
@rtype: Task or None
"""
return getattr(Task._current, "value", None)
@property
def state(self):
"""Get the state of this task.
"""
return self._state
@property
def parent(self):
"""Get the parent of this task.
The parent task is the task that created this task.
"""
return self._parent
@property
def required(self):
"""True if this task is required to execute, False if it
has not yet been required to execute.
"""
return self._required
@property
def started(self):
"""True if this task has been started.
A task is started if start(), startAfter(), lazyStart(),
lazyStartAfter() or cancel() has been called on it.
"""
return self._state is not Task.State.NEW
@property
def completed(self):
"""True if this task has finished execution or has been cancelled.
"""
s = self._state
return s is Task.State.SUCCEEDED or s is Task.State.FAILED
@property
def succeeded(self):
"""True if this task successfully finished execution.
"""
return self._state is Task.State.SUCCEEDED
@property
def failed(self):
"""True if this task failed or was cancelled.
"""
return self._state is Task.State.FAILED
@property
def result(self):
"""If the task has completed successfully then holds the
return value of the task, otherwise raises AttributeError.
"""
if self.succeeded:
task = self
while isinstance(task._result, Task):
task = task._result
return task._result
else:
raise AttributeError("result only available on successful tasks")
def lazyStart(self, threadPool=None):
"""Start this task only if required as a dependency of another 'required' task.
A 'required' task is a task that is started eagerly using L{start()} or L{startAfter()}
or a task that is a dependency of a 'required' task.
If no other required tasks have this task as a dependency then this task will never
be executed. i.e. it is a lazy task.
"""
self._start(other=None, immediate=False, required=False, threadPool=threadPool)
def lazyStartAfter(self, other, threadPool=None):
"""Start this task only if required as a dependency of another 'required' task.
But do not start this task until the 'other' tasks have completed.
If any of the other tasks complete with failure then this task will complete
with failure without being executed.
"""
    self._start(other=other, immediate=False, required=False, threadPool=threadPool)
def start(self, immediate=False, threadPool=None):
"""Start this task now.
@param immediate: If True the task is pushed ahead of any other (waiting)
tasks on the task queue.
@type immediate: bool
@param threadPool: If specified then the task will be queued up to be
                       executed on the specified thread-pool. If not specified then the task
will be queued for execution on the default thread-pool.
@type threadPool: L{ThreadPool} or C{None}
@raise TaskError: If this task has already been started or
cancelled.
"""
self._start(other=None, immediate=immediate, required=True, threadPool=threadPool)
def startAfter(self, other, immediate=False, threadPool=None):
"""Start this task after other tasks have completed.
This task is cancelled (transition to Task.State.FAILED state) if any of the
other tasks fail.
@param other: The task or a list of tasks to start after.
@type other: L{Task} or C{list}(L{Task})
@param immediate: If True the task is pushed ahead of any other (waiting)
tasks on the task queue.
@type immediate: bool
@param threadPool: An optional thread pool to start this task on.
If not specified then the task is queued to the default thread-pool.
@type threadPool: L{ThreadPool} or None
@raise TaskError: If this task has already been started or
cancelled.
"""
self._start(other=other, immediate=immediate, required=True, threadPool=threadPool)
def _start(self, other, immediate, required, threadPool):
immediate = bool(immediate)
required = bool(required)
otherTasks = _makeTasks(other)
if threadPool is None:
threadPool = getDefaultThreadPool()
self._lock.acquire()
try:
if self._state is not Task.State.NEW:
raise TaskError("task already started")
self._state = Task.State.WAITING_FOR_START
self._startAfterCount = len(otherTasks) + 1
self._immediate = immediate
self._threadPool = threadPool
if required:
self._required = True
else:
required = self._required
if required:
completeAfterDependencies = self._completeAfterDependencies
self._completeAfterDependencies = None
else:
self._startAfterDependencies = otherTasks
finally:
self._lock.release()
if required:
for t in otherTasks:
t._require()
t.addCallback(lambda t=t: self._startAfterCallback(t))
if completeAfterDependencies:
for t in completeAfterDependencies:
t._require()
t.addCallback(lambda t=t: self._completeAfterCallback(t))
self._startAfterCallback(self)
def _require(self):
|
Antergos/whither
|
whither/toolkits/gtk/web_container.py
|
Python
|
gpl-3.0
| 985
| 0
|
# -*- coding: utf-8 -*-
#
# web_container.py
#
# Copyright © 2016-2017 Antergos
#
# This file is part of whither.
#
# whither is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# whither is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with whither; If not, see <http://www.gnu.org/licenses/>.
|
scripnichenko/nova
|
nova/api/openstack/compute/floating_ip_pools.py
|
Python
|
apache-2.0
| 2,196
| 0
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import network
ALIAS = 'os-floating-ip-pools'
authorize = extensions.os_compute_authorizer(ALIAS)
def _translate_floating_ip_view(pool_name):
return {
'name': pool_name,
}
def _translate_floating_ip_pools_view(pools):
return {
'floating_ip_pools': [_translate_floating_ip_view(pool_name)
for pool_name in pools]
}
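# Illustrative sketch (not part of the original nova module): the response
# shape produced by the view helpers above. The _demo_pools_view name is made
# up for this example.
def _demo_pools_view():
    expected = {'floating_ip_pools': [{'name': 'public'}, {'name': 'internal'}]}
    assert _translate_floating_ip_pools_view(['public', 'internal']) == expected
    return expected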
class FloatingIPPoolsController(wsgi.Controller):
"""The Floating IP Pool API controller for the OpenStack A
|
PI."""
def __init__(self):
self.network_api = network.API(skip_policy_check=True)
super(FloatingIPPoolsController, self).__init__()
@extensions.expected_errors(())
def index(self, req):
"""Return a list of pools."""
context = req.environ['nova.context']
authorize(context)
pools = self.network_api.get_floating_ip_pools(context)
return _translate_floating_ip_pools_view(pools)
class FloatingIpPools(extensions.V21APIExtensionBase):
"""Floating IPs support."""
name = "FloatingIpPools"
alias = ALIAS
version = 1
def get_resources(self):
resource = [extensions.ResourceExtension(ALIAS,
FloatingIPPoolsController())]
return resource
def get_controller_extensions(self):
"""It's an abstract function V21APIExtensionBase and the extension
will not be loaded without it.
"""
return []
|
videntity/django-djmongo
|
djmongo/mongoutils.py
|
Python
|
gpl-2.0
| 17,724
| 0.00079
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
from django.conf import settings
import json
import sys
import csv
from datetime import datetime, date, time
from bson.code import Code
from bson.objectid import ObjectId
from bson.errors import InvalidId
from bson import json_util
from pymongo import MongoClient, DESCENDING
from collections import OrderedDict
def checkObjectId(s):
try:
ObjectId(s)
except InvalidId:
return False
return True
def run_aggregation_pipeline(database_name, collection_name, pipeline):
result = False
    mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url)
db = mc[str(database_name)]
collection = db[str(collection_name)]
# explain = db.command('aggregate', collection, pipeline=pipeline, explain=True)
# print explain
collection.aggregate(pipeline)
# print agg_result
result = True
return result
def to_json(results_dict):
return json.dumps(results_dict, indent=4, default=json_util.default)
def normalize_results(results_dict):
mydt = datetime.now()
myd = date.today()
myt = time(0, 0)
for r in results_dict['results']:
for k, v in r.items():
if isinstance(r[k], type(mydt)) or \
isinstance(r[k], type(myd)) or \
isinstance(r[k], type(myt)):
r[k] = v.__str__()
# print r[k]
return results_dict
def normalize_list(results_list):
mydt = datetime.now()
for r in results_list:
for k, v in r.items():
if isinstance(r[k], type(mydt)):
r[k] = v.__str__()
return results_list
def query_mongo(
database_name,
collection_name,
query={},
include_num_results="0",
skip=0,
sort=None,
limit=getattr(
settings,
'MONGO_LIMIT',
200),
cast_strings_to_integers=False,
return_keys=()):
"""return a response_dict with a list of search results"""
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
# Cast the query to integers
if cast_strings_to_integers:
query = cast_number_strings_to_integers(query)
# print query
if return_keys:
return_dict = {}
for k in return_keys:
return_dict[k] = 1
# print "returndict=",return_dict
mysearchresult = collection.find(
query, return_dict).skip(skip).limit(limit)
else:
mysearchresult = collection.find(query).skip(skip).limit(limit)
if sort:
mysearchresult.sort(sort)
response_dict['code'] = 200
if include_num_results == "1":
            response_dict['num_results'] = int(
                mysearchresult.count(with_limit_and_skip=False))
        if include_num_results == "2":
            response_dict['num_results'] = int(
                mysearchresult.count(with_limit_and_skip=True))
response_dict['type'] = "search-results"
for d in mysearchresult:
d['id'] = d['_id'].__str__()
del d['_id']
l.append(d)
response_dict['results'] = l
except Exception:
print("Error reading from Mongo")
print(str(sys.exc_info()))
response_dict['num_results'] = 0
response_dict['code'] = 500
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict['message'] = str(sys.exc_info())
return response_dict
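# Illustrative sketch (not part of the original djmongo module): a typical call
# to query_mongo(). The database and collection names are made up; without a
# reachable MongoDB the function returns its error dict instead of results.
def _demo_query_mongo():
    response = query_mongo('exampledb', 'examplecollection',
                           query={'status': 'active'},
                           include_num_results="1",
                           skip=0, limit=10,
                           return_keys=('status', 'created'))
    # response carries 'code', 'type' and 'results'; with
    # include_num_results == "1" it also counts matches ignoring skip/limit.
    return response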
def query_mongo_sort_decend(
database_name,
collection_name,
query={},
skip=0,
limit=getattr(
settings,
'MONGO_LIMIT',
200),
return_keys=(),
sortkey=None):
"""return a response_dict with a list of search results in decending
order based on a sort key
"""
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
if return_keys:
return_dict = {}
for k in return_keys:
return_dict[k] = 1
# print "returndict=",return_dict
mysearchresult = collection.find(
query, return_dict).skip(skip).limit(limit).sort(
sortkey, DESCENDING)
else:
mysearchresult = collection.find(query).skip(
skip).limit(limit).sort(sortkey, DESCENDING)
# response_dict['num_results']=int(mysearchresult.count(with_limit_and_skip=False))
response_dict['code'] = 200
response_dict['type'] = "search-results"
for d in mysearchresult:
d['id'] = d['_id'].__str__()
del d['_id']
l.append(d)
response_dict['results'] = l
except Exception:
print("Error reading from Mongo")
print(str(sys.exc_info()))
response_dict['num_results'] = 0
response_dict['code'] = 500
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict['message'] = str(sys.exc_info())
return response_dict
def delete_mongo(database_name, collection_name,
query={}, just_one=False):
"""delete from mongo helper"""
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
mysearchresult = collection.remove(query, just_one)
response_dict['code'] = 200
response_dict['type'] = "remove-confirmation"
except Exception:
# print "Error reading from Mongo"
# print str(sys.exc_info())
response_dict['num_results'] = 0
response_dict['code'] = 500
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict['message'] = str(sys.exc_info())
return response_dict
def write_mongo(document, database_name,
collection_name, update=False):
"""Write a document to the collection. Return a response_dict containing
the written record. Method functions as both insert or update based on update
parameter"""
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
# Cast the query to integers
# if settings.CAST_ININGS_TO_INTEGERS:
# query = cast_number_strings_to_integers(query)
potential_key_found = False
existing_transaction_id = None
existing_mongo_id = None
# enforce non-repudiation constraint on create
# if document.has_key("transaction_id"):
# existing_transaction_id = collection.find_one({'transaction_id':document['transaction_id']})
# if existing_transaction_id:
# potential_key_found = True
if "id" in document:
document["_id"] = ObjectId(document["id"])
del document["id"]
if "_id" in document:
existing_mongo_id = collection.find_one({'_id': document['_id']})
if existing_mongo_id:
|
cliftonmcintosh/openstates
|
openstates/mt/people.py
|
Python
|
gpl-3.0
| 7,587
| 0.001977
|
import re
import csv
from urllib import parse
import lxml.html
from pupa.scrape import Person, Scraper
class NoDetails(Exception):
pass
SESSION_NUMBERS = {
'2011': '62nd',
'2013': '63rd',
'2015': '64th',
'2017': '65th',
}
class MTPersonScraper(Scraper):
def url_xpath(self, url):
# Montana's legislator page was returning valid content with 500
# code as of 1/9/2013. Previous discussions with them after similar
# incidents in the past suggest some external part of their stack
# is having some issue and the error is bubbling up to the ret code.
self.raise_errors = False
html = self.get(url).text
doc = lxml.html.fromstring(html)
self.raise_errors = True
return doc
def scrape(self, chamber=None, session=None):
if not session:
session = max(SESSION_NUMBERS.keys())
session_number = SESSION_NUMBERS[session]
chambers = [chamber] if chamber else ['upper', 'lower']
for chamber in chambers:
url = 'http://leg.mt.gov/content/sessions/{}/{}{}Members.txt'.format(
session_number, session, 'Senate' if chamber == 'upper' else 'House'
)
yield from self.scrape_legislators(url, chamber=chamber)
def scrape_legislators(self, url, chamber):
data = self.get(url).text
data = data.replace('"""', '"') # weird triple quotes
data = data.splitlines()
fieldnames = ['last_name', 'first_name', 'party', 'district',
'address', 'city', 'state', 'zip']
csv_parser = csv.DictReader(data, fieldnames)
district_leg_urls = self._district_legislator_dict()
# Toss the row headers.
next(csv_parser)
for entry in csv_parser:
if not entry:
continue
# District.
district = entry['district']
hd_or_sd, district = district.split()
# Party.
party_letter = entry['party']
party = {'D': 'Democratic', 'R': 'Republican'}[party_letter]
# Get full name properly capped.
fullname = '%s %s' % (entry['first_name'].capitalize(),
entry['last_name'].capitalize())
# Get any info at the legislator's detail_url.
detail_url = district_leg_urls[hd_or_sd][district]
# Get the office.
address = '\n'.join([
entry['address'],
'%s, %s %s' % (entry['city'].title(), entry['state'], entry['zip'])
])
try:
deets = self._scrape_details(detail_url)
except NoDetails:
self.logger.warning("No details found at %r" % detail_url)
continue
legislator = Person(name=fullname, primary_org=chamber, district=district,
party=party, image=entry.get('photo_url', ''))
legislator.add_source(detail_url)
legislator.add_source(url)
legislator.add_link(detail_url)
legislator.add_contact_detail(type='address', value=address, note='District Office')
phone = deets.get('phone')
fax = deets.get('fax')
email = deets.get('email')
|
if phone:
legislator.add_contact_detail(type='voice', value=phone, note='District Office')
if fax:
                legislator.add_contact_detail(type='fax', value=fax, note='District Office')
if email:
legislator.add_contact_detail(type='email', value=email, note='District Office')
yield legislator
def _district_legislator_dict(self):
'''Create a mapping of districts to the legislator who represents
each district in each house.
Used to get properly capitalized names in the legislator scraper.
'''
res = {'HD': {}, 'SD': {}}
url = 'http://leg.mt.gov/css/find%20a%20legislator.asp'
# Get base url.
parts = parse.urlparse(url)
        parts = parts._replace(path='')
baseurl = parts.geturl()
# Go the find-a-legislator page.
doc = self.url_xpath(url)
doc.make_links_absolute(baseurl)
# Get the link to the current member roster.
url = doc.xpath('//a[contains(@href, "roster.asp")]/@href')[0]
# Fetch it.
self.raise_errors = False
html = self.get(url).text
doc = lxml.html.fromstring(html)
self.raise_errors = True
# Get the new baseurl, like 'http://leg.mt.gov/css/Sessions/62nd/'
parts = parse.urlparse(url)
path, _, _ = parts.path.rpartition('/')
        parts = parts._replace(path=path)
baseurl = parts.geturl()
doc.make_links_absolute(baseurl)
table = doc.xpath('//table[@name="Legislators"]')[0]
for tr in table.xpath('tr'):
td1, td2 = tr.xpath('td')
# Skip header rows and retired legislators
if not td2.text_content().strip() or 'Resigned' in tr.text_content():
continue
# Get link to the member's page.
detail_url = td1.xpath('h4/a/@href')[0]
# Get the members district so we can match the
# profile page with its csv record.
house, district = td2.text_content().split()
res[house][district] = detail_url
return res
def _scrape_details(self, url):
'''Scrape the member's bio page.
Things available but not currently scraped are office address,
and waaay too much contact info, including personal email, phone.
'''
doc = self.url_xpath(url)
# Get base url.
parts = parse.urlparse(url)
        parts = parts._replace(path='')
baseurl = parts.geturl()
doc.make_links_absolute(baseurl)
xpath = '//img[contains(@src, "legislator")]/@src'
try:
photo_url = doc.xpath(xpath).pop()
except IndexError:
raise NoDetails('No details found at %r' % url)
details = {'photo_url': photo_url}
# # Parse address.
elements = list(doc.xpath('//b[contains(., "Address")]/..')[0])
# # MT's website currently has a typo that places the "address"
# # heading inline with the "Information Office" phone number.
        # # This hack temporarily makes things work.
elements = elements[3:]
chunks = []
for br in elements:
chunks.extend(filter(None, [br.text, br.tail]))
# As far as I can tell, MT legislators don't have capital offices.
for line in chunks[2:]:
if not line.strip():
continue
for key in ('ph', 'fax'):
if key in line.lower():
key = {'ph': 'phone'}.get(key)
break
            number = re.search(r'\(\d{3}\) \d{3}-\d{4}', line)
if number:
number = number.group()
if key:
# Used to set this on the office.
details[key] = number
try:
email = doc.xpath('//b[contains(., "Email")]/..')[0]
except IndexError:
pass
else:
if email:
html = lxml.html.tostring(email.getparent()).decode()
match = re.search(r'[a-zA-Z0-9\.\_\%\+\-]+@\w+\.[a-z]+', html)
if match:
details['email'] = match.group()
return details
|
purrcat259/peek
|
tests/unit/test_line.py
|
Python
|
mit
| 1,585
| 0.001262
|
import copy
import pytest
from peek.line import InvalidIpAddressException, Line, InvalidStatusException
# 127.0.0.1 - - [01/Jan/1970:00:00:01 +0000] "GET / HTTP/1.1" 200 193 "-" "Python"
test_line_contents = {
'ip_address': '127.0.0.1',
'timestamp': '[01/Jan/1970:00:00:01 +0000]',
'verb': 'GET',
'path': '/',
'status': '200',
'size': '193',
'referrer': '-',
'user_agent': 'Python'
}
def get_updated_line_contents(updates=None):
test_contents = copy.deepcopy(test_line_contents)
if updates is not None:
test_contents.update(updates)
return test_contents
test_line = Line(line_contents=test_line_contents)
class TestLineInstantiation:
@pytest.mark.parametrize('expected,actual', [
('127.0.0.1', test_line.ip_address),
(1, test_line.timestamp),
('GET', test_line.verb),
('/', test_line.path),
(200, test_line.status),
|
(193, test_line.byte_count),
('-', test_line.referrer),
('Python', test_line.user_agent)
])
    def test_retrieval(self, expected, actual):
assert expected == actual
class TestLineExceptions:
def test_passing_invalid_ip_address_throws_exception(self):
with pytest.raises(InvalidIpAddressException):
line = Line(line_contents=get_updated_line_contents({'ip_address': 'foobar'}))
def test_passing_non_parseable_status_throws_exception(self):
with pytest.raises(InvalidStatusException):
Line(line_contents=get_updated_line_contents({'status': 'foobar'}))
|
liyigerry/caixiang
|
mysite/views/weibo/oauthreturn.py
|
Python
|
mit
| 941
| 0.026567
|
from flask import request, render_template
from flask.ext.login import current_user, login_user
from mysite.weibo import Client
from mysite import app, db
from mysite.models import Wuser, User
from . import weibo
@weibo.route('/oauthreturn')
def oauthreturn():
code = request.args.get('code', '')
if code:
client = Client(app.config['API_KEY'], app.config['API_SECRET'], app.config['REDIRECT_URI'])
client.set_code(code)
uid = client.token['uid']
profile = client.get('users/show', access_token=client.access_token, uid=uid)
wuser = Wuser.query.filter_by(uid=uid).first()
if wuser:
login_user(wuser.user)
else:
user = User()
wuser = Wuser(uid=uid)
wuser.user = user
db.session.add(user)
|
login_user(user)
wuser.update_access_token(client.token['access_token'])
wuser.update_profile(profile)
db.session.add(wuser)
db.session.commit()
    return render_template("weibo/profile.html", wuser=wuser)
|
nikesh-mahalka/cinder
|
cinder/api/contrib/services.py
|
Python
|
apache-2.0
| 7,673
| 0
|
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import timeutils
import webob.exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('volume', 'services')
class ServicesIndexTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('services')
elem = xmlutil.SubTemplateElement(root, 'service', selector='services')
elem.set('binary')
elem.set('host')
elem.set('zone')
elem.set('status')
elem.set('state')
elem.set('update_at')
elem.set('disabled_reason')
return xmlutil.MasterTemplate(root, 1)
class ServicesUpdateTemplate(xmlutil.TemplateBuilder):
def construct(self):
# TODO(uni): template elements of 'host', 'service' and 'disabled'
# should be deprecated to make ServicesUpdateTemplate consistent
# with ServicesIndexTemplate. Still keeping it here for API
# compatibility sake.
root = xmlutil.TemplateElement('host')
root.set('host')
root.set('service')
root.set('disabled')
root.set('binary')
root.set('status')
root.set('disabled_reason')
return xmlutil.MasterTemplate(root, 1)
class ServiceController(wsgi.Controller):
def __init__(self, ext_mgr=None):
self.ext_mgr = ext_mgr
super(ServiceController, self).__init__()
@wsgi.serializers(xml=ServicesIndexTemplate)
def index(self, req):
"""Return a list of all running services.
Filter by host & service name.
"""
context = req.environ['cinder.context']
authorize(context, action='index')
detailed = self.ext_mgr.is_loaded('os-extended-services')
now = timeutils.utcnow(with_timezone=True)
services = objects.ServiceList.get_all(context)
host = ''
if 'host' in req.GET:
host = req.GET['host']
service = ''
if 'service' in req.GET:
service = req.GET['service']
versionutils.report_deprecated_feature(LOG, _(
"Query by service parameter is deprecated. "
"Please use binary parameter instead."))
binary = ''
if 'binary' in req.GET:
binary = req.GET['binary']
if host:
services = [s for s in services if s.host == host]
# NOTE(uni): deprecating service request key, binary takes precedence
binary_key = binary or service
if binary_key:
services = [s for s in services if s.binary == binary_key]
svcs = []
for svc in services:
updated_at = svc.updated_at
delta = now - (svc.updated_at or svc.created_at)
delta_sec = delta.total_seconds()
if svc.modified_at:
delta_mod = now - svc.modified_at
if abs(delta_sec) >= abs(delta_mod.total_seconds()):
updated_at = svc.modified_at
alive = abs(delta_sec) <= CONF.service_down_time
art = (alive and "up") or "down"
active = 'enabled'
if svc.disabled:
active = 'disabled'
ret_fields = {'binary': svc.binary, 'host': svc.host,
'zone': svc.availability_zone,
'status': active, 'state': art,
'updated_at': timeutils.normalize_time(updated_at)}
if detailed:
ret_fields['disabled_reason'] = svc.disabled_reason
svcs.append(ret_fields)
return {'services': svcs}
def _is_valid_as_reason(self, reason):
if not reason:
            return False
try:
utils.check_string_length(reason.strip(), 'Disabled reason',
min_length=1, max_length=255)
|
except exception.InvalidInput:
return False
return True
@wsgi.serializers(xml=ServicesUpdateTemplate)
def update(self, req, id, body):
"""Enable/Disable scheduling for a service."""
context = req.environ['cinder.context']
authorize(context, action='update')
ext_loaded = self.ext_mgr.is_loaded('os-extended-services')
ret_val = {}
if id == "enable":
disabled = False
status = "enabled"
if ext_loaded:
ret_val['disabled_reason'] = None
elif (id == "disable" or
(id == "disable-log-reason" and ext_loaded)):
disabled = True
status = "disabled"
else:
raise webob.exc.HTTPNotFound(explanation=_("Unknown action"))
try:
host = body['host']
except (TypeError, KeyError):
msg = _("Missing required element 'host' in request body.")
raise webob.exc.HTTPBadRequest(explanation=msg)
ret_val['disabled'] = disabled
if id == "disable-log-reason" and ext_loaded:
reason = body.get('disabled_reason')
if not self._is_valid_as_reason(reason):
msg = _('Disabled reason contains invalid characters '
'or is too long')
raise webob.exc.HTTPBadRequest(explanation=msg)
ret_val['disabled_reason'] = reason
# NOTE(uni): deprecating service request key, binary takes precedence
# Still keeping service key here for API compatibility sake.
service = body.get('service', '')
binary = body.get('binary', '')
binary_key = binary or service
if not binary_key:
raise webob.exc.HTTPBadRequest()
try:
svc = objects.Service.get_by_args(context, host, binary_key)
if not svc:
raise webob.exc.HTTPNotFound(explanation=_('Unknown service'))
svc.disabled = ret_val['disabled']
if 'disabled_reason' in ret_val:
svc.disabled_reason = ret_val['disabled_reason']
svc.save()
except exception.ServiceNotFound:
raise webob.exc.HTTPNotFound(explanation=_("service not found"))
ret_val.update({'host': host, 'service': service,
'binary': binary, 'status': status})
return ret_val
class Services(extensions.ExtensionDescriptor):
"""Services support."""
name = "Services"
alias = "os-services"
namespace = "http://docs.openstack.org/volume/ext/services/api/v2"
updated = "2012-10-28T00:00:00-00:00"
def get_resources(self):
resources = []
controller = ServiceController(self.ext_mgr)
resource = extensions.ResourceExtension('os-services', controller)
resources.append(resource)
return resources
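# Illustrative sketch (not part of the original cinder module): mirrors the
# up/down decision made in ServiceController.index() above. The helper name and
# the 60-second threshold are examples, not cinder configuration.
def _demo_service_state(now, last_heartbeat, service_down_time=60):
    delta_sec = (now - last_heartbeat).total_seconds()
    return "up" if abs(delta_sec) <= service_down_time else "down"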
|
eProsima/Fast-DDS
|
test/communication/liveliness_assertion.py
|
Python
|
apache-2.0
| 2,185
| 0.003661
|
# Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, subprocess, glob
script_dir = os.path.dirname(os.path.realpath(__file__))
publisher_command = os.environ.get("SIMPLE_COMMUNICATION_PUBLISHER_BIN")
if not publisher_command:
publisher_files = glob.glob(os.path.join(script_dir, "**/SimpleCommunicationPublisher*"), recursive=True)
publisher_command = next(iter(publisher_files), None)
assert publisher_command
subscriber_command = os.environ.get("SIMPLE_COMMUNICATION_SUBSCRIBER_BIN")
if not subscriber_command:
subscriber_files = glob.glob(os.path.join(script_dir, "**/SimpleCommunicationSubscriber*"), recursive=True)
subscriber_command = next(iter(subscriber_files), None)
assert subscriber_command
xml_file = os.environ.get("XML_FILE")
if xml_file:
real_xml_file = os.path.join(script_dir, xml_file)
else:
real_xml_file = os.path.join(script_dir, "liveliness_assertion.xml")
subscriber_proc = subprocess.Popen([subscriber_command, "--seed", str(os.getpid()), "--notexit",
"--xmlfile", real_xml_file])
publisher_proc = subprocess.Popen([publisher_command, "--seed", str(os.getpid()), "--exit_on_lost_liveliness",
"--xmlfile", real_xml_file], stdout=subprocess.PIPE)
while True:
line = publisher_proc.stdout.readline()
if line.strip().decode('utf-8').startswith('Publisher matched with subscriber '):
print("Subscriber matched.")
break
subscriber_proc.kill()
publisher_proc.communicate()
retvalue = publisher_proc.returncode
if retvalue != 0:
    print("Test failed: " + str(retvalue))
else:
print("Test successed")
sys.exit(retvalue)
|
Alwnikrotikz/pyicqt
|
src/contact.py
|
Python
|
gpl-2.0
| 9,153
| 0.037365
|
# Copyright 2005-2006 Daniel Henninger <jadestorm@nc.rr.com>
# Licensed for distribution under the GPL version 2, check COPYING for details
import utils
from twisted.internet import reactor
from twisted.words.xish.domish import Element
import jabw
import config
from debug import LogEvent, INFO, WARN, ERROR
import lang
import sha
import legacy
import globals
import base64
if not config.disableAvatars:
import Image
import StringIO
class Contact:
""" Represents a Jabber contact """
def __init__(self, jid, sub, contactList):
self.jid = jid
self.contactList = contactList
self.groups = []
self.sub = sub
self.nickname = ""
self.avatar = None
self.show = ""
self.status = ""
self.url = ""
self.ptype = "unavailable"
def removeMe(self):
""" Destroys this object. Does not remove the contact from the server's list. """
self.contactList = None
self.avatar = None
def syncContactGrantedAuth(self):
""" Since last using the transport the user has been granted authorisation by this contact.
Call this to synchronise the user's Jabber list with their legacy list after logon. """
if self.sub == "none":
self.sub = "to"
elif self.sub == "from":
self.sub = "both"
else:
return
self.updateRoster("subscribe")
def syncContactRemovedAuth(self):
""" Since last using the transport the user has been blocked by this contact.
Call this to synchronise the user's Jabber list with their legacy list after logon. """
if self.sub == "to":
self.sub = "none"
elif self.sub == "both":
self.sub = "from"
else:
return
self.updateRoster("unsubscribed")
def syncUserGrantedAuth(self):
""" Since last using the transport the user has granted authorisation to this contact.
Call this to synchronise the user's Jabber list with their legacy list after logon. """
if self.sub == "none":
self.sub = "from"
elif self.sub == "to":
self.sub = "both"
else:
return
self.updateRoster("subscribe")
def syncUserRemovedAuth(self):
""" Since last using the transport the user has removed this contact's authorisation.
Call this to synchronise the user's Jabber list with their legacy list after logon. """
if self.sub == "from":
self.sub = "none"
elif self.sub == "both":
self.sub = "to"
else:
return
self.updateRoster("unsubscribe")
def syncGroups(self, groups, push=True):
""" Set the groups that this contact is in on the legacy service.
By default this pushes the groups out with a presence subscribed packet. """
self.groups = groups
if push: self.updateRoster("subscribed");
def contactGrantsAuth(self):
""" Live roster event """
if self.sub == "none":
self.sub = "to"
elif self.sub == "from":
self.sub = "both"
self.sendSub("subscribed")
self.sendPresence()
def contactRemovesAuth(self):
""" Live roster event """
if self.sub == "to":
self.sub = "none"
elif self.sub == "both":
self.sub = "from"
self.sendSub("unsubscribed")
def contactRequestsAuth(self):
""" Live roster event """
self.sendSub("subscribe")
def contactDerequestsAuth(self):
""" Live roster event """
self.sendSub("unsubscribe")
def jabberSubscriptionReceived(self, subtype):
""" Updates the subscription state internally and pushes the update to the legacy server """
if subtype == "subscribe":
if self.sub == "to" or self.sub == "both":
self.sendSub("subscribed")
self.contactList.legacyList.addContact(self.jid)
elif subtype == "subscribed":
if self.sub == "none":
self.sub = "from"
if self.sub == "to":
self.sub = "both"
self.contactList.legacyList.authContact(self.jid)
elif(subtype == "unsubscribe"):
if self.sub == "none" and self.sub == "from":
self.sendSub("unsubscribed")
if self.sub == "both":
self.sub = "from"
if self.sub == "to":
self.sub = "none"
self.contactList.legacyList.removeContact(self.jid)
elif(subtype == "unsubscribed"):
if self.sub == "both":
self.sub = "to"
if self.sub == "from":
self.sub = "none"
self.contactList.legacyList.deauthContact(self.jid)
def updateNickname(self, nickname, push=True):
try:
decodednickname = unicode(self.nickname, errors='replace')
except:
decodednickname = self.nickname
		if decodednickname != nickname:
self.nickname = nickname
# will re-remove this if it's removed from JEP-0172.
#self.sendNickname()
if push: self.sendPresence()
#n = Element((None, "nick"))
#n.attributes["xmlns"] = globals.NICK
#n.addContent(nickname)
#self.contactList.session.pytrans.pubsub.localPublish(self.jid, globals.NICK, "current", n)
def updatePresence(self, show, status, ptype, force=False, tojid=None, url=None):
updateFlag = (self.show != show or self.status != status or self.ptype != ptype or force)
self.show = show
self.status = status
self.ptype = ptype
self.url = url
if updateFlag:
self.sendPresence(tojid)
def updateAvatar(self, avatar=None, push=True):
if config.disableAvatars: return
if self.avatar == avatar: return
self.avatar = avatar
if push: self.sendPresence()
#if self.avatar and not config.disableAvatars and not config.disablePEPAvatars:
#avatarHash = self.avatar.getImageHash()
#avatarData = self.avatar.getImageData()
#inbuff = StringIO.StringIO(avatarData)
#img = Image.open(inbuff)
#d = Element((None, "data"))
#d.attributes["xmlns"] = globals.AVATARDATA
#d.addContent(base64.encodestring(avatarData).replace("\n",""))
#self.contactList.session.pytrans.pubsub.localPublish(self.jid, globals.AVATARDATA, avatarHash, d)
#m = Element((None, "metadata"))
#m.attributes["xmlns"] = globals.AVATARMETADATA
#mi = m.addElement("info")
#mi.attributes["id"] = avatarHash
#mi.attributes["type"] = "image/png"
#mi.attributes["bytes"] = str(len(avatarData))
#mi.attributes["height"] = str(img.size[0])
#mi.attributes["width"] = str(img.size[1])
#self.contactList.session.pytrans.pubsub.localPublish(self.jid, globals.AVATARMETADATA, avatarHash, m)
def sendSub(self, ptype):
self.contactList.session.sendPresence(to=self.contactList.session.jabberID, fro=self.jid, ptype=ptype)
def sendNickname(self, tojid=None):
if not tojid:
tojid=self.contactList.session.jabberID
if self.nickname:
el = Element((None, "message"))
el.attributes["to"] = tojid
el.attributes["from"] = self.jid
nick = el.addElement("nick")
nick.attributes["xmlns"] = globals.NICK
nick.addContent(self.nickname)
self.contactList.session.pytrans.send(el)
def sendPresence(self, tojid=None):
avatarHash = ""
if self.avatar and not config.disableAvatars:
avatarHash = self.avatar.getImageHash()
caps = Element((None, "c"))
caps.attributes["xmlns"] = globals.CAPS
caps.attributes["node"] = legacy.url + "/protocol/caps"
caps.attributes["ver"] = legacy.version
if not tojid:
tojid=self.contactList.session.jabberID
self.contactList.session.sendPresence(to=tojid, fro=self.jid, ptype=self.ptype, show=self.show, status=self.status, avatarHash=avatarHash, nickname=self.nickname, payload=[caps], url=self.url)
	def updateRoster(self, ptype):
		self.contactList.session.sendRosterImport(jid=self.jid, ptype=ptype, sub=self.sub, groups=self.groups)
def fillvCard(self, vCard, jid):
if self.nickname:
NICKNAME = vCard.addElement("NICKNAME")
NICKNAME.addContent(self.nickname)
if self.avatar and not config.disableAvatars and not config.disableVCardAvatars:
PHOTO = self.avatar.makePhotoElement()
vCard.addChild(PHOTO)
user = jid.split('@')[0]
return self.contactList.session.legacycon.jabberVCardRequest(vCard, user)
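# Illustrative sketch (not part of the original transport): the subscription
# transitions applied by contactGrantsAuth() and contactRemovesAuth() above,
# written out as a plain lookup table for reference. The name is made up for
# this example.
_demo_sub_transitions = {
    ("none", "grant"): "to",
    ("from", "grant"): "both",
    ("to", "remove"): "none",
    ("both", "remove"): "from",
}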
class ContactList:
""" Represents the Jabber contact list """
def __init__(self, session):
LogEvent(INFO, session.jabberID)
self.session = session
self.contacts = {}
def removeMe(self):
""" Cleanly removes the object """
LogEvent(INFO, self.session.jabberID)
for jid in self.contacts:
self.contacts[jid].updatePresence("", "", "unavailable")
self.contacts[jid].removeMe()
self.contacts = {}
self.session = None
self.legacyList = None
def resendLists(self, tojid=None):
for jid in
|
Juniper/tempest
|
tempest/lib/services/compute/servers_client.py
|
Python
|
apache-2.0
| 36,704
| 0
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2017 AT&T Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.lib.api_schema.response.compute.v2_1 import \
security_groups as security_groups_schema
from tempest.lib.api_schema.response.compute.v2_1 import servers as schema
from tempest.lib.api_schema.response.compute.v2_16 import servers as schemav216
from tempest.lib.api_schema.response.compute.v2_19 import servers as schemav219
from tempest.lib.api_schema.response.compute.v2_26 import servers as schemav226
from tempest.lib.api_schema.response.compute.v2_3 import servers as schemav23
from tempest.lib.api_schema.response.compute.v2_47 import servers as schemav247
from tempest.lib.api_schema.response.compute.v2_48 import servers as schemav248
from tempest.lib.api_schema.response.compute.v2_6 import servers as schemav26
from tempest.lib.api_schema.response.compute.v2_9 import servers as schemav29
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
class ServersClient(base_compute_client.BaseComputeClient):
"""Service client for the resource /servers"""
schema_versions_info = [
{'min': None, 'max': '2.2', 'schema': schema},
{'min': '2.3', 'max': '2.5', 'schema': schemav23},
{'min': '2.6', 'max': '2.8', 'schema': schemav26},
{'min': '2.9', 'max': '2.15', 'schema': schemav29},
{'min': '2.16', 'max': '2.18', 'schema': schemav216},
{'min': '2.19', 'max': '2.25', 'schema': schemav219},
{'min': '2.26', 'max': '2.46', 'schema': schemav226},
{'min': '2.47', 'max': '2.47', 'schema': schemav247},
{'min': '2.48', 'max': None, 'schema': schemav248}]
def __init__(self, auth_provider, service, region,
enable_instance_password=True, **kwargs):
super(ServersClient, self).__init__(
auth_provider, service, region, **kwargs)
self.enable_instance_password = enable_instance_password
def create_server(self, **kwargs):
"""Create server.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/compute/#create-server
:param name: Server name
:param imageRef: Image reference (UUID)
:param flavorRef: Flavor reference (UUID or full URL)
Most parameters except the following are passed to the API without
any changes.
:param disk_config: The name is changed to OS-DCF:diskConfig
:param scheduler_hints: The name is changed to os:scheduler_hints and
the parameter is set in the same level as the parameter 'server'.
"""
body = copy.deepcopy(kwargs)
if body.get('disk_config'):
body['OS-DCF:diskConfig'] = body.pop('disk_config')
hints = None
if body.get('scheduler_hints'):
hints = {'os:scheduler_hints': body.pop('scheduler_hints')}
post_body = {'server': body}
if hints:
post_body.update(hints)
post_body = json.dumps(post_body)
resp, body = self.post('servers', post_body)
body = json.loads(body)
# NOTE(maurosr): this deals with the case of multiple server create
# with return reservation id set True
if 'reservation_id' in body:
return rest_client.ResponseBody(resp, body)
if self.enable_instance_password:
create_schema = schema.create_server_with_admin_pass
else:
create_schema = schema.create_server
self.validate_response(create_schema, resp, body)
return rest_client.ResponseBody(resp, body)
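    # Illustrative note (not part of the original tempest client): with
    # create_server(name='vm1', imageRef=image_id, flavorRef=flavor_id,
    #               disk_config='AUTO', scheduler_hints={'group': group_id})
    # the posted body built above becomes
    #   {'server': {'name': 'vm1', 'imageRef': image_id, 'flavorRef': flavor_id,
    #               'OS-DCF:diskConfig': 'AUTO'},
    #    'os:scheduler_hints': {'group': group_id}}
    # where image_id, flavor_id and group_id stand for caller-supplied values.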
def update_server(self, server_id, **kwargs):
"""Update server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#update-server
Most parameters except the following are passed to the API without
any changes.
:param disk_config: The name is changed to OS-DCF:diskConfig
"""
if 'disk_config' in kwargs:
kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
post_body = json.dumps({'server': kwargs})
resp, body = self.put("servers/%s" % server_id, post_body)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.update_server, resp, body)
return rest_client.ResponseBody(resp, body)
def show_server(self, server_id):
"""Get server details.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref-compute-v2.1.html#showServer
"""
resp, body = self.get("servers/%s" % server_id)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.get_server, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_server(self, server_id):
"""Delete server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#delete-server
"""
resp, body = self.delete("servers/%s" % server_id)
self.validate_response(schema.delete_server, resp, body)
return rest_client.ResponseBody(resp, body)
def list_servers(self, detail=False, **params):
|
"""List servers.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#list-servers
https://developer.openstack.org/api-ref/compute/#list-servers-detailed
"""
url =
|
'servers'
schema = self.get_schema(self.schema_versions_info)
_schema = schema.list_servers
if detail:
url += '/detail'
_schema = schema.list_servers_detail
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(_schema, resp, body)
return rest_client.ResponseBody(resp, body)
def list_addresses(self, server_id):
"""Lists all addresses for a server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#list-ips
"""
resp, body = self.get("servers/%s/ips" % server_id)
body = json.loads(body)
self.validate_response(schema.list_addresses, resp, body)
return rest_client.ResponseBody(resp, body)
def list_addresses_by_network(self, server_id, network_id):
"""Lists all addresses of a specific network type for a server."""
resp, body = self.get("servers/%s/ips/%s" %
(server_id, network_id))
body = json.loads(body)
self.validate_response(schema.list_addresses_by_network, resp, body)
return rest_client.ResponseBody(resp, body)
def action(self, server_id, action_name,
schema=schema.server_actions_common_schema,
**kwargs):
post_body = json.dumps({action_name: kwargs})
resp, body = self.post('servers/%s/action' % server_id,
|
Timurdov/bionicprojectpython
|
shadrus/article/migrations/0002_comments_comments_date.py
|
Python
|
apache-2.0
| 548
| 0.001825
|
# -
|
*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('article', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='comments',
name='comments_date',
field=models.DateTimeField(default=datetime.datetime(2015, 2, 17, 12, 54, 47, 78000, tzinfo=utc)),
|
preserve_default=False,
),
]
|
unbit/uwsgi-gif
|
uwsgiplugin.py
|
Python
|
mit
| 28
| 0.071429
|
NAME
|
='gif'
GCC_LIST=['gif
|
']
|
deeso/slow-hitter
|
src/slow/hitter.py
|
Python
|
apache-2.0
| 10,877
| 0.001747
|
from hashlib import sha256
from .etl import ETL
from kombu.mixins import ConsumerMixin
from kombu import Connection
import traceback
import Queue
import json
import time
import pytz
from datetime import datetime
from tzlocal import get_localzone
import socket
import logging
import os
class KnownHosts(object):
HOST_FILE = "/etc/hosts"
def __init__(self, filename=HOST_FILE):
self.filename = filename
try:
os.stat(self.filename)
except:
raise
self.mapping = self.read_hosts_file(filename)
@classmethod
def read_hosts_file(cls, filename):
mapping = {}
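        # Each usable hosts-file line is '<ip> <name> [aliases...]'; build a
        # two-way lookup so the IP resolves to a name and each name to the IP.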
for line in open(filename).readlines():
if line.strip() == '':
continue
elif line.strip().find('#') == 0:
continue
elif len(line.split()) < 2:
continue
l = line.strip()
ip = l.split()[0]
host_names = l.split()[1:]
if len(host_names) == 0:
continue
# FIXME this means the expected mapping[ip] = host
# may not be right
ip_host_mappings = [(ip, h) for h in host_names]
for ip, host in ip_host_mappings:
mapping[host.strip()] = ip.strip()
mapping[ip.strip()] = host.strip()
return mapping
def is_ip(self, ip):
# FIXME track down a regex and use that
d = ip.split('.')
        if len(d) != 4:  # an IPv4 address has four dotted octets
            return False
if not all([i.isdigit() for i in d]):
return False
if not all([int(i, 10) >= 0 for i in d]):
return False
if not all([int(i, 10) <= 255 for i in d]):
return False
return True
def resolve_host(self, ip_host):
if ip_host in self.mapping and \
not self.is_ip(ip_host):
return self.mapping[ip_host]
name = ip_host
try:
            # gethostbyaddr returns (hostname, aliases, addresses);
            # gethostbyname returns only a single string and cannot be unpacked
            name, _, _ = socket.gethostbyaddr(ip_host)
self.mapping[ip_host] = name
self.mapping[name] = ip_host
except:
name = ip_host
self.mapping[ip_host] = name
return name
class HitterService(ConsumerMixin):
NAME = 'processor'
BROKER_URI = "redis://127.0.0.1:6379"
BROKER_QUEUE = "mystified-catcher"
KNOWN_HOSTS = KnownHosts()
LOGSTASH_QUEUE = "logstash-results"
SYSLOG_MSG_TYPE = {
0: "EMERGENCY",
1: "ALERT",
2: "CRITICAL",
3: "ERROR",
4: "WARNING",
5: "NOTICE",
6: "INFORMATIONAL",
7: "DEBUG",
}
MY_TZ = os.environ.get('CATCHER_TZ', 'NOT_SET')
TZ_INFO = pytz.timezone(MY_TZ) if MY_TZ != 'NOT_SET' else None
def __init__(self, broker_uri=BROKER_URI, broker_queue=BROKER_QUEUE,
hosts_file=None, mongo_backend=None,
etl_backend=ETL, msg_limit=100,
# leaving it open to use kombu to buffer messages
store_uri=BROKER_URI,
store_queue=LOGSTASH_QUEUE):
if hosts_file is not None:
self.KNOWN_HOSTS = KnownHosts(filename=hosts_file)
self.broker_uri = broker_uri
self.broker_queue = broker_queue
self.store_uri = store_uri
self.store_queue = store_queue
self.mongo_backend = mongo_backend
self.etl_backend = etl_backend
self.keep_running = False
self.msg_limit = msg_limit
@classmethod
def split_alert_message(cls, data):
t = ''
msg = data
end = data.find('>')
start = data.find('<')
if len(data) < end+1:
return '', msg
if start == 0 and end > 0 and end < 10:
t = data[start+1:end]
if not t.isdigit():
return '', data
else:
msg = data[end+1:]
return t, msg
@classmethod
def calculate_msg_type(cls, data):
t, msg = cls.split_alert_message(data)
if len(t) == 0:
return "UNKNOWN"
v = int(t, 10)
if v > 7:
v &= 0x7
return cls.SYSLOG_MSG_TYPE[v]
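    # Illustrative example (not in the original source): for the raw line
    # '<13>myhost app: something happened', split_alert_message() returns
    # ('13', 'myhost app: something happened'), and calculate_msg_type()
    # maps 13 & 0x7 == 5 to 'NOTICE'.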
@classmethod
    def format_timestamp(cls, tstamp):
        if cls.TZ_INFO is not None:
            local_tz = cls.TZ_INFO.localize(tstamp, is_dst=None)
            utc_tz = local_tz.astimezone(pytz.utc)
            return str(utc_tz.strftime("%Y-%m-%dT%H:%M:%S") +
                       ".%03d" % (tstamp.microsecond / 1000) + "Z")
        return str(tstamp.strftime("%Y-%m-%dT%H:%M:%S") +
                   ".%03d" % (tstamp.microsecond / 1000))
@classmethod
def get_base_json(cls, syslog_msg, syslog_server_ip,
catcher_name, catcher_host, catcher_tz):
r = {'source': "syslog", 'raw': syslog_msg,
'type': 'json',
'_id': sha256(syslog_msg).hexdigest(),
'@timestamp': cls.format_timestamp(datetime.now()),
'@version': "1",
'message': "transformed syslog",
'path': '',
'tags': [],
'catcher_tz': catcher_tz,
'catcher_host': catcher_host,
'catcher_name': catcher_name
}
t, msg = cls.split_alert_message(syslog_msg)
r['syslog_level'] = cls.calculate_msg_type(syslog_msg)
r['syslog_msg'] = msg
r['syslog_tag'] = t
r['syslog_server'] = cls.resolve_host(syslog_server_ip)
r['syslog_server_ip'] = syslog_server_ip
r['syslog_catcher'] = catcher_name
return r
@classmethod
def resolve_host(cls, ip_host):
|
return cls.KNOWN_HOSTS.resolve_host(ip_host)
def process_message(self, syslog_msg,
syslog_server_ip,
catcher_name, catcher_host,
|
catcher_tz):
m = "Extracting and converting msg from %s msg (syslog: %s)" % (syslog_server_ip, catcher_name)
logging.debug(m)
r = self.get_base_json(syslog_msg, syslog_server_ip,
catcher_name, catcher_host, catcher_tz)
sm = {}
try:
result = self.etl_backend.syslog_et(syslog_msg)
sm.update(result.get('rule_results', result))
if 'rule_name' in result:
sm['rule_name'] = result.get('rule_name')
sm['tags'] = []
if sm.get('syslog_level', None) is not None:
sm['tags'].append(sm['syslog_level'])
if sm.get('rule_name', None) is not None:
sm['tags'].append(sm['rule_name'])
except:
tb = traceback.format_exc()
logging.debug("[XXX] Error: "+tb)
r.update(sm)
return r
def extract_message_components(self, msg_dict):
syslog_msg = msg_dict.get('syslog_msg', '')
syslog_server_ip = msg_dict.get('syslog_server_ip', '')
catcher_host = msg_dict.get('catcher_host', '')
catcher_name = msg_dict.get('catcher_name', '')
catcher_tz = msg_dict.get('catcher_tz', str(get_localzone()))
return self.process_message(syslog_msg,
syslog_server_ip,
catcher_name, catcher_host, catcher_tz)
def process_and_report(self, incoming_msg):
logging.debug("Processing and report syslog_msg")
message = incoming_msg
if isinstance(incoming_msg, str):
try:
message = json.loads(incoming_msg)
except:
message = {}
tb = traceback.format_exc()
logging.debug("[XXX] Error: "+tb)
raise
etl_data = self.extract_message_components(message)
syslog_msg = etl_data['raw']
self.store_results(syslog_msg, etl_data)
return etl_data
def _read_messages(self, uri, queue, callback=None, cnt=1):
msgs = []
read_all = False
if cnt < 1:
read_all = True
try:
logging.debug("Reading the messages")
with Connection(uri) as conn:
q = conn.SimpleQueue(queue)
while cnt > 0 or read_all:
c
|
minhphung171093/GreenERP
|
openerp/addons/payment_adyen/models/adyen.py
|
Python
|
gpl-3.0
| 7,511
| 0.003728
|
# -*- coding: utf-8 -*-
import base64
import json
from hashlib import sha1
import hmac
import logging
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_adyen.controllers.main import AdyenController
from openerp.osv import osv, fields
from openerp.tools import float_round
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class AcquirerAdyen(osv.Model):
_inherit = 'payment.acquirer'
def _get_adyen_urls(self, cr, uid, environment, context=None):
""" Adyen URLs
- yhpp: hosted payment page: pay.shtml for single, select.shtml for multiple
"""
return {
'adyen_form_u
|
rl': 'https://%s.adyen.com/hpp/pay.shtml' % ('live' if environment == 'prod' else environment),
}
def _get_providers(self, cr, uid, context=None):
providers = super(AcquirerAdyen, self)._get_providers(cr, uid, context=context)
providers.append(['adyen', 'Adyen'])
return providers
_columns = {
'adyen_merchant_account': fields.char('Merchant Account', required_if_provider='adyen'),
'adyen_skin_code':
|
fields.char('Skin Code', required_if_provider='adyen'),
'adyen_skin_hmac_key': fields.char('Skin HMAC Key', required_if_provider='adyen'),
}
def _adyen_generate_merchant_sig(self, acquirer, inout, values):
""" Generate the shasign for incoming or outgoing communications.
        :param browse acquirer: the payment.acquirer browse record. It should
                                have a skin HMAC key configured.
        :param string inout: 'in' (openerp contacting adyen) or 'out' (adyen
                             contacting openerp). In the latter case only some
                             fields should be contained (see e-Commerce basic)
:param dict values: transaction values
:return string: shasign
"""
assert inout in ('in', 'out')
assert acquirer.provider == 'adyen'
if inout == 'in':
keys = "paymentAmount currencyCode shipBeforeDate merchantReference skinCode merchantAccount sessionValidity shopperEmail shopperReference recurringContract allowedMethods blockedMethods shopperStatement merchantReturnData billingAddressType deliveryAddressType offset".split()
else:
keys = "authResult pspReference merchantReference skinCode merchantReturnData".split()
def get_value(key):
if values.get(key):
return values[key]
return ''
sign = ''.join('%s' % get_value(k) for k in keys).encode('ascii')
key = acquirer.adyen_skin_hmac_key.encode('ascii')
return base64.b64encode(hmac.new(key, sign, sha1).digest())
def adyen_form_generate_values(self, cr, uid, id, values, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
acquirer = self.browse(cr, uid, id, context=context)
# tmp
import datetime
from dateutil import relativedelta
tmp_date = datetime.date.today() + relativedelta.relativedelta(days=1)
values.update({
'merchantReference': values['reference'],
'paymentAmount': '%d' % int(float_round(values['amount'], 2) * 100),
'currencyCode': values['currency'] and values['currency'].name or '',
'shipBeforeDate': tmp_date,
'skinCode': acquirer.adyen_skin_code,
'merchantAccount': acquirer.adyen_merchant_account,
'shopperLocale': values.get('partner_lang'),
'sessionValidity': tmp_date,
'resURL': '%s' % urlparse.urljoin(base_url, AdyenController._return_url),
'merchantReturnData': json.dumps({'return_url': '%s' % values.pop('return_url')}) if values.get('return_url') else False,
'merchantSig': self._adyen_generate_merchant_sig(acquirer, 'in', values),
})
return values
def adyen_get_form_action_url(self, cr, uid, id, context=None):
acquirer = self.browse(cr, uid, id, context=context)
return self._get_adyen_urls(cr, uid, acquirer.environment, context=context)['adyen_form_url']
class TxAdyen(osv.Model):
_inherit = 'payment.transaction'
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _adyen_form_get_tx_from_data(self, cr, uid, data, context=None):
reference, pspReference = data.get('merchantReference'), data.get('pspReference')
if not reference or not pspReference:
error_msg = _('Adyen: received data with missing reference (%s) or missing pspReference (%s)') % (reference, pspReference)
_logger.info(error_msg)
raise ValidationError(error_msg)
# find tx -> @TDENOTE use pspReference ?
tx_ids = self.pool['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
if not tx_ids or len(tx_ids) > 1:
error_msg = _('Adyen: received data for reference %s') % (reference)
if not tx_ids:
error_msg += _('; no order found')
else:
                error_msg += _('; multiple orders found')
_logger.info(error_msg)
raise ValidationError(error_msg)
tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
# verify shasign
shasign_check = self.pool['payment.acquirer']._adyen_generate_merchant_sig(tx.acquirer_id, 'out', data)
if shasign_check != data.get('merchantSig'):
error_msg = _('Adyen: invalid merchantSig, received %s, computed %s') % (data.get('merchantSig'), shasign_check)
_logger.warning(error_msg)
raise ValidationError(error_msg)
return tx
def _adyen_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
invalid_parameters = []
# reference at acquirer: pspReference
if tx.acquirer_reference and data.get('pspReference') != tx.acquirer_reference:
invalid_parameters.append(('pspReference', data.get('pspReference'), tx.acquirer_reference))
# seller
if data.get('skinCode') != tx.acquirer_id.adyen_skin_code:
invalid_parameters.append(('skinCode', data.get('skinCode'), tx.acquirer_id.adyen_skin_code))
# result
if not data.get('authResult'):
invalid_parameters.append(('authResult', data.get('authResult'), 'something'))
return invalid_parameters
def _adyen_form_validate(self, cr, uid, tx, data, context=None):
status = data.get('authResult', 'PENDING')
if status == 'AUTHORISED':
tx.write({
'state': 'done',
'acquirer_reference': data.get('pspReference'),
# 'date_validate': data.get('payment_date', fields.datetime.now()),
# 'paypal_txn_type': data.get('express_checkout')
})
return True
elif status == 'PENDING':
tx.write({
'state': 'pending',
'acquirer_reference': data.get('pspReference'),
})
return True
else:
error = _('Adyen: feedback error')
_logger.info(error)
tx.write({
'state': 'error',
'state_message': error
})
return False
|
AdvancedClimateSystems/Diode
|
diode/exceptions.py
|
Python
|
mpl-2.0
| 830
| 0
|
import json
class JSON_RPCError(Exception):
""" Base class for JSON-RPC errors. """
def to_json(self):
return json.dumps({
|
'code': self.code,
'message': self.__doc__,
})
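    # Illustrative (assumption, not from the source): ParseError().to_json()
    # would produce something like
    # '{"code": -32700, "message": " Invalid JSON was received by the server..."}'.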
class ParseError(JSON_RPCError):
""" Invalid JSON was received by the server
|
. An error occurred on the
server while parsing the JSON text.
"""
code = -32700
class InvalidRequestError(JSON_RPCError):
""" The JSON sent is not a valid Request object. """
code = -32600
class MethodNotFoundError(JSON_RPCError):
""" The method does not exist / is not available. """
code = -32601
class InvalidParamsError(JSON_RPCError):
""" Invalid methods parameter(s). """
code = -32602
class InternalError(JSON_RPCError):
""" Internal JSON-RPC error. """
code = -32603
|
juancferrer/Card-Magic
|
setup.py
|
Python
|
bsd-3-clause
| 463
| 0.008639
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='Card-Magic',
version='1.0',
description='The best card and decks ever',
author='Juan Carlos Ferrer',
author_email='juan
|
.carlos@micronixsolutions.com',
|
packages=['cardmagic', 'cardmagic.tests'],
package_data = {
'cardmagic': [
'translations/en/LC_MESSAGES/*',
'translations/es/LC_MESSAGES/*'],
},
)
|
msnorm/projects
|
zspy2/ex41/ex41.py
|
Python
|
mit
| 2,749
| 0.005457
|
"""
********************************************************************************
Learn Python the Hard Way Third Edition, by
Zed A. Shaw
ISBN: 978-0321884916
********************************************************************************
"""
import random
from urllib import urlopen
impor
|
t sys
#debug = "DEBUG: "
WORD_URL = "http://learncodethehardway.org/words.txt"
WO
|
RDS = []
PHRASES = {
"class %%%(%%%):":
"Make a class named %%% that is-a %%%.",
"class %%%(object):\n\tdef __init__(self, ***)":
"class %%% has-a __init__ that takes self and *** parameters.",
"class %%%(object):\n\tdef ***(self,@@@)":
"class %%% has-a function named *** that takes self and @@@ parameters.",
"*** = %%%()":
"Set *** to an instance of class %%%.",
"***.***(@@@)":
"From *** get the *** function, and call it with parameters self, @@@.",
"***.*** = '***'":
"From *** get the *** attribute and set it to '***'."
}
# do they want to drill phrases first
PHRASE_FIRST = False
if len(sys.argv) == 2 and sys.argv[1] == "english":
PHRASE_FIRST = True
#print debug + "0"
# load up the words from the website
#for word in urlopen(WORD_URL).readlines():
# once downloaded, just open the file locally:
for word in open('words.txt').readlines():
WORDS.append(word.strip())
#print debug + word
def convert(snippet, phrase):
class_names = [w.capitalize() for w in
random.sample(WORDS, snippet.count("%%%"))]
other_names = random.sample(WORDS, snippet.count("***"))
results = []
param_names = []
for i in range(0, snippet.count("@@@")):
param_count = random.randint(1,3)
param_names.append(', '.join(random.sample(WORDS, param_count)))
for sentence in snippet, phrase:
result = sentence[:]
#fake class names
for word in class_names:
result = result.replace("%%%", word, 1)
#fake other names
for word in other_names:
result = result.replace("***", word, 1)
#fake parameter lists
for word in param_names:
result = result.replace("@@@", word, 1)
results.append(result)
return results
# keep going until EOF
try:
while True:
snippets = PHRASES.keys()
#print debug + "3"
random.shuffle(snippets)
for snippet in snippets:
#print debug + "4"
phrase = PHRASES[snippet]
question, answer = convert(snippet, phrase)
if PHRASE_FIRST:
question, answer = answer, question
print question
raw_input("> ")
print "ANSWER: %s\n\n" % answer
except EOFError:
print "\nBye"
|
joke2k/faker
|
faker/providers/company/th_TH/__init__.py
|
Python
|
mit
| 4,173
| 0.000946
|
from collections import OrderedDict
from .. import Provider as CompanyProvider
class Provider(CompanyProvider):
formats = OrderedDict(
(
("{{company_limited_prefix}}{{last_name}} {{company_limited_suffix}}", 0.2),
(
"{{company_limited_prefix}}{{last_name}}{{company_suffix}} {{company_limited_suffix}}",
0.2,
),
("{{company_limited_prefix}}{{last_name}} {{company_limited_suffix}}", 0.2),
("{{company_prefix}}{{last_name}}", 0.2),
("{{company_prefix}}{{last_name}}{{company_suffix}}", 0.2),
("{{last_name}}{{company_suffix}}", 0.1),
("{{nonprofit_prefix}}{{last_name}}", 0.1),
("{{last_name}}-{{last_name}}", 0.05),
("{{last_name}}และ{{last_name}}", 0.05),
("{{company_limited_prefix}}{{last_name}}", 0.01),
)
)
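    # Each entry above pairs a name template with a relative selection weight,
    # e.g. '{{company_prefix}}{{last_name}}' is drawn roughly 20% of the time
    # (illustrative reading of the OrderedDict weights).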
company_prefixes = OrderedDict(
(
("ห้างหุ้นส่วนจำกัด ", 0.3),
("หจก.", 0.2),
("บจก.", 0.1),
("บมจ.", 0.1),
("ห้างหุ้นส่วนสามัญ ", 0.1),
("หสน.", 0.01),
)
)
nonprofit_prefixes = OrderedDict(
(
("สมาคม", 0.4),
("มูลนิธิ", 0.3),
("ชมรม", 0.2),
("สหภาพแรงงาน", 0.1),
)
)
company_suffixes = (
"และเพื่อน",
"และบุตร",
"แอนด์ซันส์",
"กรุ๊ป",
"การช่าง",
"ก่อสร้าง",
"บริการ",
"เซอร์วิส",
"กลการ",
"ซัพพลาย",
"คอมมิวนิเคชั่น",
"พืชผล",
"เอเยนซี",
"เอ็นจิเนียริ่ง",
"คอนสตรัคชั่น",
"วิศวกรรม",
"วิศวการ",
"คอมพิวเตอร์",
"พานิช",
"ขนส่ง",
"เฟอนิชชิ่ง",
"เฟอร์นิเ
|
จอร์",
"อุตสาหกรรม",
"เอนเตอรไพรส์",
"จิวเวลรี่",
"อะไหล่ยนต์",
"ภาพยนตร์",
"ยานยนต์",
"เทรดดิ้ง",
"การค้า",
"แลบ",
"เคมิคอล"
|
,
"อิมปอร์ตเอ็กซปอร์ต",
"อินเตอร์เนชั่นแนล",
"บรรจุภัณฑ์",
"แพคกิ้ง",
"มอเตอร์",
"โอสถ",
"การบัญชี",
"สโตร์",
)
company_limited_prefixes = OrderedDict(
(
("บริษัท ", 0.95),
("ธนาคาร", 0.03),
("บริษัทหลักทรัพย์ ", 0.005),
("กองทุนรวม", 0.005),
)
)
company_limited_suffixes = OrderedDict(
(
("จำกัด", 0.85),
("จำกัด (มหาชน)", 0.15),
)
)
def company_prefix(self) -> str:
"""
:example: 'ห้างหุ้นส่วนจำกัด'
"""
return self.random_element(self.company_prefixes)
def company_limited_prefix(self) -> str:
"""
:example: 'บริษัท'
"""
return self.random_element(self.company_limited_prefixes)
def company_limited_suffix(self) -> str:
"""
:example: 'จำกัด'
"""
return self.random_element(self.company_limited_suffixes)
def nonprofit_prefix(self) -> str:
"""
:example: 'มูลนิธิ'
"""
return self.random_element(self.nonprofit_prefixes)
|
rohitranjan1991/home-assistant
|
homeassistant/components/raspihats/binary_sensor.py
|
Python
|
mit
| 4,436
| 0.001127
|
"""Support for raspihats board binary sensors."""
from __future__ import annotations
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE_CLASS,
CONF_NAME,
DEVICE_DEFAULT_NAME,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import (
CONF_BOARD,
CONF_CHANNELS,
CONF_I2C_HATS,
CONF_INDEX,
CONF_INVERT_LOGIC,
DOMAIN,
I2C_HAT_NAMES,
I2C_HATS_MANAGER,
I2CHatsException,
I2CHatsManager,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_INVERT_LOGIC = False
DEFAULT_DEVICE_CLASS = None
_CHANNELS_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_INDEX): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
vol.Optional(CONF_DEVICE_CLASS, default=DEFAULT_DEVICE_CLASS): cv.string,
}
]
)
_I2C_HATS_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_BOARD): vol.In(I2C_HAT_NAMES),
vol.Required(CONF_ADDRESS): vol.Coerce(int),
vol.Required(CONF_CHANNELS): _CHANNELS_SCHEMA,
}
]
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_I2C_HATS): _I2C_HATS_SCHEMA}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the raspihats binary_sensor devices."""
I2CHatBinarySensor.I2C_HATS_MANAGER = hass.data[DOMAIN][I2C_HATS_MANAGER]
binary_sensors = []
i2c_hat_configs = config.get(CONF_I2C_HATS, [])
for i2c_hat_config in i2c_hat_configs:
address = i2c_hat_config[CONF_ADDRESS]
board = i2c_hat_config[CONF_BOARD]
try:
assert I2CHatBinarySensor.I2C_HATS_MANAGER
I2CHatBinarySensor.I2C_HATS_MANAGER.register_board(board, address)
for channel_config in i2c_hat_config[CONF_CHANNELS]:
binary_sensors.append(
I2CHatBinarySensor(
address,
channel_config[CONF_INDEX],
channel_config[CONF_NAME],
channel_config[CONF_INVERT_LOGIC],
channel_config[CONF_DEVICE_CLASS],
)
)
except I2CHatsException as ex:
_LOGGER.error(
"Failed to register %s I2CHat@%s %s", board, hex(address), str(ex)
)
add_entities(binary_sensors)
class I2CHatBinarySensor(BinarySensorEntity):
"""Representation of a binary sensor
|
    that uses an I2C-HAT digital input."""
I2C_HATS_MANAGER: I2CHatsManager | None = None
def __init__(self, address, channel, name, invert_logic, device_class):
"""Initialize the raspihats sensor."""
self._address = address
self._channel = channel
self._name
|
= name or DEVICE_DEFAULT_NAME
self._invert_logic = invert_logic
self._device_class = device_class
self._state = self.I2C_HATS_MANAGER.read_di(self._address, self._channel)
def online_callback():
"""Call fired when board is online."""
self.schedule_update_ha_state()
self.I2C_HATS_MANAGER.register_online_callback(
self._address, self._channel, online_callback
)
def edge_callback(state):
"""Read digital input state."""
self._state = state
self.schedule_update_ha_state()
self.I2C_HATS_MANAGER.register_di_callback(
self._address, self._channel, edge_callback
)
@property
def device_class(self):
"""Return the class of this sensor."""
return self._device_class
@property
def name(self):
"""Return the name of this sensor."""
return self._name
@property
def should_poll(self):
"""No polling needed for this sensor."""
return False
@property
def is_on(self):
"""Return the state of this sensor."""
return self._state != self._invert_logic
|
jresendiz27/EvolutionaryComputing
|
escom/pepo/utils.py
|
Python
|
apache-2.0
| 469
| 0.002132
|
__author__ = 'alberto'
import time
from functools import wraps
from config import logger
def measure_time(func):
|
"""
Decorator that reports the execution time.
"""
@wraps(func)
def wrapper(*args, **kwargs):
log
|
ger.info("Running %s", func.__name__)
start = time.time()
result = func(*args, **kwargs)
end = time.time()
logger.info("Execution time: %s", end - start)
return result
return wrapper
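# Hypothetical usage (not part of this module):
#   @measure_time
#   def evolve(population):
#       ...
# logs "Running evolve" before each call and the elapsed seconds afterwards.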
|
PanDAWMS/panda-server
|
pandaserver/taskbuffer/TaskBufferInterface.py
|
Python
|
apache-2.0
| 5,146
| 0.008356
|
import os
import sys
import time
import pickle
import threading
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
# required to reserve changed attributes
from pandaserver.taskbuffer import JobSpec
from pandaserver.taskbuffer import FileSpec
JobSpec.reserveChangedState = True
FileSpec.reserveChangedState = True
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper
_logger = PandaLogger().getLogger('TaskBufferInterface')
# method class
class TaskBufferMethod:
def __init__(self,methodName,commDict,childlock,comLock,resLock):
self.methodName = methodName
self.childlock = childlock
self.commDict = commDict
self.comLock = comLock
self.resLock = resLock
def __call__(self,*args,**kwargs):
log = LogWrapper(_logger, 'pid={} thr={} {}'.format(os.getpid(),
threading.current_thread().ident,
self.methodName))
log.debug('start')
# get lock among children
i = self.childlock.get()
# make dict to send it master
self.commDict[i].update({'methodName': self.methodName,
'args': pickle.dumps(args),
'kwargs': pickle.dumps(kwargs)})
# send notification to master
self.comLock[i].release()
# wait response
self.resLock[i].acquire()
res = pickle.loads(self.commDict[i]['res'])
statusCode = self.commDict[i]['stat']
# release lock to children
self.childlock.put(i)
log.debug('end')
# return
if statusCode == 0:
return res
else:
errtype,errvalue = res
raise RuntimeError("{0}: {1} {2}".format(self.methodName,errtype.__name__,errvalue))
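# Flow of a proxied call (summary added for clarity): the child takes a slot
# index from childlock, writes the pickled method name/args into its commDict
# entry, releases comLock[i] so the master executes it, then waits on
# resLock[i] and unpickles the result (or re-raises it as a RuntimeError).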
# child class
class TaskBufferInterfaceChild:
# constructor
def __init__(self,commDict,childlock,comLock,resLock):
self.childlock = childlock
self.commDict = commDict
self.comLock = comLock
self.resLock = resLock
# method emulation
def __getattr__(self,attrName):
return TaskBufferMethod(attrName,self.commDict,self.childlock,
self.comLock,self.resLock)
# master class
class TaskBufferInterface:
# constructor
def __init__(self):
# make manager to create shared objects
self.manager = multiprocessing.Manager()
# main loop
def run(self, taskBuffer, commDict, comLock, resLock, to_stop):
with ThreadPoolExecutor(max_workers=taskBuffer.get_num_connections()) as pool:
[pool.submit(self.thread_run, taskBuffer, commDict[i], comLock[i], resLock[i], to_stop) for i in commDict.keys()]
# main loop
def thread_run(self, taskBuffer, commDict, comLock, resLock, to_stop):
# main loop
while True:
# stop sign
if to_stop.value:
break
# wait for command
if not comLock.acquire(timeout=0.25):
continue
try:
# get command from child
methodName = commDict['methodName']
args = pickle.loads(commDict['args'])
kwargs = pickle.loads(commDict['kwargs'])
# execute
method = getattr(taskBuffer,methodName)
res = method(*args, **kwargs)
commDict['stat'] = 0
# set response
commDict['res'] = pickle.dumps(res)
except Exception:
res = sys.exc_info()[:2]
commDict['stat'] = 1
commDict['res'] = pickle.dumps(res)
# send response
resLock.release()
# launcher
def launch(self, taskBuffer):
# shared objects
self.childlock = multiprocessing.Queue()
self.commDict = dict()
self.comLock = dict()
self.resLock = dict()
self.to_stop = multiprocessing.Value('i', 0)
for i in range(taskBuffer.get_num_connections()):
self.childlock.put(i)
self.commDict[i] = self.manager.dict()
self.comLock[i] = multiprocessing.Semaphore(0)
self.resLock[i] = multip
|
rocessing.Semaphore(0)
# run
self.process = multiprocessing.Process(target=self.run,
args=(taskBuffer,
self.commDict, self.comLock,
self.resLock, self.to_stop))
self.process.start()
# get interface for child
def getInterface(self):
return TaskBufferInter
|
faceChild(self.commDict, self.childlock, self.comLock, self.resLock)
# stop the loop
def stop(self):
with self.to_stop.get_lock():
self.to_stop.value = 1
while self.process.is_alive():
time.sleep(1)
# kill
def terminate(self):
self.process.terminate()
|
Maximilian-Reuter/SickRage-1
|
sickrage/providers/torrent/TorrentProvider.py
|
Python
|
gpl-3.0
| 5,136
| 0.001363
|
# coding=utf-8
# This file is part of SickRage.
#
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from datetime import datetime
from feedparser.util import FeedParserDict
from hachoir_parser import createParser
import sickbeard
from sickbeard import logger
from sickbeard.classes import Proper, TorrentSearchResult
from sickbeard.common import Quality
from sickbeard.db import DBConnection
from sickrage.helper.common import try_int
from sickrage.helper.exceptions import ex
from sickrage.providers.GenericProvider import GenericProvider
from sickrage.show.Show import Show
class TorrentProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.ratio = None
self.provider_type = GenericProvider.TORRENT
def find_propers(self, search_date=None):
results = []
db = DBConnection()
placeholder = ','.join([str(x) for x in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST])
sql_results = db.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate'
' FROM tv_episodes AS e'
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)'
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND e.status IN (' + placeholder + ') and e.is_proper = 0'
)
|
for result in sql_results o
|
r []:
show = Show.find(sickbeard.showList, int(result[b'showid']))
if show:
episode = show.getEpisode(result[b'season'], result[b'episode'])
for term in self.proper_strings:
search_strings = self._get_episode_search_strings(episode, add_string=term)
for item in self.search(search_strings[0]):
title, url = self._get_title_and_url(item)
results.append(Proper(title, url, datetime.today(), show))
return results
def is_active(self):
return bool(sickbeard.USE_TORRENTS) and self.is_enabled()
@property
def _custom_trackers(self):
if not (sickbeard.TRACKERS_LIST and self.public):
return ''
return '&tr=' + '&tr='.join({x.strip() for x in sickbeard.TRACKERS_LIST.split(',') if x.strip()})
def _get_result(self, episodes):
return TorrentSearchResult(episodes)
def _get_size(self, item):
if isinstance(item, dict):
size = item.get('size', -1)
elif isinstance(item, (list, tuple)) and len(item) > 2:
size = item[2]
else:
size = -1
# Make sure we didn't select seeds/leechers by accident
if not size or size < 1024 * 1024:
size = -1
return try_int(size, -1)
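    # Illustrative: an item tuple like ('Show.S01E01.720p', 'magnet:?xt=...', 734003200, 12, 3)
    # yields size 734003200 bytes; anything under 1 MiB is assumed to be a
    # parsing mistake (seeds/leechers) and reported as -1.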
def _get_storage_dir(self):
return sickbeard.TORRENT_DIR
def _get_title_and_url(self, item):
if isinstance(item, (dict, FeedParserDict)):
download_url = item.get('url', '')
title = item.get('title', '')
if not download_url:
download_url = item.get('link', '')
elif isinstance(item, (list, tuple)) and len(item) > 1:
download_url = item[1]
title = item[0]
else:
download_url = ''
title = ''
if title.endswith('DIAMOND'):
logger.log('Skipping DIAMOND release for mass fake releases.')
download_url = title = 'FAKERELEASE'
if download_url:
download_url = download_url.replace('&', '&')
if title:
title = title.replace(' ', '.')
return title, download_url
def _verify_download(self, file_name=None):
try:
parser = createParser(file_name)
if parser:
# pylint: disable=protected-access
# Access to a protected member of a client class
mime_type = parser._getMimeType()
try:
parser.stream._input.close()
except Exception:
pass
if mime_type == 'application/x-bittorrent':
return True
except Exception as e:
logger.log('Failed to validate torrent file: {0}'.format(ex(e)), logger.DEBUG)
logger.log('Result is not a valid torrent file', logger.DEBUG)
return False
def seed_ratio(self):
return self.ratio
|
adelez/grpc
|
src/python/grpcio_testing/grpc_testing/_common.py
|
Python
|
apache-2.0
| 4,406
| 0.000227
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common interfaces and implementation."""
import abc
import collections
import six
def _fuss(tuplified_metadata):
return tuplified_metadata + (
(
'grpc.metadata_added_by_runtime',
'gRPC is allowed to add metadata in transmission and does so.',
),
)
FUSSED_EMPTY_METADATA = _fuss(())
def fuss_with_metadata(metadata):
if metadata is None:
return FUSSED_EMPTY_METADATA
else:
return _fuss(tuple(metadata))
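# Illustrative example (not from the source): fuss_with_metadata((('k', 'v'),))
# returns the original pair plus the synthetic
# ('grpc.metadata_added_by_runtime', ...) entry appended by _fuss().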
def rpc_names(service_descriptors):
rpc_names_to_descriptors = {}
for service_descriptor in service_descriptors:
for method_descriptor in service_descriptor.methods_by_name.values():
rpc_name = '/{}/{}'.format(
service_descriptor.full_name, method_descriptor.name)
rpc_names_to_descriptors[rpc_name] = method_descriptor
return rpc_names_to_descriptors
class ChannelRpcRead(
collections.namedtuple(
'ChannelRpcRead',
('response', 'trailing_metadata', 'code', 'details',))):
pass
class ChannelRpcHandler(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def initial_metadata(self):
raise NotImplementedError()
@abc.abstractmethod
def add_request(self, request):
raise NotImplementedError()
@abc.abstractmethod
def close_requests(self):
raise NotImplementedError()
@abc.abstractmethod
def take_response(self):
raise NotImplementedError()
@abc.abstractmethod
def cancel(self, code, details):
raise NotImplementedError()
@abc.abstractmethod
def termination(self):
raise NotImplementedError()
@abc.abstractmethod
def is_active(self):
raise NotImplementedError()
@abc.abstractmethod
def time_remaining(self):
raise NotImplementedError()
@abc.abstractmethod
def add_callback(self, callback):
raise NotImplementedError()
class ChannelHandler(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def invoke_rpc(
self, method_full_rpc_name, invocation_metadata, requests,
requests_closed, timeout):
raise NotImplementedError()
class ServerRpcRead(
collections.namedtuple('ServerRpcRead',
('request', 'requests_closed', 'terminated',))):
pass
REQUESTS_CLOSED = ServerRpcRead(None, True, False)
TERMINATED = ServerRpcRead(None, False, True)
class ServerRpcHandler(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def send_initial_metadata(self, initial_metadata):
raise NotImplementedError()
@abc.abstractmethod
def take_request(self):
raise NotImplementedError()
@abc.abstractmethod
def add_response(self, respons
|
e):
|
raise NotImplementedError()
@abc.abstractmethod
def send_termination(self, trailing_metadata, code, details):
raise NotImplementedError()
@abc.abstractmethod
def add_termination_callback(self, callback):
raise NotImplementedError()
class Serverish(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def invoke_unary_unary(
self, method_descriptor, handler, invocation_metadata, request,
deadline):
raise NotImplementedError()
@abc.abstractmethod
def invoke_unary_stream(
self, method_descriptor, handler, invocation_metadata, request,
deadline):
raise NotImplementedError()
@abc.abstractmethod
def invoke_stream_unary(
self, method_descriptor, handler, invocation_metadata, deadline):
raise NotImplementedError()
@abc.abstractmethod
def invoke_stream_stream(
self, method_descriptor, handler, invocation_metadata, deadline):
raise NotImplementedError()
|
autozimu/LanguageClient-neovim
|
tests/LanguageClient_test.py
|
Python
|
mit
| 9,546
| 0
|
import os
import time
import threading
import neovim
import pytest
threading.current_thread().name = "Test"
NVIM_LISTEN_ADDRESS = "/tmp/nvim-LanguageClient-IntegrationTest"
project_root = os.path.dirname(os.path.abspath(__file__))
def join_path(path: str) -> str:
"""Join path to this project tests root."""
return os.path.join(project_root, path)
PATH_MAIN_RS = join_path("data/sample-rs/src/main.rs")
PATH_LIBS_RS = join_path("data/sample-rs/src/libs.rs")
PATH_CODEACTION = join_path("data/sample-ts/src/codeAction.ts")
print(PATH_MAIN_RS)
def assertRetry(predicate, retry_max=100):
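    # Poll `predicate` every 0.1 s for up to `retry_max` attempts, then assert it once more.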
retry_delay = 0.1
retry_count = 0
while retry_count < retry_max:
if predicate():
return
else:
retry_count += 1
time.sleep(retry_delay)
assert predicate()
def getLanguageClientBuffers(nvim):
return [b for b in nvim.buffers if b.name.endswith("__LCNHover__")]
@pytest.fixture(scope="module")
def nvim() -> neovim.Nvim:
nvim = neovim.attach("socket", path=NVIM_LISTEN_ADDRESS)
time.sleep(1)
return nvim
@pytest.fixture(autouse=True)
def setup(nvim):
nvim.command("%bdelete!")
def test_textDocument_definition(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(10)
nvim.funcs.cursor(3, 22)
nvim.funcs.LanguageClient_textDocument_definition()
time.sleep(3)
assert nvim.current.window.cursor == [8, 3]
def test_textDocument_hover(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.funcs.cursor(3, 22)
nvim.funcs.LanguageClient_textDocument_hover()
time.sleep(1)
buf = getLanguageClientBuffers(nvim)[0]
expect = "fn greet() -> i32"
assert expect in "\n".join(buf)
def test_textDocument_rename(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
expect = [line.replace("greet", "hello") for line in nvim.current.buffer]
nvim.funcs.cursor(3, 22)
nvim.funcs.LanguageClient_textDocument_rename({"newName": "hello"})
time.sleep(1)
assert nvim.current.buffer[:] == expect
nvim.command("edit! {}".format(PATH_MAIN_RS))
def test_textDocument_rename_multiple_oneline(nvim):
nvim.command("edit! {}".format(PATH_LIBS_RS))
time.sleep(1)
expect = [line.replace("a", "x") for line in nvim.current.buffer]
nvim.funcs.cursor(4, 13)
# TODO: Test case where new variable length is different.
nvim.funcs.LanguageClient_textDocument_rename({"newName": "x"})
time.sleep(1)
assert nvim.current.buffer[:] == expect
nvim.command("bd!")
nvim.command("edit! {}".format(PATH_MAIN_RS))
def test_textDocument_rename_multiple_files(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.funcs.cursor(17, 5)
expect = [line.replace("yo", "hello") for line in nvim.current.buffer]
nvim.funcs.LanguageClient_textDocument_rename({"newName": "hello"})
time.sleep(1)
assert nvim.current.buffer[:] == expect
nvim.command("bd!")
nvim.command("bd!")
nvim.command("edit! {}".format(PATH_MAIN_RS))
def test_textDocument_documentSymbol(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.funcs.cursor(1, 1)
nvim.funcs.LanguageClient_textDocument_documentSymbol()
time.sleep(1)
assert nvim.funcs.getloclist(0)
nvim.command("3lnext")
assert nvim.current.window.cursor != [1, 1]
def test_workspace_symbol(nvim):
nvim.command("edit! {}".format(PATH_LIBS_RS))
time.sleep(1)
nvim.funcs.cursor(1, 1)
nvim.funcs.LanguageClient_workspace_symbol()
time.sleep(1)
assert nvim.funcs.getloclist(0)
nvim.command("1lnext")
assert nvim.current.window.cursor == [8, 0]
def test_textDocument_references(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.funcs.cursor(8, 6)
nvim.funcs.LanguageClient_textDocument_references()
time.sleep(1)
expect = ["fn greet() -> i32 {", """println!("{}", greet());"""]
assert [location["text"]
for location in nvim.funcs.getloclist(0)] == expect
nvim.command("lnext")
assert nvim.current.window.cursor == [3, 19]
def test_textDocument_references_modified_buffer(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.funcs.cursor(8, 6)
nvim.input("iabc")
time.sleep(1)
nvim.funcs.LanguageClient_textDocument_references()
time.sleep(1)
assert nvim.current.window.cursor == [8, 3]
nvim.command("edit! {}".format(PATH_MAIN_RS))
def test_languageClient_registerServerCommands(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.command('let g:responses = []')
nvim.command("call LanguageClient_registerServerCommands("
"{'bash': ['bash']}, g:responses)")
time.sleep(1)
assert nvim.vars['responses'][0]['result'] is None
def test_languageClient_registerHandlers(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.command('let g:responses = []')
nvim.command("call LanguageClient_registerHandlers("
"{'window/progress': 'HandleWindowProgress'}, g:responses)")
time.sleep(1)
assert nvim.vars['responses'][0]['result'] is None
# def test_languageClient_textDocument_codeAction(nvim):
# nvim.command("edit {}".format(PATH_CODEACTION))
# nvim.funcs.cursor(4, 14)
# assertRetry(lambda: len(nvim.funcs.getqflist()) == 1)
# nvim.funcs.LanguageClient_textDocument_codeAction()
# # Wait for fzf window showup.
# assertRetry(lambda:
# next((b for b in nvim.buffers
# if b.name.startswith('term://')), None) is not None)
# time.sleep(0.2)
# nvim.eval('feedkeys("\<CR>")')
# # Wait for fzf window dismiss.
# assertRetry(lambda:
# next((b for b in nvim.buffers
# if b.name.startswith('term://')), None) is None)
# assertRetry(lambda: len(nvim.funcs.getqflist()) == 0)
def _open_float_window(nvim):
nvim.funcs.cursor(3, 22)
pos = nvim.funcs.getpos('.')
nvim.funcs.LanguageClient_textDocument_hover()
time.sleep(1)
return pos
def test_textDocument_hover_float_window_closed_on_cursor_moved(nvim):
if not nvim.funcs.exists("*nvim_open_win"):
pytest.skip("Neovim 0.3.0 or earlier does not support floating window")
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
buf = nvim.current.buffer
pos = _open_float_window(nvim)
float_buf = getLanguageClientBuffers(nvim)[0]
# Check if float window is open
float_winnr = nvim.funcs.bufwinnr(float_buf.number)
assert float_winnr > 0
# Check if cursor is not moved
assert buf.number == nvim.current.buffer.number
assert pos == nvim.funcs.getpos(".")
# Move cursor to left
nvim.funcs.cursor(13, 17)
# Check float window buffer was closed by CursorMoved
assert len(getLanguageClientBuffers(nvim)) == 0
def test_textDocument_hover_float_window_closed_on_entering_window(nvim):
if not nvim.funcs.exists("*nvim_open_win"):
pytest.skip("Neovim 0.3.0 or ear
|
lier does not support floating window")
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
win_id = nvim.funcs.win_getid()
nvim.command("split")
try:
assert win_
|
id != nvim.funcs.win_getid()
_open_float_window(nvim)
assert win_id != nvim.funcs.win_getid()
# Move to another window
nvim.funcs.win_gotoid(win_id)
assert win_id == nvim.funcs.win_getid()
# Check float window buffer was closed by BufEnter
assert len(getLanguageClientBuffers(nvim)) == 0
finally:
nvim.command("close!")
def test_textDocument_hover_float_window_closed_on_switching_to_buffer(nvim):
if not nvim.funcs.exists("*nvim_open_win"):
pytest.skip("Neovim 0.3.0 or earlier does not support floating window")
# Create a new buffer
nvim.command("enew!")
another_bufnr = nvim.current.buffer.number
try:
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
|
shailr/vms
|
applicants/migrations/0011_applicant_number_of_missed_calls.py
|
Python
|
gpl-2.0
| 421
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
|
dependencies = [
('applicants', '0010_auto_201511
|
26_0525'),
]
operations = [
migrations.AddField(
model_name='applicant',
name='number_of_missed_calls',
field=models.IntegerField(default=0),
),
]
|
A92hm/expectation-maximization
|
demo.py
|
Python
|
mit
| 2,034
| 0.006391
|
"""
Author: Ali Hajimirza (ali@alihm.net)
Copyright Ali Hajimirza, free for use under MIT license.
"""
import csv
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from algorithm import EM
import argparse
d
|
ef line_plot(data_arrays, xlabel, ylabel, labels, title, f):
"""
    Plots a line chart.
Parameters
----------
data_arrays: 2d numpy array
Data to be plotted. This array consists of matrices of real values to be plotted.
Each row of this matrix will be
|
plotted as a line on the graph.
    xlabel: string
        The label on the x axis.
    ylabel: string
        The label on the y axis.
    labels: list of string
        The legend label for each line.
    title: string
        The title shown above the graph.
    f: string or file-like object
        Path or file object the PNG image is written to.
Returns
-------
None:
Saves the plot to the disk.
"""
plt.suptitle(title, fontsize=14)
plots = []
for data in data_arrays:
plot, = plt.plot(data)
plots.append(plot)
plt.legend(plots, labels, loc=2)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.savefig(f, format="png")
plt.clf()
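# Example call with made-up data (illustrative): line_plot(np.random.rand(3, 20),
# 'step', 'mean', ['d1', 'd2', 'd3'], 'demo', 'demo.png') writes a three-line
# plot to demo.png.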
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Divides data into the categories by ')
parser.add_argument('data', type=argparse.FileType("rb"), help='CSV file of data input')
args = parser.parse_args()
# reading the file
with args.data as csvfile:
reader = csv.reader(csvfile)
input_list = np.array(map(lambda line: np.array(map(lambda i: float(i), line)), reader))
x_list = input_list[:,0]
e_matrix = input_list[:,1:]
mean_matrix = EM.simulate_E_M(x_list, e_matrix, 100)
line_plot(mean_matrix, 'step', 'mean', ['Distribution 1','Distribution 2','Distribution 3'], 'E-M Learning' ,'sample_result.png' )
|
googleapis/python-storage
|
samples/snippets/storage_remove_bucket_default_owner.py
|
Python
|
apache-2.0
| 1,765
| 0
|
#!/usr/bin/env python
# Copyright 2019 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# [START storage_remove_bucket_default_owner]
from google.cloud import storage
def remove_bucket_default_owner(bucket_name, user_email):
"""Removes a user from the access control list of the given bucket's
default object access control list."""
# bucket_name = "your-bucket-name"
# user_email = "name@example.com"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
# Reload fetches the current ACL from Cloud Storage.
bucket.acl.reload()
# You can also use `group`, `domain`, `all_authenticated` and `all` t
|
o
# remove access for different types of entities.
bucket.default_object_acl.user(user_email).revoke_read()
bucket.default_object_acl.user(user_email).revoke_write()
bucket.default_object_acl.user(user_email).revoke_o
|
wner()
bucket.default_object_acl.save()
print(
"Removed user {} from the default acl of bucket {}.".format(
user_email, bucket_name
)
)
# [END storage_remove_bucket_default_owner]
if __name__ == "__main__":
remove_bucket_default_owner(
bucket_name=sys.argv[1], user_email=sys.argv[2]
)
|
ShivaShinde/gspread
|
setup.py
|
Python
|
mit
| 1,845
| 0.015718
|
#!/usr/bin/env python
import os.path
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
description = 'Google Spreadsheets Python API'
long_description = """
{index}
License
-------
MIT
Download
========
"""
long_description = long_description.lstrip("\n").format(index=read('docs/index.txt'))
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
read('gspread/__init__.py'), re.MULTILINE).group(1)
setup(
name='gspread',
packages=['gspread'],
description=description,
long_description=long_description,
version=version,
author='Anton Burnashev',
author_email='fuss.here@gmail.com',
url='https://github.com/burnash/gspread',
keywords=['spreadsheets', 'google-spreadsheets'],
install_requires=['requests>=2.2.1'],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
|
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Science/Research",
"Topic :: Office/Business :: Financial :: Spre
|
adsheet",
"Topic :: Software Development :: Libraries :: Python Modules"
],
license='MIT'
)
|
ekcs/congress
|
congress/dse/dataobj.py
|
Python
|
apache-2.0
| 3,470
| 0
|
# Copyright 2014 Plexxi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class dataObject(object):
def __init__(self, data=None, version=0):
if data is None:
self.data = {}
else:
self.data = data
if version:
self.version = version
else:
self.version = int(bool(data))
def __str__(self):
return str(self.data)
class subData(object):
"""A piece of data that a data service is subscribed to.
Each data service in the cage can have its own instance of
this data; keep track of who published which instance.
"""
def __init__(self, key, dataindex, corrId, callback):
self.key = key
self.dataindex = dataindex
self.corrId = corrId
self.callback = callback
self.dataObjects = {}
# LOG.info(
|
# "*****New subdata: %s, %s, %s",
# key, dataindex, id(self.dataObjects))
def getSources(self):
return self.dataObj
|
ects.keys()
def update(self, sender, newdata):
self.dataObjects[sender] = newdata
def version(self, sender):
version = 0
if sender in self.dataObjects:
version = self.dataObjects[sender].version
return version
def getData(self, sender):
result = dataObject()
if sender in self.dataObjects:
LOG.info("subdata object: %s", self.dataObjects[sender])
result = self.dataObjects[sender]
return result
def getAllData(self):
result = {}
for sender in self.dataObjects:
result[sender] = self.dataObjects[sender]
return result
class pubData(object):
"""A piece of data that a data service is publishing.
Keep track of those data services that are subscribed.
"""
    def __init__(self, dataindex, args=None):
        self.dataindex = dataindex
        self.dataObject = dataObject()
        self.subscribers = {}
        self.requesters = {}
        # avoid a shared mutable default argument
        self.args = {} if args is None else args
def update(self, newdata):
version = self.dataObject.version + 1
self.dataObject = dataObject(newdata, version)
def get(self):
return self.dataObject
def version(self):
return self.dataObject.version
def addsubscriber(self, sender, type, corrId):
if sender not in self.subscribers:
self.subscribers[sender] = {}
self.subscribers[sender]['type'] = type
self.subscribers[sender]['correlationId'] = corrId
def removesubscriber(self, sender):
if sender in self.subscribers:
del self.subscribers[sender]
def getsubscribers(self, sender=""):
if sender:
if sender in self.subscribers:
return self.subscribers[sender]
else:
return []
else:
return self.subscribers
|
nandhp/youtube-dl
|
youtube_dl/extractor/youjizz.py
|
Python
|
unlicense
| 2,297
| 0.001741
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class YouJizzIE(InfoExtractor):
_VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/[^/#?]+-(?P<id>[0-9]+)\.html(?:$|[?#])'
_TEST = {
'url': 'http://www.youjizz.com/videos/zeichentrick-1-2189178.html',
'md5': '07e15fa469ba384c7693fd246905547c',
'info_dict': {
'id': '21
|
89178',
'ext': 'flv',
'title': 'Zeichentrick 1',
'age_limit': 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
age_li
|
mit = self._rta_search(webpage)
video_title = self._html_search_regex(
r'<title>\s*(.*)\s*</title>', webpage, 'title')
embed_page_url = self._search_regex(
r'(https?://www.youjizz.com/videos/embed/[0-9]+)',
webpage, 'embed page')
webpage = self._download_webpage(
embed_page_url, video_id, note='downloading embed page')
# Get the video URL
m_playlist = re.search(r'so.addVariable\("playlist", ?"(?P<playlist>.+?)"\);', webpage)
if m_playlist is not None:
playlist_url = m_playlist.group('playlist')
playlist_page = self._download_webpage(playlist_url, video_id,
'Downloading playlist page')
m_levels = list(re.finditer(r'<level bitrate="(\d+?)" file="(.*?)"', playlist_page))
if len(m_levels) == 0:
raise ExtractorError('Unable to extract video url')
videos = [(int(m.group(1)), m.group(2)) for m in m_levels]
(_, video_url) = sorted(videos)[0]
video_url = video_url.replace('%252F', '%2F')
else:
video_url = self._search_regex(r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);',
webpage, 'video URL')
return {
'id': video_id,
'url': video_url,
'title': video_title,
'ext': 'flv',
'format': 'flv',
'player_url': embed_page_url,
'age_limit': age_limit,
}
|
odoousers2014/odoo-addons-supplier_price
|
purchase_group_by_period/tests/__init__.py
|
Python
|
agpl-3.0
| 821
| 0
|
# -*- coding: utf8 -*-
#
# Copyright (C) 2014 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This p
|
rogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import test_purchase_group_by_period
|
ibtokin/ibtokin
|
manage.py
|
Python
|
mit
| 805
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ibtokin.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
        # The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
MaxTyutyunnikov/lino
|
obsolete/tests/8.py
|
Python
|
gpl-3.0
| 2,702
| 0.018135
|
# coding: latin1
## Copyright 2003-2007 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from lino.misc.tsttools import TestCase, main
from lino.apps.contacts.contacts_demo import startup
from lino.apps.contacts.contacts_tables import *
from lino.adamo.filters import NotEmpty
#from lino.apps.addrbook import demo
#from lino.apps.addrbook.tables import Partner
class Case(TestCase):
def test01(self):
db = startup()
s1 = ''
q = db.query(Contact,\
"name street city.name",
orderBy="name")
        q.addColFilter('city',NotEmpty)
## for row in q:
## #print row[0]
## s1 += str(row[0]) + " "
## s1 += str(row[1]) + " "
## s1 += str(row[2]) + "\n"
## #print s1
## self.assertEqual(s1,"""\
## Arens None Eupen
## Ausdemwald None Aachen
## Bodard None Verviers
## Eesti Telefon Sõpruse pst. Tallinn
## Eierschal None Eupen
## Eierschal None Eupen
## Freitag None Eupen
## Girf OÜ Laki Tallinn
## Großmann None Eupen
## PAC Systems PGmbH Hütte Eupen
## Rumma & Ko OÜ Tartu mnt. Tallinn
## Saffre None Tallinn
## """)
s2 = ''
for row in q:
s2 += unicode(row.name) + " "
if row.street is not None:
s2 += unicode(row.street) + " "
s2 += unicode(row.city.name) + "\n"
#print s2
self.assertEquivalent(s2,u"""\
Andreas Arens Eupen
Anton Ausdemwald Aachen
Emil Eierschal Eupen
Erna Eierschal Eupen
Frédéric Freitag Eupen
Gerd Großmann Eupen
Hans Flott Bierstraße München
Henri Bodard Verviers
Kati Kask Tallinn
Kurtz & Büntig Bergstraße Eupen
Mets & puu OÜ Tartu mnt. Tallinn
Reisebüro Freitag Hütte Eupen
Tõnu Tamm Tallinn
""")
# some other cases (for example 80.py) would fail if run
# together with this case in one suite and if the following
# lines were not:
db.shutdown()
if __name__ == '__main__':
main()
|
kartta-labs/mapwarper
|
lib/tilestache/TileStache-1.51.5/TileStache/Providers.py
|
Python
|
mit
| 12,088
| 0.002978
|
""" The provider bits of TileStache.
A Provider is the part of TileStache that actually renders imagery. A few default
providers are found here, but it's possible to define your own and pull them into
TileStache dynamically by class name.
Built-in providers:
- mapnik (Mapnik.ImageProvider)
- proxy (Proxy)
- vector (TileStache.Vector.Provider)
- url template (UrlTemplate)
- mbtiles (TileStache.MBTiles.Provider)
- mapnik grid (Mapnik.GridProvider)
Example built-in provider, for JSON configuration file:
"layer-name": {
"provider": {"name": "mapnik", "mapfile": "style.xml"},
...
}
Example external provider, for JSON configuration file:
"layer-name": {
"provider": {"class": "Module:Classname", "kwargs": {"frob": "yes"}},
...
}
- The "class" value is split up into module and classname, and dynamically
included. If this doesn't work for some reason, TileStache will fail loudly
to let you know.
- The "kwargs" value is fed to the class constructor as a dictionary of keyword
args. If your defined class doesn't accept any of these keyword arguments,
TileStache will throw an exception.
A provider must offer one of two methods for rendering map areas.
The renderTile() method draws a single tile at a time, and has these arguments:
- width, height: in pixels
- srs: projection as Proj4 string.
"+proj=longlat +ellps=WGS84 +datum=WGS84" is an example,
see http://spatialreference.org for more.
- coord: Coordinate object representing a single tile.
The renderArea() method draws a variably-sized area, and is used when drawing
metatiles. It has these arguments:
  - width, height: in pixels
- srs: projection as Proj4 string.
"+proj=longlat +ellps=WGS84 +datum=WGS84" is an example,
see http://spatialreference.org for more.
- xmin, ymin, xmax, ymax: coordinates of bounding box in projected coordinates.
  - zoom: zoom level of final map. Technically this can be derived from the other
arguments, but that's a hassle so we'll pass it in explicitly.
A provider may offer a method for custom response type, getTypeByExtension().
This method accepts a single argument, a filename extension string (e.g. "png",
"json", etc.) and returns a tuple with two strings: a mime-type and a format.
Note that for image and non-image tiles alike, renderArea() and renderTile()
methods on a provider class must return an object with a save() method that
can accept a file-like object and a format name, e.g. this should work:
    provider.renderArea(...).save(fp, "TEXT")
... if "TEXT" is a valid response format according to getTypeByExtension().
Non-image providers and metatiles do not mix.
For an example of a non-image provider, see TileStache.Vector.Provider.
"""
import os
import logging
try:
from io import BytesIO
except ImportError:
# Python 2
from StringIO import StringIO as BytesIO
from string import Template
try:
import urllib.request as urllib2
except ImportError:
# Python 2
import urllib2
import urllib
try:
from PIL import Image
except ImportError:
# On some systems, PIL.Image is known as Image.
import Image
import ModestMaps
from ModestMaps.Core import Point, Coordinate
from . import Geography
# This import should happen inside getProviderByName(), but when testing
# on Mac OS X features are missing from output. Weird-ass C libraries...
try:
from . import Vector
except ImportError:
pass
# Already deprecated; provided for temporary backward-compatibility with
# old location of Mapnik provider. TODO: remove in next major version.
try:
from .Mapnik import ImageProvider as Mapnik
except ImportError:
pass
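# Editor's sketch (not part of the original module): a minimal external
# provider following the renderTile() contract described in the module
# docstring above. The class name "SolidColorProvider" and its behaviour are
# illustrative assumptions; it would be referenced from a layer config as,
# e.g., {"class": "myproviders:SolidColorProvider", "kwargs": {"color": "#ff9900"}}.
class SolidColorProvider:
    def __init__(self, layer, color='#ff9900'):
        # TileStache hands the owning layer to the constructor; "color" would
        # arrive via the "kwargs" block of the configuration.
        self.layer = layer
        self.color = color

    def renderTile(self, width, height, srs, coord):
        # Any object with a save(fp, format) method satisfies TileStache;
        # a PIL image filled with a single colour is the simplest such object.
        from PIL import Image
        return Image.new('RGB', (width, height), self.color)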
def getProviderByName(name):
""" Retrieve a provider object by name.
Raise an exception if the name doesn't work out.
"""
if name.lower() == 'mapnik':
from . import Mapnik
return Mapnik.ImageProvider
elif name.lower() == 'proxy':
return Proxy
elif name.lower() == 'url template':
return UrlTemplate
elif name.lower() == 'vector':
from . import Vector
return Vector.Provider
elif name.lower() == 'mbtiles':
from . import MBTiles
return MBTiles.Provider
elif name.lower() == 'mapnik grid':
from . import Mapnik
return Mapnik.GridProvider
elif name.lower() == 'sandwich':
from . import Sandwich
return Sandwich.Provider
raise Exception('Unknown provider name: "%s"' % name)
class Verbatim:
''' Wrapper for PIL.Image that saves raw input bytes if modes and formats match.
'''
def __init__(self, bytes):
self.buffer = BytesIO(bytes)
self.format = None
self._image = None
#
# Guess image format based on magic number, if possible.
# http://www.astro.keele.ac.uk/oldusers/rno/Computing/File_magic.html
#
magic = {
'\x89\x50\x4e\x47': 'PNG',
'\xff\xd8\xff\xe0': 'JPEG',
            '\x47\x49\x46\x38': 'GIF',
'\x4d\x4d\x00\x2a': 'TIFF',
'\x49\x49\x2a\x00': 'TIFF'
}
if bytes[:4] in magic:
self.format = magic[bytes[:4]]
else:
self.format = self.image().format
def image(self):
''' Return a guaranteed instance of PIL.Image.
'''
if self._image is None:
self._image = Image.open(self.buffer)
return self._image
def convert(self, mode):
if mode == self.image().mode:
return self
else:
return self.image().convert(mode)
def crop(self, bbox):
return self.image().crop(bbox)
def save(self, output, format):
if format == self.format:
output.write(self.buffer.getvalue())
else:
self.image().save(output, format)
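# Illustrative usage (editor's sketch, not from the original module): wrapping
# raw tile bytes lets save() copy them through untouched whenever the requested
# format matches the one detected from the magic number, skipping a
# decode/re-encode pass.
#
#   tile = Verbatim(open('tile.png', 'rb').read())
#   tile.save(out_fp, 'PNG')    # raw bytes written as-is
#   tile.save(out_fp, 'JPEG')   # falls back to PIL re-encoding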
class Proxy:
""" Proxy provider, to pass through and cache tiles from other places.
This provider is identified by the name "proxy" in the TileStache config.
Additional arguments:
- url (optional)
URL template for remote tiles, for example:
"http://tile.openstreetmap.org/{Z}/{X}/{Y}.png"
- provider (optional)
Provider name string from Modest Maps built-ins.
See ModestMaps.builtinProviders.keys() for a list.
Example: "OPENSTREETMAP".
- timeout (optional)
Defines a timeout in seconds for the request.
If not defined, the global default timeout setting will be used.
Either url or provider is required. When both are present, url wins.
Example configuration:
{
"name": "proxy",
"url": "http://tile.openstreetmap.org/{Z}/{X}/{Y}.png"
}
"""
def __init__(self, layer, url=None, provider_name=None, timeout=None):
""" Initialize Proxy provider with layer and url.
"""
if url:
self.provider = ModestMaps.Providers.TemplatedMercatorProvider(url)
elif provider_name:
if provider_name in ModestMaps.builtinProviders:
self.provider = ModestMaps.builtinProviders[provider_name]()
else:
raise Exception('Unkown Modest Maps provider: "%s"' % provider_name)
else:
raise Exception('Missing required url or provider parameter to Proxy provider')
self.timeout = timeout
@staticmethod
def prepareKeywordArgs(config_dict):
""" Convert configured parameters to keyword args for __init__().
"""
kwargs = dict()
if 'url' in config_dict:
kwargs['url'] = config_dict['url']
if 'provider' in config_dict:
kwargs['provider_name'] = config_dict['provider']
if 'timeout' in config_dict:
kwargs['timeout'] = config_dict['timeout']
return kwargs
def renderTile(self, width, height, srs, coord):
"""
"""
img = None
urls = self.provider.getTileUrls(coord)
# Tell u
|
sahana/Turkey
|
modules/s3/s3sync.py
|
Python
|
mit
| 33,252
| 0.003248
|
# -*- coding: utf-8 -*-
""" S3 Synchronization
@copyright: 2011-15 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import datetime
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import *
from gluon.storage import Storage
from s3datetime import s3_parse_datetime, s3_utc
from s3rest import S3Method
from s3import import S3ImportItem
from s3query import S3URLQuery
DEBUG = False
if DEBUG:
print >> sys.stderr, "S3SYNC: DEBUG MODE"
def _debug(m):
print >> sys.stderr, m
else:
_debug = lambda m: None
# =============================================================================
class S3Sync(S3Method):
""" Synchronization Handler """
def __init__(self):
""" Constructor """
S3Method.__init__(self)
self.log = S3SyncLog()
self._config = None
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
RESTful method handler (repository/sync, repository/register)
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
output = dict()
if r.method == "sync":
if r.http == "GET":
# Incoming pull
output = self.__send(r, **attr)
elif r.http in ("PUT", "POST"):
# Incoming push
output = self.__receive(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
elif r.name == "repository" and r.method == "register":
if r.http == "GET":
# Incoming registration request
output = self.__register(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
# REST Methods:
# -------------------------------------------------------------------------
def __register(self, r, **attr):
"""
Respond to an incoming registration request
@param r: the S3Request
@param attr: the controller attributes
"""
log = self.log
result = log.SUCCESS
message = "registration successful"
repository_id = None
if "repository" in r.vars:
ruid = r.vars["repository"]
db = current.db
rtable = current.s3db.sync_repository
row = db(rtable.uuid == ruid).select(limitby=(0, 1)).first()
if row:
repository_id = row.id
if not row.accept_push and current.auth.s3_has_role("ADMIN"):
row.update_record(accept_push=True)
else:
if current.auth.s3_has_role("ADMIN"):
accept_push = True
else:
accept_push = False
repository_id = rtable.insert(name=ruid,
uuid=ruid,
accept_push=accept_push)
if not repository_id:
result = log.ERROR
message = "registration failed"
else:
result = log.ERROR
message = "no repository identifier specified"
if result == log.SUCCESS:
output = current.xml.json_message(message=message,
sender="%s" % self.config.uuid)
else:
output = current.xml.json_message(False, 400,
message=message,
sender="%s" % self.config.uuid)
# Set content type header
headers = current.response.headers
headers["Content-Type"] = "application/json"
# Log the operation
log.write(repository_id=repository_id,
resource_name=log.NONE,
transmission=log.IN,
mode=log.REGISTER,
result=result,
message=message)
return output
# -------------------------------------------------------------------------
def __send(self, r, **attr):
"""
Respond to an incoming pull
@param r: the S3Request
@param attr: the controller attributes
"""
mixed = attr.get("mixed", False)
get_vars = r.get_vars
resource = r.resource
# Identify the requesting repository
repository_uuid = get_vars.get("repository")
connector = None
if repository_uuid:
rtable = current.s3db.sync_repository
query = rtable.uuid == repository_uuid
row = current.db(query).select(limitby=(0, 1)).first()
if row:
connector = S3SyncRepository(row)
if connector is None:
# Use a dummy repository with Eden API
connector = S3SyncRepository(Storage(id = None,
name = "unknown",
apitype = "eden",
))
current.log.debug("S3Sync PULL from %s (%s)" % (connector.name,
connector.apitype))
# Additional export parameters
start = get_vars.get("start", None)
if start is not None:
try:
start = int(start)
except ValueError:
start = None
        limit = get_vars.get("limit", None)
if limit is not None:
try:
limit = int(limit)
except ValueError:
limit = None
msince = get_vars.get("msince", None)
        if msince is not None:
            msince = s3_parse_datetime(msince)
# Sync filters from peer
filters = {}
for k, v in get_vars.items():
if k[0] == "[" and "]" in k:
tablename, urlvar = k[1:].split("]", 1)
if urlvar:
if not tablename or tablename == "~":
tablename = resource.tablename
f = filters.get(tablename, {})
u = f.get(urlvar, None)
if u:
u = "%s&%s" % (u, v)
else:
u = v
f[urlvar] = u
filters[tablename] = f
if not filters:
filters = None
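        # Illustrative example (editor's note; table and variable names are
        # made up): a peer request carrying ?[pr_filter]task_id__belongs=1,2
        # would yield filters = {"pr_filter": {"task_id__belongs": "1,2"}},
        # while an empty or "~" table name falls back to resource.tablename.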
try:
result = connector.send(resource,
start = start,
limit = limit,
msince = msince,
filters = filters,
mixed = mixed,
|
jmlong1027/multiscanner
|
tests/modules/test_2.py
|
Python
|
mpl-2.0
| 649
| 0
|
"""
A test module which has a required module and a config
"""
TYPE = "Test"
NAME = "test_2"
REQUIRES = ["test_1"]
DEFAULTCONF = {'a': 1, 'b': 2}
def check(conf=DEFAULTCONF):
if None in REQUIRES:
return False
return True
def scan(filelist, conf=DEFAULTCONF):
results = []
result1, meta1 = REQUIRES[0]
result1 = dict(result1)
for fname in filelist:
        if fname in result1:
            results.append((fname, True))
else:
results.append((fname, fname))
metadata = {}
metadata["Name"] = NAME
metadata["Type"] = TYPE
    metadata["Include"] = True
return results, metadata
|
wpoely86/easybuild-easyblocks
|
easybuild/easyblocks/p/python.py
|
Python
|
gpl-2.0
| 8,497
| 0.003884
|
##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Python, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import copy
import os
import re
import fileinput
import sys
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_libdir, get_software_root, get_software_version
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_shared_lib_ext
EXTS_FILTER_PYTHON_PACKAGES = ('python -c "import %(ext_name)s"', "")
class EB_Python(ConfigureMake):
"""Support for building/installing Python
- default configure/build_step/make install works fine
To extend Python by adding extra packages there are two ways:
- list the packages in the exts_list, this will include the packages in this Python installation
- create a seperate easyblock, so the packages can be loaded with module load
e.g., you can include numpy and scipy in a default Python installation
    but also provide newer updated numpy and scipy versions by creating a PythonPackage-derived easyblock for it.
"""
def prepare_for_extensions(self):
"""
Set default class and filter for Python packages
"""
# build and install additional packages with PythonPackage easyblock
self.cfg['exts_defaultclass'] = "PythonPackage"
self.cfg['exts_filter'] = EXTS_FILTER_PYTHON_PACKAGES
# don't pass down any build/install options that may have been specified
# 'make' options do not make sense for when building/installing Python libraries (usually via 'python setup.py')
msg = "Unsetting '%s' easyconfig parameter before building/installing extensions: %s"
for param in ['buildopts', 'installopts']:
if self.cfg[param]:
self.log.debug(msg, param, self.cfg[param])
self.cfg[param] = ''
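    # Illustrative easyconfig snippet (editor's sketch; package versions and
    # options are assumptions, not taken from any shipped easyconfig) showing
    # the exts_list route mentioned in the class docstring:
    #
    #   exts_list = [
    #       ('setuptools', '20.1.1', {}),
    #       ('numpy', '1.10.4', {'source_urls': ['https://pypi.python.org/packages/source/n/numpy/']}),
    #   ]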
def configure_step(self):
"""Set extra configure options."""
self.cfg.update('configopts', "--with-threads --enable-shared")
# Need to be careful to match the unicode settings to the underlying python
if sys.maxunicode == 1114111:
self.cfg.update('configopts', "--enable-unicode=ucs4")
elif sys.maxunicode == 65535:
self.cfg.update('configopts', "--enable-unicode=ucs2")
else:
raise EasyBuildError("Unknown maxunicode value for your python: %d" % sys.maxunicode)
modules_setup_dist = os.path.join(self.cfg['start_dir'], 'Modules', 'Setup.dist')
libreadline = get_software_root('libreadline')
if libreadline:
ncurses = get_software_root('ncurses')
if ncurses:
readline_libdir = get_software_libdir('libreadline')
ncurses_libdir = get_software_libdir('ncurses')
readline_static_lib = os.path.join(libreadline, readline_libdir, 'libreadline.a')
ncurses_static_lib = os.path.join(ncurses, ncurses_libdir, 'libncurses.a')
readline = "readline readline.c %s %s" % (readline_static_lib, ncurses_static_lib)
for line in fileinput.input(modules_setup_dist, inplace='1', backup='.readline'):
line = re.sub(r"^#readline readline.c.*", readline, line)
sys.stdout.write(line)
else:
raise EasyBuildError("Both libreadline and ncurses are required to ensure readline support")
openssl = get_software_root('OpenSSL')
if openssl:
for line in fileinput.input(modules_setup_dist, inplace='1', backup='.ssl'):
line = re.sub(r"^#SSL=.*", "SSL=%s" % openssl, line)
line = re.sub(r"^#(\s*-DUSE_SSL -I)", r"\1", line)
line = re.sub(r"^#(\s*-L\$\(SSL\)/lib )", r"\1 -L$(SSL)/lib64 ", line)
sys.stdout.write(line)
tcl = get_software_root('Tcl')
tk = get_software_root('Tk')
if tcl and tk:
tclver = get_software_version('Tcl')
tkver = get_software_version('Tk')
tcltk_maj_min_ver = '.'.join(tclver.split('.')[:2])
if tcltk_maj_min_ver != '.'.join(tkver.split('.')[:2]):
raise EasyBuildError("Tcl and Tk major/minor versions don't match: %s vs %s", tclver, tkver)
self.cfg.update('configopts', "--with-tcltk-includes='-I%s/include -I%s/include'" % (tcl, tk))
tcl_libdir = os.path.join(tcl, get_software_libdir('Tcl'))
tk_libdir = os.path.join(tk, get_software_libdir('Tk'))
tcltk_libs = "-L%(tcl_libdir)s -L%(tk_libdir)s -ltcl%(maj_min_ver)s -ltk%(maj_min_ver)s" % {
'tcl_libdir': tcl_libdir,
'tk_libdir': tk_libdir,
'maj_min_ver': tcltk_maj_min_ver,
}
self.cfg.update('configopts', "--with-tcltk-libs='%s'" % tcltk_libs)
super(EB_Python, self).configure_step()
def install_step(self):
"""Extend make install to make sure that the 'python' command is present."""
super(EB_Python, self).install_step()
python_binary_path = os.path.join(self.installdir, 'bin', 'python')
if not os.path.isfile(python_binary_path):
pythonver = '.'.join(self.version.split('.')[0:2])
srcbin = "%s%s" % (python_binary_path, pythonver)
try:
os.symlink(srcbin, python_binary_path)
except OSError, err:
raise EasyBuildError("Failed to symlink %s to %s: %s", srcbin, python_binary_path, err)
def sanity_check_step(self):
"""Custom sanity check for Python."""
pyver = "python%s" % '.'.join(self.version.split('.')[0:2])
try:
fake_mod_data = self.load_fake_module()
except EasyBuildError, err:
raise EasyBuildError("Loading fake module failed: %s", err)
abiflags = ''
if LooseVersion(self.version) >= LooseVersion("3"):
run_cmd("which python", log_all=True, simple=False)
cmd = 'python -c "import sysconfig; print(sysconfig.get_config_var(\'abiflags\'));"'
(abiflags, _) = run_cmd(cmd, log_all=True, simple=False)
if not abiflags:
raise EasyBuildError("Failed to determine abiflags: %s", abiflags)
else:
abiflags = abiflags.strip()
custom_paths = {
'files': ["bin/%s" % pyver, "lib/lib%s%s.%s" % (pyver, abiflags, get_shared_lib_ext())],
'dirs': ["include/%s%s" % (pyver, abiflags), "lib/%s" % pyver],
}
# cleanup
self.clean_up_fake_module(fake_mod_data)
custom_commands = [
('python', '--version'),
('python', '-c "import _ctypes"'), # make sure that foreign function interface (libffi) works
|
atiqueahmedziad/addons-server
|
src/olympia/amo/management/commands/compress_assets.py
|
Python
|
bsd-3-clause
| 8,858
| 0.000226
|
import hashlib
import os
import re
import time
import uuid
import subprocess
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.staticfiles.finders import find as find_static_path
from olympia.lib.jingo_minify_helpers import ensure_path_exists
def run_command(command):
"""Run a command and correctly poll the output and write that to stdout"""
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
while True:
output = process.stdout.readline()
if output == '' and process.poll() is not None:
break
if output:
print(output.strip())
return process.poll()
class Command(BaseCommand):
help = ('Compresses css and js assets defined in settings.MINIFY_BUNDLES')
# This command must not do any system checks because Django runs db-field
# related checks since 1.10 which require a working MySQL connection.
# We don't have that during our docker builds and since `compress_assets`
# is being used while building our docker images we have to disable them.
requires_system_checks = False
checked_hash = {}
bundle_hashes = {}
missing_files = 0
minify_skipped = 0
def add_arguments(self, parser):
"""Handle command arguments."""
parser.add_argument(
'force', action='store_true',
help='Ignores modified/created dates and forces compression.')
def generate_build_id(self):
return uuid.uuid4().hex[:8]
def update_hashes(self):
# Adds a time based hash on to the build id.
self.build_id = '%s-%s' % (
self.generate_build_id(), hex(int(time.time()))[2:])
build_id_file = os.path.realpath(
os.path.join(settings.ROOT, 'build.py'))
with open(build_id_file, 'w') as f:
f.write('BUILD_ID_CSS = "%s"\n' % self.build_id)
f.write('BUILD_ID_JS = "%s"\n' % self.build_id)
f.write('BUILD_ID_IMG = "%s"\n' % self.build_id)
f.write('BUNDLE_HASHES = %s\n' % self.bundle_hashes)
def handle(self, **options):
self.force_compress = options.get('force', False)
# This will loop through every bundle, and do the following:
# - Concat all files into one
# - Cache bust all images in CSS files
# - Minify the concatted files
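        # Illustrative shape of settings.MINIFY_BUNDLES (editor's assumption;
        # bundle and file names are made up) matching the iteration below:
        #   MINIFY_BUNDLES = {
        #       'css': {'site': ['css/impala/site.less']},
        #       'js': {'common': ['js/lib/jquery.js', 'js/site.js']},
        #   }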
for ftype, bundle in settings.MINIFY_BUNDLES.iteritems():
for name, files in bundle.iteritems():
# Set the paths to the files.
concatted_file = os.path.join(
settings.ROOT, 'static',
ftype, '%s-all.%s' % (name, ftype,))
compressed_file = os.path.join(
settings.ROOT, 'static',
ftype, '%s-min.%s' % (name, ftype,))
ensure_path_exists(concatted_file)
ensure_path_exists(compressed_file)
files_all = []
for fn in files:
processed = self._preprocess_file(fn)
# If the file can't be processed, we skip it.
if processed is not None:
files_all.append(processed)
# Concat all the files.
tmp_concatted = '%s.tmp' % concatted_file
if len(files_all) == 0:
raise CommandError(
'No input files specified in '
'MINIFY_BUNDLES["%s"]["%s"] in settings.py!' %
(ftype, name)
)
run_command('cat {files} > {tmp}'.format(
files=' '.join(files_all),
tmp=tmp_concatted
))
# Cache bust individual images in the CSS.
if ftype == 'css':
bundle_hash = self._cachebust(tmp_concatted, name)
self.bundle_hashes['%s:%s' % (ftype, name)] = bundle_hash
# Compresses the concatenations.
is_changed = self._is_changed(concatted_file)
self._clean_tmp(concatted_file)
if is_changed or not os.path.isfile(compressed_file):
self._minify(ftype, concatted_file, compressed_file)
else:
print(
'File unchanged, skipping minification of %s' % (
concatted_file))
self.minify_skipped += 1
# Write out the hashes
self.update_hashes()
if self.minify_skipped:
print(
'Unchanged files skipped for minification: %s' % (
self.minify_skipped))
def _preprocess_file(self, filename):
"""Preprocess files and return new filenames."""
css_bin = filename.endswith('.less') and settings.LESS_BIN
source = find_static_path(filename)
target = source
if css_bin:
target = '%s.css' % source
run_command('{lessc} {source} {target}'.format(
lessc=css_bin,
source=str(source),
target=str(target)))
return target
def _is_changed(self, concatted_file):
"""Check if the file has been changed."""
if self.force_compress:
return True
tmp_concatted = '%s.tmp' % concatted_file
file_exists = (
os.path.exists(concatted_file) and
            os.path.getsize(concatted_file) == os.path.getsize(tmp_concatted))
if file_exists:
orig_hash = self._file_hash(concatted_file)
temp_hash = self._file_hash(tmp_concatted)
return orig_hash != temp_hash
return True # Different filesize, so it was definitely changed
    def _clean_tmp(self, concatted_file):
        """Replace the old file with the temp file."""
tmp_concatted = '%s.tmp' % concatted_file
if os.path.exists(concatted_file):
os.remove(concatted_file)
os.rename(tmp_concatted, concatted_file)
def _cachebust(self, css_file, bundle_name):
"""Cache bust images. Return a new bundle hash."""
self.stdout.write(
'Cache busting images in %s\n' % re.sub('.tmp$', '', css_file))
if not os.path.exists(css_file):
return
css_content = ''
with open(css_file, 'r') as css_in:
css_content = css_in.read()
def _parse(url):
return self._cachebust_regex(url, css_file)
css_parsed = re.sub('url\(([^)]*?)\)', _parse, css_content)
with open(css_file, 'w') as css_out:
css_out.write(css_parsed)
# Return bundle hash for cachebusting JS/CSS files.
file_hash = hashlib.md5(css_parsed).hexdigest()[0:7]
self.checked_hash[css_file] = file_hash
if self.missing_files:
self.stdout.write(
' - Error finding %s images\n' % (self.missing_files,))
self.missing_files = 0
return file_hash
def _minify(self, ftype, file_in, file_out):
"""Run the proper minifier on the file."""
if ftype == 'js' and hasattr(settings, 'UGLIFY_BIN'):
opts = {'method': 'UglifyJS', 'bin': settings.UGLIFY_BIN}
run_command('{uglify} -v -o {target} {source} -m'.format(
uglify=opts['bin'],
target=file_out,
source=file_in))
elif ftype == 'css' and hasattr(settings, 'CLEANCSS_BIN'):
opts = {'method': 'clean-css', 'bin': settings.CLEANCSS_BIN}
run_command('{cleancss} -o {target} {source}'.format(
cleancss=opts['bin'],
target=file_out,
source=file_in))
self.stdout.write(
'Minifying %s (using %s)\n' % (file_in, opts['method']))
def _file_hash(self, url):
"""Open the file and get a hash of it."""
if url in self.checked_hash:
return self.checked_hash[url]
file_hash = ''
try:
with open(url) as f:
file_hash
|
cbonoz/codehealth
|
dependencies/baron/tokenizer.py
|
Python
|
mit
| 3,585
| 0.000837
|
import re
class UnknowItem(Exception):
pass
KEYWORDS = ("and", "as", "assert", "break", "class", "continue", "def", "del", "elif", "else", "except", "exec", "finally", "for", "from", "global", "if", "import", "in", "is", "lambda", "not", "or", "pass", "print", "raise", "return", "try", "while", "with", "yield")
TOKENS = (
(r'[a-zA-Z_]\w*', 'NAME'),
(r'0', 'INT'),
(r'[-+]?\d+[eE][-+]?\d+[jJ]', 'FLOAT_EXPONANT_COMPLEX'),
(r'[-+]?\d+.\d?[eE][-+]?\d+[jJ]', 'FLOAT_EXPONANT_COMPLEX'),
(r'[-+]?\d?.\d+[eE][-+]?\d+[jJ]', 'FLOAT_EXPONANT_COMPLEX'),
(r'\d+[eE][-+]?\d*', 'FLOAT_EXPONANT'),
(r'\d+\.\d*[eE][-+]?\d*', 'FLOAT_EXPONANT'),
(r'\.\d+[eE][-+]?\d*', 'FLOAT_EXPONANT'),
(r'\d*\.\d+[jJ]', 'COMPLEX'),
(r'\d+\.[jJ]', 'COMPLEX'),
(r'\d+[jJ]', 'COMPLEX'),
(r'\d+\.', 'FLOAT'),
(r'\d*\.\d+[lL]?', 'FLOAT'),
(r'\d+\.\d*[lL]?', 'FLOAT'),
(r'\.', 'DOT'),
(r'[1-9]+\d*[lL]', 'LONG'),
(r'[1-9]+\d*', 'INT'),
(r'0[xX][\da-fA-F]+[lL]?', 'HEXA'),
(r'(0[oO][0-7]+)|(0[0-7]*)[lL]?', 'OCTA'),
(r'0[bB][01]+[lL]?', 'BINARY'),
(r'\(', 'LEFT_PARENTHESIS'),
(r'\)', 'RIGHT_PARENTHESIS'),
(r':', 'COLON'),
(r',', 'COMMA'),
(r';', 'SEMICOLON'),
(r'@', 'AT'),
(r'\+', 'PLUS'),
(r'-', 'MINUS'),
(r'\*', 'STAR'),
(r'/', 'SLASH'),
(r'\|', 'VBAR'),
(r'&', 'AMPER'),
(r'<', 'LESS'),
(r'>', 'GREATER'),
(r'=', 'EQUAL'),
(r'%', 'PERCENT'),
(r'\[', 'LEFT_SQUARE_BRACKET'),
(r'\]', 'RIGHT_SQUARE_BRACKET'),
(r'\{', 'LEFT_BRACKET'),
(r'\}', 'RIGHT_BRACKET'),
(r'`', 'BACKQUOTE'),
(r'==', 'EQUAL_EQUAL'),
(r'<>', 'NOT_EQUAL'),
(r'!=', 'NOT_EQUAL'),
(r'<=', 'LESS_EQUAL'),
(r'>=', 'GREATER_EQUAL'),
(r'~', 'TILDE'),
(r'\^', 'CIRCUMFLEX'),
(r'<<', 'LEFT_SHIFT'),
(r'>>', 'RIGHT_SHIFT'),
(r'\*\*', 'DOUBLE_STAR'),
(r'\+=', 'PLUS_EQUAL'),
(r'-=', 'MINUS_EQUAL'),
(r'\*=', 'STAR_EQUAL'),
(r'/=', 'SLASH_EQUAL'),
(r'%=', 'PERCENT_EQUAL'),
(r'&=', 'AMPER_EQUAL'),
(r'\|=', 'VBAR_EQUAL'),
(r'\^=', 'CIRCUMFLEX_EQUAL'),
(r'<<=', 'LEFT_SHIFT_EQUAL'),
(r'>>=', 'RIGHT_SHIFT_EQUAL'),
(r'\*\*=', 'DOUBLE_STAR_EQUAL'),
(r'//', 'DOUBLE_SLASH'),
(r'//=', 'DOUBLE_SLASH_EQUAL'),
(r'\n', 'ENDL'),
(r'\r\n', 'ENDL'),
(r'#.*', 'COMMENT'),
(r'(\s|\\\n|\\\r\n)+', 'SPACE'),
(r'["\'](.|\n|\r)*["\']', 'STRING'),
(r'[uU]["\'](.|\n|\r)*["\']', 'UNICODE_STRING'),
(r'[rR]["\'](.|\n|\r)*["\']', 'RAW_STRING'),
(r'[bB]["\'](.|\n|\r)*["\']', 'BINARY_STRING'),
(r'[uU][rR]["\'](.|\n|\r)*["\']', 'UNICODE_RAW_STRING'),
(r'[bB][rR]["\'](.|\n|\r)*["\']', 'BINARY_RAW_STRING'),
)
TOKENS = [(re.compile('^' + x[0] + '$'), x[1]) for x in TOKENS]
def tokenize(sequence, print_function=False):
return list(tokenize_generator(sequence, print_function))
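# Illustrative call (editor's sketch): the tokenizer expects input that has
# already been split into atoms, one candidate token per list item, e.g.
#   tokenize(['def', ' ', 'f', '(', ')', ':'])
# yields pairs such as ('DEF', 'def'), ('SPACE', ' '), ('NAME', 'f'),
# ('LEFT_PARENTHESIS', '('), ('RIGHT_PARENTHESIS', ')'), ('COLON', ':'),
# followed by the closing ('ENDMARKER', '') marker.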
def tokenize_current_keywords(print_function=False):
if print_function is True:
return [x for x in KEYWORDS if x != "print"]
else:
return KEYWORDS
def tokenize_generator(sequence, print_function=False):
    # pass print_function through so tokenize_current_keywords can honour it
    current_keywords = tokenize_current_keywords(print_function)
for item in sequence:
if item in current_keywords:
yield (item.upper(), item)
continue
for candidate, token_name in TOKENS:
if candidate.match(item):
                yield (token_name, item)
break
else:
raise UnknowItem("Can't find a matching token for this item: '%s'" % item)
yield ('ENDMARKER', '')
yield
|
jorrit-steporange/CumulusCI
|
ci/github/tag_to_tag.py
|
Python
|
bsd-3-clause
| 1,855
| 0.012399
|
import os
import sys
from github import Github
from github.GithubException import GithubException
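# Illustrative invocation (editor's sketch; every value below is a placeholder):
#   SRC_TAG=beta/1.2 TAG=release/1.2 ORG_NAME=SomeOrg REPO_NAME=SomeRepo \
#   USERNAME=ci-bot PASSWORD=... python tag_to_tag.py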
def tag_to_tag():
SRC_TAG=os.environ.get('SRC_TAG')
    ORG_NAME=os.environ.get('ORG_NAME')
REPO_NAME=os.environ.get('REPO_NAME')
USERNAME=os.environ.get('USERNAME')
PASSWORD=os.environ.get('PASSWORD')
TAG=os.environ.get('TAG')
print 'Attempting to create tag %s from tag %s' % (TAG, SRC_TAG)
g = Github(USERNAME,PASSWORD)
org = g.get_organization(ORG_NAME)
repo = org.get_repo(REPO_NAME)
    # Get the source tag by name, error if none found
src_tag = None
for tag in repo.get_tags():
print tag.name
if tag.name == SRC_TAG:
src_tag = tag
break
if not src_tag:
print 'No tag named %s found' % SRC_TAG
exit(1)
tag = repo.create_git_tag(TAG, 'Created from tag %s' % SRC_TAG, src_tag.commit.sha, 'commit')
print 'Tag Created:'
print tag._rawData
# Could not figure out how to look up the existing but decided against it
# anyhow as Jenkins shouldn't be rewriting git tags automatically. If a tag
# needs to be overwritten, it must first be manually deleted
# Delete the existing ref
#existing_ref = repo.get_git_ref('tag/%s' % TAG)
#if existing_ref:
# print 'Existing ref found, deleting it to set new one'
# existing_ref.delete()
ref = repo.create_git_ref('refs/tags/%s' % TAG, tag.sha)
print 'Ref Created:'
print ref._rawData
print 'SUCCESS'
if __name__ == '__main__':
try:
tag_to_tag()
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
print '-'*60
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
print '-'*60
sys.exit(1)
|
grimfang/quickShadows
|
src/game/input.py
|
Python
|
mit
| 4,080
| 0.002206
|
#!/usr/bin/python
################
# The MIT License (MIT)
#
# Copyright (c) <2013> <Martin de Bruyn>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
############################################################
#----------------------------------------------------------------------#
"""@ package Input
Keep all inputs here.
"""
# System imports
import logging as log
import sys
# Panda imports
from direct.showbase.InputStateGlobal import inputState
from direct.showbase.DirectObject import DirectObject
# MeoTech imports
#----------------------------------------------------------------------#
class InputHandler(DirectObject):
"""InputHandler.
Keyboard stuff
"""
def __init__(self, _game):
"""InputHandler INIT"""
# Game
self.game = _game
# Keyboard
inputState.watchWithModifiers('forward', 'w')
inputState.watchWithModifiers('left', 'a')
inputState.watchWithModifiers('reverse', 's')
inputState.watchWithModifiers('right', 'd')
inputState.watchWithModifiers('turnLeft', 'q')
inputState.watchWithModifiers('turnRight', 'e')
inputState.watchWithModifiers('space', 'space')
#inputState.watchWithModifiers('ctrl', 'lcontrol_down')
self.accept("mouse1", self.shootLight)
# App exit temp
base.accept("escape", sys.exit)
# mouse
self.winXhalf = base.win.getXSize()/2
self.winYhalf = base.win.getYSize()/2
# Should move the camera stuff to the baseCamera.py
base.camera.reparentTo(self.game.meotech.engine.GameObjects["player"].bulletBody)
base.camLens.setFov(90)
base.camLens.setNear(0.5)
self.mouseSpeedX = 15
self.mouseSpeedY = 0.2
self.camP = 10
def shootLight(self):
print "shoot"
cone = self.game.player.flashlightConeBody
base.messenger.send("shootLight", [cone])
def getMouse(self, dt):
player = self.game.meotech.engine.GameObjects["player"]
flashlight = self.game.player.flashlightConeBody
flashlight_lamp = self.game.player.flashlight
flashlight_light = self.game.player.flashlightLight
# Handle mouse
md = base.win.getPointer(0)
x = md.getX()
y = md.getY()
if base.win.movePointer(0, self.winXhalf, self.winYhalf):
omega = (x - self.winXhalf)*-self.mouseSpeedX
player.bulletBody.node().setAngularMovement(omega)
#flashlight.setH(flashlight, base.camera.getH())
cam = base.cam.getP() - (y - self.winYhalf) * self.mouseSpeedY
            flashlight.setHpr(base.cam.getHpr())
if cam <-80:
cam = -80
elif cam > 90:
cam = 90
base.cam.setP(cam)
flashlight.setP(cam + 90)
flashlight_lamp.setZ(flashlight.getZ() - 0.6)
            flashlight_lamp.setY(flashlight.getY() - 0.55)
flashlight_light.setHpr(flashlight_lamp.find("LightPos").getHpr() + 90)
|
chennan47/osf.io
|
api/files/views.py
|
Python
|
apache-2.0
| 5,890
| 0.002207
|
from rest_framework import generics
from rest_framework import permissions as drf_permissions
from rest_framework.exceptions import NotFound
from framework.auth.oauth_scopes import CoreScopes
from osf.models import (
Guid,
BaseFileNode,
FileVersion,
QuickFilesNode
)
from api.base.exceptions import Gone
from api.base.permissions import PermissionWithGetter
from api.base.throttling import CreateGuidThrottle, NonCookieAuthThrottle, UserRateThrottle
from api.base import utils
from api.base.views import JSONAPIBaseView
from api.base import permissions as base_permissions
from api.nodes.permissions import ContributorOrPublic
from api.nodes.permissions import ReadOnlyIfRegistration
from api.files.permissions import IsPreprintFile
from api.files.permissions import CheckedOutOrAdmin
from api.files.serializers import FileSerializer
from api.files.serializers import FileDetailSerializer, QuickFilesDetailSerializer
from api.files.serializers import FileVersionSerializer
class FileMixin(object):
"""Mixin with convenience methods for retrieving the current file based on the
current URL. By default, fetches the file based on the file_id kwarg.
"""
serializer_class = FileSerializer
file_lookup_url_kwarg = 'file_id'
def get_file(self, check_permissions=True):
try:
obj = utils.get_object_or_error(BaseFileNode, self.kwargs[self.file_lookup_url_kwarg], self.request, display_name='file')
except NotFound:
obj = utils.get_object_or_error(Guid, self.kwargs[self.file_lookup_url_kwarg], self.request).referent
if obj.is_deleted:
raise Gone(detail='The requested file is no longer available.')
if not isinstance(obj, BaseFileNode):
raise NotFound
if check_permissions:
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj
class FileDetail(JSONAPIBaseView, generics.RetrieveUpdateAPIView, FileMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/files_detail).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
IsPreprintFile,
CheckedOutOrAdmin,
base_permissions.TokenHasScope,
PermissionWithGetter(ContributorOrPublic, 'node'),
PermissionWithGetter(ReadOnlyIfRegistration, 'node'),
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = FileDetailSerializer
throttle_classes = (CreateGuidThrottle, NonCookieAuthThrottle, UserRateThrottle, )
view_category = 'files'
view_name = 'file-detail'
def get_serializer_class(self):
try:
node = self.get_node()
except (NotFound, Gone):
return FileDetailSerializer
else:
if isinstance(node, QuickFilesNode):
return QuickFilesDetailSerializer
return FileDetailSerializer
def get_node(self):
return self.get_file().node
# overrides RetrieveAPIView
def get_object(self):
user = utils.get_user_auth(self.request).user
file = self.get_file()
if self.request.GET.get('create_guid', False):
# allows quickfiles to be given guids when another user wants a permanent link to it
if (self.get_node().has_permission(user, 'admin') and utils.has_admin_scope(self.request)) or file.node.is_quickfiles:
file.get_guid(create=True)
return file
class FileVersionsList(JSONAPIBaseView, generics.ListAPIView, FileMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/files_versions).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PermissionWithGetter(ContributorOrPublic, 'node'),
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = FileVersionSerializer
view_category = 'files'
view_name = 'file-versions'
ordering = ('-modified',)
def get_queryset(self):
self.file = self.get_file()
return self.file.versions.all()
def get_serializer_context(self):
context = JSONAPIBaseView.get_serializer_context(self)
context['file'] = self.file
return context
def node_from_version(request, view, obj):
return view.get_file(check_permissions=False).node
class FileVersionDetail(JSONAPIBaseView, generics.RetrieveAPIView, FileMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/files_version_detail).
"""
version_lookup_url_kwarg = 'version_id'
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PermissionWithGetter(ContributorOrPublic, node_from_version)
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = FileVersionSerializer
view_category = 'files'
view_name = 'version-detail'
# overrides RetrieveAPIView
def get_object(self):
self.file = self.get_file()
maybe_version = self.file.get_version(self.kwargs[self.version_lookup_url_kwarg])
# May raise a permission denied
# Kinda hacky but versions have no reference to node or file
        self.check_object_permissions(self.request, self.file)
        return utils.get_object_or_error(FileVersion, getattr(maybe_version, '_id', ''), self.request)
def get_serializer_context(self):
        context = JSONAPIBaseView.get_serializer_context(self)
context['file'] = self.file
return context
|
zimenglan-sysu-512/pose_action_caffe
|
results/pic_iou_curve.py
|
Python
|
mit
| 7,311
| 0.045137
|
#!/usr/bin/env python
import os
import cv2
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
disp_n = 200
s_time = 3
radius = 3
thickness = 3
cls_color = (23, 119, 188)
colors = [
(0, 0, 255),
(0, 255, 0),
(255, 0, 0),
(23, 119, 188),
(222, 12, 39),
(122, 212, 139),
(20, 198, 68),
(111, 12, 139),
(131, 112, 179),
(31, 211, 79),
(131, 121, 179),
(31, 121, 192),
(192, 21, 92),
(192, 21, 192),
(216, 121, 92),
(16, 11, 62),
(16, 111, 162),
(96, 46, 12),
]
n_colors = len(colors)
def _mkdirs(path):
if not os.path.isdir(path):
os.makedirs(path)
# only one ground-truths for per image
def _read_gt(filepath):
'''format: imgidx objidx bbox cls'''
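  # Illustrative line (editor's note; the values are made up): the parser below
  # expects "imgidx objidx x1 y1 x2 y2 cls" with objidx always 0, e.g.
  #   img_00042 0 34 58 187 240 person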
pd_dt = {}
pd_c = 0
fh = open(filepath)
for line in fh.readlines():
pd_c = pd_c + 1
line = line.strip()
info = line.split()
assert len(info) >= 1
imgidx, info = info[0], info[1:]
assert len(info) == 6
imgidx = imgidx.strip()
objidx = info[0].strip()
x1 = info[1].strip()
y1 = info[2].strip()
x2 = info[3].strip()
y2 = info[4].strip()
cls = info[5].strip()
objidx = int(objidx)
assert objidx == 0
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
pd_dt[imgidx] = [x1, y1, x2, y2]
fh.close()
assert pd_c == len(pd_dt.keys())
return pd_dt
# multiple or one for prediction
def _read_pd(filepath, in_dire, is_in_dire=False):
'''format: imgidx score bbox cls'''
gt_dt = {}
gt_c = 0
fh = open(filepath)
imgidxs = []
for line in fh.readlines():
gt_c = gt_c + 1
line = line.strip()
info = line.split()
assert len(info) >= 1
im_path, info = info[0], info[1:]
assert len(info) == 6
im_path = im_path.strip()
score = info[0].strip()
x1 = info[1].strip()
y1 = info[2].strip()
x2 = info[3].strip()
y2 = info[4].strip()
cls = info[5].strip()
if is_in_dire:
im_name = im_path[len(in_dire):]
else:
      im_name = os.path.basename(im_path)
imgidx = im_name.strip().rsplit(".", 1)[0]
imgidx = imgidx.strip()
if imgidx in imgidxs:
print imgidx, line
imgidxs.append(imgidx)
score = float(score)
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
gt_dt[imgidx] = [x1, y1, x2, y2]
fh.close()
print len(imgidxs)
print len(set(imgidxs))
assert gt_c == len(gt_dt.keys()), "gt_c: %s, n_keys: %s" \
    % (gt_c, len(gt_dt.keys()))
return gt_dt
def _area(box):
assert len(box) == 4
w = box[2] - box[0] + 1
h = box[3] - box[1] + 1
a = w * h
assert a >= 0
return a
def _overlap(pd_box, gt_box):
pa = _area(pd_box)
ga = _area(gt_box)
x1 = max(pd_box[0], gt_box[0])
y1 = max(pd_box[1], gt_box[1])
x2 = min(pd_box[2], gt_box[2])
y2 = min(pd_box[3], gt_box[3])
if x1 > x2 or y1 > y2:
oa = 0
else:
oa = _area([x1, y1, x2, y2])
return oa / (pa + ga - oa + 0.0)
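# Worked example (editor's note): with pd_box = [0, 0, 9, 9] and
# gt_box = [5, 5, 14, 14], each box covers 10 x 10 = 100 pixels (the +1 in
# _area makes the bounds inclusive); the intersection [5, 5, 9, 9] covers
# 5 x 5 = 25, so _overlap returns 25 / (100 + 100 - 25) ~= 0.143.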
def _iou(pd_file, gt_file, in_dire, is_in_dire=False):
''''''
pd_dt = _read_pd(pd_file, in_dire, is_in_dire=is_in_dire)
gt_dt = _read_gt(gt_file)
assert len(pd_dt.keys()) == len(gt_dt.keys())
imgidxs = pd_dt.keys()
imgidxs.sort()
disp_c = 0
ovs = []
for imgidx in imgidxs:
disp_c += 1
if disp_c % disp_n == 0:
print "disp_c:", disp_c
pd_box = pd_dt[imgidx]
gt_box = gt_dt[imgidx]
ov = _overlap(pd_box, gt_box)
ovs.append(ov)
if disp_c % disp_n != 0:
print "disp_c:", disp_c
print "\n\nDone.\n\n"
return ovs
def _recall(ovs, thresolds):
n_ovs = len(ovs) # n_examples
n_thres = len(thresolds)
precision = np.zeros(n_thres) # np.zeros((n_thres,), dtype=np.int)
recall = np.zeros(n_thres) # np.zeros((n_thres,), dtype=np.int)
print recall.shape
for j in xrange(n_thres):
acc_c = 0
thres = thresolds[j]
for j2 in xrange(n_ovs):
ov = ovs[j2]
if ov > thres:
acc_c += 1
acc_c = acc_c / (n_ovs + 0.)
precision[j] = acc_c
recall[j] = acc_c
return recall
def _all_recall_pics(ovs_list, type_names, title, out_path=None, legend_loc="upper right"):
'''Plot Precision-Recall curve'''
plt.clf()
plt.grid(True)
plt.xlabel('IoU')
plt.ylabel('Recall')
# plt.ylim([0.0, 1.0])
# plt.xlim([0.5, 1.0])
n_dataset = len(ovs_list)
assert n_dataset == len(type_names)
thresolds = [j / 100.0 for j in xrange(50, 101, 1)]
for j in xrange(n_dataset):
ovs = ovs_list[j]
name = type_names[j]
recall = _recall(ovs, thresolds)
plt.plot(thresolds, recall, label=name)
plt.xticks(np.arange(0.50, 1.01, 0.05))
plt.yticks(np.arange(0.0, 1.01, 0.1))
plt.title(title)
plt.legend(loc=legend_loc)
plt.savefig(out_path)
if out_path is None:
plt.show()
else:
plt.savefig(out_path)
def torso_run():
''''''
ovs_list = []
type_names = []
out_path = "/pathTo/../res.pics/torso.recall.png"
## flic test
pd_file = "/pathTo/../dataset/FLIC/vision/flic_torso_test.txt"
gt_file = "/pathTo/../dataset/FLIC/labels/crop_test_torso_labels2.txt"
in_dire = "/pathTo/../dataset/FLIC/crop.images2/test/"
is_in_dire = False
type_names.append("FLIC Dataset")
ovs = _iou(pd_file, gt_file, in_dire, is_in_dire=is_in_dire)
ovs_list.append(ovs)
## bbc pose -> test & val
pd_file = "/pathTo/../dataset/bbc_pose/torso_masks/test_torso_results.txt"
gt_file = "/pathTo/../dataset/bbc_pose/labels/crop_test_torso.label"
in_dire = "/pathTo/../dataset/bbc_pose/crop.data/"
is_in_dire = True
type_names.append("BBC Pose Dataset")
ovs = _iou(pd_file, gt_file, in_dire, is_in_dire=is_in_dire)
ovs_list.append(ovs)
## kinect2
pd_file = "/pathTo/../dataset/Kinect2/torso_masks/test_torso_results.txt"
gt_file = "/pathTo/../dataset/Kinect2/labels/up.crop.color2_test_torso_l7.log"
in_dire = "/pathTo/../dataset/Kinect2/up.crop.color/"
is_in_dire = False
type_names.append("Kinect2 Dataset")
ovs = _iou(pd_file, gt_file, in_dire, is_in_dire=is_in_dire)
ovs_list.append(ovs)
# pic -> viz
title = 'Recall for Torso Detection'
_all_recall_pics(ovs_list, type_names, title, out_path=out_path)
def person_run():
''''''
ovs_list = []
type_names = []
out_path = "/pathTo/../res.pics/person.recall.png"
## bbc pose -> test & val
pd_file = "/pathTo/../dataset/bbc_pose/test_person_results.txt"
gt_file = "/pathTo/../dataset/bbc_pose/labels/pbbox_test_cls.txt"
in_dire = "/pathTo/../dataset/bbc_pose/data/"
is_in_dire = True
type_names.append("BBC Pose Dataset")
ovs = _iou(pd_file, gt_file, in_dire, is_in_dire=is_in_dire)
ovs_list.append(ovs)
## kinect2
pd_file = "/pathTo/../dataset/Kinect2/test_person_results.txt"
gt_file = "/pathTo/../dataset/Kinect2/labels/up.color2.pbbox.test.log"
in_dire = "/pathTo/../dataset/Kinect2/up.color/"
is_in_dire = False
type_names.append("Kinect2 Dataset")
ovs = _iou(pd_file, gt_file, in_dire, is_in_dire=is_in_dire)
ovs_list.append(ovs)
# pic -> viz
title = 'Recall for Person Detection'
_all_recall_pics(ovs_list, type_names, title, out_path=out_path, legend_loc="lower left")
if __name__ == '__main__':
''''''
# torso_run()
person_run()
|
prechelt/typecheck-decorator
|
setup.py
|
Python
|
bsd-2-clause
| 3,969
| 0.007055
|
# based on https://github.com/pypa/sampleproject/blob/master/setup.py
# see http://packaging.python.org/en/latest/tutorial.html#creating-your-own-project
from setuptools import setup, find_packages
from setuptools.command.install import install as stdinstall
import codecs
import os
import re
import sys
def find_version(*file_paths):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f:
version_file = f.read()
# The version line must have the form
# __version__ = 'ver'
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def get_file_contents(filename):
with codecs.open(filename, encoding='utf-8') as f:
contents = f.read()
return contents
package_name = "typecheck-decorator"
class install_with_test(stdinstall):
def run(self):
stdinstall.run(self) # normal install
##pip/setuptools makes this unbuffering unhelpful:
        #sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 1) # make line-buffered
#sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 1) # make line-buffered
#import typecheck.test_typecheck_decorator # execute post-install test (during beta only)
setup(
# setup customization:
cmdclass={'install': install_with_test},
# basic information:
name=package_name,
version=find_version('typecheck', '__init__.py'),
description="flexible explicit run-time type checking of function arguments (Python3-only)",
long_description=get_file_contents("README.rst"),
# The project URL:
url='http://github.com/prechelt/' + package_name,
# Author details:
author='Dmitry Dvoinikov, Lutz Prechelt',
author_email='prechelt@inf.fu-berlin.de',
# Classification:
license='BSD License',
classifiers=[
'License :: OSI Approved :: BSD License',
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Documentation',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='type-checking',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=find_packages(exclude=["contrib", "docs", "tests*"]),
# List run-time dependencies here. These will be installed by pip when your
# project is installed.
install_requires = ['typing;python_version<"3.5"'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
# 'typecheck': ['package_data.dat'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
###data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
### entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
|
MartinHjelmare/home-assistant
|
homeassistant/helpers/script.py
|
Python
|
apache-2.0
| 12,633
| 0
|
"""Helpers to execute scripts."""
import logging
from contextlib import suppress
from itertools import islice
from typing import Optional, Sequence
import voluptuous as vol
from homeassistant.core import HomeAssistant, Context, callback
from homeassistant.const import CONF_CONDITION, CONF_TIMEOUT
from homeassistant import exceptions
from homeassistant.helpers import (
service, condition, template as template,
config_validation as cv)
from homeassistant.helpers.event import (
async_track_point_in_utc_time, async_track_template)
from homeassistant.helpers.typing import ConfigType
import homeassistant.util.dt as date_util
from homeassistant.util.async_ import (
run_coroutine_threadsafe, run_callback_threadsafe)
_LOGGER = logging.getLogger(__name__)
CONF_ALIAS = 'alias'
CONF_SERVICE = 'service'
CONF_SERVICE_DATA = 'data'
CONF_SEQUENCE = 'sequence'
CONF_EVENT = 'event'
CONF_EVENT_DATA = 'event_data'
CONF_EVENT_DATA_TEMPLATE = 'event_data_template'
CONF_DELAY = 'delay'
CONF_WAIT_TEMPLATE = 'wait_template'
CONF_CONTINUE = 'continue_on_timeout'
ACTION_DELAY = 'delay'
ACTION_WAIT_TEMPLATE = 'wait_template'
ACTION_CHECK_CONDITION = 'condition'
ACTION_FIRE_EVENT = 'event'
ACTION_CALL_SERVICE = 'call_service'
def _determine_action(action):
"""Determine action type."""
if CONF_DELAY in action:
return ACTION_DELAY
if CONF_WAIT_TEMPLATE in action:
return ACTION_WAIT_TEMPLATE
if CONF_CONDITION in action:
return ACTION_CHECK_CONDITION
if CONF_EVENT in action:
return ACTION_FIRE_EVENT
return ACTION_CALL_SERVICE
def call_from_config(hass: HomeAssistant, config: ConfigType,
variables: Optional[Sequence] = None,
context: Optional[Context] = None) -> None:
"""Call a script based on a config entry."""
Script(hass, cv.SCRIPT_SCHEMA(config)).run(variables, context)
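# Illustrative script sequence (editor's sketch; entity ids and values are
# made up) using the CONF_* keys handled above:
#
#   sequence:
#     - service: light.turn_on
#       data:
#         entity_id: light.kitchen
#     - delay: '00:00:05'
#     - wait_template: "{{ is_state('binary_sensor.door', 'off') }}"
#     - event: custom_event
#       event_data:
#         done: true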
class _StopScript(Exception):
"""Throw if script needs to stop."""
class _SuspendScript(Exception):
"""Throw if script needs to suspend."""
class Script():
"""Representation of a script."""
def __init__(self, hass: HomeAssistant, sequence, name: str = None,
change_listener=None) -> None:
"""Initialize the script."""
self.hass = hass
self.sequence = sequence
template.attach(hass, self.sequence)
self.name = name
self._change_listener = change_listener
self._cur = -1
self._exception_step = None
self.last_action = None
self.last_triggered = None
self.can_cancel = any(CONF_DELAY in action or CONF_WAIT_TEMPLATE
in action for action in self.sequence)
self._async_listener = []
self._template_cache = {}
self._config_cache = {}
self._actions = {
ACTION_DELAY: self._async_delay,
ACTION_WAIT_TEMPLATE: self._async_wait_template,
ACTION_CHECK_CONDITION: self._async_check_condition,
ACTION_FIRE_EVENT: self._async_fire_event,
ACTION_CALL_SERVICE: self._async_call_service,
}
@property
def is_running(self) -> bool:
"""Return true if script is on."""
return self._cur != -1
def run(self, variables=None, context=None):
"""Run script."""
run_coroutine_threadsafe(
self.async_run(variables, context), self.hass.loop).result()
async def async_run(self, variables: Optional[Sequence] = None,
context: Optional[Context] = None) -> None:
"""Run script.
This method is a coroutine.
"""
self.last_triggered = date_util.utcnow()
if self._cur == -1:
self._log('Running script')
self._cur = 0
# Unregister callback if we were in a delay or wait but turn on is
# called again. In that case we just continue execution.
self._async_remove_listener()
for cur, action in islice(enumerate(self.sequence), self._cur, None):
try:
await self._handle_action(action, variables, context)
except _SuspendScript:
# Store next step to take and notify change listeners
self._cur = cur + 1
if self._change_listener:
self.hass.async_add_job(self._change_listener)
return
except _StopScript:
break
except Exception:
# Store the step that had an exception
self._exception_step = cur
# Set script to not running
self._cur = -1
self.last_action = None
# Pass exception on.
raise
# Set script to not-running.
self._cur = -1
self.last_action = None
if self._change_listener:
self.hass.async_add_job(self._change_listener)
def stop(self) -> None:
"""Stop running script."""
run_callback_threadsafe(self.hass.loop, self.async_stop).result()
def async_stop(self) -> None:
"""Stop running script."""
if self._cur == -1:
return
self._cur = -1
self._async_remove_listener()
if self._change_listener:
self.hass.async_add_job(self._change_listener)
@callback
def async_log_exception(self, logger, message_base, exception):
"""Log an exception for this script.
        Should only be called on exceptions raised by this script's async_run.
"""
# pylint: disable=protected-access
step = self._exception_step
action = self.sequence[step]
action_type = _determine_action(action)
error = None
meth = logger.error
if isinstance(exception, vol.Invalid):
error_desc = "Invalid data"
elif isinstance(exception, exceptions.TemplateError):
error_desc = "Error rendering template"
elif isinstance(exception, exceptions.Unauthorized):
error_desc = "Unauthorized"
elif isinstance(exception, exceptions.ServiceNotFound):
error_desc = "Service not found"
else:
# Print the full stack trace, unknown error
error_desc = 'Unknown error'
            meth = logger.exception
error = ""
if error is None:
error = str(exception)
meth("%s. %s for %s at pos %s: %s",
message_base, error_desc, action_type, step + 1, error)
async def _handle_action(self, action, variables, context):
"""Handle an action."""
await self._actions[_determine_action(action)](
action, variables, context)
async def _async_delay(self, action, variables, context):
"""Handle delay."""
# Call ourselves in the future to continue work
unsub = None
@callback
def async_script_delay(now):
"""Handle delay."""
# pylint: disable=cell-var-from-loop
with suppress(ValueError):
self._async_listener.remove(unsub)
self.hass.async_create_task(
self.async_run(variables, context))
delay = action[CONF_DELAY]
try:
if isinstance(delay, template.Template):
delay = vol.All(
cv.time_period,
cv.positive_timedelta)(
delay.async_render(variables))
elif isinstance(delay, dict):
delay_data = {}
delay_data.update(
template.render_complex(delay, variables))
delay = cv.time_period(delay_data)
except (exceptions.TemplateError, vol.Invalid) as ex:
_LOGGER.error("Error rendering '%s' delay template: %s",
self.name, ex)
raise _StopScript
self.last_action = action.get(
CONF_ALIAS, 'delay {}'.format(delay))
self._log("Executing step %s" % self.last_action)
unsub = async_track_point_in_utc_time(
self.hass, async_script_delay,
|
iulian787/spack
|
var/spack/repos/builtin/packages/r-rvcheck/package.py
|
Python
|
lgpl-2.1
| 958
| 0.003132
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRvcheck(RPackage):
    """Check latest release version of R and R package (both in 'CRAN',
'Bioconductor' or 'Github')."""
homepage = "https://cloud.r-project.org/package=rvcheck"
url = "https://cloud.r-project.org/src/contrib/rvcheck_0.0.9.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/rvcheck"
version('0.1.3', sha256='0b59986c1ccc5b89f8aca8fa7cf62d0b875719addb40e08dbda1791cfd334fc4')
version('0.0.9', sha256='6e7be7b029d28181a1b57ebd4d25978f3459722ffdb45a3698157a7f943bea92')
depends_on('r@3.3.0:', when='@:0.1.1', type=('build', 'run'))
depends_on('r@3.4.0:', when='@0.1.3:', type=('build', 'run'))
depends_on('r-rlang', when='@0.1.1:', type=('build', 'run'))
|
davmre/bayesflow
|
elbow/util/__init__.py
|
Python
|
bsd-3-clause
| 77
| 0
|
import numpy as np
import tensorflow as tf
import dists
from misc import *
| |
chrislit/abydos
|
abydos/distance/_roberts.py
|
Python
|
gpl-3.0
| 3,233
| 0
|
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._roberts.
Roberts similarity
"""
from typing import Any, Optional
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['Roberts']
class Roberts(_TokenDistance):
r"""Roberts similarity.
For two multisets X and Y drawn from an alphabet S, Roberts similarity
:cite:`Roberts:1986` is
.. math::
sim_{Roberts}(X, Y) =
\frac{\Big[\sum_{i \in S} (X_i + Y_i) \cdot
\frac{min(X_i, Y_i)}{max(X_i, Y_i)}\Big]}
{\sum_{i \in S} (X_i + Y_i)}
.. versionadded:: 0.4.0
"""
def __init__(
self, tokenizer: Optional[_Tokenizer] = None, **kwargs: Any
) -> None:
"""Initialize Roberts instance.
Parameters
----------
tokenizer : _Tokenizer
A tokenizer instance from the :py:mod:`abydos.tokenizer` package
**kwargs
Arbitrary keyword arguments
Other Parameters
----------------
qval : int
The length of each q-gram. Using this parameter and tokenizer=None
will cause the instance to use the QGram tokenizer with this
q value.
.. versionadded:: 0.4.0
"""
super(Roberts, self).__init__(tokenizer=tokenizer, **kwargs)
def sim(self, src: str, tar: str) -> float:
"""Return the Roberts similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
Roberts similarity
Examples
--------
>>> cmp = Roberts()
>>> cmp.sim('cat', 'hat')
0.5
>>> cmp.sim('Niall', 'Neil')
0.36363636363636365
>>> cmp.sim('aluminum', 'Catalan')
0.11764705882352941
>>> cmp.sim('ATCG', 'TAGC')
0.0
.. versionadded:: 0.4.0
"""
if src == tar:
return 1.0
self._tokenize(src, tar)
alphabet = self._total().keys()
return sum(
(self._src_tokens[i] + self._tar_tokens[i])
* min(self._src_tokens[i], self._tar_tokens[i])
/ max(self._src_tokens[i], self._tar_tokens[i])
for i in alphabet
) / sum((self._src_tokens[i] + self._tar_tokens[i]) for i in alphabet)
if __name__ == '__main__':
import doctest
doctest.testmod()
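# Added worked example (not in the original source): assuming the default
# tokenizer yields padded bigrams, 'cat' -> {$c, ca, at, t#} and
# 'hat' -> {$h, ha, at, t#}. Applying the formula in the class docstring:
#   numerator   = (1+1)*1/1 for 'at' + (1+1)*1/1 for 't#' + 0 for the rest = 4
#   denominator = sum of all counts = 8
# giving sim('cat', 'hat') = 4/8 = 0.5, which matches the doctest above.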
|
Ebag333/Pyfa
|
eos/effects/subsystembonusgallenteelectronic2tractorbeamvelocity.py
|
Python
|
gpl-3.0
| 458
| 0.004367
|
# subSystemBonusGallenteElectronic2TractorBeamVelocity
#
# Used by:
# Subsystem: Proteus Electronics - Emergent Locus Analyzer
type = "passive"
def handler(fit, module, context):
    fit.modules.filteredItemBoost(lambda mod: mod.item.group.name == "Tractor Beam",
                                   "maxTractorVelocity", module.getModifiedItemAttr("subsystemBonusGallenteElectronic2"),
skill="Gallente Electronic Systems")
|
akhilari7/pa-dude
|
lib/python2.7/site-packages/image/video_field.py
|
Python
|
mit
| 1,087
| 0
|
from django.db import models
from django.db.models.fields.files import FieldFile
from django.core.files import File
def get_video_dimensions(path):
from ffvideo import VideoStream
vs = VideoStream(path)
return (vs.frame_width, vs.frame_height)
class VideoFile(File):
"""
    A mixin for use alongside django.core.files.base.File, which provides
additional features for dealing with images.
"""
def _get_width(self):
return self._get_video_dimensions()[0]
width = property(_get_width)
def _get_height(self):
return self._get_video_dimensions()[1]
height = property(_get_height)
def _get_video_dimensions(self):
if not hasattr(self, '_dimensions_cache'):
close = self.closed
self.open()
            self._dimensions_cache = get_video_dimensions(self.path)
return self._dimensions_cache
# A video field is exactly a file field with a different signature
class VideoFieldFile(VideoFile, FieldFile):
pass
class VideoField(models.FileField):
attr_class = VideoFieldFile
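# Hypothetical usage sketch (added; the model and field names are illustrative
# assumptions, not part of this module):
#
#     class Clip(models.Model):
#         video = VideoField(upload_to='clips/')
#
#     clip = Clip.objects.first()
#     print(clip.video.width, clip.video.height)  # dimensions probed lazily via ffvideo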
|
OuterDeepSpace/OuterDeepSpace
|
generators/osgen2.py
|
Python
|
gpl-2.0
| 27,347
| 0.037774
|
#
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import pygame, pygame.draw, pygame.event
from pygame.locals import *
import random, math, sys
# galaxy specification
sectorSize = [10, 10]
sectorsOffset = [0, 0]
galaxyID = 'Center90'
sectorsSpec = [
[ (0,0), (9,1), (0,0)],
[ (9,1), (9,0), (9,1)],
[ (0,0), (9,1), (0,0)],
]
if 0: # small galaxy
galaxyID = 'Circle4P'
galaxyCenter = (20.0, 20.0)
galaxyRadius = 20.0
galaxyStartR = (14.0, 16.0)
galaxyPlayers = 16
galaxyPlayerGroup = 2
galaxyGroupDist = 1.0
galaxyMinR = 5
galaxyDensity = {5: 3, 10: 3, 15: 3}
galaxyResources = {
# format resourceID : (minDist, maxDist, number of resources)
1 : (12, 15, 0), # TL 1 + 2
2 : (12, 15, 0), # TL 1 + 2
3 : (8, 11, 0), # TL 3 + 4
4 : (8, 11, 0), # TL 3 + 4
5 : (8, 11, 0), # TL 3 + 4
6 : (5, 6, 0), # TL 5
7 : (5, 6, 0), # TL 5
8 : (5, 6, 0), # TL 5
}
galaxyDiseases = {
# format diseaseID : (minDist, maxDist, number of diseases)
1 : (12, 15, 0), # TL 1 + 2
2 : (12, 15, 0), # TL 1 + 2
3 : (8, 11, 0), # TL 3 + 4
    4 : (8, 11, 0), # TL 3 + 4
5 : (8, 11, 0), # TL 3 + 4
6 : (5, 6, 0), # TL 5
7 : (5, 6, 0), # TL 5
8 : (5, 6, 0), # TL 5
}
if 0: # THIS IS THE RECOMMENDED MEDIUM GALAXY
galaxyID = 'Circle42P'
galaxyCenter = (50.0, 50.0)
galaxyRadius = 50.0
galaxyStartR = (32.0, 36.0)
#galaxyPlayers = 30
#galaxyPlayerGroup = 2
galaxyPlayers = 42
galaxyPlayerGroup = 3
galaxyGroupDist = 4.0
galaxyMinR = 7.5
galaxyDensity = {7.5: 3, 10: 4, 20: 5, 30: 5.5, 40: 6, 50: 6}
    galaxyResources = {
# format resourceID : (minDist, maxDist, number of resources)
1 : (20, 45, 15), # TL 1 + 2
2 : (20, 45, 15), # TL 1 + 2
3 : (8, 15, 7), # TL 3 + 4
4 : (8, 15, 7), # TL 3 + 4
5 : (8, 15, 7), # TL 3 + 4
6 : (7.5, 9, 1), # TL 5
7 : (7.5, 9, 1), # TL 5
8 : (7.5, 9, 1), # TL 5
}
galaxyDiseases = {
# format diseaseID : (minDist, maxDist, number of diseases)
1 : (20, 45, 8), # TL 1 + 2
2 : (20, 45, 8), # TL 1 + 2
3 : (5, 15, 4), # TL 3 + 4
4 : (5, 15, 4), # TL 3 + 4
5 : (5, 15, 4), # TL 3 + 4
6 : (0, 5, 1), # TL 5
7 : (0, 5, 1), # TL 5
8 : (0, 5, 1), # TL 5
}
if 0: # Large Galaxy
galaxyID = 'Circle65P'
galaxyCenter = (75.0, 75.0)
galaxyRadius = 75.0
galaxyStartR = (45.0, 52.5)
galaxyPlayers = 65
galaxyPlayerGroup = 5
#galaxyPlayers = 48
#galaxyPlayerGroup = 4
galaxyGroupDist = 8
galaxyMinR = 7.5
galaxyDensity = {7.5: 3, 10: 4, 20: 5, 30: 5.5, 60: 6, 75: 6}
galaxyResources = {
# format resourceID : (minDist, maxDist, number of resources)
1 : (20, 67.5, 45), # TL 1 + 2
2 : (20, 67.5, 45), # TL 1 + 2
3 : (10, 20, 10), # TL 3 + 4
4 : (10, 20, 10), # TL 3 + 4
5 : (10, 20, 10), # TL 3 + 4
6 : (7.5, 9, 1), # TL 5
7 : (7.5, 9, 1), # TL 5
8 : (7.5, 9, 1), # TL 5
}
galaxyDiseases = {
# format diseaseID : (minDist, maxDist, number of diseases)
1 : (20, 67.5, 16), # TL 1 + 2
2 : (20, 67.5, 16), # TL 1 + 2
3 : (5, 15, 4), # TL 3 + 4
4 : (5, 15, 4), # TL 3 + 4
5 : (5, 15, 4), # TL 3 + 4
6 : (0, 5, 1), # TL 5
7 : (0, 5, 1), # TL 5
8 : (0, 5, 1), # TL 5
}
class Galaxy:
def __init__(self):
self.systems = []
self.centerX = 0.0
self.centerY = 0.0
self.radius = 0.0
class System:
def __init__(self):
self.x = 0.0
self.y = 0.0
self.name = '?'
self.compOf = None
self.starClass = '?'
self.starSubclass = 0
self.planets = []
self._closest = []
self.hasSR = 0
self.hasDisease = 0
self._moveable = 1
class Planet:
def __init__(self):
self.compOf = None
self.type = '?'
self.diameter = 0
self.minerals = 0
self.environ = 0
self.energy = 0
self.slots = 0
self.maxSlots = 0
self.starting = 0
self.strategicRes = 0
self.disease = 0
def generateGalaxy(galaxy):
secX = 0
for sectors in sectorsSpec:
secY = 0
for sector, starting in sectors:
minX = secX * sectorSize[0] + sectorsOffset[0]
maxX = minX + sectorSize[0]
minY = secY * sectorSize[1] + sectorsOffset[1]
maxY = minY + sectorSize[1]
for i in xrange(0, sector):
system = System()
galaxy.systems.append(system)
system.x = random.uniform(minX, maxX)
system.y = random.uniform(minY, maxY)
system.compOf = galaxy
generateSystem(system)
for i in xrange(0, starting):
x = random.uniform(minX, maxX)
y = random.uniform(minY, maxY)
galaxy.systems.append(generateStartingSystem(galaxy, x, y))
secY += 1
secX += 1
def generateStartingSystem(galaxy, x, y):
while 1:
system = System()
system.x = x
system.y = y
system.compOf = galaxy
generateSystem(system)
# check system properties
e = 0
h = 0
d = 0
ok = 1
for planet in system.planets:
if planet.type == 'E': e += 1; planet.starting = 1
elif planet.type in ('D', 'R', 'C'):
if planet.slots > 5: d += 1
else: ok = 0; break
elif planet.type == 'H': h += 1
elif planet.type == 'M': ok = 0; break
if ok and e == 1 and h == 1 and d == 1:
break
return system
def generateGalaxy2(galaxy):
galaxy.centerX = galaxyCenter[0]
galaxy.centerY = galaxyCenter[1]
galaxy.radius = galaxyRadius
r = galaxyMinR + random.uniform(0, 0.5)
dkeys = galaxyDensity.keys()
dkeys.sort()
dkeys.reverse()
prevR = 5
while r <= galaxyRadius:
for key in dkeys:
if key <= r:
density = galaxyDensity[key]
break
print r, density
d = 2 * math.pi * r
aoff = random.uniform(0, math.pi * 2)
dangle = density / d * math.pi * 0.9
for i in range(0, d / density):
angle = aoff + i * density / d * math.pi * 2
angle += random.uniform(-dangle, dangle)
tr = random.uniform(prevR + 0.1, r)
while 1:
acceptable = 0
system = System()
generateSystem(system)
# check requirements
for planet in system.planets:
if planet.type in ('D', 'R', 'C', 'H', 'M', 'E') and \
planet.slots > 0:
acceptable = 1
break
if acceptable:
break
galaxy.systems.append(system)
system.x = math.cos(angle) * tr + galaxyCenter[0]
system.y = math.sin(angle) * tr + galaxyCenter[1]
system.compOf = galaxy
system.dist = tr
system.angle = angle
prevR = r
r += random.uniform(2, 4)
# generate central black hole
system = System()
system.x = galaxyCenter[0]
system.y = galaxyCenter[1]
system.starClass = "b-"
system.starSubclass = 7
system.compOf = galaxy
system._moveable = 0
galaxy.systems.append(system)
# generate starting systems
if galaxyPlayers:
r = (galaxyStartR[0] + galaxyStartR[1]) / 2
d = 2 * math.pi * r
print "Player distance:", d / galaxyPlayers
gaoff = random.uniform(0, math.pi * 2)
for i in range(0, galaxyPlayers / galaxyPlayerGroup):
print "Placing group:", i + 1, "of", galaxyPlayers / galaxyPlayerGroup
angle = gaoff + i * math.pi * 2 / (galaxyPlayers / galaxyPlayerGroup)
tr = random.uniform(galaxyStartR[0], galaxyStartR[1])
gx = math.cos(angle) * tr + galaxyCenter[0]
gy = math.sin(angle) * tr + galaxyCenter[1]
aoff = random.uniform(0, math.pi * 2)
for j in range(0, galaxyPlayerGroup):
angle = aoff + j * math.pi * 2 / galaxyPlayerGroup
x = math.cos(angle) * galaxyGroupDist + gx
y = math.sin(angle) * galaxyGroupDist + gy
system = generateStartingSystem(galaxy, x, y)
galaxy.systems.append(system)
# strategic resources
keys = galaxyResources.keys()
keys.sort()
keys.reverse()
for key in keys:
print "Placing resource", key
minR, maxR, count = galaxyRe
|
TobyRoseman/PS4M
|
engine/sourceManager.py
|
Python
|
mit
| 1,630
| 0.007362
|
from data.database.sourceGroupAssignmentTable import getSourceIdToAssignedGroups
from data.database.sourceGroupTable import getAllSourceGroupNames
from data.database.sourceTable import getAllSources
(categoryToSourceObjects, sourceCategoryNames, sourceIdToAssignments, sourceIdToSourceObject,
unCategorizedSource) = ({}, None, None, None, None)
def getSourceById(sourceId):
return sourceIdToSourceObject[sourceId]
def getSourceCategoryNames():
return sourceCategoryNames
def getSources(categoryName):
return categoryToSourceObjects[categoryName]
def getUncategorizedSource():
return unCategorizedSource
def _addToCategoryLookup(source):
global categoryToSourceObjects
for c in source.categories:
if c in categoryToSourceObjects:
categoryToSourceObjects[c].append(source)
else:
categoryToSourceObjects[c] = [source]
def __sourceToCategorys(source):
source_id = source.lookupId
if (source_id in sourceIdToAssignments):
return sourceIdToAssignments[source_id]
else:
return []
def initSourceManager():
global sourceCategoryNames, sourceIdToAssignments, sourceIdToSourceObject, unCategorizedSource
unCategorizedSource = []
sourceCategoryNames = getAllSourceGroupNames()
sourceIdToAssignments = getSourceIdToAssignedGroups()
sourceIdToSourceObject = {}
for s in getAllSources():
s.categories = __sourceToCategorys(s)
        sourceIdToSourceObject[s.lookupId] = s
        if(len(s.categories) != 0):
_addToCategoryLookup(s)
else:
unCategorizedSource.append(s)
|
MOOCworkbench/MOOCworkbench
|
user_manager/views.py
|
Python
|
mit
| 7,680
| 0.004297
|
import logging
import math
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models import Q
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import View
from experiments_manager.models import Experiment
from marketplace.models import (ExternalPackage, InternalPackage, Package,
PackageResource, PackageVersion)
from .forms import RegisterForm, WorkbenchUserForm
from .models import WorkbenchUser, get_workbench_user
logger = logging.getLogger(__name__)
@login_required
def index(request):
workbench_user = WorkbenchUser.objects.get(user=request.user)
experiments = Experiment.objects.filter(owner=workbench_user).order_by('-created')[:5]
packages = InternalPackage.objects.filter(owner=workbench_user).order_by('-created')[:5]
logger.info('%s accessed index', workbench_user)
recent_versions = list(PackageVersion.objects.all().order_by('-created')[:5])
recent_resources = list(PackageResource.objects.all().order_by('-created')[:5])
recent_internal = list(InternalPackage.objects.all().order_by('-created')[:5])
recent_external = list(ExternalPackage.objects.all().order_by('-created')[:5])
recent_experiments = list(Experiment.objects.filter(public=True).order_by('created')[:5])
total_list = recent_versions + recent_resources + recent_internal + recent_external + recent_experiments
total_list = reversed(sorted(total_list, key=lambda x: x.created))
return render(request, 'index.html', {'experiments': experiments,
'packages': packages,
'activities': total_list})
class DetailProfileView(View):
def get(self, request):
workbench_user = get_workbench_user(request.user)
return render(request, "user_manager/workbenchuser_detail.html", {'workbench_user': workbench_user})
class EditProfileView(View):
def get(self, request):
workbench_user = get_workbench_user(request.user)
form = WorkbenchUserForm(instance=workbench_user)
logger.info('%s edit get profile view', workbench_user)
return render(request, "user_manager/workbenchuser_edit.html", {'form': form})
def post(self, request):
workbench_user = get_workbench_user(request.user)
form = WorkbenchUserForm(request.POST, instance=workbench_user)
if form.is_valid():
current_password = form.cleaned_data['current_password']
user = workbench_user.user
if current_password:
if user.check_password(current_password) and change_password_of_user(workbench_user, form):
messages.add_message(request, messages.SUCCESS, 'Your password has been changed.')
else:
messages.add_message(request, messages.ERROR, 'Passwords did not match '
'or incorrect current password.')
return render(request, "user_manager/workbenchuser_edit.html", {'form': form})
form.save()
logger.info('%s edited profile successfully', workbench_user)
return redirect(to='/')
else:
return render(request, "user_manager/workbenchuser_edit.html", {'form': form})
def change_password_of_user(w_user, form):
new_password = form.cleaned_data['new_password']
new_password_again = form.cleaned_data['new_password_again']
if new_password == new_password_again:
user = w_user.user
user.set_password(new_password)
user.save()
return True
return False
class RegisterView(View):
def get(self, request):
form = RegisterForm()
return render(request, 'user_manager/register.html', {'form': form})
def post(self, request):
form = RegisterForm(self.request.POST)
if form.is_valid():
new_email = form.cleaned_data['email']
if not existing_user_check(new_email):
user = User.objects.create_user(form.cleaned_data['username'],
new_email,
form.cleaned_data['password'])
workbench_user = WorkbenchUser.objects.get(user=user)
workbench_user.netid = form.cleaned_data['netid']
workbench_user.save()
logger.info('new user created: %s', workbench_user)
return redirect(to='/')
else:
return render(request, 'user_manager/register.html', {'form': form})
else:
return render(request, 'user_manager/register.html', {'form': form})
def existing_user_check(email_address):
return User.objects.filter(email=email_address)
class WorkbenchUserDetailView(View):
def get(self, request, username):
workbench_user = get_object_or_404(WorkbenchUser, user__username=username)
recent_experiments = Experiment.objects.filter(owner=workbench_user, completed=True).order_by('-created')[:5]
recent_packages = Package.objects.filter(owner=workbench_user).order_by('-created')[:5]
return render(request, "user_manager/user_profile.html", {'w_user': workbench_user,
                                                                   'experiments': recent_experiments,
'packages': recent_packages})
@login_required
def search(request):
if 'q' in request.GET:
q = request.GET.get('q')
page = request.GET.get('page')
page = int(page) if page is not None else 1
results, nr_of_pages = get_search_results(request.user, q, page)
return render(request, 'search.html', {'results': results, 'query': q, 'page': page,
'next_page': page + 1,
'previous_page': page - 1,
'nr_of_pages': nr_of_pages,
'nr_of_pages_range': range(1, nr_of_pages+1)})
return render(request, 'search.html', {})
def get_search_results(user, q, page_nr=1, page_size=25):
start_value = (page_nr - 1) * page_size
end_value = start_value + page_size
search_query_list = build_search_queries(q, user)
total_count = sum([x.count() for x in search_query_list])
nr_of_pages = int(math.ceil(total_count / page_size))
total_list = [list(x.order_by('-created')[start_value:end_value]) for x in search_query_list]
total_flat_list = [item for sublist in total_list for item in sublist]
total_flat_list = sorted(total_flat_list, key=lambda x: x.created)
return total_flat_list, nr_of_pages
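# Added arithmetic illustration (not part of the original source): with the
# default page_size of 25, page_nr=3 gives start_value=50 and end_value=75;
# if total_count were 60, nr_of_pages = ceil(60 / 25) = 3.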
def build_search_queries(q, user):
package_version_query = PackageVersion.objects.filter(version_nr__contains=q)
package_resource_query = PackageResource.objects.filter(title__contains=q)
internal_package_query = InternalPackage.objects.filter(name__contains=q)
external_package_query = ExternalPackage.objects.filter(name__contains=q)
users_query = WorkbenchUser.objects.filter(user__username=q)
experiment_query = Experiment.objects.filter(Q(owner__user=user, title__contains=q) |
Q(completed=True, title__contains=q))
return package_version_query, package_resource_query, internal_package_query, external_package_query, \
experiment_query, users_query
|
alphacharlie/mlxd
|
mlxview.py
|
Python
|
gpl-2.0
| 3,859
| 0.007256
|
#!/usr/bin/env python
#Demo code
#
# simple demonstration script showing real-time thermal Imaging
# using the MLX90620 16x4 thermopile array and the mlxd daemon
#
# Copyright (C) 2015 Chuck Werbick
#
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import time
import picamera
import numpy as np
import subprocess
import os, sys
import datetime
import skimage
from skimage import io, exposure, transform, img_as_float, img_as_ubyte
from time import sleep
import matplotlib
import matplotlib.pyplot as plt
# IR registration parameters
ROT = np.deg2rad(90)
SCALE = (36.2, 36.4)
OFFSET = (530, 170)
def getImage():
fn = r'/home/pi/tmp.jpg';
proc = subprocess.Popen('raspistill -o %s -w 640 -h 480 -n -t 3' %(fn),
shell=True, stderr=subprocess.STDOUT)
proc.wait()
im = io.imread(fn, as_grey=True)
im = exposure.equalize_hist(im)
return skimage.img_as_ubyte(im)
im = getImage()
with picamera.PiCamera() as camera:
camera.resolution = (640, 480)
camera.framerate = 20
camera.start_preview()
# get the temperature array, and align with the image
fifo = open('/var/run/mlx90620.sock', 'r')
# get the whole FIFO
ir_raw = fifo.read()
# trim to 128 bytes
ir_trimmed = ir_raw[0:128]
# go all numpy on it
ir = np.frombuffer(ir_trimmed, np.uint16)
# set the array shape to the sensor shape (16x4)
ir = ir.reshape((16, 4))[::-1, ::-1]
ir = img_as_float(ir)
# stretch contrast on our heat map
p2, p98 = np.percentile(ir, (2, 98))
ir = exposure.rescale_intensity(ir, in_range=(p2, p98))
# increase even further? (optional)
# ir = exposure.equalize_hist(ir)
# turn our array into pretty colors
cmap = plt.get_cmap('spectral')
rgba_img = cmap(ir)
rgb_img = np.delete(rgba_img, 3, 2)
# align the IR array with the camera
tform = transform.AffineTransform(scale=SCALE, rotation=ROT, translation=OFFSET)
ir_aligned = transform.warp(rgb_img, tform.inverse, mode='constant', output_shape=im.shape)
# turn it back into a ubyte so it'll display on the preview overlay
ir_byte = img_as_ubyte(ir_aligned)
#add the overlay
o = camera.add_overlay(np.getbuffer(ir_byte), layer=3, alpha=90)
#update loop
while True:
sleep(0.25)
ir_raw = fifo.read()
ir_trimmed = ir_raw[0:128]
ir = np.frombuffer(ir_trimmed, np.uint16)
ir = ir.reshape((16, 4))[::-1, ::-1]
ir = img_as_float(ir)
p2, p98 = np.percentile(ir, (2, 98))
ir = exposure.rescale_intensity(ir, in_range=(p2, p98))
ir = exposure.equalize_hist(ir)
cmap = plt.get_cmap('spectral')
rgba_img = cmap(ir)
rgb_img = np.delete(rgba_img, 3, 2)
# align the IR array with the image
tform = transform.AffineTransform(scale=SCALE, rotation=ROT, translation=OFFSET)
ir_aligned = transform.warp(rgb_img, tform.inverse, mode='constant', output_shape=im.shape)
ir_byte = img_as_ubyte(ir_aligned)
o.update(np.getbuffer(ir_byte))
print('Error! Closing...')
camera.remove_overlay(o)
fifo.close()
|
agnethesoraa/recipemaster
|
recipemaster/recipes/migrations/0003_auto_20150325_2130.py
|
Python
|
mit
| 820
| 0.002439
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipes', '0002_recipecollection_title'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('title', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='recipe',
name='tags',
field=models.ManyToManyField(to='recipes.Tag', related_name='recipes'),
preserve_default=True,
),
]
|
leifurhauks/grpc
|
src/python/grpcio/tests/unit/beta/test_utilities.py
|
Python
|
bsd-3-clause
| 2,422
| 0.002064
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test-appropriate entry points into the gRPC Python Beta API."""
import grpc
from grpc.beta import implementations
def not_really_secure_channel(
host, port, channel_credentials, server_host_override):
"""Creates an insecure Channel to a remote host.
Args:
host: The name of the remote host to which to connect.
port: The port of the remote host to which to connect.
channel_credentials: The implementations.ChannelCredentials with which to
connect.
server_host_override: The target name used for SSL host name checking.
Returns:
An implementations.Channel to the remote host through which RPCs may be
conducted.
"""
target = '%s:%d' % (host, port)
channel = grpc.secure_channel(
target, channel_credentials._credentials,
((b'grpc.ssl_target_name_override', server_host_override,),))
return implementations.Channel(channel)
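# Hypothetical usage sketch (added; the certificate variable, port, host
# override and generated stub name are assumptions, not from this module):
#
#     creds = implementations.ssl_channel_credentials(root_certificates=TEST_ROOT_CERT)
#     channel = not_really_secure_channel(
#         'localhost', 50051, creds, 'foo.test.google.fr')
#     stub = my_service_pb2.beta_create_MyService_stub(channel)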
|
PredictionIO/Demo-AngelList
|
backend/angellist_demo/urls.py
|
Python
|
apache-2.0
| 840
| 0.009524
|
from django.conf.urls.defaults import patterns, include, url
from rest_framework.urlpatterns import format_suffix_patterns
from startups import views
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('startups.views',
# Examples:
# url(r'^$', 'angellist_demo.views.home', name='home'),
# url(r'^angellist_demo/', include('angellist_demo.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
url(r'^startups$', 'startup_list'),
url(r'^startups/(?P<pk>[0-9]+)$', 'startup_detail'),
)
urlpatterns = format_suffix_patterns(urlpatterns)
|
hjanime/gffutils
|
gffutils/create.py
|
Python
|
mit
| 50,151
| 0
|
import copy
import warnings
import collections
import tempfile
import sys
import os
import sqlite3
import six
from textwrap import dedent
from gffutils import constants
from gffutils import version
from gffutils import bins
from gffutils import helpers
from gffutils import feature
from gffutils import interface
from gffutils import iterators
import logging
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
def deprecation_handler(kwargs):
"""
As things change from version to version, deal with them here.
"""
# After reconsidering, let's leave `infer_gene_extent` for another release.
# But when it's time to deprecate it, use this code:
if 0:
if 'infer_gene_extent' in kwargs:
raise ValueError(
"'infer_gene_extent' is deprecated as of version 0.8.4 in "
"favor of more granular control over inferring genes and/or "
"transcripts. The previous default was "
"'infer_gene_extent=True`, which corresponds to the new "
"defaults "
"'disable_infer_genes=False' and "
"'disable_infer_transcripts=False'. Please see the docstring "
"for gffutils.create_db for details.")
if len(kwargs) > 0:
raise TypeError("unhandled kwarg in %s" % kwargs)
class _DBCreator(object):
def __init__(self, data, dbfn, force=False, verbose=False, id_spec=None,
merge_strategy='merge', checklines=10, transform=None,
force_dialect_check=False, from_string=False, dialect=None,
default_encoding='utf-8',
disable_infer_genes=False,
disable_infer_transcripts=False,
infer_gene_extent=True,
force_merge_fields=None,
text_factory=sqlite3.OptimizedUnicode,
pragmas=constants.default_pragmas, _keep_tempfiles=False,
**kwargs):
"""
Base class for _GFFDBCreator and _GTFDBCreator; see create_db()
function for docs
"""
self._keep_tempfiles = _keep_tempfiles
if force_merge_fields is None:
force_merge_fields = []
if merge_strategy == 'merge':
if set(['start', 'end']).intersection(force_merge_fields):
raise ValueError("Can't merge start/end fields since "
"they must be integers")
warn = set(force_merge_fields)\
.intersection(['frame', 'strand'])
for w in warn:
warnings.warn(
"%s field will be merged for features with the same ID; "
"this may result in unusable features." % w)
self.force_merge_fields = force_merge_fields
self.pragmas = pragmas
self.merge_strategy = merge_strategy
self.default_encoding = default_encoding
if not infer_gene_extent:
warnings.warn("'infer_gene_extent' will be deprecated. For now, "
"the following equivalent values were automatically "
"set: 'disable_infer_genes=True', "
"'disable_infer_transcripts=True'. Please use these "
"instead in the future.")
disable_infer_genes = True
disable_infer_transcripts = True
self.disable_infer_genes = disable_infer_genes
self.disable_infer_transcripts = disable_infer_transcripts
self._autoincrements = collections.defaultdict(int)
if force:
if os.path.exists(dbfn):
os.unlink(dbfn)
self.dbfn = dbfn
self.id_spec = id_spec
if isinstance(dbfn, six.string_types):
conn = sqlite3.connect(dbfn)
else:
conn = dbfn
self.conn = conn
self.conn.row_factory = sqlite3.Row
self.set_verbose(verbose)
if text_factory is not None:
if self.verbose == 'debug':
logger.debug('setting text factory to %s' % text_factory)
self.conn.text_factory = text_factory
self._data = data
self._orig_logger_level = logger.level
self.iterator = iterators.DataIterator(
data=data, checklines=checklines, transform=transform,
force_dialect_check=force_dialect_check, from_string=from_string,
dialect=dialect
)
def set_verbose(self, verbose=None):
if verbose == 'debug':
logger.setLevel(logging.DEBUG)
elif verbose:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.ERROR)
self.verbose = verbose
def _increment_featuretype_autoid(self, key):
self._autoincrements[key] += 1
return '%s_%s' % (key, self._autoincrements[key])
def _id_handler(self, f):
"""
Given a Feature from self.iterator, figure out what the ID should be.
This uses `self.id_spec` identify the ID.
"""
# If id_spec is a string, convert to iterable for later
if isinstance(self.id_spec, six.string_types):
id_key = [self.id_spec]
elif hasattr(self.id_spec, '__call__'):
id_key = [self.id_spec]
# If dict, then assume it's a feature -> attribute mapping, e.g.,
# {'gene': 'gene_id'} for GTF
elif isinstance(self.id_spec, dict):
try:
id_key = self.id_spec[f.featuretype]
if isinstance(id_key, six.string_types):
id_key = [id_key]
# Otherwise, use default auto-increment.
except KeyError:
return self._increment_featuretype_autoid(f.featuretype)
# Otherwise assume it's an iterable.
else:
id_key = self.id_spec
# Then try them in order, returning the first one that works:
for k in id_key:
if hasattr(k, '__call__'):
_id = k(f)
if _id:
if _id.startswith('autoincrement:'):
return self._increment_featuretype_autoid(_id[14:])
return _id
else:
# use GFF fields rather than attributes for cases like :seqid:
# or :strand:
if (len(k) > 3) and (k[0] == ':') and (k[-1] == ':'):
# No [0] here -- only attributes key/vals are forced into
# lists, not standard GFF fields.
return getattr(f, k[1:-1])
else:
try:
return f.attributes[k][0]
except (KeyError, IndexError):
pass
# If we get here, then default autoincrement
return self._increment_featuretype_autoid(f.featuretype)
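    # Added note (not part of the original source): based on the handling
    # above, `id_spec` may be a plain attribute name such as 'ID' (typical for
    # GFF3), a featuretype -> attribute mapping such as
    # {'gene': 'gene_id', 'transcript': 'transcript_id'} (typical for GTF),
    # a ':seqid:'-style reference to a standard GFF column, or a callable that
    # returns an ID or an 'autoincrement:<prefix>' string.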
def _get_feature(self, ID):
c = self.conn.cursor()
results = c.execute(
constants._SELECT + ' WHERE id = ?', (ID,)).fetchone()
return feature.Feature(dialect=self.iterator.dialect, **results)
def _do_merge(self, f, merge_strategy, add_duplicate=False):
"""
Different merge strategies upon name conflicts.
"error":
Raise error
"warning"
Log a warning
"merge":
            Combine old and new attributes -- but only if everything else
matches; otherwise error. This can be slow, but is thorough.
"create_unique":
Autoincrement based on the ID, always creating a new ID.
"replace":
Replaces existing database feature with `f`.
"""
if merge_strategy == 'error':
raise ValueError("Duplicate ID {0.id}".format(f))
if merge_strategy == 'warning':
logger.warning(
"Duplicate lines in file for id '{0.id}'; "
"ignoring all but the first".format(f))
return None, merge_str
|
cthtuf/django-cv
|
cv/wsgi.py
|
Python
|
mit
| 381
| 0
|
"""
WSGI config for cv project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cv.settings")
application = get_wsgi_application()
|
nkgilley/home-assistant
|
homeassistant/components/spider/climate.py
|
Python
|
apache-2.0
| 3,866
| 0.000259
|
"""Support for Spider thermostats."""
import logging
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from . import DOMAIN as SPIDER_DOMAIN
SUPPORT_FAN = ["Auto", "Low", "Medium", "High", "Boost 10", "Boost 20", "Boost 30"]
SUPPORT_HVAC = [HVAC_MODE_HEAT, HVAC_MODE_COOL]
HA_STATE_TO_SPIDER = {
HVAC_MODE_COOL: "Cool",
HVAC_MODE_HEAT: "Heat",
HVAC_MODE_OFF: "Idle",
}
SPIDER_STATE_TO_HA = {value: key for key, value in HA_STATE_TO_SPIDER.items()}
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Spider thermostat."""
if discovery_info is None:
return
devices = [
SpiderThermostat(hass.data[SPIDER_DOMAIN]["controller"], device)
for device in hass.data[SPIDER_DOMAIN]["thermostats"]
]
add_entities(devices, True)
class SpiderThermostat(ClimateEntity):
"""Representation of a thermostat."""
def __init__(self, api, thermostat):
"""Initialize the thermostat."""
self.api = api
self.thermostat = thermostat
@property
def supported_features(self):
"""Return the list of supported features."""
supports = SUPPORT_TARGET_TEMPERATURE
if self.thermostat.has_fan_mode:
supports |= SUPPORT_FAN_MODE
return supports
@property
def unique_id(self):
"""Return the id of the thermostat, if any."""
        return self.thermostat.id
@property
    def name(self):
        """Return the name of the thermostat, if any."""
return self.thermostat.name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self.thermostat.current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.thermostat.target_temperature
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self.thermostat.temperature_steps
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.thermostat.minimum_temperature
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.thermostat.maximum_temperature
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
return SPIDER_STATE_TO_HA[self.thermostat.operation_mode]
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return SUPPORT_HVAC
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
self.thermostat.set_temperature(temperature)
def set_hvac_mode(self, hvac_mode):
"""Set new target operation mode."""
self.thermostat.set_operation_mode(HA_STATE_TO_SPIDER.get(hvac_mode))
@property
def fan_mode(self):
"""Return the fan setting."""
return self.thermostat.current_fan_speed
def set_fan_mode(self, fan_mode):
"""Set fan mode."""
self.thermostat.set_fan_speed(fan_mode)
@property
def fan_modes(self):
"""List of available fan modes."""
return SUPPORT_FAN
def update(self):
"""Get the latest data."""
self.thermostat = self.api.get_thermostat(self.unique_id)
|
hackgnar/osquery
|
tools/tests/test_osqueryd.py
|
Python
|
bsd-3-clause
| 9,307
| 0.000537
|
#!/usr/bin/env python3
# Copyright (c) 2014-present, The osquery authors
#
# This source code is licensed as defined by the LICENSE file found in the
# root directory of this source tree.
#
# SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only)
import glob
import os
import signal
import shutil
import time
import unittest
# osquery-specific testing utils
import test_base
class DaemonTests(test_base.ProcessGenerator, unittest.TestCase):
@test_base.flaky
def test_1_daemon_without_watchdog(self):
daemon = self._run_daemon({
"disable_watchdog": True,
"disable_extensions": True,
})
self.assertTrue(daemon.isAlive())
daemon.kill()
@test_base.flaky
def test_2_daemon_with_option(self):
logger_path = test_base.getTestDirectory(test_base.TEMP_DIR)
daemon = self._run_daemon(
{
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
},
options_only={
"logger_path": logger_path,
"verbose": True,
})
self.assertTrue(daemon.isAlive())
info_path = os.path.join(logger_path, "osqueryd.INFO*")
def info_exists():
return len(glob.glob(info_path)) > 0
# Wait for the daemon to flush to GLOG.
test_base.expectTrue(info_exists)
# Assign the variable after we have assurances it exists
self.assertTrue(info_exists())
# Lastly, verify that we have permission to read the file
data = ''
with open(glob.glob(info_path)[0], 'r') as fh:
try:
data = fh.read()
except:
pass
self.assertTrue(len(data) > 0)
daemon.kill()
@test_base.flaky
def test_3_daemon_with_watchdog(self):
# This test does not join the service threads properly (waits for int).
if os.environ.get('SANITIZE') is not None:
return
daemon = self._run_daemon({
"allow_unsafe": True,
"disable_watchdog": False,
"ephemeral": True,
"disable_database": True,
"disable_logging": True,
})
self.assertTrue(daemon.isAlive())
# Check that the daemon spawned a child process
children = daemon.getChildren()
self.assertTrue(len(children) > 0)
daemon.kill()
# This will take a few moments to make sure the client process
# dies when the watcher goes away
self.assertTrue(daemon.isDead(children[0]))
@test_base.flaky
def test_3_daemon_lost_worker(self):
# Test that killed workers are respawned by the watcher
if os.environ.get('SANITIZE') is not None:
return
daemon = self._run_daemon({
"allow_unsafe": True,
"disable_watchdog": False,
"ephemeral": True,
"disable_database": True,
"disable_logging": True,
})
self.assertTrue(daemon.isAlive())
# Check that the daemon spawned a child process
children = daemon.getChildren()
self.assertTrue(len(children) > 0)
# Kill only the child worker
os.kill(children[0], signal.SIGINT)
self.assertTrue(daemon.isDead(children[0]))
self.assertTrue(daemon.isAlive())
# Expect the children of the daemon to be respawned
def waitDaemonChildren():
children = daemon.getChildren()
return len(children) > 0
test_base.expectTrue(waitDaemonChildren)
children = daemon.getChildren()
self.assertTrue(len(children) > 0)
@test_base.flaky
def test_4_daemon_sighup(self):
# A hangup signal should not do anything to the daemon.
daemon = self._run_daemon({
"disable_watchdog": True,
})
self.assertTrue(daemon.isAlive())
# Send SIGHUP on posix. Windows does not have SIGHUP so we use SIGTERM
sig = signal.SIGHUP if os.name != "nt" else signal.SIGTERM
os.kill(daemon.proc.pid, sig)
self.assertTrue(daemon.isAlive())
@test_base.flaky
def test_5_daemon_sigint(self):
# An interrupt signal will cause the daemon to stop.
daemon = self._run_daemon({
"disable_watchdog": True,
"ephemeral": True,
"disable_database": True,
"disable_logging": True,
})
self.assertTrue(daemon.isAlive())
# Send a SIGINT
os.kill(daemon.pid, signal.SIGINT)
self.assertTrue(daemon.isDead(daemon.pid, 10))
if os.name != "nt":
self.assertEqual(daemon.retcode, 0)
@test_base.flaky
def test_6_logger_mode(self):
logger_path = test_base.getTestDirectory(test_base.TEMP_DIR)
test_mode = 0o754 # Strange mode that should never exist
daemon = self._run_daemon(
{
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
},
options_only={
"logger_path": logger_path,
"logger_mode": test_mode,
"verbose": True,
})
self.assertTrue(daemon.isAlive())
# Wait for the daemon to write the info log to disk before continuing
info_path = os.path.join(logger_path, "osqueryd.INFO*")
def info_exists():
return len(glob.glob(info_path)) > 0
results_path = os.path.join(logger_path, "osqueryd.results.log")
def results_exists():
return os.path.exists(results_path)
# Wait for the daemon to flush to GLOG.
test_base.expectTrue(info_exists)
test_base.expectTrue(results_exists)
info_path = glob.glob(info_path)[0]
# Both log files should exist, the results should have the given mode.
for pth in [info_path, results_path]:
self.assertTrue(os.path.exists(pth))
# Only apply the mode checks to .log files.
# TODO: Add ACL checks for Windows logs
if pth.find('.log') > 0 and os.name != "nt":
rpath = os.path.realpath(pth)
mode = os.stat(rpath).st_mode & 0o777
self.assertEqual(mode, test_mode)
daemon.kill()
def test_7_logger_stdout(self):
logger_path = test_base.getTestDirectory(test_base.TEMP_DIR)
daemon = self._run_daemon({
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
"logger_plugin": "stdout",
"logger_path": logger_path,
"verbose": True,
})
info_path = os.path.join(logger_path, "osqueryd.INFO")
def pathDoesntExist():
if os.path.exists(info_path):
return False
return True
self.assertTrue(daemon.isAlive())
self.assertTrue(pathDoesntExist())
daemon.kill()
def test_8_hostid_uuid(self):
# Test added to test using UUID as hostname ident for issue #3195
daemon = self._run_daemon({
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
"logger_plugin": "stdout",
"host_identifier": "uuid",
"verbose": True,
})
self.assertTrue(daemon.isAlive())
daemon.kill()
def test_9_hostid_instance(self):
daemon = self._run_daemon({
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
"logger_plugin": "stdout",
"host_identifier": "instance",
"verbose": True,
})
self.assertTrue(daemon.isAlive())
daemon.kill()
def test_config_check_exits(self):
daemon = self._run_daemon({
"config_check": True,
"disable_extensions": True,
"disable_logging": False,
"disable_database": True,
"logger_plugin": "stdout",
"verbose":
|
codeofdusk/ProjectMagenta
|
src/keystrokeEditor/constants.py
|
Python
|
gpl-2.0
| 2,205
| 0.021769
|
# -*- coding: utf-8 -*-
actions = {
"up": _(u"Go up in the current buffer"),
"down": _(u"Go down in the current buffer"),
"left": _(u"Go to the previous buffer"),
"right": _(u"Go to the next buffer"),
"next_account": _(u"Focus the next session"),
"previous_account": _(u"Focus the previous session"),
"show_hide": _(u"Show or hide the GUI"),
"post_tweet": _(u"New tweet"),
"post_reply": _(u"Reply"),
"post_retweet": _(u"Retweet"),
"send_dm": _(u"Send direct message"),
"add_to_favourites": _(u"Mark as favourite"),
"remove_from_favourites": _(u"Remove from favourites"),
"follow": _(u"Open the user actions dia
|
logue"),
"user_details": _(u"See user details"),
"view_item": _(u"Show tweet"),
"exit": _(u"Quit"),
"open_timeline": _(u"Open user timeline"),
"remove_buffer": _(u"Destroy buffer"),
"interact": _(u"Interact with the currently focused tweet."),
"url": _(u"Open URL"),
"volume_up": _(u"Increase volume by 5%"),
"volume_down": _(u"Decrease volume by 5%"),
"go_home": _(u"Jump to the
|
first element of a buffer"),
"go_end": _(u"Jump to the last element of the current buffer"),
"go_page_up": _(u"Jump 20 elements up in the current buffer"),
"go_page_down": _(u"Jump 20 elements down in the current buffer"),
"update_profile": _(u"Edit profile"),
"delete": _(u"Delete a tweet or direct message"),
"clear_buffer": _(u"Empty the current buffer"),
"repeat_item": _(u"Repeat last item"),
"copy_to_clipboard": _(u"Copy to clipboard"),
"add_to_list": _(u"Add to list"),
"remove_from_list": _(u"Remove from list"),
"toggle_buffer_mute": _(u"Mute/unmute the active buffer"),
"toggle_session_mute": _(u"Mute/unmute the current session"),
"toggle_autoread": _(u"toggle the automatic reading of incoming tweets in the active buffer"),
"search": _(u"Search on twitter"),
"find": _(u"Find a string in the currently focused buffer"),
"edit_keystrokes": _(u"Show the keystroke editor"),
"view_user_lists": _(u"Show lists for a specified user"),
"get_more_items": _(u"load previous items"),
"reverse_geocode": _(u"Get geolocation"),
"view_reverse_geocode": _(u"Display the tweet's geolocation in a dialog"),
"get_trending_topics": _(u"Create a trending topics buffer"),
"open_conversation": _(u"View conversation"),
}
|
Eficent/sale-workflow
|
product_customer_code_sale/__init__.py
|
Python
|
agpl-3.0
| 880
| 0
|
# -*- coding: utf-8 -*-
#
#
# Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)
# Author: Nicola Malcontenti <nicola.malcontenti@agilebg.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import sale
|
abdullahcaliskan/Python
|
OOP/props.py
|
Python
|
gpl-2.0
| 467
| 0.038544
|
class Robot:
def __init__(self):
self.__name = ""
@property
def name(self):
return self.__name
@name.setter
def name(self, x):
self.__name = x
class Car:
def __init__(self, model=None):
self.__set_model(model)
def __set_model(self, model):
self.__model = model
def __get_model(self):
return self.__model
model = property(__get_model, __set_model)
x = Robot()
x.name = "apo"
print(x.name)
c = Car()
c.model = "Mercedes"
print(c.model)
|
vinneyto/lab-portal
|
portal/test_models.py
|
Python
|
bsd-3-clause
| 2,587
| 0.002052
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.test import TestCase
from models import Student, StudyGroup, Task, Lab, Subject, GroupSubject
class PortalTest(TestCase):
def setUp(self):
self.study_group1 = StudyGroup.objects.create(name="10А")
self.study_group2 = StudyGroup.objects.create(name="11Б")
self.subject1 = Subject.objects.create(name="Оптика")
self.subject2 = Subject.objects.create(name="Механика")
self.group_subject11 = GroupSubject.objects.create(
study_group=self.study_group1, subject=self.subject1
)
self.group_subject22 = GroupSubject.objects.create(
study_group=self.study_group2, subject=self.subject2
)
self.student1 = Student.objects.create_user(
username="ivan", email=None, password="123456", study_group=self.study_group1
)
        self.student2 = Student.objects.create_user(
            username="pavel", email=None, password="123456", study_group=self.study_group2
)
self.lab1 = Lab.objects.create(name="Кольца ньютона", subject=self.subject1)
self.lab2 = Lab.objects.create(name="Атвуд", subject=self.subject2)
def test_task_create(self):
has_error = False
try:
task = Task(student=self.student1, lab=self.lab1)
task.clean()
task.save()
except ValidationError:
has_error = True
self.assertFalse(has_error)
def test_task_create_double(self):
"""
        A validation error should be raised - we are trying to create two identical tasks
:return:
"""
has_error = False
try:
task = Task(student=self.student1, lab=self.lab1)
task.clean()
task.save()
task = Task(student=self.student1, lab=self.lab1)
task.clean()
task.save()
except ValidationError:
has_error = True
self.assertTrue(has_error)
        # Check that there is only one task for this study group
subject = self.group_subject11.subject
study_group = self.group_subject11.study_group
task_count = Task.objects.filter(
lab__subject__pk=subject.id, student__study_group__pk=study_group.id
).count()
self.assertTrue(task_count, 1)
|
Sharpe49/ts2
|
ts2/routing/route.py
|
Python
|
gpl-2.0
| 12,444
| 0.001848
|
#
# Copyright (C) 2008-2013 by Nicolas Piganeau
# npi@m4x.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
import ts2.routing
class RoutesModel(QtCore.QAbstractTableModel):
"""The RoutesModel is a table model for routes that is used in the editor
"""
def __init__(self, editor):
"""Constructor for the RoutesModel class"""
super().__init__()
self._editor = editor
def rowCount(self, parent = QtCore.QModelIndex()):
"""Returns the number of rows of the model, corresponding to the
number of routes."""
return len(self._editor.routes)
def columnCount(self, parent = QtCore.QModelIndex()):
"""Returns the number of columns of the model"""
return 4
def data(self, index, role = Qt.DisplayRole):
"""Returns the data at the given index"""
if role == Qt.DisplayRole or role == Qt.EditRole:
routes = list(sorted(self._editor.routes.values()))
if index.column() == 0:
return routes[index.row()].routeNum
elif index.column() == 1:
return routes[index.row()].beginSignal.name
elif index.column() == 2:
return routes[index.row()].endSignal.name
elif index.column() == 3:
return routes[index.row()].initialState
return None
def setData(self, index, value, role):
"""Updates data when modified in the view"""
if role == Qt.EditRole:
if index.column() == 3:
                routeNum = int(index.sibling(index.row(), 0).data())
self._editor.routes[routeNum].initialState = value
self.dataChanged.emit(index, index)
return True
return False
def headerData(self, section, orientation, role = Qt.DisplayRole):
"""Returns the header la
|
bels"""
if role == Qt.DisplayRole and orientation == Qt.Horizontal:
if section == 0:
return self.tr("Route no.")
elif section == 1:
return self.tr("Begin Signal")
elif section == 2:
return self.tr("End Signal")
elif section == 3:
return self.tr("Initial State")
return None
def flags(self, index):
"""Returns the flags of the model"""
retFlag = Qt.ItemIsEnabled | Qt.ItemIsSelectable
if index.column() == 3:
retFlag |= Qt.ItemIsEditable
return retFlag
class Route(QtCore.QObject):
"""@brief Path between two signals
A route is a path between two signals. If a route is activated, the path
is selected, and the signals at the beginning and the end of the route are
changed and the conflicting possible other routes are inhibited. Routes
are static and defined in the game file. The player can only activate or
deactivate them.
"""
def __init__(self, simulation, routeNum, beginSignal, endSignal,
initialState = 0):
"""Constructor of the Route class. After construction, the directions
dictionary must be filled and then the _positions list must be
populated by calling createPositionsList().
@param routeNum The route number (id)
@param beginSignal Pointer to the SignalItem at which the route starts
@param endSignal Pointer to the SignalItem at which the route ends"""
super().__init__(simulation)
self.simulation = simulation
self._routeNum = routeNum
bsp = ts2.routing.Position(beginSignal, beginSignal.previousItem, 0)
esp = ts2.routing.Position(endSignal, endSignal.previousItem, 0)
self._positions = [bsp, esp]
self._directions = {}
self._initialState = initialState
self._persistent = False
routeSelected = QtCore.pyqtSignal()
routeUnselected = QtCore.pyqtSignal()
@property
def positions(self):
"""Returns the positions list of this route."""
return self._positions
@property
def routeNum(self):
"""Returns this route number"""
return self._routeNum
@property
def beginSignal(self):
""" Returns the SignalItem where this route starts."""
return self._positions[0].trackItem
@property
def endSignal(self):
"""Returns the SignalItem where this route ends."""
return self._positions[-1].trackItem
@property
def initialState(self):
"""Returns the state of the route at the beginning of the simulation.
0 => Not activated
1 => Activated, non persistent
2 => Activated, persistent"""
return self._initialState
@initialState.setter
def initialState(self, value):
"""Setter function for the initialState property"""
value = int(value)
if value < 0 or value > 2:
value = 0
self._initialState = value
def getRouteState(self):
"""Returns the current route state:
0 => Not activated
1 => Activated, non persistent
2 => Activated, persistent."""
if self.beginSignal.nextActiveRoute is not None and \
self.beginSignal.nextActiveRoute == self:
if self._persistent:
return 2
else:
return 1
else:
return 0
@property
def directions(self):
"""Returns the directions dictionary"""
return self._directions
def direction(self, tiId):
"""Returns the direction of this route at the trackItem with id tiId
"""
return self._directions[tiId]
def appendDirection(self, tiId, direction):
""" Appends a direction to a TrackItem on the Route.
@param tiId The trackItem number to which we add direction
@param direction The direction to append.
For points, 0 means normal and other values means reverse"""
self._directions[tiId] = direction
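    # Illustrative use of the direction convention above (hypothetical item ids,
    # a sketch only): route.appendDirection(12, 0) records that point item 12 is
    # crossed in its normal position, while route.appendDirection(15, 1) means
    # point 15 must be set to reverse when this route is activated.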
def createPositionsList(self):
""" Populates the _positions list.
If the route is invalid, it leaves the _positions list empty.
Also completes the _directions map, with obvious directions."""
cur = self._positions[0].next()
it = 1
while not cur.isOut():
if cur == self._positions[-1]:
return True
self._positions.insert(it, cur)
it += 1
if cur.trackItem.tiType.startswith("P"):
if cur.previousTI == cur.trackItem.normalItem:
self._directions[cur.trackItem.tiId] = 0
elif cur.previousTI == cur.trackItem.reverseItem:
self._directions[cur.trackItem.tiId] = 1
elif cur.previousTI == cur.trackItem.commonItem \
and cur.trackItem.tiId not in self._directions:
self._directions[cur.trackItem.tiId] = 0
cur = cur.next(0, self._directions.get(cur.trackItem.tiId, -1))
QtCore.qCritical(self.tr("Invalid route %i. "
"Impossible to link beginSignal with endSignal"
% self.routeNum))
return False
def links(self, si1, si2):
""" Returns true if the route links SignalItem si1 to SignalItem si2.
@param si1 First SignalItem
@param si2 Last SignalItem"""
if self.beginSignal =
|
alexlo03/ansible
|
lib/ansible/module_utils/yumdnf.py
|
Python
|
gpl-3.0
| 6,502
| 0.001846
|
# -*- coding: utf-8 -*-
#
# # Copyright: (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
# Contributing Authors:
# - Ansible Core Team
# - Eduard Snesarev (@verm666)
# - Berend De Schouwer (@berenddeschouwer)
# - Abhijeet Kasurde (@Akasurde)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import os
import time
import glob
import tempfile
from abc import ABCMeta, abstractmethod
from ansible.module_utils._text import to_native
from ansible.module_utils.six import with_metaclass
yumdnf_argument_spec = dict(
argument_spec=dict(
allow_downgrade=dict(type='bool', default=False),
autoremove=dict(type='bool', default=False),
bugfix=dict(required=False, type='bool', default=False),
conf_file=dict(type='str'),
disable_excludes=dict(type='str', default=None),
disable_gpg_check=dict(type='bool', default=False),
disable_plugin=dict(type='list', default=[]),
disablerepo=dict(type='list', default=[]),
download_only=dict(type='bool', default=False),
enable_plugin=dict(type='list', default=[]),
enablerepo=dict(type='list', default=[]),
exclude=dict(type='list', default=[]),
installroot=dict(type='str', default="/"),
install_repoquery=dict(type='bool', default=True),
list=dict(type='str'),
name=dict(type='list', aliases=['pkg'], default=[]),
releasever=dict(default=None),
security=dict(type='bool', default=False),
skip_broken=dict(type='bool', default=False),
# removed==absent, installed==present, these are accepted as aliases
state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
update_cache=dict(type='bool', default=False, aliases=['expire-cache']),
update_only=dict(required=False, default="no", type='bool'),
validate_certs=dict(type='bool', default=True),
lock_timeout=dict(type='int', default=0),
),
required_one_of=[['name', 'list', 'update_cache']],
mutually_exclusive=[['name', 'list']],
supports_check_mode=True,
)
class YumDnf(with_metaclass(ABCMeta, object)):
"""
Abstract class that handles the population of instance variables that should
be identical between both YUM and DNF modules because of the feature parity
and shared argument spec
"""
def __init__(self, module):
self.module = module
self.allow_downgrade = self.module.params['allow_downgrade']
self.autoremove = self.module.params['autoremove']
self.bugfix = self.module.params['bugfix']
self.conf_file = self.module.params['conf_file']
self.disable_excludes = self.module.params['disable_excludes']
self.disable_gpg_check = self.module.params['disable_gpg_check']
self.disable_plugin = self.module.params['disable_plugin']
self.disablerepo = self.module.params.get('disablerepo', [])
self.download_only = self.module.params['download_only']
self.enable_plugin = self.module.params['enable_plugin']
self.enablerepo = self.module.params.get('enablerepo', [])
self.exclude = self.module.params['exclude']
self.installroot = self.module.params['installroot']
        self.install_repoquery = self.module.params['install_repoquery']
self.list = self.module.params['list']
self.names = [p.strip() for p in self.module.params['name']]
self.releasever = self.module.params['releasever']
self.security = self.module.params['security']
self.skip_broken = self.module.params['skip_broken']
self.state = self.module.params['state']
        self.update_only = self.module.params['update_only']
self.update_cache = self.module.params['update_cache']
self.validate_certs = self.module.params['validate_certs']
self.lock_timeout = self.module.params['lock_timeout']
# It's possible someone passed a comma separated string since it used
# to be a string type, so we should handle that
self.names = self.listify_comma_sep_strings_in_list(self.names)
self.disablerepo = self.listify_comma_sep_strings_in_list(self.disablerepo)
self.enablerepo = self.listify_comma_sep_strings_in_list(self.enablerepo)
self.exclude = self.listify_comma_sep_strings_in_list(self.exclude)
# Fail if someone passed a space separated string
# https://github.com/ansible/ansible/issues/46301
if any((' ' in name and '@' not in name and '==' not in name for name in self.names)):
module.fail_json(
msg='It appears that a space separated string of packages was passed in '
'as an argument. To operate on several packages, pass a comma separated '
'string of packages or a list of packages.'
)
# This should really be redefined by both the yum and dnf module but a
# default isn't a bad idea
self.lockfile = '/var/run/yum.pid'
def wait_for_lock(self):
'''Poll until the lock is removed if timeout is a positive number'''
if (os.path.isfile(self.lockfile) or glob.glob(self.lockfile)):
if self.lock_timeout > 0:
for iteration in range(0, self.lock_timeout):
time.sleep(1)
if not os.path.isfile(self.lockfile) and not glob.glob(self.lockfile):
return
self.module.fail_json(msg='{0} lockfile is held by another process'.format(self.pkg_mgr_name))
def listify_comma_sep_strings_in_list(self, some_list):
"""
method to accept a list of strings as the parameter, find any strings
in that list that are comma separated, remove them from the list and add
their comma separated elements to the original list
"""
new_list = []
remove_from_original_list = []
for element in some_list:
if ',' in element:
remove_from_original_list.append(element)
new_list.extend([e.strip() for e in element.split(',')])
for element in remove_from_original_list:
some_list.remove(element)
some_list.extend(new_list)
if some_list == [""]:
return []
return some_list
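    # Illustrative behaviour of the helper above (hypothetical input, sketch only):
    #   listify_comma_sep_strings_in_list(["foo,bar", "baz"]) -> ["baz", "foo", "bar"]
    #   comma separated entries are split and their parts are appended after the
    #   remaining plain entries; a lone empty string collapses to [].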
@abstractmethod
def run(self):
raise NotImplementedError
|
SinnerSchraderMobileMirrors/django-cms
|
cms/migrations/0041_auto__add_usersettings.py
|
Python
|
bsd-3-clause
| 15,866
| 0.00832
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserSettings'
db.create_table(u'cms_usersettings', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('language', self.gf('django.db.models.fields.CharField')(max_length=10)),
))
db.send_create_signal('cms', ['UserSettings'])
def backwards(self, orm):
# Deleting model 'UserSettings'
db.delete_table(u'cms_usersettings')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'def
|
kubeflow/kfp-tekton-backend
|
components/gcp/container/component_sdk/python/kfp_component/google/bigquery/__init__.py
|
Python
|
apache-2.0
| 601
| 0.001664
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._query import query
|
gaqzi/ansible-modules-extras
|
packaging/language/composer.py
|
Python
|
gpl-3.0
| 6,200
| 0.008548
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Dimitrios Tydeas Mengidis <tydeas.dr@gmail.com>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: composer
author: '"Dimitrios Tydeas Mengidis (@dmtrs)" <tydeas.dr@gmail.com>'
short_description: Dependency Manager for PHP
version_added: "1.6"
description:
- Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs and it will install them in your project for you
options:
command:
version_added: "1.8"
description:
- Composer command like "install", "update" and so on
required: false
default: install
working_dir:
description:
- Directory of your project ( see --working-dir )
required: true
default: null
aliases: [ "working-dir" ]
prefer_source:
description:
- Forces installation from package sources when possible ( see --prefer-source )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "prefer-source" ]
prefer_dist:
description:
- Forces installation from package dist even for dev versions ( see --prefer-dist )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "prefer-dist" ]
no_dev:
description:
- Disables installation of require-dev packages ( see --no-dev )
required: false
default: "yes"
choices: [ "yes", "no" ]
aliases: [ "no-dev" ]
no_scripts:
description:
- Skips the execution of all scripts defined in composer.json ( see --no-scripts )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "no-scripts" ]
no_plugins:
description:
- Disables all plugins ( see --no-plugins )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "no-plugins" ]
optimize_autoloader:
description:
            - Optimize autoloader during autoloader dump ( see --optimize-autoloader ). Convert PSR-0/4 autoloading to classmap to get a faster autoloader. This is recommended especially for production, but can take a bit of time to run so it is currently not done by default.
required: false
default: "yes"
choices: [ "yes", "no" ]
aliases: [ "optimize-autoloader" ]
requirements:
- php
- composer installed in bin path (recommended /usr/local/bin)
notes:
- Default options that are always appended in each execution are --no-ansi, --no-progress, and --no-interaction
'''
|
EXAMPLES = '''
# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock
- composer: command=install working_dir=/path/to/project
'''
import os
import re
def parse_out(string):
return re.sub("\s+", " ", string).strip()
def has_changed(string):
if "Nothing to install or update" in string:
return False
else:
return True
def composer_install(module, command, options):
php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
cmd = "%s %s %s %s" % (php_path, composer_path, command, " ".join(options))
return module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec = dict(
command = dict(default="install", type="str", required=False),
working_dir = dict(aliases=["working-dir"], required=True),
prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]),
prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]),
no_dev = dict(default="yes", type="bool", aliases=["no-dev"]),
no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]),
no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]),
optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]),
),
supports_check_mode=True
)
options = []
# Default options
options.append('--no-ansi')
options.append('--no-progress')
options.append('--no-interaction')
options.extend(['--working-dir', os.path.abspath(module.params['working_dir'])])
# Get composer command with fallback to default
command = module.params['command']
# Prepare options
if module.params['prefer_source']:
options.append('--prefer-source')
if module.params['prefer_dist']:
options.append('--prefer-dist')
if module.params['no_dev']:
options.append('--no-dev')
if module.params['no_scripts']:
options.append('--no-scripts')
if module.params['no_plugins']:
options.append('--no-plugins')
if module.params['optimize_autoloader']:
options.append('--optimize-autoloader')
if module.check_mode:
options.append('--dry-run')
rc, out, err = composer_install(module, command, options)
if rc != 0:
output = parse_out(err)
module.fail_json(msg=output)
else:
# Composer version > 1.0.0-alpha9 now use stderr for standard notification messages
output = parse_out(out + err)
module.exit_json(changed=has_changed(output), msg=output)
# import module snippets
from ansible.module_utils.basic import *
main()
|
melmothx/jsonbot
|
jsb/plugs/core/rc.py
|
Python
|
mit
| 1,732
| 0.017321
|
# jsb/plugs/core/rc.py
#
#
""" jsonbot resource files .. files with the .jsb extension which consists of commands to be executed. """
## jsb imports
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.utils.url import geturl2
from jsb.utils.exception import handle_exception
from jsb.utils.generic import waitforqueue, waitevents
## basic imports
import copy
## defines
cpy = copy.deepcopy
## rc command
def handle_rc(bot, event):
""" import aliases by url. assumes a .RC file. 1 alias per line """
if not event.rest: event.missing("<file>|<url>") ; return
teller = 0
t = event.rest
waiting = []
try:
try:
if t.startswith("http"): data = geturl2(t)
else: data = open(t, 'r').read()
except IOError, ex: event.reply("I/O error: %s" % str(ex)) ; return
if not data: event.reply("can't get data from %s" % event.rest) ; return
for d in data.split("\n"):
i = d.strip()
if not i: continue
if i.startswith("#"): continue
e = cpy(event)
e.txt = "%s" % i.strip()
e.direct = True
bot.put(e)
waiting.append(e)
            #result = bot.docmnd(event.userhost, event.channel, i, wait=1, event=event)
#if result: result.waitall()
teller += 1
#waitevents(waiting)
event.reply("%s commands executed" % teller)
except Exception, ex: event.reply("an error occured: %s" % str(ex)) ; handle_exception()
cmnds.add("rc", handle_rc, ["OPER"], threaded=True)
examples.add("rc", "execute a file of jsonbot commands .. from file or url", "1) rc resource.jsb 2) rc http://jsonbot.org/resource.jsb")
|
billhoffman/drake
|
drake/bindings/python/pydrake/test/testRBTCoM.py
|
Python
|
bsd-3-clause
| 1,228
| 0.002443
|
from __future__ import print_function
import unittest
import numpy as np
import pydrake
import os.path
class TestRBTCoM(unittest.TestCase):
def testCoM0(self):
r = pydrake.rbtree.RigidBodyTree(os.path.join(pydrake.getDrakePath(),
"examples/Pendulum/Pendulum.urdf"))
        kinsol = r.doKinematics(np.zeros((7, 1)), np.zeros((7, 1)))
c = r.centerOfMass(kinsol)
self.assertTrue(np.allclose(c.flat, [0.0, 0.0, -0.2425], atol=1e-4))
def testCoMJacobian(self):
r = pydrake.rbtree.RigidBodyTree(os.path.join(pydrake.getDrakePath(),
"examples/Pendulum/Pendulum.urdf"))
q = r.getRandomConfiguration()
kinsol = r.doKinematics(q, np.zeros((7, 1)))
J = r.centerOfMassJacobian(kinsol)
        self.assertTrue(np.shape(J) == (3, 7))
q = r.getZeroConfiguration()
kinsol = r.doKinematics(q, np.zeros((7, 1)))
J = r.centerOfMassJacobian(kinsol)
self.assertTrue(np.allclose(J.flat, [1., 0., 0., 0., -0.2425, 0., -0.25,
0., 1., 0., 0.2425, 0., 0., 0.,
0., 0., 1., 0., 0., 0., 0.], atol=1e-4))
if __name__ == '__main__':
unittest.main()
|
sdss/marvin
|
tests/tools/test_rss.py
|
Python
|
bsd-3-clause
| 8,275
| 0.001693
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Brian Cherinka, José Sánchez-Gallego, and Brett Andrews
# @Date: 2018-07-24
# @Filename: test_rss.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: José Sánchez-Gallego (gallegoj@uw.edu)
# @Last modified time: 2018-08-04 13:35:39
import astropy.io.fits
import astropy.table
import numpy
import pytest
import marvin
from ..conftest import Galaxy, set_the_config
@pytest.fixture(scope='session')
def galaxy(get_params, plateifu):
"""Yield an instance of a Galaxy object for use in tests."""
release, bintype, template = get_params
set_the_config(release)
gal = Galaxy(plateifu=plateifu)
gal.set_params(bintype=bintype, template=template, release=release)
gal.set_filepaths()
gal.set_galaxy_data()
yield gal
@pytest.fixture(scope='session')
def rss_session(galaxy, mode):
# These get created only once per session.
if mode == 'auto' or str(galaxy.bintype) != 'SPX':
pytest.skip()
if mode == 'local':
rss = marvin.tools.RSS(filename=galaxy.rsspath, release=galaxy.release, mode='local')
else:
rss = marvin.tools.RSS(plateifu=galaxy.plateifu, release=galaxy.release, mode='remote')
rss.expdata = galaxy.rss
yield rss
@pytest.fixture(scope='function')
def rss(rss_session):
# In some of the tests we modify the RSS objects. Here we implement
# a setup procedure that "unloads" the RSSFiber objects and resets the
# autoload attribute.
for rssfiber in rss_session:
rssfiber.loaded = False
rss_session.autoload = True
yield rss_session
@pytest.fixture(scope='session')
def rssfiber(rss_session):
fiberid = 0
if rss_session[fiberid].loaded is False:
rss_session[fiberid].load()
yield rss_session[fiberid]
@pytest.mark.usefixtures('monkeyauth')
class TestRSS(object):
def test_rss_init(self, rss):
assert isinstance(rss, marvin.tools.RSS)
assert isinstance(rss, marvin.tools.mixins.NSAMixIn)
assert isinstance(rss, list)
assert isinstance(rss.obsinfo, astropy.table.Table)
if rss.mode == 'file':
assert isinstance(rss.data, astropy.io.fits.HDUList)
assert rss._wavelength is not None
assert len(rss) == rss._nfibers
rss.autoload = False # To make things faster for this test
assert all([isinstance(rss_fiber, marvin.tools.rss.RSSFiber) for rss_fiber in rss])
@pytest.mark.parametrize('autoload', [True, False])
def test_rss_autoload(self, rss, autoload):
rss.autoload = autoload
assert rss[0].loaded is autoload
def test_load(self, rss):
rss.autoload = False
assert rss[0].loaded is False
rss[0].load()
assert rss[0].loaded is True
def test_load_all(self, rss):
if rss.mode == 'remote':
pytest.skip()
rss.load_all()
assert all([rss_fiber.loaded is True for rss_fiber in rss])
def test_obsinfo_to_rssfiber(self, rss):
        # We compute it this way on purpose so that it is obtained differently
        # than in the _populate_fibres method.
ifusize = int(str(rss.ifu)[0:-2])
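        # e.g. (illustrative): for plateifu '8485-12701' the IFU design is 12701,
        # so str(rss.ifu)[0:-2] == '127', i.e. 127 fibres per exposure in the RSS.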
exp_idx = 0
n_fiber = 1
for rssfiber in rss:
assert numpy.all(rss.obsinfo[exp_idx] == rssfiber.obsinfo)
n_fiber += 1
if n_fiber > ifusize:
n_fiber = 1
exp_idx += 1
def test_getcube(self, rss):
cube = rss.getCube()
assert isinstance(cube, marvin.tools.Cube)
assert cube.mode == rss.mode
assert cube.plateifu == rss.plateifu
assert cube.mangaid == rss.mangaid
assert cube.release == rss.release
def test_select_fibers(self, rss):
# Skipping for API or it will take forever. Should not matter since
# we have already tested slicing for API.
if rss.data_origin == 'api':
pytest.skip()
fibers_expnum = rss.select_fibers(exposure_no=rss.expdata['expnum'])
assert len(fibers_expnum) == rss.expdata['nfiber']
assert fibers_expnum[0].obsinfo['EXPNUM'][0] == rss.expdata['expnum']
fibers_mjd = rss.select_fibers(mjd=1234)
assert len(fibers_mjd) == 0
fibers_mjd = rss.select_fibers(mjd=rss.expdata['mjd'])
assert len(fibers_mjd) == (rss.expdata['nexp'] * rss.expdata['nfiber'])
assert fibers_mjd[0].obsinfo['MJD'][0] == rss.expdata['mjd']
@pytest.mark.usefixtures('monkeyauth')
class TestRSSFiber(object):
def test_rssfiber_spectra(self, rssfiber):
assert isinstance(rssfiber, marvin.tools.RSSFiber)
assert isinstance(rssfiber.rss, marvin.tools.RSS)
assert isinstance(rssfiber.obsinfo, astropy.table.Table)
assert hasattr(rssfiber, 'ivar')
assert isinstance(rssfiber.ivar, numpy.ndarray)
assert len(rssfiber.ivar) == len(rssfiber.wavelength)
assert hasattr(rssfiber, 'mask')
assert isinstance(rssfiber.mask, numpy.ndarray)
assert len(rssfiber.mask) == len(rssfiber.wavelength)
for dm_element in rssfiber.rss.datamodel.rss + rssfiber.rss.datamodel.spectra:
if dm_element.name == 'flux':
continue
spectrum = getattr(rssfiber, dm_element.name, None)
assert spectrum is not None
assert isinstance(spectrum, numpy.ndarray)
assert len(spectrum) == len(rssfiber.wavelength)
def test_rssfiber_data(self, rssfiber):
rss_filename = rssfiber.rss._getFullPath()
rss_hdu = astropy.io.fits.open(rss_filename)
numpy.testing.assert_allclose(rss_hdu['FLUX'].data[rssfiber.fiberid, :], rssfiber.value)
numpy.testing.assert_allclose(rss_hdu['IVAR'].data[rssfiber.fiberid, :], rssfiber.ivar)
numpy.testing.assert_array_equal(rss_hdu['MASK'].data[rssfiber.fiberid, :], rssfiber.mask)
for dm_element in rssfiber.rss.datamodel.rss:
if dm_element.name == 'flux':
continue
fits_data = rss_hdu[dm_element.fits_extension()].data[rssfiber.fiberid, :]
numpy.testing.assert_allclose(fits_data, getattr(rssfiber, dm_element.name).value)
for dm_element in rssfiber.rss.datamodel.spectra:
fits_data = rss_hdu[dm_element.fits_extension()].data
numpy.testing.assert_allclose(fits_data, getattr(rssfiber, dm_element.name).value)
def test_rssfiber_slice(self, rssfiber):
n_elements = 10
sliced = rssfiber[0:n_elements]
assert len(sliced.value) == n_elements
numpy.testing.assert_allclose(sliced.value, rssfiber.value[0:n_elements])
assert len(sliced.ivar) == n_elements
assert len(sliced.mask) == n_elements
for dm_element in rssfiber.rss.datamodel.rss + rssfiber.rss.datamodel.spectra:
if dm_element.name == 'flux':
continue
spectrum_sliced = getattr(sliced, dm_element.name, None)
assert len(spectrum_sliced) == n_elements
assert sliced.obsinfo is not None
def test_rssfiber_masked(self, rssfiber):
assert numpy.sum(rssfiber.masked.mask) > 0
def test_rssfiber_descale(self, rssfiber):
descaled = rssfiber.descale()
numpy.testing.assert_allclose(descaled.value, rssfiber.value * rssfiber.unit.scale)
assert descaled.obsinfo is not None
class TestPickling(object):
def test_pickling_file(self, temp_scratch, rss):
if rss.data_origin == 'file':
assert rss.data is not None
rss_file = temp_scratch.join('test_rss.mpf')
rss.save(str(rss_file))
assert rss_file.check() is True
rss_restored = marvin.tools.RSS.restore(str(rss_file))
assert rss_restored.data_origin == rss.data_origin
assert isinstance(rss_restored, marvin.tools.RSS)
assert len(rss_restored) > 0
assert isinstance(rss_restored[0], marvin.tools.RSSFiber)
assert numpy.sum(rss_restored[0].value) > 0
if rss.data_origin == 'file':
asser
|
ligo-cbc/pycbc
|
pycbc/results/dq.py
|
Python
|
gpl-3.0
| 2,187
| 0.002286
|
'''This module contains utilities for following up search triggers'''
# JavaScript for searching the aLOG
redirect_javascript = """<script type="text/javascript">
function redirect(form,way)
{
// Set location to form and submit.
if(form != '')
{
document.forms[form].action=way;
document.forms[form].submit();
}
else
{
        window.top.location = way;
}
}
</script>"""
search_form_string="""<form name="%s_alog_search" id="%s_alog_search" method="post">
<input type="hidden" name="srcDateFrom" id="srcDateFrom" value="%s" size="20"/>
<input type="hidden" name="srcDateTo" id="srcDateTo" value="%s" size="20"/>
</form>"""
data_h1_string = """H1
<a href=https://ldas-jobs.ligo-wa.caltech.edu/~detchar/summary/day/%s>
Summary</a>
<a onclick="redirect('h1_alog_search',
'https://alog.ligo-wa.caltech.edu/aLOG/includes/search.php?adminType=search');
return true;">aLOG</a>"""
data_l1_string="""L1
<a href=https://ldas-jobs.ligo-la.caltech.edu/~detchar/summary/day/%s>
Summary</a>
<a onclick="redirect('l1_alog_search',
'https://alog.ligo-la.caltech.edu/aLOG/includes/search.php?adminType=search');
return true;">aLOG</a>"""
def get_summary_page_link(ifo, utc_time):
"""Return a string that links to the summary page and aLOG for this ifo
Parameters
----------
ifo : string
The detector name
utc_time : sequence
        First three elements must give the year, month and day as integers.
Returns
-------
return_string : string
String containing HTML for links to summary page and aLOG search
"""
search_form = search_form_string
data = {'H1': data_h1_string, 'L1': data_l1_string}
if ifo not in data:
return ifo
else:
# alog format is day-month-year
alog_utc = '%02d-%02d-%4d' % (utc_time[2], utc_time[1], utc_time[0])
# summary page is exactly the reverse
ext = '%4d%02d%02d' % (utc_time[0], utc_time[1], utc_time[2])
return_string = search_form % (ifo.lower(), ifo.lower(), alog_utc, alog_utc)
return return_string + data[ifo] % ext
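# A minimal usage sketch of the helper above (hypothetical date, values illustrative):
# >>> get_summary_page_link('H1', (2015, 9, 14))
# returns the hidden aLOG search form markup followed by links to the H1 summary
# page for '20150914' and an aLOG search for '14-09-2015'; unrecognised ifos are
# returned unchanged.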
|
kolypto/py-mongosql
|
mongosql/util/history_proxy.py
|
Python
|
bsd-2-clause
| 5,787
| 0.003974
|
from copy import deepcopy
from sqlalchemy import inspect
from sqlalchemy.orm.base import DEFAULT_STATE_ATTR
from sqlalchemy.orm.state import InstanceState
from mongosql.bag import ModelPropertyBags
class ModelHistoryProxy:
""" Proxy object to gain access to historical model attributes.
This leverages SqlAlchemy attribute history to provide access to the previous value of an
attribute. The only reason why this object exists is because keeping two instances in memory may
be expensive. But because normally you'll only need a field or two, the decision was to use
this magic proxy object that will load model history on demand.
Why would you need to access model history at all?
Because CrudHelper's update method (i.e., changing model fields) gives you two objects: the
current instance, and the old instance, so that your custom code in the update handler can
compare those fields.
For instance, when a certain object is being moved from one User to another, you might want
to notify both of them. In that case, you'll need access to the historical user.
The initial solution was to *copy* the instance, apply the modifications from JSON to a copy,
and then feed both of them to the save handler... but copying was expensive.
That's why we have this proxy: it does not load all the fields of the historical model,
but acts as a proxy object (__getattr__()) that will get those properties on demand.
"""
def __init__(self, instance):
# Save the information that we'll definitely need
self.__instance = instance
self.__model = self.__instance.__class__
self.__bags = ModelPropertyBags.for_model(self.__model) # type: ModelPropertyBags
self.__inspect = inspect(instance) # type: InstanceState
# Copy every field onto ourselves
self.__copy_from_instance(self.__instance)
# Enable accessing relationships through our proxy
self.__install_instance_state(instance)
def __copy_from_instance(self, instance):
""" Copy all attributes of `instance` to `self`
Alright, this code renders the whole point of having ModelHistoryProxy void.
There is an issue with model history:
"Each time the Session is flushed, the history of each attribute is reset to empty.
The Session by default autoflushes each time a Query is invoked"
https://docs.sqlalchemy.org/en/latest/orm/internals.html#sqlalchemy.orm.state.AttributeState.history
This means that as soon as you load a relationship, model history is reset.
To solve this, we have to make a copy of this model.
All attributes are set on `self`, so accessing `self.attr` will not trigger `__getattr__()`
"""
""" Copy the given list of columns from the instance onto self """
insp = self.__inspect # type: InstanceState
# Copy all values onto `self`
for column_name in self.__bags.columns.names:
# Skip unloaded columns (because that would emit sql queries)
# Also skip the columns that were already copied (perhaps, mutable columns?)
if column_name not in insp.unloaded:
# The state
attr_state = insp.attrs[column_name] # type: AttributeState
# Get the historical value
# deepcopy() ensures JSON and ARRAY values are copied in full
hist_val = deepcopy(_get_historical_value(attr_state))
                # Store the value on `self`: we're bearing the value now
setattr(self, column_name, hist_val)
def __install_instance_state(self, instance):
""" Install an InstanceState, so that relationship descriptors can work properly """
# These lines install the internal SqlAlchemy's property on our proxy
# This property mimics the original object.
# This ensures that we can access relationship attributes through a ModelHistoryProxy object
# Example:
# hist = ModelHistoryProxy(comment)
# hist.user.id # wow!
instance_state = getattr(instance, DEFAULT_STATE_ATTR)
        my_state = InstanceState(self, instance_state.manager)
my_state.key = instance_state.key
my_state.session_id = instance_state.session_id
setattr(self, DEFAULT_STATE_ATTR, my_state)
def __getattr__(self, key):
# Get a relationship:
if key in self.__bags.relations:
relationship = getattr(self.__model, key)
return relationship.__get__(self, self.__model)
        # Get a property (@property)
if key in self.__bags.properties:
# Because properties may use other columns,
# we have to run it against our`self`, because only then it'll be able to get the original values.
return getattr(self.__model, key).fget(self)
# Every column attribute is accessed through history
attr = self.__inspect.attrs[key]
return _get_historical_value(attr)
def _get_historical_value(attr):
""" Get the previous value of an attribute
This is where the magic happens: this method goes into the SqlAlchemy instance and
obtains the historical value of an attribute called `key`
"""
# Examine attribute history
# If a value was deleted (e.g. replaced) -- we return it as the previous version.
history = attr.history
if not history.deleted:
# No previous value, return the current value instead
return attr.value
else:
# Return the previous value
# It's a tuple, since History supports collections, but we do not support these,
# so just get the first element
return history.deleted[0]
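# A minimal usage sketch (hypothetical `Comment` model, illustrative only):
#   comment.text = 'edited'            # change a loaded, not-yet-flushed instance
#   old = ModelHistoryProxy(comment)   # snapshot the pre-change state
#   old.text                           # -> the value `text` had before the edit
#   old.user.id                        # relationships still resolve through the proxy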
|
jac2130/BayesGame
|
foundations-of-python-network-programming/python2/01/basicserver.py
|
Python
|
mit
| 745
| 0.013423
|
#!/usr/bin/env python
#Base Server -Chapter three -basicserver.py
import socket, traceback
host=''
port=8080
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
print "Waiting for connections..."
s.listen(1)
while True:
try:
clientsock, clientaddr=s.accept()
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
continue
try:
print "Got connection from", clientsock.getpeername()
except (KeyboardInterrupt, SystemExit):
raise
except:
|
traceback.print_exc()
try:
clientsock.close()
except KeyboardInterrupt:
|
raise
except:
traceback.print_exc()
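# Quick manual check (hypothetical): run this script, then from another shell
#   $ telnet localhost 8080
# the server prints the peer address of each connection and keeps listening.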
|
hrashk/sympy
|
sympy/utilities/tests/test_iterables.py
|
Python
|
bsd-3-clause
| 24,090
| 0.00083
|
from textwrap import dedent
from sympy import (
symbols, Integral, Tuple, Dummy, Basic, default_sort_key, Matrix,
factorial, true)
from sympy.combinatorics import RGS_enum, RGS_unrank, Permutation
from sympy.utilities.iterables import (
_partition, _set_partitions, binary_partitions, bracelets, capture,
cartes, common_prefix, common_suffix, dict_merge, flatten,
generate_bell, generate_derangements, generate_involutions,
generate_oriented_forest, group, has_dups, kbins, minlex, multiset,
multiset_combinations, multiset_partitions, multiset_permutations,
necklaces, numbered_symbols, ordered, partitions, permutations,
postfixes, postorder_traversal, prefixes, reshape, rotate_left,
rotate_right, runs, sift, subsets, take, topological_sort, unflatten,
uniq, variations)
from sympy.core.singleton import S
from sympy.functions.elementary.piecewise import Piecewise, ExprCondPair
from sympy.utilities.pytest import raises
w, x, y, z = symbols('w,x,y,z')
def test_postorder_traversal():
expr = z + w*(x + y)
    expected = [z, w, x, y, x + y, w*(x + y), w*(x + y) + z]
assert list(postorder_traversal(expr, keys=default_sort_key)) == expected
assert list(postorder_traversal(expr, keys=True)) == expected
expr = Piecewise((x, x < 1), (x**2, True))
expected = [
x, 1, x, x < 1, ExprCondPair(x, x < 1),
2, x, x**2, true,
ExprCondPair(x**2, True), Piecewise((x, x < 1), (x**2, True))
]
    assert list(postorder_traversal(expr, keys=default_sort_key)) == expected
assert list(postorder_traversal(
[expr], keys=default_sort_key)) == expected + [[expr]]
assert list(postorder_traversal(Integral(x**2, (x, 0, 1)),
keys=default_sort_key)) == [
2, x, x**2, 0, 1, x, Tuple(x, 0, 1),
Integral(x**2, Tuple(x, 0, 1))
]
assert list(postorder_traversal(('abc', ('d', 'ef')))) == [
'abc', 'd', 'ef', ('d', 'ef'), ('abc', ('d', 'ef'))]
def test_flatten():
assert flatten((1, (1,))) == [1, 1]
assert flatten((x, (x,))) == [x, x]
ls = [[(-2, -1), (1, 2)], [(0, 0)]]
assert flatten(ls, levels=0) == ls
assert flatten(ls, levels=1) == [(-2, -1), (1, 2), (0, 0)]
assert flatten(ls, levels=2) == [-2, -1, 1, 2, 0, 0]
assert flatten(ls, levels=3) == [-2, -1, 1, 2, 0, 0]
raises(ValueError, lambda: flatten(ls, levels=-1))
class MyOp(Basic):
pass
assert flatten([MyOp(x, y), z]) == [MyOp(x, y), z]
assert flatten([MyOp(x, y), z], cls=MyOp) == [x, y, z]
assert flatten(set([1, 11, 2])) == list(set([1, 11, 2]))
def test_group():
assert group([]) == []
assert group([], multiple=False) == []
assert group([1]) == [[1]]
assert group([1], multiple=False) == [(1, 1)]
assert group([1, 1]) == [[1, 1]]
assert group([1, 1], multiple=False) == [(1, 2)]
assert group([1, 1, 1]) == [[1, 1, 1]]
assert group([1, 1, 1], multiple=False) == [(1, 3)]
assert group([1, 2, 1]) == [[1], [2], [1]]
assert group([1, 2, 1], multiple=False) == [(1, 1), (2, 1), (1, 1)]
assert group([1, 1, 2, 2, 2, 1, 3, 3]) == [[1, 1], [2, 2, 2], [1], [3, 3]]
assert group([1, 1, 2, 2, 2, 1, 3, 3], multiple=False) == [(1, 2),
(2, 3), (1, 1), (3, 2)]
def test_subsets():
# combinations
assert list(subsets([1, 2, 3], 0)) == [()]
assert list(subsets([1, 2, 3], 1)) == [(1,), (2,), (3,)]
assert list(subsets([1, 2, 3], 2)) == [(1, 2), (1, 3), (2, 3)]
assert list(subsets([1, 2, 3], 3)) == [(1, 2, 3)]
l = list(range(4))
assert list(subsets(l, 0, repetition=True)) == [()]
assert list(subsets(l, 1, repetition=True)) == [(0,), (1,), (2,), (3,)]
assert list(subsets(l, 2, repetition=True)) == [(0, 0), (0, 1), (0, 2),
(0, 3), (1, 1), (1, 2),
(1, 3), (2, 2), (2, 3),
(3, 3)]
assert list(subsets(l, 3, repetition=True)) == [(0, 0, 0), (0, 0, 1),
(0, 0, 2), (0, 0, 3),
(0, 1, 1), (0, 1, 2),
(0, 1, 3), (0, 2, 2),
(0, 2, 3), (0, 3, 3),
(1, 1, 1), (1, 1, 2),
(1, 1, 3), (1, 2, 2),
(1, 2, 3), (1, 3, 3),
(2, 2, 2), (2, 2, 3),
(2, 3, 3), (3, 3, 3)]
assert len(list(subsets(l, 4, repetition=True))) == 35
assert list(subsets(l[:2], 3, repetition=False)) == []
assert list(subsets(l[:2], 3, repetition=True)) == [(0, 0, 0),
(0, 0, 1),
(0, 1, 1),
(1, 1, 1)]
assert list(subsets([1, 2], repetition=True)) == \
[(), (1,), (2,), (1, 1), (1, 2), (2, 2)]
assert list(subsets([1, 2], repetition=False)) == \
[(), (1,), (2,), (1, 2)]
assert list(subsets([1, 2, 3], 2)) == \
[(1, 2), (1, 3), (2, 3)]
assert list(subsets([1, 2, 3], 2, repetition=True)) == \
[(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
def test_variations():
# permutations
l = list(range(4))
assert list(variations(l, 0, repetition=False)) == [()]
assert list(variations(l, 1, repetition=False)) == [(0,), (1,), (2,), (3,)]
assert list(variations(l, 2, repetition=False)) == [(0, 1), (0, 2), (0, 3), (1, 0), (1, 2), (1, 3), (2, 0), (2, 1), (2, 3), (3, 0), (3, 1), (3, 2)]
assert list(variations(l, 3, repetition=False)) == [(0, 1, 2), (0, 1, 3), (0, 2, 1), (0, 2, 3), (0, 3, 1), (0, 3, 2), (1, 0, 2), (1, 0, 3), (1, 2, 0), (1, 2, 3), (1, 3, 0), (1, 3, 2), (2, 0, 1), (2, 0, 3), (2, 1, 0), (2, 1, 3), (2, 3, 0), (2, 3, 1), (3, 0, 1), (3, 0, 2), (3, 1, 0), (3, 1, 2), (3, 2, 0), (3, 2, 1)]
assert list(variations(l, 0, repetition=True)) == [()]
assert list(variations(l, 1, repetition=True)) == [(0,), (1,), (2,), (3,)]
assert list(variations(l, 2, repetition=True)) == [(0, 0), (0, 1), (0, 2),
(0, 3), (1, 0), (1, 1),
(1, 2), (1, 3), (2, 0),
(2, 1), (2, 2), (2, 3),
(3, 0), (3, 1), (3, 2),
(3, 3)]
assert len(list(variations(l, 3, repetition=True))) == 64
assert len(list(variations(l, 4, repetition=True))) == 256
assert list(variations(l[:2], 3, repetition=False)) == []
assert list(variations(l[:2], 3, repetition=True)) == [
(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)
]
def test_cartes():
assert list(cartes([1, 2], [3, 4, 5])) == \
[(1, 3), (1, 4), (1, 5), (2, 3), (2, 4), (2, 5)]
assert list(cartes()) == [()]
assert list(cartes('a')) == [('a',)]
assert list(cartes('a', repeat=2)) == [('a', 'a')]
assert list(cartes(list(range(2)))) == [(0,), (1,)]
def test_numbered_symbols():
s = numbered_symbols(cls=Dummy)
assert isinstance(next(s), Dummy)
def test_sift():
assert sift(list(range(5)), lambda _: _ % 2) == {1: [1, 3], 0: [0, 2, 4]}
assert sift([x, y], lambda _: _.has(x)) == {False: [y], True: [x]}
assert sift([S.One], lambda _: _.has(x)) == {False: [1]}
def test_take():
X = numbered_symbols()
assert take(X, 5) == list(symbols('x0:5'))
assert take(X, 5) == list(symbols('x5:10'))
assert take([1, 2, 3, 4, 5], 5) == [1, 2, 3, 4, 5]
def test_dict_merge():
assert dict_merge({}, {1: x, y: z}) == {1: x, y: z}
assert dict_merge({1: x, y: z}, {}) == {1: x, y: z}
assert dict_merge({2: z}, {1: x, y: z}) ==
|
oihane/odoomrp-wip
|
mrp_byproduct_operations/__init__.py
|
Python
|
agpl-3.0
| 945
| 0
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Daniel Campos (danielcampos@avanzosc.es) Date: 29/09/2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import models
|
rieder/MASC
|
src/amuse/ext/masc/cluster.py
|
Python
|
mit
| 10,655
| 0
|
#!/usr/bin/env python
"""
make_a_star_cluster.py creates a model star cluster,
which can then be used in N-body simulations or for other purposes.
It requires AMUSE, which can be downloaded from http://amusecode.org or
https://github.com/amusecode/amuse.
Currently not feature-complete yet, and function/argument names are
subject to change.
-- Steven Rieder steven at rieder punt nl
"""
import logging
import numpy
from amuse.units import (
units,
nbody_system,
generic_unit_converter,
)
from amuse.units.trigo import sin, cos
from amuse.datamodel.particles import Particles
from amuse.ic.plummer import new_plummer_sphere
from amuse.ic.kingmodel import new_king_model
try:
from amuse.ic.fractalcluster import new_fractal_cluster_model
except ImportError:
new_fractal_cluster_model = None
def new_masses(
stellar_mass=False,
initial_mass_function="salpeter",
upper_mass_limit=125. | units.MSun,
lower_mass_limit=0.1 | units.MSun,
number_of_stars=1024,
exceed_mass=True,
):
imf_name = initial_mass_function.lower()
if imf_name == "salpeter":
from amuse.ic.salpeter import new_salpeter_mass_distribution
initial_mass_function = new_salpeter_mass_distribution
elif imf_name == "kroupa":
from amuse.ic.brokenimf import new_kroupa_mass_distribution
initial_mass_function = new_kroupa_mass_distribution
elif imf_name == "flat":
from amuse.ic.flatimf import new_flat_mass_distribution
initial_mass_function = new_flat_mass_distribution
elif imf_name == "fixed":
from amuse.ic.flatimf import new_flat_mass_distribution
def new_fixed_mass_distribution(
number_of_particles, *list_arguments, **keyword_arguments
):
return new_flat_mass_distribution(
number_of_particles,
mass_min=stellar_mass/number_of_stars,
mass_max=stellar_mass/number_of_stars,
)
initial_mass_function = new_fixed_mass_distribution
if stellar_mass:
        # it is best to slightly underestimate mean_mass for faster results
mean_mass = 0.25 | units.MSun
mass = initial_mass_function(
int(stellar_mass / mean_mass),
mass_min=lower_mass_limit,
mass_max=upper_mass_limit,
)
previous_number_of_stars = len(mass)
if exceed_mass:
# Allow one final star to exceed stellar_mass
final_star = 1+numpy.argmax(mass.cumsum() > stellar_mass)
if (final_star > 1 and final_star < len(mass)):
mass = mass[:final_star]
else:
# Limit to stars not exceeding stellar_mass
mass = mass[mass.cumsum() < stellar_mass]
additional_mass = [] | units.MSun
while True:
if previous_number_of_stars + len(additional_mass) > len(mass):
break
# We don't have enough stars yet, or at least not tested this
additional_mass = initial_mass_function(
int(stellar_mass / mean_mass),
mass_min=lower_mass_limit,
mass_max=upper_mass_limit,
)
if exceed_mass:
# Allow one final star to exceed stellar_mass
final_star = 1+numpy.argmax(
mass.sum() + additional_mass.cumsum() > stellar_mass
)
if (final_star > 1 and final_star < len(mass)):
additional_mass = additional_mass[:final_star]
else:
# Limit to stars not exceeding stellar_mass
additional_mass = additional_mass[
mass.sum() + additional_mass.cumsum() < stellar_mass
]
mass.append(additional_mass)
number_of_stars = len(mass)
else:
# Give stars their mass
mass = initial_mass_function(
number_of_stars,
mass_min=lower_mass_limit,
mass_max=upper_mass_limit,
)
return mass
def new_star_cluster(
stellar_mass=False,
initial_mass_function="salpeter",
upper_mass_limit=125. | units.MSun,
lower_mass_limit=0.1 | units.MSun,
number_of_stars=1024,
effective_radius=3.0 | units.parsec,
star_distribution="plummer",
star_distribution_w0=7.0,
star_distribution_fd=2.0,
star_metallicity=0.01,
# initial_binary_fraction=0,
**kwargs
):
"""
Create stars.
When using an IMF, either the stellar mass is fixed (within
stochastic error) or the number of stars is fixed. When using
equal-mass stars, both are fixed.
"""
mass = new_masses(
stellar_mass=stellar_mass,
initial_mass_function=initial_mass_function,
upper_mass_limit=upper_mass_limit,
lower_mass_limit=lower_mass_limit,
        number_of_stars=number_of_stars,
)
total_mass = mass.sum()
number_of_stars = len(mass)
print(number_of_stars, total_mass, effective_radius)
converter = generic_unit_converter.ConvertBetweenGenericAndSiUnits(
total_mass,
1. | units.kms,
effective_radius,
|
)
# Give stars a position and velocity, based on the distribution model.
if star_distribution == "plummer":
stars = new_plummer_sphere(
number_of_stars,
convert_nbody=converter,
)
elif star_distribution == "king":
stars = new_king_model(
number_of_stars,
star_distribution_w0,
convert_nbody=converter,
)
elif star_distribution == "fractal":
stars = new_fractal_cluster_model(
number_of_stars,
fractal_dimension=star_distribution_fd,
convert_nbody=converter,
)
else:
return -1, "No stellar distribution"
# set the stellar mass.
stars.mass = mass
# set other stellar parameters.
stars.metallicity = star_metallicity
# Virialize the star cluster if > 1 star
if len(stars) > 1:
stars.move_to_center()
stars.scale_to_standard(
convert_nbody=converter,
# virial_ratio=virial_ratio,
# smoothing_length_squared= ...,
)
# Record the cluster's initial parameters to the particle distribution
stars.collection_attributes.initial_mass_function = \
initial_mass_function.lower()
stars.collection_attributes.upper_mass_limit = upper_mass_limit
stars.collection_attributes.lower_mass_limit = lower_mass_limit
stars.collection_attributes.number_of_stars = number_of_stars
stars.collection_attributes.effective_radius = effective_radius
stars.collection_attributes.star_distribution = star_distribution
stars.collection_attributes.star_distribution_w0 = star_distribution_w0
stars.collection_attributes.star_distribution_fd = star_distribution_fd
stars.collection_attributes.star_metallicity = star_metallicity
# Derived/legacy values
stars.collection_attributes.converter_mass = \
converter.to_si(1 | nbody_system.mass)
stars.collection_attributes.converter_length =\
converter.to_si(1 | nbody_system.length)
stars.collection_attributes.converter_speed =\
converter.to_si(1 | nbody_system.speed)
return stars
def new_stars_from_sink(
origin,
upper_mass_limit=125 | units.MSun,
lower_mass_limit=0.1 | units.MSun,
default_radius=0.25 | units.pc,
velocity_dispersion=1 | units.kms,
logger=None,
initial_mass_function="kroupa",
distribution="random",
randomseed=None,
**keyword_arguments
):
"""
Form stars from an origin particle that keeps track of the properties of
this region.
"""
logger = logger or logging.getLogger(__name__)
if randomseed is not None:
logger.info("setting random seed to %i", randomseed)
numpy.random.seed(randomseed)
try:
initialised = origin.initialised
except AttributeError:
initialised = False
if not initialised:
logger.debug(
"In
|
googleinterns/local_global_ts_representation
|
main.py
|
Python
|
apache-2.0
| 3,904
| 0.00666
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import sys
from gl_rep.data_loaders import airq_data_loader, simulation_loader, physionet_data_loader, har_data_loader
from gl_rep.glr import GLR
from gl_rep.models import EncoderGlobal, EncoderLocal, WindowDecoder
from gl_rep.utils import plot_reps, train_glr
import tensorflow as tf
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
gpus = tf.config.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
def main(args):
"""
    Train and validate our local and global representation learning framework for different datasets
"""
is_continue = False
# Load the data and experiment configurations
with open('configs.json') as config_file:
configs = json.load(config_file)[args.data]
if args.data=='air_quality':
n_epochs = 250
lr = 1e-3
trainset, validset, testset, _ = airq_data_loader(normalize="mean_zero")
elif args.data=='simulation':
n_epochs = 100
lr = 1e-2
trainset, validset, testset, _, _ = simulation_loader(normalize="none", mask_threshold=0.0)
elif args.data == 'physionet':
n_epochs = 200
lr = 1e-3
trainset, validset, testset, _ = physionet_data_loader(normalize="mean_zero")
elif args.data=='har':
n_epochs = 150
lr = 1e-3
        trainset, validset, testset, normalization_specs = har_data_loader(normalize='none')
    else:
        raise ValueError('Unsupported --data value: %s' % args.data)
# Create the representation learning models
zt_encoder = EncoderLocal(zl_size=configs["zl_size"], hidden_sizes=configs["glr_local_encoder_size"])
zg_encoder = EncoderGlobal(zg_size=configs["zg_size"], hidden_sizes=configs["glr_global_encoder_size"])
dec = WindowDecoder(output_size=configs["feature_size"], output_length=configs["window_size"],
hidden_sizes=configs["glr_decoder_size"])
rep_model = GLR(global_encoder=zg_encoder, local_encoder=zt_encoder, decoder=dec,
window_size=configs["window_size"], time_length=configs["t_len"],
data_dim=configs["feature_size"], kernel_scales=configs["kernel_scales"],
kernel=configs["kernels"], beta=configs["beta"], M=configs["mc_samples"], sigma=.5,
lamda=args.lamda, length_scale=configs["length_scale"], p=15)
    # Train the decoupled local and global representation learning modules
if args.train:
if is_continue:
rep_model.load_weights('./ckpt/glr_%s_lambda%.1f' %(args.data, args.lamda))
train_glr(rep_model, trainset, validset, lr=lr, n_epochs=n_epochs, data=args.data)
# Plot summary performance graphs for the learning framework,
# including the representation distribution and signal reconstruction plots
rep_model.load_weights('./ckpt/glr_%s_lambda%.1f' %(args.data, args.lamda))
plot_reps(testset, rep_model, args.data)
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default='air_quality', help="dataset to use")
parser.add_argument('--lamda', type=float, default=1., help="regularization weight")
parser.add_argument('--train', action='store_true')
args = parser.parse_args()
main(args)
|
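A minimal way to drive the training entry point above; the flag names and dataset identifiers are taken from the argparse block, while the presence of configs.json and the data files expected by the loaders is assumed.

# Hypothetical driver script; assumes main.py, configs.json and the dataset
# files are available in the working directory.
import subprocess

for dataset in ("air_quality", "simulation", "physionet", "har"):
    subprocess.run(
        ["python", "main.py", "--data", dataset, "--train", "--lamda", "1.0"],
        check=True,
    )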
staudt/gimmick-generator
|
lists/generate.py
|
Python
|
mit
| 352
| 0.028409
|
filenames = ['firstNames', 'secondNames', 'famousWrestlers', 'categories', 'jobs']
for filename in filenames:
with open('%s.csv' % filename, 'r') as f:
namelist = []
for name in f.read().split('\n'):
            if len(name) > 1: namelist.append(name)
with open('../js/%s.js' % filename, 'w') as dest_f:
        dest_f.write('%s = %s;' % (filename, namelist))
|
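For illustration, the conversion above applied to one hypothetical CSV (the names are made up): a firstNames.csv containing the lines Hulk and Macho would be written to ../js/firstNames.js as firstNames = ['Hulk', 'Macho'];. The same logic in isolation:

# Standalone illustration of the CSV-to-JS conversion with made-up names.
names = [n for n in "Hulk\nMacho\n".split('\n') if len(n) > 1]
print('%s = %s;' % ('firstNames', names))  # prints: firstNames = ['Hulk', 'Macho'];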
Coderhypo/makinami
|
illustrious/spiders/poj.py
|
Python
|
mit
| 12,789
| 0.005943
|
#!/usr/bin/env python
# coding=utf-8
from scrapy.spiders import Spider
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor as link
from scrapy.http import Request, FormRequest
from scrapy.selector import Selector
from illustrious.items import ProblemItem, SolutionItem, AccountItem
from datetime import datetime
import time
LANGUAGE = {
'g++': '0',
'gcc': '1',
'java': '2',
'pascal': '3',
'c++': '4',
'c': '5',
'fortran': '6'
}
class PojInitSpider(CrawlSpider):
name = 'poj_init'
allowed_domains = ['poj.org']
start_urls = [
'http://poj.org/problemlist'
]
download_delay = 5
rules = [
Rule(
link(
allow=('problemlist\?volume=[0-9]+'),
unique=True
)
),
Rule(
link(
allow=('problem\?id=[0-9]+')
), callback='problem_item'
)
]
def problem_item(self, response):
html = response.body.\
replace('<=', ' ≤ ').\
replace(' < ', ' < ').\
replace(' > ', ' > ').\
replace('>=', ' ≥ ')
sel = Selector(text=html)
item = ProblemItem()
print response
item['oj'] = 'poj'
item['problem_id'] = response.url[-4:]
item['problem_url'] = response.url
item['title'] = sel.css('.ptt').xpath('./text()').extract()[0]
item['description'] = sel.css('.ptx').extract()[0]
item['input'] = sel.css('.ptx').extract()[1]
item['output'] = sel.css('.ptx').extract()[2]
try:
item['time_limit'] = sel.css('.plm').re('Case\sT[\S*\s]*MS')[0][21:]
except:
item['time_limit'] = sel.css('.plm').re('T[\S*\s]*MS')[0][16:]
item['memory_limit'] = sel.css('.plm').re('Me[\S*\s]*K')[0]
item['sample_input'] = sel.css('.sio').extract()[0]
item['sample_output'] = sel.css('.sio').extract()[1]
item['update_time'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
return item
class PojProblemSpider(Spider):
name = 'poj_problem'
allowed_domains = ['poj.org']
def __init__(self, problem_id='1000', *args, **kwargs):
self.problem_id = problem_id
super(PojProblemSpider, self).__init__(*args, **kwargs)
self.start_urls = [
'http://poj.org/problem?id=%s' % problem_id
]
def parse(self, response):
html = response.body.\
replace('<=', ' ≤ ').\
replace(' < ', ' < ').\
replace(' > ', ' > ').\
replace('>=', ' ≥ ')
sel = Selector(text=html)
item = ProblemItem()
item['oj'] = 'poj'
item['problem_id'] = self.problem_id
item['problem_url'] = response.url
item['title'] = sel.css('.ptt').xpath('./text()').extract()[0]
item['description'] = sel.css('.ptx').extract()[0]
item['input'] = sel.css('.ptx').extract()[1]
item['output'] = sel.css('.ptx').extract()[2]
try:
item['time_limit'] = sel.css('.plm').re('Case\sT[\S*\s]*MS')[0][21:]
except:
item['time_limit'] = sel.css('.plm').re('T[\S*\s]*MS')[0][16:]
item['memory_limit'] = sel.css('.plm').re('Me[\S*\s]*K')[0][18:]
item['sample_input'] = sel.css('.sio').extract()[0]
item['sample_output'] = sel.css('.sio').extract()[1]
item['update_time'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
return item
class PojSubmitSpider(CrawlSpider):
name = 'poj_submit'
allowed_domains = ['poj.org']
login_url = 'http://poj.org/login'
submit_url = 'http://poj.org/submit'
login_verify_url = 'http://poj.org/loginlog'
source = \
'I2luY2x1ZGUgPHN0ZGlvLmg+CgppbnQgbWFpbigpCnsKICAgIGludCBhLGI7CiAgICBzY2FuZigiJWQgJWQiLCZhLCAmYik7CiAgICBwcmludGYoIiVkXG4iLGErYik7CiAgICByZXR1cm4gMDsKfQ=='
start_urls = [
"http://poj.org/status"
]
download_delay = 0.5
rules = [
Rule(link(allow=('/status\?top=[0-9]+'), deny=('status\?bottom=[0-9]+')), follow=True, callback='parse_start_url')
]
is_login = False
def __init__(self,
solution_id='None',
problem_id='1000',
language='g++',
source=None,
username='sdutacm1',
password='sdutacm', *args, **kwargs):
super(PojSubmitSpider, self).__init__(*args, **kwargs)
self.solution_id = solution_id
self.username = username
self.password = password
self.problem_id = problem_id
self.language = language
if source is not None:
self.source = source
def start_requests(self):
return [FormRequest(self.login_url,
formdata = {
'user_id1': self.username,
'password1': self.password,
'B1': 'login',
},
callback = self.after_login,
)]
def after_login(self, response):
return [Request(self.login_verify_url,
callback = self.login_verify
)]
def login_verify(self, response):
if response.url == self.login_verify_url:
self.is_login = True
self.login_time = time.mktime(time.strptime(\
response.headers['Date'], \
'%a, %d %b %Y %H:%M:%S %Z')) + (8 * 60 * 60)
time.sleep(1)
return [FormRequest(self.submit_url,
formdata = {
'problem_id': self.problem_id,
'language': LANGUAGE.get(self.language, '0'),
'source': self.source,
'submit': 'Submit',
'encoded': '1'
},
callback = self.after_submit,
dont_filter = True
)]
else:
return Request(self.start_urls[0], callback=self.parse_start_url)
def after_submit(self, response):
time.sleep(3)
for url in self.start_urls:
yield self.make_requests_from_url(url)
def parse_start_url(self, response):
sel = Selector(response)
item = SolutionItem()
item['oj'] = 'poj'
item['problem_id'] = self.problem_id
item['language'] = self.language
item['solution_id'] = self.solution_id
if self.is_login:
for tr in sel.xpath('//table')[-1].xpath('.//tr')[1:]:
user = tr.xpath('.//td/a/text()').extract()[0]
_submit_time = tr.xpath('.//td/text()').extract()[-1]
if user == self.username:
item['submit_time'] = _submit_time
item['run_id'] = tr.xpath('.//td/text()').extract()[0]
try:
item['memory'] = \
tr.xpath('.//td')[4].xpath('./text()').extract()[0]
item['time'] = \
tr.xpath('.//td')[5].xpath('./text()').extract()[0]
except:
pass
item['code_length'] = tr.xpath('.//td/text()').extract()[-2]
item['result'] = tr.xpath('.//td').xpath('.//font/text()').extract()[0]
self._rules = []
return item
else:
item['result'] = 'Submit Error'
self._rules = []
return item
class PojStatusSpider(Spider):
name = 'poj_status'
allowed_domains = ['poj.org']
def __init__(self, run_id=13881167, *args, **kwargs):
|
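A sketch of how these spiders can be launched programmatically; CrawlerProcess passes keyword arguments through to the spider constructors shown above. The project layout is assumed to be importable, and since the file uses Python 2 syntax (print response), a matching interpreter is assumed as well.

# Hedged sketch: crawl a single POJ problem page with the spider above.
from scrapy.crawler import CrawlerProcess
from illustrious.spiders.poj import PojProblemSpider

process = CrawlerProcess({'DOWNLOAD_DELAY': 5})
process.crawl(PojProblemSpider, problem_id='1001')
process.start()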
code-google-com/cortex-vfx
|
contrib/IECoreArnold/python/IECoreArnold/__init__.py
|
Python
|
bsd-3-clause
| 1,852
| 0
|
##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from _IECoreArnold import *
from UniverseBlock import UniverseBlock
|