| repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (19 classes) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
HaebinShin/tensorflow
|
tensorflow/contrib/learn/python/learn/utils/export.py
|
1
|
7614
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.session_bundle import exporter
from tensorflow.contrib.session_bundle import gc
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver
def _get_first_op_from_collection(collection_name):
"""Get first element from the collection."""
elements = ops.get_collection(collection_name)
if elements is not None:
if elements:
return elements[0]
return None
def _get_saver():
"""Lazy init and return saver."""
saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is not None:
if saver:
saver = saver[0]
else:
saver = None
if saver is None and variables.all_variables():
saver = tf_saver.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
return saver
def _export_graph(graph, saver, checkpoint_path, export_dir,
default_graph_signature, named_graph_signatures,
exports_to_keep):
"""Exports graph via session_bundle, by creating a Session."""
with graph.as_default():
with tf_session.Session('') as session:
variables.initialize_local_variables()
data_flow_ops.initialize_all_tables()
saver.restore(session, checkpoint_path)
export = exporter.Exporter(saver)
export.init(init_op=control_flow_ops.group(
variables.initialize_local_variables(),
data_flow_ops.initialize_all_tables()),
default_graph_signature=default_graph_signature,
named_graph_signatures=named_graph_signatures)
export.export(export_dir, contrib_variables.get_global_step(), session,
exports_to_keep=exports_to_keep)
def generic_signature_fn(examples, unused_features, predictions):
"""Creates generic signature from given examples and predictions.
This is needed for backward compatibility with default behaviour of
export_estimator.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `dict` of `Tensor`s.
Returns:
Tuple of default signature and named signature.
"""
tensors = {'inputs': examples}
if not isinstance(predictions, dict):
predictions = {'outputs': predictions}
tensors.update(predictions)
default_signature = exporter.generic_signature(tensors)
return default_signature, {}
def logistic_regression_signature_fn(examples, unused_features, predictions):
"""Creates regression signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `dict` of `Tensor`s.
Returns:
Tuple of default classification signature and named signature.
"""
# predictions has shape [batch_size, 2] where first column is P(Y=0|x)
# while second column is P(Y=1|x). We are only interested in the second
# column for inference.
assert predictions.get_shape()[1] == 2
positive_predictions = predictions[:, 1]
signatures = {}
signatures['regression'] = exporter.regression_signature(examples,
positive_predictions)
return signatures['regression'], signatures
def classification_signature_fn(examples, unused_features, predictions):
"""Creates classification signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `dict` of `Tensor`s.
Returns:
Tuple of default classification signature and named signature.
"""
signatures = {}
signatures['classification'] = exporter.classification_signature(
examples, classes_tensor=predictions)
return signatures['classification'], signatures
# pylint: disable=protected-access
def _default_input_fn(estimator, examples):
"""Creates default input parsing using Estimator's feature signatures."""
return estimator._get_feature_ops_from_example(examples)
def export_estimator(estimator,
export_dir,
signature_fn=None,
input_fn=_default_input_fn,
default_batch_size=1,
exports_to_keep=None):
"""Exports inference graph into given dir.
Args:
estimator: Estimator to export
export_dir: A string containing a directory to write the exported graph
and checkpoints.
    signature_fn: Function that given `Tensor` of `Example` strings,
      `dict` of `Tensor`s for features and `dict` of `Tensor`s for predictions
      and returns default and named exporting signatures.
    input_fn: Function that given `Tensor` of `Example` strings, parses it into
      features that are then passed to the model.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
"""
checkpoint_path = tf_saver.latest_checkpoint(estimator._model_dir)
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
examples = array_ops.placeholder(dtype=dtypes.string,
shape=[default_batch_size],
name='input_example_tensor')
features = input_fn(estimator, examples)
predictions = estimator._get_predict_ops(features)
if signature_fn:
default_signature, named_graph_signatures = signature_fn(examples,
features,
predictions)
else:
logging.warn(
'Change warning: `signature_fn` will be required after 2016-08-01.\n'
'Using generic signatures for now. To maintain this behavior, '
'pass:\n'
' signature_fn=export.generic_signature_fn\n'
'Also consider passing a regression or classification signature; see '
'cl/126430915 for an example.')
default_signature, named_graph_signatures = generic_signature_fn(
examples, features, predictions)
if exports_to_keep is not None:
exports_to_keep = gc.largest_export_versions(exports_to_keep)
_export_graph(g, _get_saver(), checkpoint_path, export_dir,
default_graph_signature=default_signature,
named_graph_signatures=named_graph_signatures,
exports_to_keep=exports_to_keep)
# pylint: enable=protected-access
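# Usage sketch (illustrative addition, not part of the upstream module): a
# trained tf.contrib.learn Estimator can be exported with the generic
# signature. `my_estimator` and the export directory are hypothetical names.
#
#   from tensorflow.contrib.learn.python.learn.utils import export
#
#   export.export_estimator(my_estimator,
#                           export_dir='/tmp/my_model_export',
#                           signature_fn=export.generic_signature_fn,
#                           exports_to_keep=5)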
|
apache-2.0
| -3,426,686,088,125,877,000
| 38.046154
| 82
| 0.678356
| false
| 4.373349
| false
| false
| false
|
iLampard/alphaware
|
alphaware/tests/utils/test_pandas_utils.py
|
1
|
6717
|
# -*- coding: utf-8 -*-
from unittest import TestCase
from parameterized import parameterized
import pandas as pd
import numpy as np
from numpy.testing.utils import assert_array_equal
from pandas import (MultiIndex,
Index)
from pandas.util.testing import assert_frame_equal, assert_series_equal
from alphaware.enums import OutputDataFormat, FreqType
from alphaware.const import INDEX_FACTOR
from alphaware.utils import (convert_df_format,
top,
group_by_freq,
fwd_return,
weighted_rank)
from datetime import datetime as dt
class TestPandasUtils(TestCase):
@parameterized.expand([(pd.DataFrame({'001': [1, 2, 3], '002': [2, 3, 4]}, index=['2014', '2015', '2016']),
OutputDataFormat.MULTI_INDEX_DF,
'test_factor',
INDEX_FACTOR,
pd.DataFrame(index=MultiIndex(levels=[['2014', '2015', '2016'], ['001', '002']],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=['trade_date', 'ticker']),
data=[1, 2, 2, 3, 3, 4],
columns=['test_factor']))])
def test_convert_df_format_1(self, data, target_format, col_name, multi_index, expected):
calculated = convert_df_format(data, target_format, col_name, multi_index)
assert_frame_equal(calculated, expected)
@parameterized.expand(
[(pd.DataFrame(
index=MultiIndex.from_product([['2014', '2015', '2016'], ['001', '002']], names=['trade_date', 'ticker']),
data=[1, 2, 3, 4, 5, 6],
columns=['factor']),
OutputDataFormat.PITVOT_TABLE_DF,
'factor',
INDEX_FACTOR,
pd.DataFrame({'001': [1, 3, 5], '002': [2, 4, 6]},
index=Index(['2014', '2015', '2016'], name='trade_date')))])
def test_convert_df_format_2(self, data, target_format, col_name, multi_index, expected):
calculated = convert_df_format(data, target_format, col_name, multi_index)
assert_frame_equal(calculated, expected)
@parameterized.expand(
[(pd.DataFrame(data=[[1, 23, 4, 5], [4, 5, 7, 8], [10, 5, 11, 8], [34, 65, 27, 78]],
columns=['A', 'B', 'C', 'D']),
2,
['A'],
pd.DataFrame(data=[[34, 65, 27, 78], [10, 5, 11, 8]], index=[3, 2], columns=['A', 'B', 'C', 'D'])
)])
def test_top_1(self, data, n, column, expected):
calculated = top(data, column=column, n=n)
assert_frame_equal(calculated, expected)
@parameterized.expand(
[(pd.Series(data=[35, 12, 45, 79, 123, 74, 35]),
3,
pd.Series(data=[123, 79, 74], index=[4, 3, 5])
)])
def test_top_2(self, data, n, expected):
calculated = top(data, n=n)
assert_series_equal(calculated, expected)
@parameterized.expand(
[(pd.DataFrame(data=[1, 2, 3, 4, 5, 6, 7, 9, 0, 12],
index=[dt(2017, 7, 1), dt(2017, 6, 1), dt(2017, 7, 2), dt(2017, 6, 1), dt(2017, 3, 1),
dt(2017, 3, 1), dt(2017, 1, 1), dt(2017, 2, 1), dt(2017, 1, 1), dt(2017, 2, 1)]),
dt(2017, 7, 31),
FreqType.EOM,
pd.DataFrame(data=[1, 3], index=[dt(2017, 7, 1), dt(2017, 7, 2)])
),
(pd.Series(data=[1, 2, 3, 4, 5, 6, 7, 9, 0, 12],
index=[dt(2016, 7, 1), dt(2016, 6, 1), dt(2017, 7, 2), dt(2017, 7, 1), dt(2017, 3, 1),
dt(2017, 3, 1), dt(2017, 1, 1), dt(2017, 2, 1), dt(2017, 1, 1), dt(2017, 2, 1)]),
dt(2016, 12, 31),
FreqType.EOY,
pd.DataFrame(data=[2, 1], index=[dt(2016, 6, 1), dt(2016, 7, 1)])
),
(pd.Series(data=[1, 2, 3, 4, 5, 6, 7, 9, 0, 12],
index=[dt(2016, 7, 1), dt(2016, 7, 1), dt(2017, 7, 2), dt(2017, 7, 1), dt(2017, 3, 1),
dt(2017, 3, 1), dt(2017, 1, 1), dt(2017, 2, 1), dt(2017, 1, 1), dt(2017, 2, 1)]),
(2016, 7, 1),
FreqType.EOD,
pd.DataFrame(data=[1, 2], index=[dt(2016, 7, 1), dt(2016, 7, 1)])
)
])
def test_group_by_freq(self, data, group, freq, expected):
calculated = group_by_freq(data, freq=freq).get_group(group)
assert_frame_equal(calculated, expected)
@parameterized.expand([(pd.Series(data=[1, 2, 3, 4],
index=pd.MultiIndex.from_product([[dt(2014, 1, 30), dt(2014, 2, 28)], ['a', 'b']],
names=['trade_date', 'ticker'])),
1,
pd.DataFrame(data=[3, 4],
index=pd.MultiIndex.from_product([[dt(2014, 1, 30)], ['a', 'b']],
names=['trade_date', 'ticker']),
columns=['fwd_return'])
),
(pd.DataFrame(data=[1, 2, 3, 4, 5, 6],
index=pd.MultiIndex.from_product(
[[dt(2014, 1, 30), dt(2014, 2, 28), dt(2014, 3, 30)], ['a', 'b']],
names=['trade_date', 'ticker'])),
2,
pd.DataFrame(data=[5, 6],
index=pd.MultiIndex.from_product([[dt(2014, 1, 30)], ['a', 'b']],
names=['trade_date', 'ticker']),
columns=['fwd_return'])
)
])
def test_fwd_return(self, data, period, expected):
calculated = fwd_return(data, period=period)
assert_frame_equal(calculated, expected)
@parameterized.expand(
[(pd.DataFrame({'a': [1, 2, 3], 'b': [2, 4, 6]}), [1, 1], None, True, pd.DataFrame([0.0, 1.0, 2.0])),
(pd.DataFrame({'a': [1, 2, 3], 'b': [2, 4, 6]}), [1, 0], [0.6, 0.4], False, np.array([0.8, 1.0, 1.2]))])
def test_weighted_rank(self, data, order, weight, out_df, expected):
calculated = weighted_rank(data, order, weight, out_df)
if isinstance(expected, pd.DataFrame):
assert_frame_equal(calculated, expected)
else:
assert_array_equal(calculated, expected)
|
apache-2.0
| -7,518,428,269,691,289,000
| 51.476563
| 120
| 0.450648
| false
| 3.632774
| true
| false
| false
|
citrix-openstack-build/ironic
|
ironic/drivers/modules/fake.py
|
1
|
2494
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fake driver interfaces used in testing.
"""
from ironic.common import exception
from ironic.common import states
from ironic.drivers import base
class FakePower(base.PowerInterface):
"""Example implementation of a simple power interface."""
def validate(self, node):
return True
def get_power_state(self, task, node):
return states.NOSTATE
def set_power_state(self, task, node, power_state):
pass
def reboot(self, task, node):
pass
class FakeDeploy(base.DeployInterface):
"""Example imlementation of a deploy interface that uses a
separate power interface.
"""
def validate(self, node):
return True
def deploy(self, task, node):
pass
def tear_down(self, task, node):
pass
class FakeVendor(base.VendorInterface):
"""Example implementation of a vendor passthru interface."""
def validate(self, node, **kwargs):
method = kwargs.get('method')
if not method:
raise exception.InvalidParameterValue(_(
"Invalid vendor passthru, no 'method' specified."))
if method == 'foo':
bar = kwargs.get('bar')
if not bar:
raise exception.InvalidParameterValue(_(
"Parameter not passed to Ironic."))
else:
raise exception.InvalidParameterValue(_(
"Unsupported method (%s) passed through to vendor extension.")
% method)
return True
def _foo(self, task, node, bar):
return True if bar == 'baz' else False
def vendor_passthru(self, task, node, **kwargs):
method = kwargs.get('method')
if method == 'foo':
bar = kwargs.get('bar')
return self._foo(task, node, bar)
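# Usage sketch (illustrative addition): the fake vendor interface only accepts
# the 'foo' passthru method; task and node are ignored by this driver, so None
# placeholders are enough for a quick check.
#
#   vendor = FakeVendor()
#   vendor.validate(None, method='foo', bar='baz')                 # True
#   vendor.vendor_passthru(None, None, method='foo', bar='baz')    # True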
|
apache-2.0
| -7,277,306,020,164,892,000
| 28
| 78
| 0.639134
| false
| 4.352531
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2018_05_01/aio/_configuration.py
|
1
|
3218
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ResourceManagementClientConfiguration(Configuration):
"""Configuration for ResourceManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(ResourceManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2018-05-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
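# Usage sketch (illustrative addition, not generated code): this configuration
# is normally built internally by ResourceManagementClient, but it can also be
# constructed directly from an async credential. azure-identity is a separate
# package, and the subscription id below is a placeholder.
#
#   from azure.identity.aio import DefaultAzureCredential
#
#   config = ResourceManagementClientConfiguration(
#       credential=DefaultAzureCredential(),
#       subscription_id="00000000-0000-0000-0000-000000000000",
#   )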
|
mit
| -4,927,705,798,240,208,000
| 47.029851
| 134
| 0.682101
| false
| 4.558074
| true
| false
| false
|
nanobox-io/nanobox-adapter-libcloud
|
nanobox_libcloud/controllers/meta.py
|
1
|
3030
|
from flask import render_template, request
from nanobox_libcloud import app
from nanobox_libcloud.adapters import get_adapter
from nanobox_libcloud.adapters.base import AdapterBase
from nanobox_libcloud.utils import output
# Overview and usage endpoints, to explain how this meta-adapter works
@app.route('/', methods=['GET'])
def overview():
"""Provides an overview of the libcloud meta-adapter, and how to use it, in the most general sense."""
adapters = sorted(AdapterBase.registry.keys())
return render_template("overview.html", adapters=adapters)
@app.route('/docs', methods=['GET'])
def docs():
"""Loads Swagger UI with all the supported adapters' OpenAPI Spec Files pre-loaded into the Topbar for exploration."""
adapters = sorted(AdapterBase.registry.keys())
return render_template("docs.html", adapters=adapters)
@app.route('/<adapter_id>', methods=['GET'])
def usage(adapter_id):
"""Provides usage info for a certain adapter, and how to use it, in a more specific sense."""
adapter = get_adapter(adapter_id)
if not adapter:
return output.failure("That adapter doesn't (yet) exist. Please check the adapter name and try again.", 501)
return render_template("usage.html", adapter=adapter)
@app.route('/<adapter_id>/docs', methods=['GET'])
def adapter_docs(adapter_id):
"""Loads Swagger UI with a certain adapter's OpenAPI Spec File pre-loaded."""
return render_template("docs.html", adapters=[adapter_id])
# Actual metadata endpoints for the Nanobox Provider Adapter API
@app.route('/<adapter_id>/meta', methods=['GET'])
def meta(adapter_id):
"""Provides the metadata for a certain adapter."""
adapter = get_adapter(adapter_id)
if not adapter:
return output.failure("That adapter doesn't (yet) exist. Please check the adapter name and try again.", 501)
return output.success(adapter.do_meta())
@app.route('/<adapter_id>/catalog', methods=['GET'])
def catalog(adapter_id):
"""Provides the catalog data for a certain adapter."""
adapter = get_adapter(adapter_id)
if not adapter:
return output.failure("That adapter doesn't (yet) exist. Please check the adapter name and try again.", 501)
result = adapter.do_catalog(request.headers)
if not isinstance(result, list):
return output.failure('%d: %s' % (result.code, result.message) if hasattr(result, 'code') and hasattr(result, 'message') else repr(result), 500)
return output.success(result)
@app.route('/<adapter_id>/verify', methods=['POST'])
def verify(adapter_id):
"""Verifies user credentials for a certain adapter."""
adapter = get_adapter(adapter_id)
if not adapter:
return output.failure("That adapter doesn't (yet) exist. Please check the adapter name and try again.", 501)
result = adapter.do_verify(request.headers)
if result is not True:
return output.failure("Credential verification failed. Please check your credentials and try again. (Error %s)" % (result), 401)
return ""
|
mit
| -6,108,083,656,066,812,000
| 36.407407
| 152
| 0.705941
| false
| 3.955614
| false
| false
| false
|
dpausp/pyrailway
|
pyrailway/operation.py
|
1
|
2267
|
class Operation:
def __init__(self, *stations):
self.stations = stations
def __call__(self, params=None, **dependencies):
options = dict(params=(params or {}), **dependencies)
success = True
for station in self.stations:
if (success and station.runs_on_success) or (not success and station.runs_on_failure):
success = station(options, dependencies)
if success == FailFast:
return Result(False, options)
return Result(success, options)
class Result:
def __init__(self, success, result_data):
self.result_data = result_data
self.success = success
@property
def failure(self):
return not self.success
def __getitem__(self, key):
return self.result_data[key]
def __contains__(self, key):
return key in self.result_data
def get(self, key):
return self.result_data.get(key)
class FailFast:
pass
class Activity:
runs_on_success = False
runs_on_failure = False
def __init__(self, func, name=None):
self.func = func
self.name = name
def callfunc(self, options, dependencies):
params = options["params"]
return self.func(options=options, params=params, **dependencies)
def __call__(self, options, dependencies):
self.callfunc(options, dependencies)
return True
def __repr__(self):
return "{} {}".format(self.__class__.__name__, self.name or self.func.__name__)
class step(Activity):
runs_on_success = True
def __init__(self, func, name=None, fail_fast=False):
super().__init__(func, name)
self.fail_fast = fail_fast
def __call__(self, options, dependencies):
res = self.callfunc(options, dependencies)
success = bool(res)
if not success and self.fail_fast:
return FailFast
return success
class failure(Activity):
runs_on_failure = True
def __call__(self, options, dependencies):
self.callfunc(options, dependencies)
return False
class success(Activity):
runs_on_success = True
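# Demonstration sketch (illustrative addition): an Operation is a linear
# "railway" of stations; a falsy step result switches execution onto the
# failure track, and fail_fast short-circuits the run. The step functions
# below are hypothetical examples.
if __name__ == "__main__":
    def validate(options, params, **deps):
        return bool(params.get("name"))

    def persist(options, params, **deps):
        options["saved"] = params["name"]
        return True

    def report_error(options, params, **deps):
        options["error"] = "validation failed"

    create = Operation(step(validate, fail_fast=True),
                       step(persist),
                       failure(report_error))

    ok = create(params={"name": "example"})
    print(ok.success, ok.get("saved"))        # True example
    print(create(params={}).failure)          # True (fail_fast stopped the run)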
|
mit
| -8,869,891,410,881,496,000
| 24.188889
| 99
| 0.574327
| false
| 4.277358
| false
| false
| false
|
cbitstech/Purple-Robot-Django
|
management/commands/extractors/builtin_rawlocationprobe.py
|
1
|
3339
|
# pylint: disable=line-too-long
import datetime
import psycopg2
import pytz
CREATE_PROBE_TABLE_SQL = 'CREATE TABLE builtin_rawlocationprobe(id SERIAL PRIMARY KEY, user_id TEXT, guid TEXT, timestamp BIGINT, utc_logged TIMESTAMP, latitude DOUBLE PRECISION, longitude DOUBLE PRECISION, altitude DOUBLE PRECISION, accuracy DOUBLE PRECISION, provider TEXT, network_available BOOLEAN, gps_available BOOLEAN);'
CREATE_PROBE_USER_ID_INDEX = 'CREATE INDEX ON builtin_rawlocationprobe(user_id);'
CREATE_PROBE_GUID_INDEX = 'CREATE INDEX ON builtin_rawlocationprobe(guid);'
CREATE_PROBE_UTC_LOGGED_INDEX = 'CREATE INDEX ON builtin_rawlocationprobe(utc_logged);'
def exists(connection_str, user_id, reading):
conn = psycopg2.connect(connection_str)
if probe_table_exists(conn) is False:
conn.close()
return False
cursor = conn.cursor()
cursor.execute('SELECT id FROM builtin_rawlocationprobe WHERE (user_id = %s AND guid = %s);', (user_id, reading['GUID']))
row_exists = (cursor.rowcount > 0)
cursor.close()
conn.close()
return row_exists
def probe_table_exists(conn):
cursor = conn.cursor()
cursor.execute('SELECT table_name FROM information_schema.tables WHERE (table_schema = \'public\' AND table_name = \'builtin_rawlocationprobe\')')
table_exists = (cursor.rowcount > 0)
cursor.close()
return table_exists
def insert(connection_str, user_id, reading, check_exists=True):
conn = psycopg2.connect(connection_str)
cursor = conn.cursor()
if check_exists and probe_table_exists(conn) is False:
cursor.execute(CREATE_PROBE_TABLE_SQL)
cursor.execute(CREATE_PROBE_USER_ID_INDEX)
cursor.execute(CREATE_PROBE_GUID_INDEX)
cursor.execute(CREATE_PROBE_UTC_LOGGED_INDEX)
conn.commit()
reading_cmd = 'INSERT INTO builtin_rawlocationprobe(user_id, ' + \
'guid, ' + \
'timestamp, ' + \
'utc_logged, ' + \
'latitude, ' + \
'longitude, ' + \
'altitude, ' + \
'accuracy, ' + \
'provider, ' + \
'network_available, ' + \
'gps_available) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING id;'
values = [user_id, reading['GUID'], reading['TIMESTAMP'], datetime.datetime.fromtimestamp(reading['TIMESTAMP'], tz=pytz.utc), reading['LATITUDE'], reading['LONGITUDE']]
if 'ALTITUDE' in reading:
values.append(reading['ALTITUDE'])
else:
values.append(None)
values.append(reading['ACCURACY'])
values.append(reading['PROVIDER'])
if 'NETWORK_AVAILABLE' in reading:
values.append(reading['NETWORK_AVAILABLE'])
else:
values.append(None)
if 'GPS_AVAILABLE' in reading:
values.append(reading['GPS_AVAILABLE'])
else:
values.append(None)
cursor.execute(reading_cmd, values)
conn.commit()
cursor.close()
conn.close()
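# Usage sketch (illustrative addition; requires a reachable PostgreSQL server,
# so it is left commented out). The connection string and the reading payload
# are hypothetical examples of a Purple Robot location upload.
#
#   reading = {
#       'GUID': 'abc-123',
#       'TIMESTAMP': 1417610000,
#       'LATITUDE': 41.8781,
#       'LONGITUDE': -87.6298,
#       'ACCURACY': 12.0,
#       'PROVIDER': 'network',
#   }
#
#   if not exists('dbname=purple user=purple', 'user-1', reading):
#       insert('dbname=purple user=purple', 'user-1', reading)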
|
gpl-3.0
| -5,321,546,172,391,040,000
| 35.293478
| 327
| 0.575621
| false
| 4.107011
| false
| false
| false
|
teamtaverna/core
|
app/api/tests/utils.py
|
1
|
1160
|
from base64 import b64encode
from django.contrib.auth.models import User
admin_test_credentials = ('admin1', 'admin@taverna.com', 'qwerty123',)
normal_user_credentials = ('user1', 'user1@taverna.com', 'qwerty123',)
endpoint = '/api'
def obtain_api_key(client):
credentials = '{}:{}'.format(
admin_test_credentials[0],
admin_test_credentials[2]
)
b64_encoded_credentials = b64encode(credentials.encode('utf-8'))
return client.post(
'/api/api_key',
**{'HTTP_AUTHORIZATION': 'Basic %s' % b64_encoded_credentials.decode('utf-8')}
).json()['api_key']
def create_admin_account():
return User.objects.create_superuser(*admin_test_credentials)
def create_normal_user_acount():
return User.objects.create_user(*normal_user_credentials)
def make_request(client, query, method='GET'):
header = {
'HTTP_X_TAVERNATOKEN': obtain_api_key(client)
}
if method == 'GET':
return client.get(endpoint, data={'query': query}, **header).json()
if method == 'POST':
return client.post(endpoint, data={'query': query}, **header).json()
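# Usage sketch (illustrative addition): inside a Django test case these helpers
# are typically combined as below; the query strings are hypothetical examples.
#
#   create_admin_account()
#   data = make_request(self.client, '{ timetables { name } }')
#   data = make_request(self.client, 'mutation { ... }', method='POST')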
|
mit
| -72,517,511,289,812,450
| 28
| 90
| 0.631034
| false
| 3.483483
| false
| false
| false
|
canihavesomecoffee/sample-platform
|
decorators.py
|
1
|
4344
|
"""define decorators for use across app."""
from datetime import date
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Union
from flask import g, render_template, request
from database import EnumSymbol
from mod_auth.models import User
def get_menu_entries(user: Optional[User], title: str, icon: str, access: Optional[List] = None, route: str = '',
all_entries: Optional[List[Dict[str, Union[str, List[EnumSymbol]]]]] = None) -> Dict[Any, Any]:
"""
Parse a given set of entries and checks which ones the user can access.
:param access: Grant access to these roles. Empty means public access.
:type access: list[str]
:param user: The user object.
:type user: mod_auth.models.User
:param title: The title of the root menu entry.
:type title: str
:param icon: The icon of the root menu entry.
:type icon: str
:param route: The route of the root menu entry.
:type route: str
:param all_entries: The sub entries for this menu entry.
:type all_entries: list[dict]
:return: A dict consisting of the menu entry.
:rtype: dict
"""
if all_entries is None:
all_entries = []
if access is None:
access = []
result: Dict[Any, Any] = {
'title': title,
'icon': icon
}
allowed_entries = []
passed = False
if user is not None:
if len(route) > 0:
result['route'] = route
passed = len(access) == 0 or user.role in access
else:
for entry in all_entries:
# TODO: make this recursive if necessary
if len(entry['access']) == 0 or user.role in entry['access']:
allowed_entries.append(entry)
if len(allowed_entries) > 0:
result['entries'] = allowed_entries
passed = True
elif len(access) == 0:
if len(route) > 0:
result['route'] = route
passed = True
else:
for entry in all_entries:
# TODO: make this recursive if necessary
if len(entry['access']) == 0:
allowed_entries.append(entry)
if len(allowed_entries) > 0:
result['entries'] = allowed_entries
passed = True
return result if passed else {}
def template_renderer(template: Optional[str] = None, status: int = 200) -> Callable:
"""
Decorate to render a template.
:param template: The template if it's not equal to the name of the endpoint.
:type template: str
:param status: The return code
:type status: int
"""
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
template_name = template
if template_name is None:
template_name = request.endpoint.replace('.', '/') + '.html'
ctx = f(*args, **kwargs)
if ctx is None:
ctx = {}
elif not isinstance(ctx, dict):
return ctx
# Add default values
ctx['applicationName'] = 'CCExtractor CI platform'
ctx['applicationVersion'] = getattr(g, 'version', 'Unknown')
ctx['currentYear'] = date.today().strftime('%Y')
try:
from build_commit import build_commit
except ImportError:
build_commit = 'Unknown'
ctx['build_commit'] = build_commit
user = getattr(g, 'user', None)
ctx['user'] = user
# Create menu entries
menu_entries = getattr(g, 'menu_entries', {})
ctx['menu'] = [
menu_entries.get('home', {}),
menu_entries.get('samples', {}),
menu_entries.get('upload', {}),
menu_entries.get('custom', {}),
menu_entries.get('tests', {}),
menu_entries.get('regression', {}),
menu_entries.get('config', {}),
menu_entries.get('account', {}),
menu_entries.get('auth', {})
]
ctx['active_route'] = request.endpoint
# Render template & return
return render_template(template_name, **ctx), status
return decorated_function
return decorator
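# Usage sketch (illustrative addition; the route, template name and menu data
# are hypothetical). template_renderer picks '<endpoint>.html' unless an
# explicit template is given, and get_menu_entries keeps only the sub entries
# the current user's role may access.
#
#   @app.route('/about')
#   @template_renderer()          # renders 'about.html' with the dict below
#   def about():
#       return {'page_title': 'About'}
#
#   entries = get_menu_entries(
#       g.user, 'Platform mgmt', 'cog',
#       all_entries=[{'title': 'Users', 'icon': 'user',
#                     'route': 'users.index', 'access': ['admin']}])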
|
isc
| 5,094,530,706,758,486,000
| 34.317073
| 116
| 0.548803
| false
| 4.365829
| false
| false
| false
|
hgiemza/DIRAC
|
WorkloadManagementSystem/Agent/StalledJobAgent.py
|
2
|
20827
|
########################################################################
# File : StalledJobAgent.py
########################################################################
""" The StalledJobAgent hunts for stalled jobs in the Job database. Jobs in "running"
state not receiving a heart beat signal for more than stalledTime
seconds will be assigned the "Stalled" state.
"""
__RCSID__ = "$Id$"
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.Time import fromString, toEpoch, dateTime, second
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.AccountingSystem.Client.Types.Job import Job
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC.ConfigurationSystem.Client.Helpers import cfgPath
from DIRAC.ConfigurationSystem.Client.PathFinder import getSystemInstance
from DIRAC.WorkloadManagementSystem.Client.WMSClient import WMSClient
import types
class StalledJobAgent( AgentModule ):
"""
The specific agents must provide the following methods:
- initialize() for initial settings
- beginExecution()
- execute() - the main method called in the agent cycle
- endExecution()
- finalize() - the graceful exit of the method, this one is usually used
for the agent restart
"""
jobDB = None
logDB = None
matchedTime = 7200
rescheduledTime = 600
completedTime = 86400
#############################################################################
def initialize( self ):
"""Sets default parameters
"""
self.jobDB = JobDB()
self.logDB = JobLoggingDB()
self.am_setOption( 'PollingTime', 60 * 60 )
self.stalledJobsTolerantSites = self.am_getOption( 'StalledJobsTolerantSites', [] )
if not self.am_getOption( 'Enable', True ):
self.log.info( 'Stalled Job Agent running in disabled mode' )
return S_OK()
#############################################################################
def execute( self ):
""" The main agent execution method
"""
self.log.verbose( 'Waking up Stalled Job Agent' )
wms_instance = getSystemInstance( 'WorkloadManagement' )
if not wms_instance:
return S_ERROR( 'Can not get the WorkloadManagement system instance' )
wrapperSection = cfgPath( 'Systems', 'WorkloadManagement', wms_instance, 'JobWrapper' )
stalledTime = self.am_getOption( 'StalledTimeHours', 2 )
failedTime = self.am_getOption( 'FailedTimeHours', 6 )
self.stalledJobsToleranceTime = self.am_getOption( 'StalledJobsToleranceTime', 0 )
self.matchedTime = self.am_getOption( 'MatchedTime', self.matchedTime )
self.rescheduledTime = self.am_getOption( 'RescheduledTime', self.rescheduledTime )
self.completedTime = self.am_getOption( 'CompletedTime', self.completedTime )
self.log.verbose( 'StalledTime = %s cycles' % ( stalledTime ) )
self.log.verbose( 'FailedTime = %s cycles' % ( failedTime ) )
watchdogCycle = gConfig.getValue( cfgPath( wrapperSection , 'CheckingTime' ), 30 * 60 )
watchdogCycle = max( watchdogCycle, gConfig.getValue( cfgPath( wrapperSection , 'MinCheckingTime' ), 20 * 60 ) )
# Add half cycle to avoid race conditions
stalledTime = watchdogCycle * ( stalledTime + 0.5 )
failedTime = watchdogCycle * ( failedTime + 0.5 )
result = self.__markStalledJobs( stalledTime )
if not result['OK']:
self.log.error( 'Failed to detect stalled jobs', result['Message'] )
# Note, jobs will be revived automatically during the heartbeat signal phase and
# subsequent status changes will result in jobs not being selected by the
# stalled job agent.
result = self.__failStalledJobs( failedTime )
if not result['OK']:
self.log.error( 'Failed to process stalled jobs', result['Message'] )
result = self.__failCompletedJobs()
if not result['OK']:
self.log.error( 'Failed to process completed jobs', result['Message'] )
result = self.__kickStuckJobs()
if not result['OK']:
self.log.error( 'Failed to kick stuck jobs', result['Message'] )
return S_OK( 'Stalled Job Agent cycle complete' )
#############################################################################
def __markStalledJobs( self, stalledTime ):
""" Identifies stalled jobs running without update longer than stalledTime.
"""
stalledCounter = 0
runningCounter = 0
result = self.jobDB.selectJobs( {'Status':'Running'} )
if not result['OK']:
return result
if not result['Value']:
return S_OK()
jobs = result['Value']
self.log.info( '%s Running jobs will be checked for being stalled' % ( len( jobs ) ) )
jobs.sort()
# jobs = jobs[:10] #for debugging
for job in jobs:
site = self.jobDB.getJobAttribute( job, 'site' )['Value']
if site in self.stalledJobsTolerantSites:
result = self.__getStalledJob( job, stalledTime + self.stalledJobsToleranceTime )
else:
result = self.__getStalledJob( job, stalledTime )
if result['OK']:
self.log.verbose( 'Updating status to Stalled for job %s' % ( job ) )
self.__updateJobStatus( job, 'Stalled' )
stalledCounter += 1
else:
self.log.verbose( result['Message'] )
runningCounter += 1
self.log.info( 'Total jobs: %s, Stalled job count: %s, Running job count: %s' %
( len( jobs ), stalledCounter, runningCounter ) )
return S_OK()
#############################################################################
def __failStalledJobs( self, failedTime ):
""" Changes the Stalled status to Failed for jobs long in the Stalled status
"""
result = self.jobDB.selectJobs( {'Status':'Stalled'} )
if not result['OK']:
return result
jobs = result['Value']
failedCounter = 0
minorStalledStatuses = ( "Job stalled: pilot not running", 'Stalling for more than %d sec' % failedTime )
if jobs:
self.log.info( '%s Stalled jobs will be checked for failure' % ( len( jobs ) ) )
for job in jobs:
setFailed = False
# Check if the job pilot is lost
result = self.__getJobPilotStatus( job )
if not result['OK']:
self.log.error( 'Failed to get pilot status', result['Message'] )
continue
pilotStatus = result['Value']
if pilotStatus != "Running":
setFailed = minorStalledStatuses[0]
else:
result = self.__getLatestUpdateTime( job )
if not result['OK']:
self.log.error( 'Failed to get job update time', result['Message'] )
continue
elapsedTime = toEpoch() - result['Value']
if elapsedTime > failedTime:
setFailed = minorStalledStatuses[1]
# Set the jobs Failed, send them a kill signal in case they are not really dead and send accounting info
if setFailed:
# Send a kill signal to the job such that it cannot continue running
WMSClient().killJob( job )
self.__updateJobStatus( job, 'Failed', setFailed )
failedCounter += 1
result = self.__sendAccounting( job )
if not result['OK']:
self.log.error( 'Failed to send accounting', result['Message'] )
recoverCounter = 0
for minor in minorStalledStatuses:
result = self.jobDB.selectJobs( {'Status':'Failed', 'MinorStatus': minor, 'AccountedFlag': 'False' } )
if not result['OK']:
return result
if result['Value']:
jobs = result['Value']
self.log.info( '%s Stalled jobs will be Accounted' % ( len( jobs ) ) )
for job in jobs:
result = self.__sendAccounting( job )
if not result['OK']:
self.log.error( 'Failed to send accounting', result['Message'] )
continue
recoverCounter += 1
if not result['OK']:
break
if failedCounter:
self.log.info( '%d jobs set to Failed' % failedCounter )
if recoverCounter:
self.log.info( '%d jobs properly Accounted' % recoverCounter )
return S_OK( failedCounter )
#############################################################################
def __getJobPilotStatus( self, jobID ):
""" Get the job pilot status
"""
result = self.jobDB.getJobParameter( jobID, 'Pilot_Reference' )
if not result['OK']:
return result
if not result['Value']:
# There is no pilot reference, hence its status is unknown
return S_OK( 'NoPilot' )
pilotReference = result['Value']
wmsAdminClient = RPCClient( 'WorkloadManagement/WMSAdministrator' )
result = wmsAdminClient.getPilotInfo( pilotReference )
if not result['OK']:
if "No pilots found" in result['Message']:
self.log.warn( result['Message'] )
return S_OK( 'NoPilot' )
self.log.error( 'Failed to get pilot information',
'for job %d: ' % jobID + result['Message'] )
return S_ERROR( 'Failed to get the pilot status' )
pilotStatus = result['Value'][pilotReference]['Status']
return S_OK( pilotStatus )
#############################################################################
def __getStalledJob( self, job, stalledTime ):
""" Compares the most recent of LastUpdateTime and HeartBeatTime against
the stalledTime limit.
"""
result = self.__getLatestUpdateTime( job )
if not result['OK']:
return result
currentTime = toEpoch()
lastUpdate = result['Value']
elapsedTime = currentTime - lastUpdate
self.log.verbose( '(CurrentTime-LastUpdate) = %s secs' % ( elapsedTime ) )
if elapsedTime > stalledTime:
self.log.info( 'Job %s is identified as stalled with last update > %s secs ago' % ( job, elapsedTime ) )
return S_OK( 'Stalled' )
return S_ERROR( 'Job %s is running and will be ignored' % job )
#############################################################################
def __getLatestUpdateTime( self, job ):
""" Returns the most recent of HeartBeatTime and LastUpdateTime
"""
result = self.jobDB.getJobAttributes( job, ['HeartBeatTime', 'LastUpdateTime'] )
if not result['OK']:
self.log.error( 'Failed to get job attributes', result['Message'] )
if not result['OK'] or not result['Value']:
self.log.error( 'Could not get attributes for job', '%s' % job )
return S_ERROR( 'Could not get attributes for job' )
self.log.verbose( result )
latestUpdate = 0
if not result['Value']['HeartBeatTime'] or result['Value']['HeartBeatTime'] == 'None':
self.log.verbose( 'HeartBeatTime is null for job %s' % job )
else:
latestUpdate = toEpoch( fromString( result['Value']['HeartBeatTime'] ) )
if not result['Value']['LastUpdateTime'] or result['Value']['LastUpdateTime'] == 'None':
self.log.verbose( 'LastUpdateTime is null for job %s' % job )
else:
lastUpdate = toEpoch( fromString( result['Value']['LastUpdateTime'] ) )
if latestUpdate < lastUpdate:
latestUpdate = lastUpdate
if not latestUpdate:
return S_ERROR( 'LastUpdate and HeartBeat times are null for job %s' % job )
else:
self.log.verbose( 'Latest update time from epoch for job %s is %s' % ( job, latestUpdate ) )
return S_OK( latestUpdate )
#############################################################################
def __updateJobStatus( self, job, status, minorstatus = None ):
""" This method updates the job status in the JobDB, this should only be
used to fail jobs due to the optimizer chain.
"""
self.log.verbose( "self.jobDB.setJobAttribute(%s,'Status','%s',update=True)" % ( job, status ) )
if self.am_getOption( 'Enable', True ):
result = self.jobDB.setJobAttribute( job, 'Status', status, update = True )
else:
result = S_OK( 'DisabledMode' )
if result['OK']:
if minorstatus:
self.log.verbose( "self.jobDB.setJobAttribute(%s,'MinorStatus','%s',update=True)" % ( job, minorstatus ) )
result = self.jobDB.setJobAttribute( job, 'MinorStatus', minorstatus, update = True )
if not minorstatus: # Retain last minor status for stalled jobs
result = self.jobDB.getJobAttributes( job, ['MinorStatus'] )
if result['OK']:
minorstatus = result['Value']['MinorStatus']
logStatus = status
result = self.logDB.addLoggingRecord( job, status = logStatus, minor = minorstatus, source = 'StalledJobAgent' )
if not result['OK']:
self.log.warn( result )
return result
def __getProcessingType( self, jobID ):
""" Get the Processing Type from the JDL, until it is promoted to a real Attribute
"""
processingType = 'unknown'
result = self.jobDB.getJobJDL( jobID, original = True )
if not result['OK']:
return processingType
classAdJob = ClassAd( result['Value'] )
if classAdJob.lookupAttribute( 'ProcessingType' ):
processingType = classAdJob.getAttributeString( 'ProcessingType' )
return processingType
#############################################################################
def __sendAccounting( self, jobID ):
""" Send WMS accounting data for the given job
"""
try:
accountingReport = Job()
endTime = 'Unknown'
lastHeartBeatTime = 'Unknown'
result = self.jobDB.getJobAttributes( jobID )
if not result['OK']:
return result
jobDict = result['Value']
startTime, endTime = self.__checkLoggingInfo( jobID, jobDict )
lastCPUTime, lastWallTime, lastHeartBeatTime = self.__checkHeartBeat( jobID, jobDict )
lastHeartBeatTime = fromString( lastHeartBeatTime )
if lastHeartBeatTime is not None and lastHeartBeatTime > endTime:
endTime = lastHeartBeatTime
cpuNormalization = self.jobDB.getJobParameter( jobID, 'CPUNormalizationFactor' )
if not cpuNormalization['OK'] or not cpuNormalization['Value']:
cpuNormalization = 0.0
else:
cpuNormalization = float( cpuNormalization['Value'] )
except Exception:
self.log.exception( "Exception in __sendAccounting for job %s: endTime=%s, lastHBTime %s" % ( str( jobID ), str( endTime ), str( lastHeartBeatTime ) ), '' , False )
return S_ERROR( "Exception" )
processingType = self.__getProcessingType( jobID )
accountingReport.setStartTime( startTime )
accountingReport.setEndTime( endTime )
# execTime = toEpoch( endTime ) - toEpoch( startTime )
# Fill the accounting data
acData = { 'Site' : jobDict['Site'],
'User' : jobDict['Owner'],
'UserGroup' : jobDict['OwnerGroup'],
'JobGroup' : jobDict['JobGroup'],
'JobType' : jobDict['JobType'],
'JobClass' : jobDict['JobSplitType'],
'ProcessingType' : processingType,
'FinalMajorStatus' : 'Failed',
'FinalMinorStatus' : 'Stalled',
'CPUTime' : lastCPUTime,
'NormCPUTime' : lastCPUTime * cpuNormalization,
'ExecTime' : lastWallTime,
'InputDataSize' : 0.0,
'OutputDataSize' : 0.0,
'InputDataFiles' : 0,
'OutputDataFiles' : 0,
'DiskSpace' : 0.0,
'InputSandBoxSize' : 0.0,
'OutputSandBoxSize' : 0.0,
'ProcessedEvents' : 0
}
# For accidentally stopped jobs ExecTime can be not set
if not acData['ExecTime']:
acData['ExecTime'] = acData['CPUTime']
elif acData['ExecTime'] < acData['CPUTime']:
acData['ExecTime'] = acData['CPUTime']
self.log.verbose( 'Accounting Report is:' )
self.log.verbose( acData )
accountingReport.setValuesFromDict( acData )
result = accountingReport.commit()
if result['OK']:
self.jobDB.setJobAttribute( jobID, 'AccountedFlag', 'True' )
else:
self.log.error( 'Failed to send accounting report', 'Job: %d, Error: %s' % ( int( jobID ), result['Message'] ) )
return result
def __checkHeartBeat( self, jobID, jobDict ):
""" Get info from HeartBeat
"""
result = self.jobDB.getHeartBeatData( jobID )
lastCPUTime = 0
lastWallTime = 0
lastHeartBeatTime = jobDict['StartExecTime']
if lastHeartBeatTime == "None":
lastHeartBeatTime = 0
if result['OK']:
for name, value, heartBeatTime in result['Value']:
if 'CPUConsumed' == name:
try:
value = int( float( value ) )
if value > lastCPUTime:
lastCPUTime = value
except ValueError:
pass
if 'WallClockTime' == name:
try:
value = int( float( value ) )
if value > lastWallTime:
lastWallTime = value
except ValueError:
pass
if heartBeatTime > lastHeartBeatTime:
lastHeartBeatTime = heartBeatTime
return lastCPUTime, lastWallTime, lastHeartBeatTime
def __checkLoggingInfo( self, jobID, jobDict ):
""" Get info from JobLogging
"""
logList = []
result = self.logDB.getJobLoggingInfo( jobID )
if result['OK']:
logList = result['Value']
startTime = jobDict['StartExecTime']
if not startTime or startTime == 'None':
# status, minor, app, stime, source
for items in logList:
if items[0] == 'Running':
startTime = items[3]
break
if not startTime or startTime == 'None':
startTime = jobDict['SubmissionTime']
if type( startTime ) in types.StringTypes:
startTime = fromString( startTime )
if startTime == None:
self.log.error( 'Wrong timestamp in DB', items[3] )
startTime = dateTime()
endTime = dateTime()
# status, minor, app, stime, source
for items in logList:
if items[0] == 'Stalled':
endTime = fromString( items[3] )
if endTime == None:
self.log.error( 'Wrong timestamp in DB', items[3] )
endTime = dateTime()
return startTime, endTime
def __kickStuckJobs( self ):
""" Reschedule jobs stuck in initialization status Rescheduled, Matched
"""
message = ''
checkTime = str( dateTime() - self.matchedTime * second )
result = self.jobDB.selectJobs( {'Status':'Matched'}, older = checkTime )
if not result['OK']:
self.log.error( 'Failed to select jobs', result['Message'] )
return result
jobIDs = result['Value']
if jobIDs:
self.log.info( 'Rescheduling %d jobs stuck in Matched status' % len( jobIDs ) )
result = self.jobDB.rescheduleJobs( jobIDs )
if 'FailedJobs' in result:
message = 'Failed to reschedule %d jobs stuck in Matched status' % len( result['FailedJobs'] )
checkTime = str( dateTime() - self.rescheduledTime * second )
result = self.jobDB.selectJobs( {'Status':'Rescheduled'}, older = checkTime )
if not result['OK']:
self.log.error( 'Failed to select jobs', result['Message'] )
return result
jobIDs = result['Value']
if jobIDs:
self.log.info( 'Rescheduling %d jobs stuck in Rescheduled status' % len( jobIDs ) )
result = self.jobDB.rescheduleJobs( jobIDs )
if 'FailedJobs' in result:
if message:
message += '\n'
message += 'Failed to reschedule %d jobs stuck in Rescheduled status' % len( result['FailedJobs'] )
if message:
return S_ERROR( message )
else:
return S_OK()
def __failCompletedJobs( self ):
""" Failed Jobs stuck in Completed Status for a long time.
They are due to pilots being killed during the
finalization of the job execution.
"""
# Get old Completed Jobs
checkTime = str( dateTime() - self.completedTime * second )
result = self.jobDB.selectJobs( {'Status':'Completed'}, older = checkTime )
if not result['OK']:
self.log.error( 'Failed to select jobs', result['Message'] )
return result
jobIDs = result['Value']
if not jobIDs:
return S_OK()
# Remove those with Minor Status "Pending Requests"
for jobID in jobIDs:
result = self.jobDB.getJobAttributes( jobID, ['Status', 'MinorStatus'] )
if not result['OK']:
self.log.error( 'Failed to get job attributes', result['Message'] )
continue
if result['Value']['Status'] != "Completed":
continue
if result['Value']['MinorStatus'] == "Pending Requests":
continue
result = self.__updateJobStatus( jobID, 'Failed',
"Job died during finalization" )
result = self.__sendAccounting( jobID )
if not result['OK']:
self.log.error( 'Failed to send accounting', result['Message'] )
continue
return S_OK()
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
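# Configuration sketch (illustrative addition): the options read through
# am_getOption/am_setOption above correspond to an agent section in the DIRAC
# configuration service, roughly of the form below (the exact CS path may
# differ per installation):
#
#   Systems/WorkloadManagement/<Instance>/Agents/StalledJobAgent
#   {
#     PollingTime = 3600
#     StalledTimeHours = 2
#     FailedTimeHours = 6
#     MatchedTime = 7200
#     RescheduledTime = 600
#     CompletedTime = 86400
#     Enable = True
#   }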
|
gpl-3.0
| -8,255,214,876,684,673,000
| 37.426199
| 170
| 0.612954
| false
| 3.884185
| true
| false
| false
|
roryscarson/NOVUS
|
novus_pkg/F_cashFlow_B_pnl.py
|
1
|
22178
|
#!python
# -*- encoding: utf-8 -*-
# F_cashFlow_B_pnl.py
# Greg Wilson, 2012
# gwilson.sq1@gmail.com
# This software is part of the Public Domain.
# This file is part of the NOVUS Entrepreneurship Training Program.
# NOVUS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# NOVUS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with NOVUS. If not, see <http://www.gnu.org/licenses/>.
'''
E_game_pnl.py +
+ X_listbook.py +
+ E_finance_pnl.py +
+ F_incomeStmt_B_pnl.py
+ F_balanceSheet_B_pnl.py
+ F_cashFlow_B_pnl.py
This module contains the Cash Flow "B" Panel class code for the Novus
Business and IT education program. The "B" panel show the
pro forma results for this year, and, after the submission of decisions,
the actual results for the round.
'''
import wx
import wx.lib.scrolledpanel as scrolled
import X_styles, X_miscPnls
import Q_data
from Q_language import GetPhrase
class CashFlow_B_Pnl(scrolled.ScrolledPanel):
'''This class holds the CashFlow panel for the Novus Business and IT
education program.'''
def __init__(self, parent, *args, **kwargs):
scrolled.ScrolledPanel.__init__(self, parent, *args, **kwargs)
# Styles ----------------------------------------------------------
self.styles = X_styles.NovusStyle(self)
self.SetBackgroundColour(wx.WHITE)
# Data ------------------------------------------------------------
self.data = Q_data.Data(None)
# Labels
#------------------------------------------------------------------
lang = self.data.GetData1()[1][1]
self.cashFlowAndRatio_lbl = GetPhrase('cashFlow_lbl', lang) + ' / ' + GetPhrase('financialRatios_lbl', lang)
self.forecast_lbl = GetPhrase('forecast_lbl', lang)
self.actual_lbl = GetPhrase('actual_lbl', lang)
# Cash Flow Labels ------------------------------------------------
self.cashFlowStmt_lbl = GetPhrase('cashFlow_lbl', lang)
self.opActivities_lbl = GetPhrase('opActivities_lbl', lang)
self.netIncome_lbl = GetPhrase('netIncome_lbl', lang)
self.depreciation_lbl = GetPhrase('depreciation_lbl', lang)
self.chgInAR_lbl = GetPhrase('chgInAR_lbl', lang)
self.chgInInv_lbl = GetPhrase('chgInInv_lbl', lang)
self.chgInAP_lbl = GetPhrase('chgInAP_lbl', lang)
self.NetCashFlowOps_lbl = GetPhrase('NetCashFlowOps_lbl', lang)
self.investingActivities_lbl = GetPhrase('investingActivities_lbl', lang)
self.capExp_lbl = GetPhrase('capExp_lbl', lang)
self.addGFA_lbl = GetPhrase('addGFA_lbl', lang)
self.addTrucks_lbl = GetPhrase('addTrucks_lbl', lang)
self.addMach_lbl = GetPhrase('addMach_lbl', lang)
self.netCashFlowInv_lbl = GetPhrase('netCashFlowInv_lbl', lang)
self.FinancingActivities_lbl = GetPhrase('FinancingActivities_lbl', lang)
self.chgShortTermFin_lbl = GetPhrase('chgShortTermFin_lbl', lang)
self.chgShortTermLoC_lbl = GetPhrase('chgShortTermLoC_lbl', lang)
self.incLTD_lbl = GetPhrase('incLTD_lbl', lang)
self.decLTD_lbl = GetPhrase('decLTD_lbl', lang)
self.proceedsFromStock_lbl = GetPhrase('proceedsFromStock_lbl', lang)
self.cashDivPaid_lbl = GetPhrase('cashDivPaid_lbl', lang)
self.netCashFlowFin_lbl = GetPhrase('netCashFlowFin_lbl', lang)
self.netCashFlowAll_lbl = GetPhrase('netCashFlowAll_lbl', lang)
self.begCashBal_lbl = GetPhrase('begCashBal_lbl', lang)
self.endCashBal_lbl = GetPhrase('endCashBal_lbl', lang)
# Ratio Labels ----------------------------------------------------
self.financialRatios_lbl = GetPhrase('financialRatios_lbl', lang)
self.y2yGrowth_lbl = GetPhrase('y2yGrowth_lbl', lang)
self.grossMargin_lbl = GetPhrase('grossMargin_lbl', lang)
self.SGAofSales_lbl = GetPhrase('SGAofSales_lbl', lang)
self.EBITDAOpMarg_lbl = GetPhrase('EBITDAOpMarg_lbl', lang)
self.EBITOpMarg_lbl = GetPhrase('EBITOpMarg_lbl', lang)
self.taxRate_lbl = GetPhrase('taxRate_lbl', lang)
self.netProfMarg_lbl = GetPhrase('netProfMarg_lbl', lang)
self.currentRatio_lbl = GetPhrase('currentRatio_lbl', lang)
self.quickRatio_lbl = GetPhrase('quickRatio_lbl', lang)
self.cashRatio_lbl = GetPhrase('cashRatio_lbl', lang)
self.daysInvOut_lbl = GetPhrase('daysInvOut_lbl', lang)
self.daysSalesOut_lbl = GetPhrase('daysSalesOut_lbl', lang)
self.daysPayablesOut_lbl = GetPhrase('daysPayablesOut_lbl', lang)
self.ccc_lbl = GetPhrase('ccc_lbl', lang)
self.roa_lbl = GetPhrase('RoA_lbl', lang)
self.roe_lbl = GetPhrase('RoE_lbl', lang)
self.roi_lbl = GetPhrase('roi_lbl', lang)
self.estIR_lbl = GetPhrase('estIR_lbl', lang)
self.debtEquity_lbl = GetPhrase('debtEquity_lbl', lang)
self.ebitdaToIntExp_lbl = GetPhrase('timesInt_lbl', lang)
# Cash Flow List Objects
#------------------------------------------------------------------
self.opActivities_list = [self.opActivities_lbl, '', '', '', '', '', '']
self.netIncome_list = [' '+self.netIncome_lbl, '-', '-']
self.depreciation_list = [' ( + )'+self.depreciation_lbl, '-', '-']
self.chgInAR_list = [' (+/-)'+self.chgInAR_lbl, '-', '-']
self.chgInInv_list = [' (+/-)'+self.chgInInv_lbl, '-', '-']
self.chgInAP_list = [' (+/-)'+self.chgInAP_lbl, '-', '-']
self.NetCashFlowOps_list = [self.NetCashFlowOps_lbl, '-', '-']
self.investingActivities_list = [self.investingActivities_lbl, '', '', '', '', '', '']
self.addGFA_list = [' '+self.capExp_lbl+' - '+self.addGFA_lbl, '-', '-']
self.addTrucks_list = [' '+self.capExp_lbl+' - '+self.addTrucks_lbl, '-', '-']
self.addMach_list = [' '+self.capExp_lbl+' - '+self.addMach_lbl, '-', '-']
self.netCashFlowInv_list = [self.netCashFlowInv_lbl, '-', '-']
self.FinancingActivities_list = [self.FinancingActivities_lbl, '', '', '', '', '', '']
self.chgShortTermFin_list = [' '+self.chgShortTermFin_lbl, '-', '-']
self.chgShortTermLoC_list = [' '+self.chgShortTermLoC_lbl, '-', '-']
self.incLTD_list = [' '+self.incLTD_lbl, '-', '-']
self.decLTD_list = [' '+self.decLTD_lbl, '-', '-']
self.proceedsFromStock_list = [' '+self.proceedsFromStock_lbl, '-', '-']
self.cashDivPaid_list = [' '+self.cashDivPaid_lbl, '-', '-']
self.netCashFlowFin_list = [self.netCashFlowFin_lbl, '-', '-']
self.netCashFlowAll_list = [self.netCashFlowAll_lbl, '-', '-']
self.begCashBal_list = [self.begCashBal_lbl, '-', '-']
self.endCashBal_list = [self.endCashBal_lbl, '-', '-']
self.cf_fields = [self.opActivities_list, self.netIncome_list, self.depreciation_list,
self.chgInAR_list, self.chgInInv_list, self.chgInAP_list,
self.NetCashFlowOps_list, self.investingActivities_list,
self.addGFA_list, self.addTrucks_list, self.addMach_list,
self.netCashFlowInv_list, self.FinancingActivities_list,
self.chgShortTermFin_list, self.chgShortTermLoC_list,
self.incLTD_list, self.decLTD_list, self.proceedsFromStock_list,
self.cashDivPaid_list, self.netCashFlowFin_list, self.netCashFlowAll_list,
self.begCashBal_list, self.endCashBal_list]
# Financial Ratio List Objects
#------------------------------------------------------------------
self.y2yGrowth_list = [self.y2yGrowth_lbl, '-', '-']
self.grossMargin_list = [self.grossMargin_lbl, '-', '-']
self.SGAofSales_list = [self.SGAofSales_lbl, '-', '-']
self.EBITDAOpMarg_list = [self.EBITDAOpMarg_lbl, '-', '-']
self.EBITOpMarg_list = [self.EBITOpMarg_lbl, '-', '-']
self.taxRate_list = [self.taxRate_lbl, '-', '-']
self.netProfMarg_list = [self.netProfMarg_lbl, '-', '-']
self.currentRatio_list = [self.currentRatio_lbl, '-', '-']
self.quickRatio_list = [self.quickRatio_lbl, '-', '-']
self.cashRatio_list = [self.cashRatio_lbl, '-', '-']
self.daysInvOut_list = [self.daysInvOut_lbl, '-', '-']
self.daysSalesOut_list = [self.daysSalesOut_lbl, '-', '-']
self.daysPayablesOut_list = [self.daysPayablesOut_lbl, '-', '-']
self.ccc_list = [self.ccc_lbl, '-', '-']
self.roa_list = [self.roa_lbl, '-', '-']
self.roe_list = [self.roe_lbl, '-', '-']
self.roi_list = [self.roi_lbl, '-', '-']
self.estIR_list = [self.estIR_lbl, '-', '-']
self.debtEquity_list = [self.debtEquity_lbl, '-', '-']
self.ebitdaToIntExp_list = [self.ebitdaToIntExp_lbl, '-', '-']
self.fr_fields = [self.y2yGrowth_list, self.grossMargin_list, self.SGAofSales_list,
self.EBITDAOpMarg_list, self.EBITOpMarg_list, self.taxRate_list,
self.netProfMarg_list, self.currentRatio_list, self.quickRatio_list,
self.cashRatio_list,
self.daysInvOut_list, self.daysSalesOut_list, self.daysPayablesOut_list,
self.ccc_list, self.roa_list, self.roe_list,
self.roi_list, self.estIR_list, self.debtEquity_list,
self.ebitdaToIntExp_list]
# Formatting ------------------------------------------------------
self.bold_list = [self.opActivities_list, self.NetCashFlowOps_list,
self.investingActivities_list, self.netCashFlowInv_list,
self.FinancingActivities_list, self.netCashFlowFin_list,
self.netCashFlowAll_list, self.begCashBal_list,
self.endCashBal_list]
self.italic_list = []
# Sizer
#------------------------------------------------------------------
sizer = wx.BoxSizer(wx.VERTICAL)
# Title -----------------------------------------------------------
self.cashFlowAndRatio_st = wx.StaticText(self, -1, self.cashFlowAndRatio_lbl)
self.cashFlowAndRatio_st.SetFont(self.styles.h1_font)
sizer.Add(self.cashFlowAndRatio_st, 0, wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, 5)
# Cash Flow Panels ------------------------------------------------
self.opActivities_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.netIncome_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.depreciation_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.chgInAR_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.chgInInv_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.chgInAP_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.NetCashFlowOps_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.investingActivities_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.addGFA_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.addTrucks_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.addMach_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.netCashFlowInv_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.FinancingActivities_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.chgShortTermFin_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.chgShortTermLoC_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.incLTD_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.decLTD_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.proceedsFromStock_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.cashDivPaid_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.netCashFlowFin_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.netCashFlowAll_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.begCashBal_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.endCashBal_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.cf_pnls = [self.opActivities_pnl, self.netIncome_pnl, self.depreciation_pnl,
self.chgInAR_pnl, self.chgInInv_pnl, self.chgInAP_pnl,
self.NetCashFlowOps_pnl, self.investingActivities_pnl,
self.addGFA_pnl, self.addTrucks_pnl, self.addMach_pnl,
self.netCashFlowInv_pnl , self.FinancingActivities_pnl,
self.chgShortTermFin_pnl, self.chgShortTermLoC_pnl,
self.incLTD_pnl, self.decLTD_pnl, self.proceedsFromStock_pnl,
self.cashDivPaid_pnl, self.netCashFlowFin_pnl , self.netCashFlowAll_pnl,
self.begCashBal_pnl, self.endCashBal_pnl]
# Financial Ratio Panels ------------------------------------------
self.y2yGrowth_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.grossMargin_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.SGAofSales_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.EBITDAOpMarg_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.EBITOpMarg_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.taxRate_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.netProfMarg_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.currentRatio_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.quickRatio_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.cashRatio_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.daysInvOut_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.daysSalesOut_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.daysPayablesOut_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.ccc_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.roa_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.roe_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.roi_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.estIR_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.debtEquity_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.ebitdaToIntExp_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.fr_pnls = [self.y2yGrowth_pnl , self.grossMargin_pnl , self.SGAofSales_pnl ,
self.EBITDAOpMarg_pnl , self.EBITOpMarg_pnl , self.taxRate_pnl ,
self.netProfMarg_pnl , self.currentRatio_pnl , self.quickRatio_pnl ,
self.cashRatio_pnl,
self.daysInvOut_pnl , self.daysSalesOut_pnl , self.daysPayablesOut_pnl ,
self.ccc_pnl , self.roa_pnl , self.roe_pnl ,
self.roi_pnl , self.estIR_pnl , self.debtEquity_pnl,
self.ebitdaToIntExp_pnl]
# Add Cash Flow Panels to Sizer -----------------------------------
self.cashFlowStmt_list = [self.cashFlowStmt_lbl, self.forecast_lbl, self.actual_lbl]
self.cashFlowStmt_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.cashFlowStmt_pnl.Init(self.cashFlowStmt_list)
sizer.Add(self.cashFlowStmt_pnl, 0, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, 5)
sizer.Add(wx.StaticLine(self, -1), 0, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, 5)
lineCount = 0
addSL_list = (6, 11, 18, 22) # Indicates where to insert a static line
for pnl, fld in zip(self.cf_pnls, self.cf_fields):
bold, italic = False, False
if fld in self.bold_list:
bold = True
if fld in self.italic_list:
italic = True
pnl.Init(fld, bold, italic)
sizer.Add(pnl, 0, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, 5)
if lineCount in addSL_list:
sizer.Add(wx.StaticLine(self, -1), 0, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, 5)
if lineCount % 2 == 0:
pnl.SetBackgroundColour(self.styles.lightGrey)
if lineCount in (20, ):
sizer.Add((-1, 10))
lineCount += 1
# Add Financial Ratios --------------------------------------------
sizer.Add((-1, 20))
self.financialRatios_list = [self.financialRatios_lbl, self.forecast_lbl, self.actual_lbl]
self.financialRatios_pnl = X_miscPnls.Report2_Row_Pnl(self, -1)
self.financialRatios_pnl.Init(self.financialRatios_list)
sizer.Add(self.financialRatios_pnl, 0, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, 5)
sizer.Add(wx.StaticLine(self, -1), 0, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, 5)
lineCount = 0
for pnl, fld in zip(self.fr_pnls, self.fr_fields):
bold, italic = False, False
if fld in self.bold_list:
bold = True
if fld in self.italic_list:
italic = True
pnl.Init(fld, bold, italic)
sizer.Add(pnl, 0, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, 5)
if lineCount % 2 == 0:
pnl.SetBackgroundColour(self.styles.lightGrey)
lineCount += 1
self.SetSizer(sizer)
self.SetupScrolling()
#----------------------------------------------------------------------
def UpdateCF(self, cfList, isPF=True):
'''Adds values to the Cash Flow statement.'''
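        # Expected cfList layout (mirrors the unpacking below):
        #   cfList[0]: (net income, depreciation, change in A/R, change in inventory, change in A/P)
        #   cfList[1]: capital expenditures (general fixed assets, trucks, machinery)
        #   cfList[2]: (chg short-term financing, chg short-term LoC, LTD increases, LTD decreases,
        #               proceeds from stock, cash dividends paid)
        #   cfList[3]: beginning cash balance
        # Values go to column 1 (forecast) when isPF is True, otherwise to column 2 (actual).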
if isPF:
insCol = 1
else:
insCol = 2
# Net Cash Flows from Operations Activities
netIncome, depr, chRec, chInv, chPay = cfList[0]
self.netIncome_pnl.AddVal(netIncome, insCol)
self.depreciation_pnl.AddVal(depr, insCol)
self.chgInAR_pnl.AddVal(chRec, insCol)
self.chgInInv_pnl.AddVal(chInv, insCol)
self.chgInAP_pnl.AddVal(chPay, insCol)
netOps = netIncome + depr + chRec + chInv + chPay
self.NetCashFlowOps_pnl.AddVal(netOps, insCol)
# Net Cash Flows from Investing Activities
eq, tr, ma = cfList[1]
self.addGFA_pnl.AddVal(eq, insCol)
self.addTrucks_pnl.AddVal(tr, insCol)
self.addMach_pnl.AddVal(ma, insCol)
netInv = eq + tr + ma
self.netCashFlowInv_pnl.AddVal(netInv, insCol)
# Net Cash Flows from Financing Activities
chSTB, chLoC, incLTD, decLTD, incEq, divPaid = cfList[2]
self.chgShortTermFin_pnl.AddVal(chSTB, insCol)
self.chgShortTermLoC_pnl.AddVal(chLoC, insCol)
self.incLTD_pnl.AddVal(incLTD, insCol)
self.decLTD_pnl.AddVal(decLTD, insCol)
self.proceedsFromStock_pnl.AddVal(incEq, insCol)
self.cashDivPaid_pnl.AddVal(divPaid, insCol)
netFin = chSTB + chLoC + incLTD + decLTD + incEq + divPaid
self.netCashFlowFin_pnl.AddVal(netFin, insCol)
netAll = netOps + netInv + netFin
self.netCashFlowAll_pnl.AddVal(netAll, insCol)
# Beginning and ending cash balance
begCB = cfList[3]
endCB = begCB + netAll
self.begCashBal_pnl.AddVal(begCB, insCol)
self.endCashBal_pnl.AddVal(endCB, insCol)
#----------------------------------------------------------------------
def UpdateFR(self, frList, isPF=True):
'''Adds the financial ratios to the cash flow / ratio panel.'''
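        # frList is expected to hold 20 values in the same order as self.fr_fields;
        # indices 0-6 and 17 are rendered as percentages, the rest as plain floats.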
if isPF:
insCol = 1
else:
insCol = 2
self.y2yGrowth_pnl.AddVal(frList[0], insCol, isCur=False, isPerc=True)
self.grossMargin_pnl.AddVal(frList[1], insCol, isCur=False, isPerc=True)
self.SGAofSales_pnl.AddVal(frList[2], insCol, isCur=False, isPerc=True)
self.EBITDAOpMarg_pnl.AddVal(frList[3], insCol, isCur=False, isPerc=True)
self.EBITOpMarg_pnl.AddVal(frList[4], insCol, isCur=False, isPerc=True)
self.taxRate_pnl.AddVal(frList[5], insCol, isCur=False, isPerc=True)
self.netProfMarg_pnl.AddVal(frList[6], insCol, isCur=False, isPerc=True)
self.currentRatio_pnl.AddFloat(frList[7], insCol)
self.quickRatio_pnl.AddFloat(frList[8], insCol)
self.cashRatio_pnl.AddFloat(frList[9], insCol)
self.daysInvOut_pnl.AddFloat(frList[10], insCol)
self.daysSalesOut_pnl.AddFloat(frList[11], insCol)
self.daysPayablesOut_pnl.AddFloat(frList[12], insCol)
self.ccc_pnl.AddFloat(frList[13], insCol)
self.roa_pnl.AddFloat(frList[14], insCol)
self.roe_pnl.AddFloat(frList[15], insCol)
self.roi_pnl.AddFloat(frList[16], insCol)
self.estIR_pnl.AddVal(frList[17], insCol, isCur=False, isPerc=True)
self.debtEquity_pnl.AddFloat(frList[18], insCol)
self.ebitdaToIntExp_pnl.AddFloat(frList[19], insCol)
self.Scroll(0, 0)
#----------------------------------------------------------------------
def ExportCF(self):
'''Exports the statement of cash flows'''
cf = []
for p in [self.cashFlowStmt_pnl]+self.cf_pnls:
cf.append(p.ExportRow())
return cf
#----------------------------------------------------------------------
def ExportFR(self):
'''Exports the financial ratios.'''
fr = []
for p in [self.financialRatios_pnl]+self.fr_pnls:
fr.append(p.ExportRow())
return fr
|
gpl-3.0
| 8,315,200,174,922,877,000
| 53.762963
| 116
| 0.571287
| false
| 3.086279
| false
| false
| false
|
82Flex/DCRM
|
fluent_comments/templatetags/fluent_comments_tags.py
|
1
|
5568
|
import django
from django.conf import settings
from django.template import Library, Node
from django.template.loader import get_template
from fluent_comments.utils import get_comment_template_name, get_comment_context_data
from tag_parser import parse_token_kwargs
from tag_parser.basetags import BaseInclusionNode
from fluent_comments import appsettings
from fluent_comments.models import get_comments_for_model
from fluent_comments.moderation import comments_are_open, comments_are_moderated
try:
from django.template import context_processors # Django 1.10+
except ImportError:  # Django < 1.10
from django.core import context_processors
register = Library()
class AjaxCommentTags(BaseInclusionNode):
"""
Custom inclusion node with some special parsing features.
Using the ``@register.inclusion_tag`` is not sufficient,
because some keywords require custom parsing.
"""
template_name = "fluent_comments/templatetags/ajax_comment_tags.html"
min_args = 1
max_args = 1
@classmethod
def parse(cls, parser, token):
"""
Custom parsing for the ``{% ajax_comment_tags for ... %}`` tag.
"""
# Process the template line.
tag_name, args, kwargs = parse_token_kwargs(
parser, token,
allowed_kwargs=cls.allowed_kwargs,
compile_args=False, # Only overrule here, keep at render() phase.
compile_kwargs=cls.compile_kwargs
)
# remove "for" keyword, so all other args can be resolved in render().
if args[0] == 'for':
args.pop(0)
# And apply the compilation afterwards
for i in range(len(args)):
args[i] = parser.compile_filter(args[i])
cls.validate_args(tag_name, *args, **kwargs)
return cls(tag_name, *args, **kwargs)
def get_context_data(self, parent_context, *tag_args, **tag_kwargs):
"""
The main logic for the inclusion node, analogous to ``@register.inclusion_node``.
"""
target_object = tag_args[0] # moved one spot due to .pop(0)
new_context = {
'STATIC_URL': parent_context.get('STATIC_URL', None),
'USE_THREADEDCOMMENTS': appsettings.USE_THREADEDCOMMENTS,
'target_object': target_object,
}
# Be configuration independent:
if new_context['STATIC_URL'] is None:
try:
request = parent_context['request']
except KeyError:
new_context.update({'STATIC_URL': settings.STATIC_URL})
else:
new_context.update(context_processors.static(request))
return new_context
@register.tag
def ajax_comment_tags(parser, token):
"""
Display the required ``<div>`` elements to let the Ajax comment functionality work with your form.
"""
return AjaxCommentTags.parse(parser, token)
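# Example use of the ``ajax_comment_tags`` tag in a template (``article`` is an
# illustrative object from the template context):
#   {% load fluent_comments_tags %}
#   {% ajax_comment_tags for article %}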
register.filter('comments_are_open', comments_are_open)
register.filter('comments_are_moderated', comments_are_moderated)
@register.filter
def comments_count(content_object):
"""
Return the number of comments posted at a target object.
You can use this instead of the ``{% get_comment_count for [object] as [varname] %}`` tag.
"""
return get_comments_for_model(content_object).count()
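# Example use of the ``comments_count`` filter in a template (``article`` is illustrative):
#   {% load fluent_comments_tags %}
#   {{ article|comments_count }} comment(s)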
class FluentCommentsList(Node):
def render(self, context):
# Include proper template, avoid parsing it twice by operating like @register.inclusion_tag()
if not getattr(self, 'nodelist', None):
if appsettings.USE_THREADEDCOMMENTS:
template = get_template("fluent_comments/templatetags/threaded_list.html")
else:
template = get_template("fluent_comments/templatetags/flat_list.html")
self.nodelist = template
# NOTE NOTE NOTE
# HACK: Determine the parent object based on the comment list queryset.
# the {% render_comment_list for article %} tag does not pass the object in a general form to the template.
# Not assuming that 'object.pk' holds the correct value.
#
# This obviously doesn't work when the list is empty.
# To address that, the client-side code also fixes that, by looking for the object ID in the nearby form.
target_object_id = context.get('target_object_id', None)
if not target_object_id:
comment_list = context['comment_list']
if isinstance(comment_list, list) and comment_list:
target_object_id = comment_list[0].object_pk
# Render the node
context['USE_THREADEDCOMMENTS'] = appsettings.USE_THREADEDCOMMENTS
context['target_object_id'] = target_object_id
if django.VERSION >= (1, 8):
context = context.flatten()
return self.nodelist.render(context)
@register.tag
def fluent_comments_list(parser, token):
"""
A tag to select the proper template for the current comments app.
"""
return FluentCommentsList()
class RenderCommentNode(BaseInclusionNode):
min_args = 1
max_args = 1
def get_template_name(self, *tag_args, **tag_kwargs):
return get_comment_template_name(comment=tag_args[0])
def get_context_data(self, parent_context, *tag_args, **tag_kwargs):
return get_comment_context_data(comment=tag_args[0])
@register.tag
def render_comment(parser, token):
"""
Render a single comment.
This tag does not exist in the standard django_comments,
because it only renders a complete list.
"""
return RenderCommentNode.parse(parser, token)
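# Example use of the ``render_comment`` tag in a template (``comment`` is an
# illustrative comment object from the context):
#   {% render_comment comment %}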
|
agpl-3.0
| 6,444,168,918,875,907,000
| 34.018868
| 115
| 0.655352
| false
| 3.988539
| false
| false
| false
|
sysbot/CouchPotatoServer
|
couchpotato/core/plugins/scanner/main.py
|
1
|
33209
|
from couchpotato import get_session
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString, ss, sp
from couchpotato.core.helpers.variable import getExt, getImdb, tryInt, \
splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import File, Media
from enzyme.exceptions import NoParserError, ParseError
from guessit import guess_movie_info
from subliminal.videos import Video
import enzyme
import os
import re
import threading
import time
import traceback
from six.moves import filter, map, zip
log = CPLog(__name__)
class Scanner(Plugin):
ignored_in_path = [os.path.sep + 'extracted' + os.path.sep, 'extracting', '_unpack', '_failed_', '_unknown_', '_exists_', '_failed_remove_',
'_failed_rename_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo',
'thumbs.db', 'ehthumbs.db', 'desktop.ini'] #unpacking, smb-crap, hidden files
ignore_names = ['extract', 'extracting', 'extracted', 'movie', 'movies', 'film', 'films', 'download', 'downloads', 'video_ts', 'audio_ts', 'bdmv', 'certificate']
extensions = {
'movie': ['mkv', 'wmv', 'avi', 'mpg', 'mpeg', 'mp4', 'm2ts', 'iso', 'img', 'mdf', 'ts', 'm4v'],
'movie_extra': ['mds'],
'dvd': ['vts_*', 'vob'],
'nfo': ['nfo', 'txt', 'tag'],
'subtitle': ['sub', 'srt', 'ssa', 'ass'],
'subtitle_extra': ['idx'],
'trailer': ['mov', 'mp4', 'flv']
}
file_types = {
'subtitle': ('subtitle', 'subtitle'),
'subtitle_extra': ('subtitle', 'subtitle_extra'),
'trailer': ('video', 'trailer'),
'nfo': ('nfo', 'nfo'),
'movie': ('video', 'movie'),
'movie_extra': ('movie', 'movie_extra'),
'backdrop': ('image', 'backdrop'),
'poster': ('image', 'poster'),
'thumbnail': ('image', 'thumbnail'),
'leftover': ('leftover', 'leftover'),
}
file_sizes = { # in MB
'movie': {'min': 300},
'trailer': {'min': 2, 'max': 250},
'backdrop': {'min': 0, 'max': 5},
}
codecs = {
'audio': ['dts', 'ac3', 'ac3d', 'mp3'],
'video': ['x264', 'h264', 'divx', 'xvid']
}
audio_codec_map = {
0x2000: 'ac3',
0x2001: 'dts',
0x0055: 'mp3',
0x0050: 'mp2',
0x0001: 'pcm',
0x003: 'pcm',
0x77a1: 'tta1',
0x5756: 'wav',
0x6750: 'vorbis',
0xF1AC: 'flac',
0x00ff: 'aac',
}
source_media = {
'bluray': ['bluray', 'blu-ray', 'brrip', 'br-rip'],
'hddvd': ['hddvd', 'hd-dvd'],
'dvd': ['dvd'],
'hdtv': ['hdtv']
}
clean = '[ _\,\.\(\)\[\]\-]?(extended.cut|directors.cut|french|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip' \
'|hdtvrip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)'
multipart_regex = [
'[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1
'[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1
'[ _\.-]+part[ _\.-]*([0-9a-d]+)', #*part1
'[ _\.-]+dis[ck][ _\.-]*([0-9a-d]+)', #*disk1
'cd[ _\.-]*([0-9a-d]+)$', #cd1.ext
'dvd[ _\.-]*([0-9a-d]+)$', #dvd1.ext
'part[ _\.-]*([0-9a-d]+)$', #part1.mkv
'dis[ck][ _\.-]*([0-9a-d]+)$', #disk1.mkv
'()[ _\.-]+([0-9]*[abcd]+)(\.....?)$',
'([a-z])([0-9]+)(\.....?)$',
'()([ab])(\.....?)$' #*a.mkv
]
cp_imdb = '(.cp.(?P<id>tt[0-9{7}]+).)'
def __init__(self):
addEvent('scanner.create_file_identifier', self.createStringIdentifier)
addEvent('scanner.remove_cptag', self.removeCPTag)
addEvent('scanner.scan', self.scan)
addEvent('scanner.name_year', self.getReleaseNameYear)
addEvent('scanner.partnumber', self.getPartNumber)
def scan(self, folder = None, files = None, release_download = None, simple = False, newer_than = 0, return_ignored = True, on_found = None):
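        # Returns a dict of {identifier: group}; each group ends up with keys such as
        # 'files' (lists per file type), 'meta_data', 'subtitle_language', 'parentdir',
        # 'dirname', 'identifiers', 'is_dvd', 'library' and, when the movie could be
        # matched, 'movie_id'. An empty dict is returned if the folder doesn't exist.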
folder = sp(folder)
if not folder or not os.path.isdir(folder):
            log.error('Folder doesn\'t exist: %s', folder)
return {}
# Get movie "master" files
movie_files = {}
leftovers = []
# Scan all files of the folder if no files are set
if not files:
check_file_date = True
try:
files = []
for root, dirs, walk_files in os.walk(folder):
files.extend([sp(os.path.join(root, filename)) for filename in walk_files])
# Break if CP wants to shut down
if self.shuttingDown():
break
except:
log.error('Failed getting files from %s: %s', (folder, traceback.format_exc()))
log.debug('Found %s files to scan and group in %s', (len(files), folder))
else:
check_file_date = False
files = [sp(x) for x in files]
for file_path in files:
if not os.path.exists(file_path):
continue
# Remove ignored files
if self.isSampleFile(file_path):
leftovers.append(file_path)
continue
elif not self.keepFile(file_path):
continue
is_dvd_file = self.isDVDFile(file_path)
if self.filesizeBetween(file_path, self.file_sizes['movie']) or is_dvd_file: # Minimal 300MB files or is DVD file
# Normal identifier
identifier = self.createStringIdentifier(file_path, folder, exclude_filename = is_dvd_file)
identifiers = [identifier]
# Identifier with quality
quality = fireEvent('quality.guess', [file_path], single = True) if not is_dvd_file else {'identifier':'dvdr'}
if quality:
identifier_with_quality = '%s %s' % (identifier, quality.get('identifier', ''))
identifiers = [identifier_with_quality, identifier]
if not movie_files.get(identifier):
movie_files[identifier] = {
'unsorted_files': [],
'identifiers': identifiers,
'is_dvd': is_dvd_file,
}
movie_files[identifier]['unsorted_files'].append(file_path)
else:
leftovers.append(file_path)
# Break if CP wants to shut down
if self.shuttingDown():
break
# Cleanup
del files
        # Sort in reverse; this prevents "Iron Man 2" from being grouped with "Iron Man",
        # because the "Iron Man 2" files will be grouped first.
leftovers = set(sorted(leftovers, reverse = True))
# Group files minus extension
ignored_identifiers = []
for identifier, group in movie_files.items():
if identifier not in group['identifiers'] and len(identifier) > 0: group['identifiers'].append(identifier)
log.debug('Grouping files: %s', identifier)
has_ignored = 0
for file_path in list(group['unsorted_files']):
ext = getExt(file_path)
wo_ext = file_path[:-(len(ext) + 1)]
found_files = set([i for i in leftovers if wo_ext in i])
group['unsorted_files'].extend(found_files)
leftovers = leftovers - found_files
has_ignored += 1 if ext == 'ignore' else 0
if has_ignored == 0:
for file_path in list(group['unsorted_files']):
ext = getExt(file_path)
has_ignored += 1 if ext == 'ignore' else 0
if has_ignored > 0:
ignored_identifiers.append(identifier)
# Break if CP wants to shut down
if self.shuttingDown():
break
# Create identifiers for all leftover files
path_identifiers = {}
for file_path in leftovers:
identifier = self.createStringIdentifier(file_path, folder)
if not path_identifiers.get(identifier):
path_identifiers[identifier] = []
path_identifiers[identifier].append(file_path)
# Group the files based on the identifier
delete_identifiers = []
for identifier, found_files in path_identifiers.items():
log.debug('Grouping files on identifier: %s', identifier)
group = movie_files.get(identifier)
if group:
group['unsorted_files'].extend(found_files)
delete_identifiers.append(identifier)
# Remove the found files from the leftover stack
leftovers = leftovers - set(found_files)
# Break if CP wants to shut down
if self.shuttingDown():
break
# Cleaning up used
for identifier in delete_identifiers:
if path_identifiers.get(identifier):
del path_identifiers[identifier]
del delete_identifiers
# Group based on folder
delete_identifiers = []
for identifier, found_files in path_identifiers.items():
log.debug('Grouping files on foldername: %s', identifier)
for ff in found_files:
new_identifier = self.createStringIdentifier(os.path.dirname(ff), folder)
group = movie_files.get(new_identifier)
if group:
group['unsorted_files'].extend([ff])
delete_identifiers.append(identifier)
# Remove the found files from the leftover stack
                    leftovers = leftovers - set([ff])
# Break if CP wants to shut down
if self.shuttingDown():
break
# leftovers should be empty
if leftovers:
log.debug('Some files are still left over: %s', leftovers)
# Cleaning up used
for identifier in delete_identifiers:
if path_identifiers.get(identifier):
del path_identifiers[identifier]
del delete_identifiers
# Make sure we remove older / still extracting files
valid_files = {}
        while not self.shuttingDown():
try:
identifier, group = movie_files.popitem()
except:
break
# Check if movie is fresh and maybe still unpacking, ignore files newer than 1 minute
file_too_new = False
for cur_file in group['unsorted_files']:
if not os.path.isfile(cur_file):
file_too_new = time.time()
break
file_time = [os.path.getmtime(cur_file), os.path.getctime(cur_file)]
for t in file_time:
if t > time.time() - 60:
file_too_new = tryInt(time.time() - t)
break
if file_too_new:
break
if check_file_date and file_too_new:
try:
time_string = time.ctime(file_time[0])
except:
try:
time_string = time.ctime(file_time[1])
except:
time_string = 'unknown'
log.info('Files seem to be still unpacking or just unpacked (created on %s), ignoring for now: %s', (time_string, identifier))
# Delete the unsorted list
del group['unsorted_files']
continue
# Only process movies newer than x
if newer_than and newer_than > 0:
has_new_files = False
for cur_file in group['unsorted_files']:
file_time = [os.path.getmtime(cur_file), os.path.getctime(cur_file)]
if file_time[0] > newer_than or file_time[1] > newer_than:
has_new_files = True
break
if not has_new_files:
log.debug('None of the files have changed since %s for %s, skipping.', (time.ctime(newer_than), identifier))
# Delete the unsorted list
del group['unsorted_files']
continue
valid_files[identifier] = group
del movie_files
total_found = len(valid_files)
# Make sure only one movie was found if a download ID is provided
if release_download and total_found == 0:
log.info('Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).', release_download.get('imdb_id'))
elif release_download and total_found > 1:
log.info('Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...', (release_download.get('imdb_id'), len(valid_files)))
release_download = None
# Determine file types
db = get_session()
processed_movies = {}
        while not self.shuttingDown():
try:
identifier, group = valid_files.popitem()
except:
break
if return_ignored is False and identifier in ignored_identifiers:
log.debug('Ignore file found, ignoring release: %s', identifier)
continue
# Group extra (and easy) files first
group['files'] = {
'movie_extra': self.getMovieExtras(group['unsorted_files']),
'subtitle': self.getSubtitles(group['unsorted_files']),
'subtitle_extra': self.getSubtitlesExtras(group['unsorted_files']),
'nfo': self.getNfo(group['unsorted_files']),
'trailer': self.getTrailers(group['unsorted_files']),
'leftover': set(group['unsorted_files']),
}
# Media files
if group['is_dvd']:
group['files']['movie'] = self.getDVDFiles(group['unsorted_files'])
else:
group['files']['movie'] = self.getMediaFiles(group['unsorted_files'])
if len(group['files']['movie']) == 0:
log.error('Couldn\'t find any movie files for %s', identifier)
continue
log.debug('Getting metadata for %s', identifier)
group['meta_data'] = self.getMetaData(group, folder = folder, release_download = release_download)
# Subtitle meta
group['subtitle_language'] = self.getSubtitleLanguage(group) if not simple else {}
# Get parent dir from movie files
for movie_file in group['files']['movie']:
group['parentdir'] = os.path.dirname(movie_file)
group['dirname'] = None
folder_names = group['parentdir'].replace(folder, '').split(os.path.sep)
folder_names.reverse()
# Try and get a proper dirname, so no "A", "Movie", "Download" etc
for folder_name in folder_names:
if folder_name.lower() not in self.ignore_names and len(folder_name) > 2:
group['dirname'] = folder_name
break
break
# Leftover "sorted" files
for file_type in group['files']:
                if file_type != 'leftover':
group['files']['leftover'] -= set(group['files'][file_type])
group['files'][file_type] = list(group['files'][file_type])
group['files']['leftover'] = list(group['files']['leftover'])
# Delete the unsorted list
del group['unsorted_files']
# Determine movie
group['library'] = self.determineMovie(group, release_download = release_download)
if not group['library']:
log.error('Unable to determine movie: %s', group['identifiers'])
else:
movie = db.query(Media).filter_by(library_id = group['library']['id']).first()
group['movie_id'] = None if not movie else movie.id
db.expire_all()
processed_movies[identifier] = group
# Notify parent & progress on something found
if on_found:
on_found(group, total_found, total_found - len(processed_movies))
# Wait for all the async events calm down a bit
while threading.activeCount() > 100 and not self.shuttingDown():
log.debug('Too many threads active, waiting a few seconds')
time.sleep(10)
if len(processed_movies) > 0:
log.info('Found %s movies in the folder %s', (len(processed_movies), folder))
else:
log.debug('Found no movies in the folder %s', folder)
return processed_movies
def getMetaData(self, group, folder = '', release_download = None):
data = {}
files = list(group['files']['movie'])
for cur_file in files:
if not self.filesizeBetween(cur_file, self.file_sizes['movie']): continue # Ignore smaller files
meta = self.getMeta(cur_file)
try:
data['video'] = meta.get('video', self.getCodec(cur_file, self.codecs['video']))
data['audio'] = meta.get('audio', self.getCodec(cur_file, self.codecs['audio']))
data['resolution_width'] = meta.get('resolution_width', 720)
data['resolution_height'] = meta.get('resolution_height', 480)
data['audio_channels'] = meta.get('audio_channels', 2.0)
data['aspect'] = round(float(meta.get('resolution_width', 720)) / meta.get('resolution_height', 480), 2)
except:
log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc()))
pass
if data.get('audio'): break
        # Use the quality guess first; if that fails, use the quality we wanted to download
data['quality'] = None
if release_download and release_download.get('quality'):
data['quality'] = fireEvent('quality.single', release_download.get('quality'), single = True)
if not data['quality']:
data['quality'] = fireEvent('quality.guess', files = files, extra = data, single = True)
if not data['quality']:
data['quality'] = fireEvent('quality.single', 'dvdr' if group['is_dvd'] else 'dvdrip', single = True)
data['quality_type'] = 'HD' if data.get('resolution_width', 0) >= 1280 or data['quality'].get('hd') else 'SD'
filename = re.sub('(.cp\(tt[0-9{7}]+\))', '', files[0])
data['group'] = self.getGroup(filename[len(folder):])
data['source'] = self.getSourceMedia(filename)
return data
def getMeta(self, filename):
try:
p = enzyme.parse(filename)
# Video codec
vc = ('h264' if p.video[0].codec == 'AVC1' else p.video[0].codec).lower()
# Audio codec
ac = p.audio[0].codec
try: ac = self.audio_codec_map.get(p.audio[0].codec)
except: pass
return {
'video': vc,
'audio': ac,
'resolution_width': tryInt(p.video[0].width),
'resolution_height': tryInt(p.video[0].height),
'audio_channels': p.audio[0].channels,
}
except ParseError:
log.debug('Failed to parse meta for %s', filename)
except NoParserError:
log.debug('No parser found for %s', filename)
except:
log.debug('Failed parsing %s', filename)
return {}
def getSubtitleLanguage(self, group):
detected_languages = {}
# Subliminal scanner
paths = None
try:
paths = group['files']['movie']
scan_result = []
for p in paths:
if not group['is_dvd']:
video = Video.from_path(toUnicode(p))
video_result = [(video, video.scan())]
scan_result.extend(video_result)
for video, detected_subtitles in scan_result:
for s in detected_subtitles:
if s.language and s.path not in paths:
detected_languages[s.path] = [s.language]
except:
log.debug('Failed parsing subtitle languages for %s: %s', (paths, traceback.format_exc()))
# IDX
for extra in group['files']['subtitle_extra']:
try:
if os.path.isfile(extra):
output = open(extra, 'r')
txt = output.read()
output.close()
idx_langs = re.findall('\nid: (\w+)', txt)
sub_file = '%s.sub' % os.path.splitext(extra)[0]
if len(idx_langs) > 0 and os.path.isfile(sub_file):
detected_languages[sub_file] = idx_langs
except:
log.error('Failed parsing subtitle idx for %s: %s', (extra, traceback.format_exc()))
return detected_languages
def determineMovie(self, group, release_download = None):
# Get imdb id from downloader
imdb_id = release_download and release_download.get('imdb_id')
if imdb_id:
            log.debug('Found movie via imdb id from its download id: %s', release_download.get('imdb_id'))
files = group['files']
# Check for CP(imdb_id) string in the file paths
if not imdb_id:
for cur_file in files['movie']:
imdb_id = self.getCPImdb(cur_file)
if imdb_id:
log.debug('Found movie via CP tag: %s', cur_file)
break
# Check and see if nfo contains the imdb-id
nfo_file = None
if not imdb_id:
try:
for nf in files['nfo']:
imdb_id = getImdb(nf, check_inside = True)
if imdb_id:
log.debug('Found movie via nfo file: %s', nf)
nfo_file = nf
break
except:
pass
# Check and see if filenames contains the imdb-id
if not imdb_id:
try:
for filetype in files:
for filetype_file in files[filetype]:
imdb_id = getImdb(filetype_file)
if imdb_id:
                            log.debug('Found movie via imdb in filename: %s', filetype_file)
break
except:
pass
# Check if path is already in db
if not imdb_id:
db = get_session()
for cf in files['movie']:
f = db.query(File).filter_by(path = toUnicode(cf)).first()
try:
imdb_id = f.library[0].identifier
log.debug('Found movie via database: %s', cf)
cur_file = cf
break
except:
pass
# Search based on identifiers
if not imdb_id:
for identifier in group['identifiers']:
if len(identifier) > 2:
try: filename = list(group['files'].get('movie'))[0]
except: filename = None
name_year = self.getReleaseNameYear(identifier, file_name = filename if not group['is_dvd'] else None)
if name_year.get('name') and name_year.get('year'):
movie = fireEvent('movie.search', q = '%(name)s %(year)s' % name_year, merge = True, limit = 1)
if len(movie) > 0:
imdb_id = movie[0].get('imdb')
log.debug('Found movie via search: %s', cur_file)
if imdb_id: break
else:
                    log.debug('Identifier too short to use for search: %s', identifier)
if imdb_id:
return fireEvent('library.add.movie', attrs = {
'identifier': imdb_id
}, update_after = False, single = True)
log.error('No imdb_id found for %s. Add a NFO file with IMDB id or add the year to the filename.', group['identifiers'])
return {}
def getCPImdb(self, string):
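        # Illustrative example (tt0123456 is a placeholder id):
        #   self.getCPImdb('some.movie.cp(tt0123456).mkv')  ->  'tt0123456'
        #   returns False when no cp(...) tag is present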
try:
m = re.search(self.cp_imdb, string.lower())
id = m.group('id')
if id: return id
except AttributeError:
pass
return False
def removeCPTag(self, name):
try:
return re.sub(self.cp_imdb, '', name)
except:
pass
return name
def getSamples(self, files):
return set(filter(lambda s: self.isSampleFile(s), files))
def getMediaFiles(self, files):
def test(s):
return self.filesizeBetween(s, self.file_sizes['movie']) and getExt(s.lower()) in self.extensions['movie'] and not self.isSampleFile(s)
return set(filter(test, files))
def getMovieExtras(self, files):
return set(filter(lambda s: getExt(s.lower()) in self.extensions['movie_extra'], files))
def getDVDFiles(self, files):
def test(s):
return self.isDVDFile(s)
return set(filter(test, files))
def getSubtitles(self, files):
return set(filter(lambda s: getExt(s.lower()) in self.extensions['subtitle'], files))
def getSubtitlesExtras(self, files):
return set(filter(lambda s: getExt(s.lower()) in self.extensions['subtitle_extra'], files))
def getNfo(self, files):
return set(filter(lambda s: getExt(s.lower()) in self.extensions['nfo'], files))
def getTrailers(self, files):
def test(s):
return re.search('(^|[\W_])trailer\d*[\W_]', s.lower()) and self.filesizeBetween(s, self.file_sizes['trailer'])
return set(filter(test, files))
def getImages(self, files):
def test(s):
return getExt(s.lower()) in ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'tbn']
files = set(filter(test, files))
images = {
'backdrop': set(filter(lambda s: re.search('(^|[\W_])fanart|backdrop\d*[\W_]', s.lower()) and self.filesizeBetween(s, self.file_sizes['backdrop']), files))
}
# Rest
images['rest'] = files - images['backdrop']
return images
def isDVDFile(self, file_name):
if list(set(file_name.lower().split(os.path.sep)) & set(['video_ts', 'audio_ts'])):
return True
for needle in ['vts_', 'video_ts', 'audio_ts', 'bdmv', 'certificate']:
if needle in file_name.lower():
return True
return False
def keepFile(self, filename):
# ignoredpaths
for i in self.ignored_in_path:
if i in filename.lower():
log.debug('Ignored "%s" contains "%s".', (filename, i))
return False
# All is OK
return True
def isSampleFile(self, filename):
is_sample = re.search('(^|[\W_])sample\d*[\W_]', filename.lower())
if is_sample: log.debug('Is sample file: %s', filename)
return is_sample
def filesizeBetween(self, file, file_size = None):
        if not file_size: file_size = {}
try:
return (file_size.get('min', 0) * 1048576) < os.path.getsize(file) < (file_size.get('max', 100000) * 1048576)
except:
log.error('Couldn\'t get filesize of %s.', file)
return False
def createStringIdentifier(self, file_path, folder = '', exclude_filename = False):
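        # Rough illustration (the exact result depends on simplifyString's cleanup):
        #   self.createStringIdentifier('Movie.Name.2011.720p.BluRay.x264-GRP.mkv')
        #   -> 'movie name 2011'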
year = self.findYear(file_path)
identifier = file_path.replace(folder, '').lstrip(os.path.sep) # root folder
identifier = os.path.splitext(identifier)[0] # ext
try:
path_split = splitString(identifier, os.path.sep)
identifier = path_split[-2] if len(path_split) > 1 and len(path_split[-2]) > len(path_split[-1]) else path_split[-1] # Only get filename
except: pass
if exclude_filename:
identifier = identifier[:len(identifier) - len(os.path.split(identifier)[-1])]
# multipart
identifier = self.removeMultipart(identifier)
# remove cptag
identifier = self.removeCPTag(identifier)
# groups, release tags, scenename cleaner, regex isn't correct
identifier = re.sub(self.clean, '::', simplifyString(identifier)).strip(':')
# Year
if year and identifier[:4] != year:
split_by = ':::' if ':::' in identifier else year
identifier = '%s %s' % (identifier.split(split_by)[0].strip(), year)
else:
identifier = identifier.split('::')[0]
# Remove duplicates
out = []
for word in identifier.split():
if not word in out:
out.append(word)
identifier = ' '.join(out)
return simplifyString(identifier)
def removeMultipart(self, name):
for regex in self.multipart_regex:
try:
found = re.sub(regex, '', name)
if found != name:
name = found
except:
pass
return name
def getPartNumber(self, name):
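        # Illustrative examples:
        #   self.getPartNumber('some.movie.cd2.mkv')  ->  '2'
        #   self.getPartNumber('some.movie.mkv')      ->  1   (no multipart marker found)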
for regex in self.multipart_regex:
try:
found = re.search(regex, name)
if found:
return found.group(1)
except:
pass
return 1
def getCodec(self, filename, codecs):
codecs = map(re.escape, codecs)
try:
codec = re.search('[^A-Z0-9](?P<codec>' + '|'.join(codecs) + ')[^A-Z0-9]', filename, re.I)
return (codec and codec.group('codec')) or ''
except:
return ''
def getGroup(self, file):
try:
match = re.findall('\-([A-Z0-9]+)[\.\/]', file, re.I)
return match[-1] or ''
except:
return ''
def getSourceMedia(self, file):
for media in self.source_media:
for alias in self.source_media[media]:
if alias in file.lower():
return media
return None
def findYear(self, text):
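        # Bracketed years win over bare ones, and the last match is returned, e.g.:
        #   self.findYear('Movie (2010) BluRay')      ->  '2010'
        #   self.findYear('Movie.Name.1999.1080p')    ->  '1999'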
# Search year inside () or [] first
matches = re.findall('(\(|\[)(?P<year>19[0-9]{2}|20[0-9]{2})(\]|\))', text)
if matches:
return matches[-1][1]
# Search normal
matches = re.findall('(?P<year>19[0-9]{2}|20[0-9]{2})', text)
if matches:
return matches[-1]
return ''
def getReleaseNameYear(self, release_name, file_name = None):
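        # Rough illustration (depends on simplifyString's cleanup):
        #   self.getReleaseNameYear('Some.Movie.2012.1080p')  ->  {'name': 'some movie', 'year': 2012}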
release_name = release_name.strip(' .-_')
# Use guessit first
guess = {}
if file_name:
try:
guessit = guess_movie_info(toUnicode(file_name))
if guessit.get('title') and guessit.get('year'):
guess = {
'name': guessit.get('title'),
'year': guessit.get('year'),
}
except:
log.debug('Could not detect via guessit "%s": %s', (file_name, traceback.format_exc()))
# Backup to simple
cleaned = ' '.join(re.split('\W+', simplifyString(release_name)))
cleaned = re.sub(self.clean, ' ', cleaned)
for year_str in [file_name, release_name, cleaned]:
if not year_str: continue
year = self.findYear(year_str)
if year:
break
cp_guess = {}
if year: # Split name on year
try:
movie_name = cleaned.rsplit(year, 1).pop(0).strip()
if movie_name:
cp_guess = {
'name': movie_name,
'year': int(year),
}
except:
pass
if not cp_guess: # Split name on multiple spaces
try:
movie_name = cleaned.split(' ').pop(0).strip()
cp_guess = {
'name': movie_name,
'year': int(year) if movie_name[:4] != year else 0,
}
except:
pass
if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) > len(guess.get('name', '')):
return cp_guess
elif guess == {}:
return cp_guess
return guess
|
gpl-3.0
| -6,318,653,490,351,516,000
| 35.898889
| 351
| 0.523202
| false
| 4.019973
| false
| false
| false
|
mppmu/secdec
|
nodist_examples/hexabox/generate_I73_1.py
|
1
|
2168
|
#! /usr/bin/env python
from pySecDec.loop_integral import loop_package
import pySecDec as psd
li = psd.loop_integral.LoopIntegralFromPropagators(
loop_momenta = ['k1','k2'],
external_momenta = ['p1','p2','p3','p4','p5'],
#Lorentz_indices = ['mu'],
propagators = ['k1**2','(k1-p1)**2','(k1-p1-p2)**2','(k1-p1-p2-p3)**2','k2**2','(k2-p1-p2-p3-p4)**2','(k1-k2)**2','(k1-k2+p4)**2','(k2-p1)**2','(k2-p1-p2)**2','(k2-p1-p2-p3)**2'],
powerlist = [1,1,1,1,1,0,1,0,0,0,0],
#numerator = 'k1(mu)*k2(mu)',
replacement_rules = [
('p1*p1', 0),
('p2*p2', 0),
('p3*p3', 0),
('p4*p4', 0),
('p1*p2', 'v1/2'),
('p2*p3', 'v2/2'),
('p1*p3', '(v4-v1-v2)/2'),
('p1*p4', '(v2-v5-v4)/2'),
('p2*p4', '(-v2-v3+v5)/2'),
('p3*p4', 'v3/2')
]
)
Mandelstam_symbols = ['v1', 'v2','v3', 'v4', 'v5']
#mass_symbols = []
loop_package(
name = 'I73_1',
loop_integral = li,
real_parameters = Mandelstam_symbols,
# the highest order of the final epsilon expansion --> change this value to whatever you think is appropriate
requested_order = 0,
additional_prefactor = 'exp(2*EulerGamma*eps)',
# the optimization level to use in FORM (can be 0, 1, 2, 3)
form_optimization_level = 2,
# the WorkSpace parameter for FORM
form_work_space = '2G',
# the method to be used for the sector decomposition
# valid values are ``iterative`` or ``geometric`` or ``geometric_ku``
decomposition_method = 'geometric',
# if you choose ``geometric[_ku]`` and 'normaliz' is not in your
# $PATH, you can set the path to the 'normaliz' command-line
# executable here
#normaliz_executable='/path/to/normaliz',
# whether or not to produce code to perform the contour deformation
# contour deformation is not required if we only want to compute euclidean points (all Mandelstam invariants negative)
contour_deformation = False,
# no symmetries --> no need to run the full symmetry finder
#use_Pak = False,
)
# generates 34 sectors, no symmetries
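# Typical follow-up steps (a sketch, not part of this script; exact file names and the
# call signature depend on the installed pySecDec version):
#   $ make -C I73_1
#   >>> from pySecDec.integral_interface import IntegralLibrary
#   >>> I73_1 = IntegralLibrary('I73_1/I73_1_pylink.so')
#   >>> result = I73_1([-1.0, -2.0, -3.0, -4.0, -5.0])  # a euclidean point: all invariants negative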
|
gpl-3.0
| 7,686,921,309,786,268,000
| 29.111111
| 179
| 0.572417
| false
| 2.793814
| false
| false
| false
|
pkill-nine/qutebrowser
|
qutebrowser/config/configdata.py
|
1
|
75773
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Configuration data for config.py.
Module attributes:
FIRST_COMMENT: The initial comment header to place in the config.
SECTION_DESC: A dictionary with descriptions for sections.
DATA: A global read-only copy of the default config, an OrderedDict of
sections.
"""
import sys
import re
import collections
from qutebrowser.config import configtypes as typ
from qutebrowser.config import sections as sect
from qutebrowser.config.value import SettingValue
from qutebrowser.utils.qtutils import MAXVALS
from qutebrowser.utils import usertypes, qtutils
FIRST_COMMENT = r"""
# vim: ft=dosini
# Configfile for qutebrowser.
#
# This configfile is parsed by python's configparser in extended
# interpolation mode. The format is very INI-like, so there are
# categories like [general] with "key = value"-pairs.
#
# Note that you shouldn't add your own comments, as this file is
# regenerated every time the config is saved.
#
# Interpolation looks like ${value} or ${section:value} and will be
# replaced by the respective value.
#
# Some settings will expand environment variables. Note that, since
# interpolation is run first, you will need to escape the $ char as
# described below.
#
# This is the default config, so if you want to remove anything from
# here (as opposed to change/add), for example a key binding, set it to
# an empty value.
#
# You will need to escape the following values:
# - # at the start of the line (at the first position of the key) (\#)
# - $ in a value ($$)
"""
SECTION_DESC = {
'general': "General/miscellaneous options.",
'ui': "General options related to the user interface.",
'input': "Options related to input modes.",
'network': "Settings related to the network.",
'completion': "Options related to completion and command history.",
'tabs': "Configuration of the tab bar.",
'storage': "Settings related to cache and storage.",
'content': "Loaded plugins/scripts and allowed actions.",
'hints': "Hinting settings.",
'searchengines': (
"Definitions of search engines which can be used via the address "
"bar.\n"
"The searchengine named `DEFAULT` is used when "
"`general -> auto-search` is true and something else than a URL was "
"entered to be opened. Other search engines can be used by prepending "
"the search engine name to the search term, e.g. "
"`:open google qutebrowser`. The string `{}` will be replaced by the "
"search term, use `{{` and `}}` for literal `{`/`}` signs."),
'aliases': (
"Aliases for commands.\n"
"By default, no aliases are defined. Example which adds a new command "
"`:qtb` to open qutebrowsers website:\n\n"
"`qtb = open https://www.qutebrowser.org/`"),
'colors': (
"Colors used in the UI.\n"
"A value can be in one of the following format:\n\n"
" * `#RGB`/`#RRGGBB`/`#RRRGGGBBB`/`#RRRRGGGGBBBB`\n"
" * An SVG color name as specified in http://www.w3.org/TR/SVG/"
"types.html#ColorKeywords[the W3C specification].\n"
" * transparent (no color)\n"
" * `rgb(r, g, b)` / `rgba(r, g, b, a)` (values 0-255 or "
"percentages)\n"
" * `hsv(h, s, v)` / `hsva(h, s, v, a)` (values 0-255, hue 0-359)\n"
" * A gradient as explained in http://doc.qt.io/qt-5/"
"stylesheet-reference.html#list-of-property-types[the Qt "
"documentation] under ``Gradient''.\n\n"
"A *.system value determines the color system to use for color "
"interpolation between similarly-named *.start and *.stop entries, "
"regardless of how they are defined in the options. "
"Valid values are 'rgb', 'hsv', and 'hsl'.\n\n"
"The `hints.*` values are a special case as they're real CSS "
"colors, not Qt-CSS colors. There, for a gradient, you need to use "
"`-webkit-gradient`, see https://www.webkit.org/blog/175/introducing-"
"css-gradients/[the WebKit documentation]."),
'fonts': (
"Fonts used for the UI, with optional style/weight/size.\n\n"
" * Style: `normal`/`italic`/`oblique`\n"
" * Weight: `normal`, `bold`, `100`..`900`\n"
" * Size: _number_ `px`/`pt`"),
}
DEFAULT_FONT_SIZE = '10pt' if sys.platform == 'darwin' else '8pt'
def data(readonly=False):
"""Get the default config data.
Return:
A {name: section} OrderedDict.
"""
return collections.OrderedDict([
('general', sect.KeyValue(
('ignore-case',
SettingValue(typ.IgnoreCase(), 'smart'),
"Whether to find text on a page case-insensitively."),
('startpage',
SettingValue(typ.List(typ.String()),
'https://start.duckduckgo.com'),
"The default page(s) to open at the start, separated by commas."),
('yank-ignored-url-parameters',
SettingValue(typ.List(typ.String()),
'ref,utm_source,utm_medium,utm_campaign,utm_term,'
'utm_content'),
"The URL parameters to strip with :yank url, separated by "
"commas."),
('default-open-dispatcher',
SettingValue(typ.String(none_ok=True), ''),
"The default program used to open downloads. Set to an empty "
"string to use the default internal handler.\n\n"
"Any {} in the string will be expanded to the filename, else "
"the filename will be appended."),
('default-page',
SettingValue(typ.FuzzyUrl(), '${startpage}'),
"The page to open if :open -t/-b/-w is used without URL. Use "
"`about:blank` for a blank page."),
('auto-search',
SettingValue(typ.AutoSearch(), 'naive'),
"Whether to start a search when something else than a URL is "
"entered."),
('auto-save-config',
SettingValue(typ.Bool(), 'true'),
"Whether to save the config automatically on quit."),
('auto-save-interval',
SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '15000'),
"How often (in milliseconds) to auto-save config/cookies/etc."),
('editor',
SettingValue(typ.ShellCommand(placeholder=True), 'gvim -f "{}"'),
"The editor (and arguments) to use for the `open-editor` "
"command.\n\n"
"The arguments get split like in a shell, so you can use `\"` or "
"`'` to quote them.\n"
"`{}` gets replaced by the filename of the file to be edited."),
('editor-encoding',
SettingValue(typ.Encoding(), 'utf-8'),
"Encoding to use for editor."),
('private-browsing',
SettingValue(typ.Bool(), 'false'),
"Open new windows in private browsing mode which does not record "
"visited pages."),
('developer-extras',
SettingValue(typ.Bool(), 'false',
backends=[usertypes.Backend.QtWebKit]),
"Enable extra tools for Web developers.\n\n"
"This needs to be enabled for `:inspector` to work and also adds "
"an _Inspect_ entry to the context menu. For QtWebEngine, see "
"'qutebrowser --help' instead."),
('print-element-backgrounds',
SettingValue(typ.Bool(), 'true',
backends=(
None if qtutils.version_check('5.8', strict=True)
else [usertypes.Backend.QtWebKit])),
"Whether the background color and images are also drawn when the "
"page is printed.\n"
"This setting only works with Qt 5.8 or newer when using the "
"QtWebEngine backend."),
('xss-auditing',
SettingValue(typ.Bool(), 'false'),
"Whether load requests should be monitored for cross-site "
"scripting attempts.\n\n"
"Suspicious scripts will be blocked and reported in the "
"inspector's JavaScript console. Enabling this feature might "
"have an impact on performance."),
('default-encoding',
SettingValue(typ.String(), 'iso-8859-1'),
"Default encoding to use for websites.\n\n"
"The encoding must be a string describing an encoding such as "
"_utf-8_, _iso-8859-1_, etc."),
('new-instance-open-target',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('tab', "Open a new tab in the existing "
"window and activate the window."),
('tab-bg', "Open a new background tab in the "
"existing window and activate the "
"window."),
('tab-silent', "Open a new tab in the existing "
"window without activating "
"the window."),
('tab-bg-silent', "Open a new background tab "
"in the existing window "
"without activating the "
"window."),
('window', "Open in a new window.")
)), 'tab'),
"How to open links in an existing instance if a new one is "
"launched."),
('new-instance-open-target.window',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('first-opened', "Open new tabs in the first (oldest) "
"opened window."),
('last-opened', "Open new tabs in the last (newest) "
"opened window."),
('last-focused', "Open new tabs in the most recently "
"focused window."),
('last-visible', "Open new tabs in the most recently "
"visible window.")
)), 'last-focused'),
"Which window to choose when opening links as new tabs."),
('log-javascript-console',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('none', "Don't log messages."),
('debug', "Log messages with debug level."),
('info', "Log messages with info level.")
)), 'debug'),
"How to log javascript console messages."),
('save-session',
SettingValue(typ.Bool(), 'false'),
"Whether to always save the open pages."),
('session-default-name',
SettingValue(typ.SessionName(none_ok=True), ''),
"The name of the session to save by default, or empty for the "
"last loaded session."),
('url-incdec-segments',
SettingValue(
typ.FlagList(valid_values=typ.ValidValues(
'host', 'path', 'query', 'anchor')),
'path,query'),
"The URL segments where `:navigate increment/decrement` will "
"search for a number."),
readonly=readonly
)),
('ui', sect.KeyValue(
('history-session-interval',
SettingValue(typ.Int(), '30'),
"The maximum time in minutes between two history items for them "
"to be considered being from the same session. Use -1 to "
"disable separation."),
('zoom-levels',
SettingValue(typ.List(typ.Perc(minval=0)),
'25%,33%,50%,67%,75%,90%,100%,110%,125%,150%,175%,'
'200%,250%,300%,400%,500%'),
"The available zoom levels, separated by commas."),
('default-zoom',
SettingValue(typ.Perc(), '100%'),
"The default zoom level."),
('downloads-position',
SettingValue(typ.VerticalPosition(), 'top'),
"Where to show the downloaded files."),
('status-position',
SettingValue(typ.VerticalPosition(), 'bottom'),
"The position of the status bar."),
('message-timeout',
SettingValue(typ.Int(minval=0), '2000'),
"Time (in ms) to show messages in the statusbar for.\n"
"Set to 0 to never clear messages."),
('message-unfocused',
SettingValue(typ.Bool(), 'false'),
"Whether to show messages in unfocused windows."),
('confirm-quit',
SettingValue(typ.ConfirmQuit(), 'never'),
"Whether to confirm quitting the application."),
('zoom-text-only',
SettingValue(typ.Bool(), 'false',
backends=[usertypes.Backend.QtWebKit]),
"Whether the zoom factor on a frame applies only to the text or "
"to all content."),
('frame-flattening',
SettingValue(typ.Bool(), 'false',
backends=[usertypes.Backend.QtWebKit]),
"Whether to expand each subframe to its contents.\n\n"
"This will flatten all the frames to become one scrollable "
"page."),
('user-stylesheet',
SettingValue(typ.File(none_ok=True), ''),
"User stylesheet to use (absolute filename or filename relative "
"to the config directory). Will expand environment variables."),
('hide-scrollbar',
SettingValue(typ.Bool(), 'true'),
"Hide the main scrollbar."),
('smooth-scrolling',
SettingValue(typ.Bool(), 'false'),
"Whether to enable smooth scrolling for web pages. Note smooth "
"scrolling does not work with the :scroll-px command."),
('remove-finished-downloads',
SettingValue(typ.Int(minval=-1), '-1'),
"Number of milliseconds to wait before removing finished "
"downloads. Will not be removed if value is -1."),
('hide-statusbar',
SettingValue(typ.Bool(), 'false'),
"Whether to hide the statusbar unless a message is shown."),
('statusbar-padding',
SettingValue(typ.Padding(), '1,1,0,0'),
"Padding for statusbar (top, bottom, left, right)."),
('window-title-format',
SettingValue(typ.FormatString(fields=['perc', 'perc_raw', 'title',
'title_sep', 'id',
'scroll_pos', 'host',
'backend', 'private']),
'{perc}{title}{title_sep}qutebrowser'),
"The format to use for the window title. The following "
"placeholders are defined:\n\n"
"* `{perc}`: The percentage as a string like `[10%]`.\n"
"* `{perc_raw}`: The raw percentage, e.g. `10`\n"
"* `{title}`: The title of the current web page\n"
"* `{title_sep}`: The string ` - ` if a title is set, empty "
"otherwise.\n"
"* `{id}`: The internal window ID of this window.\n"
"* `{scroll_pos}`: The page scroll position.\n"
"* `{host}`: The host of the current web page.\n"
"* `{backend}`: Either 'webkit' or 'webengine'\n"
"* `{private}` : Indicates when private mode is enabled.\n"),
('modal-js-dialog',
SettingValue(typ.Bool(), 'false'),
"Use standard JavaScript modal dialog for alert() and confirm()"),
('hide-wayland-decoration',
SettingValue(typ.Bool(), 'false'),
"Hide the window decoration when using wayland "
"(requires restart)"),
('keyhint-blacklist',
SettingValue(typ.List(typ.String(), none_ok=True), ''),
"Keychains that shouldn't be shown in the keyhint dialog\n\n"
"Globs are supported, so ';*' will blacklist all keychains"
"starting with ';'. Use '*' to disable keyhints"),
('keyhint-delay',
SettingValue(typ.Int(minval=0), '500'),
"Time from pressing a key to seeing the keyhint dialog (ms)"),
('prompt-radius',
SettingValue(typ.Int(minval=0), '8'),
"The rounding radius for the edges of prompts."),
('prompt-filebrowser',
SettingValue(typ.Bool(), 'true'),
"Show a filebrowser in upload/download prompts."),
readonly=readonly
)),
('network', sect.KeyValue(
('do-not-track',
SettingValue(typ.Bool(), 'true'),
"Value to send in the `DNT` header."),
('accept-language',
SettingValue(typ.String(none_ok=True), 'en-US,en'),
"Value to send in the `accept-language` header."),
('referer-header',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('always', "Always send."),
('never', "Never send; this is not recommended,"
" as some sites may break."),
('same-domain', "Only send for the same domain."
" This will still protect your privacy, but"
" shouldn't break any sites.")
)), 'same-domain', backends=[usertypes.Backend.QtWebKit]),
"Send the Referer header"),
('user-agent',
SettingValue(typ.UserAgent(none_ok=True), ''),
"User agent to send. Empty to send the default."),
('proxy',
SettingValue(typ.Proxy(), 'system'),
"The proxy to use.\n\n"
"In addition to the listed values, you can use a `socks://...` "
"or `http://...` URL."),
('proxy-dns-requests',
SettingValue(typ.Bool(), 'true',
backends=[usertypes.Backend.QtWebKit]),
"Whether to send DNS requests over the configured proxy."),
('ssl-strict',
SettingValue(typ.BoolAsk(), 'ask'),
"Whether to validate SSL handshakes."),
('dns-prefetch',
SettingValue(typ.Bool(), 'true',
backends=[usertypes.Backend.QtWebKit]),
"Whether to try to pre-fetch DNS entries to speed up browsing."),
('custom-headers',
SettingValue(typ.HeaderDict(none_ok=True), ''),
"Set custom headers for qutebrowser HTTP requests."),
('netrc-file',
SettingValue(typ.File(none_ok=True), ''),
"Set location of a netrc-file for HTTP authentication. If empty, "
"~/.netrc is used."),
readonly=readonly
)),
('completion', sect.KeyValue(
('show',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('always', "Whenever a completion is available."),
('auto', "Whenever a completion is requested."),
('never', "Never.")
)), 'always'),
"When to show the autocompletion window."),
('download-path-suggestion',
SettingValue(
typ.String(valid_values=typ.ValidValues(
('path', "Show only the download path."),
('filename', "Show only download filename."),
('both', "Show download path and filename."))),
'path'),
"What to display in the download filename input."),
('timestamp-format',
SettingValue(typ.TimestampTemplate(none_ok=True), '%Y-%m-%d'),
"How to format timestamps (e.g. for history)"),
('height',
SettingValue(typ.PercOrInt(minperc=0, maxperc=100, minint=1),
'50%'),
"The height of the completion, in px or as percentage of the "
"window."),
('cmd-history-max-items',
SettingValue(typ.Int(minval=-1), '100'),
"How many commands to save in the command history.\n\n"
"0: no history / -1: unlimited"),
('web-history-max-items',
SettingValue(typ.Int(minval=-1, maxval=MAXVALS['int64']), '-1'),
"How many URLs to show in the web history.\n\n"
"0: no history / -1: unlimited"),
('quick-complete',
SettingValue(typ.Bool(), 'true'),
"Whether to move on to the next part when there's only one "
"possible completion left."),
('shrink',
SettingValue(typ.Bool(), 'false'),
"Whether to shrink the completion to be smaller than the "
"configured size if there are no scrollbars."),
('scrollbar-width',
SettingValue(typ.Int(minval=0), '12'),
"Width of the scrollbar in the completion window (in px)."),
('scrollbar-padding',
SettingValue(typ.Int(minval=0), '2'),
"Padding of scrollbar handle in completion window (in px)."),
readonly=readonly
)),
('input', sect.KeyValue(
('timeout',
SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '500'),
"Timeout (in milliseconds) for ambiguous key bindings.\n\n"
"If the current input forms both a complete match and a partial "
"match, the complete match will be executed after this time."),
('partial-timeout',
SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '5000'),
"Timeout (in milliseconds) for partially typed key bindings.\n\n"
"If the current input forms only partial matches, the keystring "
"will be cleared after this time."),
('insert-mode-on-plugins',
SettingValue(typ.Bool(), 'false'),
"Whether to switch to insert mode when clicking flash and other "
"plugins."),
('auto-leave-insert-mode',
SettingValue(typ.Bool(), 'true'),
"Whether to leave insert mode if a non-editable element is "
"clicked."),
('auto-insert-mode',
SettingValue(typ.Bool(), 'false'),
"Whether to automatically enter insert mode if an editable "
"element is focused after page load."),
('forward-unbound-keys',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('all', "Forward all unbound keys."),
('auto', "Forward unbound non-alphanumeric "
"keys."),
('none', "Don't forward any keys.")
)), 'auto'),
"Whether to forward unbound keys to the webview in normal mode."),
('spatial-navigation',
SettingValue(typ.Bool(), 'false'),
"Enables or disables the Spatial Navigation feature.\n\n"
"Spatial navigation consists in the ability to navigate between "
"focusable elements in a Web page, such as hyperlinks and form "
"controls, by using Left, Right, Up and Down arrow keys. For "
"example, if a user presses the Right key, heuristics determine "
"whether there is an element he might be trying to reach towards "
"the right and which element he probably wants."),
('links-included-in-focus-chain',
SettingValue(typ.Bool(), 'true'),
"Whether hyperlinks should be included in the keyboard focus "
"chain."),
('rocker-gestures',
SettingValue(typ.Bool(), 'false'),
"Whether to enable Opera-like mouse rocker gestures. This "
"disables the context menu."),
('mouse-zoom-divider',
SettingValue(typ.Int(minval=0), '512'),
"How much to divide the mouse wheel movements to translate them "
"into zoom increments."),
readonly=readonly
)),
('tabs', sect.KeyValue(
('background-tabs',
SettingValue(typ.Bool(), 'false'),
"Whether to open new tabs (middleclick/ctrl+click) in "
"background."),
('select-on-remove',
SettingValue(typ.SelectOnRemove(), 'next'),
"Which tab to select when the focused tab is removed."),
('new-tab-position',
SettingValue(typ.NewTabPosition(), 'next'),
"How new tabs are positioned."),
('new-tab-position-explicit',
SettingValue(typ.NewTabPosition(), 'last'),
"How new tabs opened explicitly are positioned."),
('last-close',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('ignore', "Don't do anything."),
('blank', "Load a blank page."),
('startpage', "Load the start page."),
('default-page', "Load the default page."),
('close', "Close the window.")
)), 'ignore'),
"Behavior when the last tab is closed."),
('show',
SettingValue(
typ.String(valid_values=typ.ValidValues(
('always', "Always show the tab bar."),
('never', "Always hide the tab bar."),
('multiple', "Hide the tab bar if only one tab "
"is open."),
('switching', "Show the tab bar when switching "
"tabs.")
)), 'always'),
"When to show the tab bar"),
('show-switching-delay',
SettingValue(typ.Int(), '800'),
"Time to show the tab bar before hiding it when tabs->show is "
"set to 'switching'."),
('wrap',
SettingValue(typ.Bool(), 'true'),
"Whether to wrap when changing tabs."),
('movable',
SettingValue(typ.Bool(), 'true'),
"Whether tabs should be movable."),
('close-mouse-button',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('right', "Close tabs on right-click."),
('middle', "Close tabs on middle-click."),
('none', "Don't close tabs using the mouse.")
)), 'middle'),
"On which mouse button to close tabs."),
('position',
SettingValue(typ.Position(), 'top'),
"The position of the tab bar."),
('show-favicons',
SettingValue(typ.Bool(), 'true'),
"Whether to show favicons in the tab bar."),
('favicon-scale',
SettingValue(typ.Float(minval=0.0), '1.0'),
"Scale for favicons in the tab bar. The tab size is unchanged, "
"so big favicons also require extra `tabs->padding`."),
('width',
SettingValue(typ.PercOrInt(minperc=0, maxperc=100, minint=1),
'20%'),
"The width of the tab bar if it's vertical, in px or as "
"percentage of the window."),
('pinned-width',
SettingValue(typ.Int(minval=10),
'43'),
"The width for pinned tabs with a horizontal tabbar, in px."),
('indicator-width',
SettingValue(typ.Int(minval=0), '3'),
"Width of the progress indicator (0 to disable)."),
('tabs-are-windows',
SettingValue(typ.Bool(), 'false'),
"Whether to open windows instead of tabs."),
('title-format',
SettingValue(typ.FormatString(
fields=['perc', 'perc_raw', 'title', 'title_sep', 'index',
'id', 'scroll_pos', 'host', 'private'], none_ok=True),
'{index}: {title}'),
"The format to use for the tab title. The following placeholders "
"are defined:\n\n"
"* `{perc}`: The percentage as a string like `[10%]`.\n"
"* `{perc_raw}`: The raw percentage, e.g. `10`\n"
"* `{title}`: The title of the current web page\n"
"* `{title_sep}`: The string ` - ` if a title is set, empty "
"otherwise.\n"
"* `{index}`: The index of this tab.\n"
"* `{id}`: The internal tab ID of this tab.\n"
"* `{scroll_pos}`: The page scroll position.\n"
"* `{host}`: The host of the current web page.\n"
"* `{backend}`: Either 'webkit' or 'webengine'\n"
"* `{private}` : Indicates when private mode is enabled.\n"),
('title-format-pinned',
SettingValue(typ.FormatString(
fields=['perc', 'perc_raw', 'title', 'title_sep', 'index',
'id', 'scroll_pos', 'host', 'private'], none_ok=True),
'{index}'),
"The format to use for the tab title for pinned tabs. "
"The same placeholders like for title-format are defined."),
('title-alignment',
SettingValue(typ.TextAlignment(), 'left'),
"Alignment of the text inside of tabs"),
('mousewheel-tab-switching',
SettingValue(typ.Bool(), 'true'),
"Switch between tabs using the mouse wheel."),
('padding',
SettingValue(typ.Padding(), '0,0,5,5'),
"Padding for tabs (top, bottom, left, right)."),
('indicator-padding',
SettingValue(typ.Padding(), '2,2,0,4'),
"Padding for indicators (top, bottom, left, right)."),
readonly=readonly
)),
('storage', sect.KeyValue(
('download-directory',
SettingValue(typ.Directory(none_ok=True), ''),
"The directory to save downloads to. An empty value selects a "
"sensible os-specific default. Will expand environment "
"variables."),
('prompt-download-directory',
SettingValue(typ.Bool(), 'true'),
"Whether to prompt the user for the download location.\n"
"If set to false, 'download-directory' will be used."),
('remember-download-directory',
SettingValue(typ.Bool(), 'true'),
"Whether to remember the last used download directory."),
# Defaults from QWebSettings::QWebSettings() in
# qtwebkit/Source/WebKit/qt/Api/qwebsettings.cpp
('maximum-pages-in-cache',
SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '0',
backends=[usertypes.Backend.QtWebKit]),
"The maximum number of pages to hold in the global memory page "
"cache.\n\n"
"The Page Cache allows for a nicer user experience when "
"navigating forth or back to pages in the forward/back history, "
"by pausing and resuming up to _n_ pages.\n\n"
"For more information about the feature, please refer to: "
"http://webkit.org/blog/427/webkit-page-cache-i-the-basics/"),
('offline-web-application-cache',
SettingValue(typ.Bool(), 'true',
backends=[usertypes.Backend.QtWebKit]),
"Whether support for the HTML 5 web application cache feature is "
"enabled.\n\n"
"An application cache acts like an HTTP cache in some sense. For "
"documents that use the application cache via JavaScript, the "
"loader engine will first ask the application cache for the "
"contents, before hitting the network.\n\n"
"The feature is described in details at: "
"http://dev.w3.org/html5/spec/Overview.html#appcache"),
('local-storage',
SettingValue(typ.Bool(), 'true'),
"Whether support for HTML 5 local storage and Web SQL is "
"enabled."),
('cache-size',
SettingValue(typ.Int(none_ok=True, minval=0,
maxval=MAXVALS['int64']), ''),
"Size of the HTTP network cache. Empty to use the default "
"value."),
readonly=readonly
)),
('content', sect.KeyValue(
('allow-images',
SettingValue(typ.Bool(), 'true'),
"Whether images are automatically loaded in web pages."),
('allow-javascript',
SettingValue(typ.Bool(), 'true'),
"Enables or disables the running of JavaScript programs."),
('allow-plugins',
SettingValue(typ.Bool(), 'false'),
"Enables or disables plugins in Web pages.\n\n"
'Qt plugins with a mimetype such as "application/x-qt-plugin" '
"are not affected by this setting."),
('webgl',
SettingValue(typ.Bool(), 'true'),
"Enables or disables WebGL."),
('hyperlink-auditing',
SettingValue(typ.Bool(), 'false'),
"Enable or disable hyperlink auditing (<a ping>)."),
('geolocation',
SettingValue(typ.BoolAsk(), 'ask'),
"Allow websites to request geolocations."),
('notifications',
SettingValue(typ.BoolAsk(), 'ask'),
"Allow websites to show notifications."),
('media-capture',
SettingValue(typ.BoolAsk(), 'ask',
backends=[usertypes.Backend.QtWebEngine]),
"Allow websites to record audio/video."),
('javascript-can-open-windows-automatically',
SettingValue(typ.Bool(), 'false'),
"Whether JavaScript programs can open new windows without user "
"interaction."),
('javascript-can-close-windows',
SettingValue(typ.Bool(), 'false',
backends=[usertypes.Backend.QtWebKit]),
"Whether JavaScript programs can close windows."),
('javascript-can-access-clipboard',
SettingValue(typ.Bool(), 'false'),
"Whether JavaScript programs can read or write to the "
"clipboard.\nWith QtWebEngine, writing the clipboard as response "
"to a user interaction is always allowed."),
('ignore-javascript-prompt',
SettingValue(typ.Bool(), 'false'),
"Whether all javascript prompts should be ignored."),
('ignore-javascript-alert',
SettingValue(typ.Bool(), 'false'),
"Whether all javascript alerts should be ignored."),
('local-content-can-access-remote-urls',
SettingValue(typ.Bool(), 'false'),
"Whether locally loaded documents are allowed to access remote "
"urls."),
('local-content-can-access-file-urls',
SettingValue(typ.Bool(), 'true'),
"Whether locally loaded documents are allowed to access other "
"local urls."),
('cookies-accept',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('all', "Accept all cookies."),
('no-3rdparty', "Accept cookies from the same"
" origin only."),
('no-unknown-3rdparty', "Accept cookies from "
"the same origin only, unless a cookie is "
"already set for the domain."),
('never', "Don't accept cookies at all.")
)), 'no-3rdparty', backends=[usertypes.Backend.QtWebKit]),
"Control which cookies to accept."),
('cookies-store',
SettingValue(typ.Bool(), 'true'),
"Whether to store cookies. Note this option needs a restart with "
"QtWebEngine on Qt < 5.9."),
('host-block-lists',
SettingValue(
typ.List(typ.Url(), none_ok=True),
'https://www.malwaredomainlist.com/hostslist/hosts.txt,'
'http://someonewhocares.org/hosts/hosts,'
'http://winhelp2002.mvps.org/hosts.zip,'
'http://malwaredomains.lehigh.edu/files/justdomains.zip,'
'https://pgl.yoyo.org/adservers/serverlist.php?'
'hostformat=hosts&mimetype=plaintext'),
"List of URLs of lists which contain hosts to block.\n\n"
"The file can be in one of the following formats:\n\n"
"- An '/etc/hosts'-like file\n"
"- One host per line\n"
"- A zip-file of any of the above, with either only one file, or "
"a file named 'hosts' (with any extension)."),
('host-blocking-enabled',
SettingValue(typ.Bool(), 'true'),
"Whether host blocking is enabled."),
('host-blocking-whitelist',
SettingValue(typ.List(typ.String(), none_ok=True), 'piwik.org'),
"List of domains that should always be loaded, despite being "
"ad-blocked.\n\n"
"Domains may contain * and ? wildcards and are otherwise "
"required to exactly match the requested domain.\n\n"
"Local domains are always exempt from hostblocking."),
('enable-pdfjs', SettingValue(typ.Bool(), 'false'),
"Enable pdf.js to view PDF files in the browser.\n\n"
"Note that the files can still be downloaded by clicking"
" the download button in the pdf.js viewer."),
readonly=readonly
)),
('hints', sect.KeyValue(
('border',
SettingValue(typ.String(), '1px solid #E3BE23'),
"CSS border value for hints."),
('mode',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('number', "Use numeric hints. (In this mode you can "
"also type letters form the hinted element to filter "
"and reduce the number of elements that are hinted.)"),
('letter', "Use the chars in the hints -> "
"chars setting."),
('word', "Use hints words based on the html "
"elements and the extra words."),
)), 'letter'),
"Mode to use for hints."),
('chars',
SettingValue(typ.UniqueCharString(minlen=2, completions=[
('asdfghjkl', "Home row"),
('aoeuidnths', "Home row (Dvorak)"),
('abcdefghijklmnopqrstuvwxyz', "All letters"),
]), 'asdfghjkl'),
"Chars used for hint strings."),
('min-chars',
SettingValue(typ.Int(minval=1), '1'),
"Minimum number of chars used for hint strings."),
('scatter',
SettingValue(typ.Bool(), 'true'),
"Whether to scatter hint key chains (like Vimium) or not (like "
"dwb). Ignored for number hints."),
('uppercase',
SettingValue(typ.Bool(), 'false'),
"Make chars in hint strings uppercase."),
('dictionary',
SettingValue(typ.File(required=False), '/usr/share/dict/words'),
"The dictionary file to be used by the word hints."),
('auto-follow',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('always', "Auto-follow whenever there is only a single "
"hint on a page."),
('unique-match', "Auto-follow whenever there is a unique "
"non-empty match in either the hint string (word mode) "
"or filter (number mode)."),
('full-match', "Follow the hint when the user typed the "
"whole hint (letter, word or number mode) or the "
"element's text (only in number mode)."),
('never', "The user will always need to press Enter to "
"follow a hint."),
)), 'unique-match'),
"Controls when a hint can be automatically followed without the "
"user pressing Enter."),
('auto-follow-timeout',
SettingValue(typ.Int(), '0'),
"A timeout (in milliseconds) to inhibit normal-mode key bindings "
"after a successful auto-follow."),
('next-regexes',
SettingValue(typ.List(typ.Regex(flags=re.IGNORECASE)),
r'\bnext\b,\bmore\b,\bnewer\b,\b[>→≫]\b,\b(>>|»)\b,'
r'\bcontinue\b'),
"A comma-separated list of regexes to use for 'next' links."),
('prev-regexes',
SettingValue(typ.List(typ.Regex(flags=re.IGNORECASE)),
r'\bprev(ious)?\b,\bback\b,\bolder\b,\b[<←≪]\b,'
r'\b(<<|«)\b'),
"A comma-separated list of regexes to use for 'prev' links."),
('find-implementation',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('javascript', "Better but slower"),
('python', "Slightly worse but faster"),
)), 'python'),
"Which implementation to use to find elements to hint."),
('hide-unmatched-rapid-hints',
SettingValue(typ.Bool(), 'true'),
"Controls hiding unmatched hints in rapid mode."),
readonly=readonly
)),
('searchengines', sect.ValueList(
typ.SearchEngineName(), typ.SearchEngineUrl(),
('DEFAULT', 'https://duckduckgo.com/?q={}'),
readonly=readonly
)),
('aliases', sect.ValueList(
typ.String(forbidden=' '), typ.Command(),
readonly=readonly
)),
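    # Note: values in the sections below frequently reference other options via
    # ${option} interpolation, e.g. '${completion.fg}' or '${_monospace}'.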
('colors', sect.KeyValue(
('completion.fg',
SettingValue(typ.QtColor(), 'white'),
"Text color of the completion widget."),
('completion.bg',
SettingValue(typ.QssColor(), '#333333'),
"Background color of the completion widget."),
('completion.alternate-bg',
SettingValue(typ.QssColor(), '#444444'),
"Alternating background color of the completion widget."),
('completion.category.fg',
SettingValue(typ.QtColor(), 'white'),
"Foreground color of completion widget category headers."),
('completion.category.bg',
SettingValue(typ.QssColor(), 'qlineargradient(x1:0, y1:0, x2:0, '
'y2:1, stop:0 #888888, stop:1 #505050)'),
"Background color of the completion widget category headers."),
('completion.category.border.top',
SettingValue(typ.QssColor(), 'black'),
"Top border color of the completion widget category headers."),
('completion.category.border.bottom',
SettingValue(typ.QssColor(), '${completion.category.border.top}'),
"Bottom border color of the completion widget category headers."),
('completion.item.selected.fg',
SettingValue(typ.QtColor(), 'black'),
"Foreground color of the selected completion item."),
('completion.item.selected.bg',
SettingValue(typ.QssColor(), '#e8c000'),
"Background color of the selected completion item."),
('completion.item.selected.border.top',
SettingValue(typ.QssColor(), '#bbbb00'),
"Top border color of the completion widget category headers."),
('completion.item.selected.border.bottom',
SettingValue(
typ.QssColor(), '${completion.item.selected.border.top}'),
"Bottom border color of the selected completion item."),
('completion.match.fg',
SettingValue(typ.QssColor(), '#ff4444'),
"Foreground color of the matched text in the completion."),
('completion.scrollbar.fg',
SettingValue(typ.QssColor(), '${completion.fg}'),
"Color of the scrollbar handle in completion view."),
('completion.scrollbar.bg',
SettingValue(typ.QssColor(), '${completion.bg}'),
"Color of the scrollbar in completion view"),
('statusbar.fg',
SettingValue(typ.QssColor(), 'white'),
"Foreground color of the statusbar."),
('statusbar.bg',
SettingValue(typ.QssColor(), 'black'),
"Background color of the statusbar."),
('statusbar.fg.private',
SettingValue(typ.QssColor(), '${statusbar.fg}'),
"Foreground color of the statusbar in private browsing mode."),
('statusbar.bg.private',
SettingValue(typ.QssColor(), '#666666'),
"Background color of the statusbar in private browsing mode."),
('statusbar.fg.insert',
SettingValue(typ.QssColor(), '${statusbar.fg}'),
"Foreground color of the statusbar in insert mode."),
('statusbar.bg.insert',
SettingValue(typ.QssColor(), 'darkgreen'),
"Background color of the statusbar in insert mode."),
('statusbar.fg.command',
SettingValue(typ.QssColor(), '${statusbar.fg}'),
"Foreground color of the statusbar in command mode."),
('statusbar.bg.command',
SettingValue(typ.QssColor(), '${statusbar.bg}'),
"Background color of the statusbar in command mode."),
('statusbar.fg.command.private',
SettingValue(typ.QssColor(), '${statusbar.fg.private}'),
"Foreground color of the statusbar in private browsing + command "
"mode."),
('statusbar.bg.command.private',
SettingValue(typ.QssColor(), '${statusbar.bg.private}'),
"Background color of the statusbar in private browsing + command "
"mode."),
('statusbar.fg.caret',
SettingValue(typ.QssColor(), '${statusbar.fg}'),
"Foreground color of the statusbar in caret mode."),
('statusbar.bg.caret',
SettingValue(typ.QssColor(), 'purple'),
"Background color of the statusbar in caret mode."),
('statusbar.fg.caret-selection',
SettingValue(typ.QssColor(), '${statusbar.fg}'),
"Foreground color of the statusbar in caret mode with a "
"selection"),
('statusbar.bg.caret-selection',
SettingValue(typ.QssColor(), '#a12dff'),
"Background color of the statusbar in caret mode with a "
"selection"),
('statusbar.progress.bg',
SettingValue(typ.QssColor(), 'white'),
"Background color of the progress bar."),
('statusbar.url.fg',
SettingValue(typ.QssColor(), '${statusbar.fg}'),
"Default foreground color of the URL in the statusbar."),
('statusbar.url.fg.success',
SettingValue(typ.QssColor(), 'white'),
"Foreground color of the URL in the statusbar on successful "
"load (http)."),
('statusbar.url.fg.success.https',
SettingValue(typ.QssColor(), 'lime'),
"Foreground color of the URL in the statusbar on successful "
"load (https)."),
('statusbar.url.fg.error',
SettingValue(typ.QssColor(), 'orange'),
"Foreground color of the URL in the statusbar on error."),
('statusbar.url.fg.warn',
SettingValue(typ.QssColor(), 'yellow'),
"Foreground color of the URL in the statusbar when there's a "
"warning."),
('statusbar.url.fg.hover',
SettingValue(typ.QssColor(), 'aqua'),
"Foreground color of the URL in the statusbar for hovered "
"links."),
('tabs.fg.odd',
SettingValue(typ.QtColor(), 'white'),
"Foreground color of unselected odd tabs."),
('tabs.bg.odd',
SettingValue(typ.QtColor(), 'grey'),
"Background color of unselected odd tabs."),
('tabs.fg.even',
SettingValue(typ.QtColor(), 'white'),
"Foreground color of unselected even tabs."),
('tabs.bg.even',
SettingValue(typ.QtColor(), 'darkgrey'),
"Background color of unselected even tabs."),
('tabs.fg.selected.odd',
SettingValue(typ.QtColor(), 'white'),
"Foreground color of selected odd tabs."),
('tabs.bg.selected.odd',
SettingValue(typ.QtColor(), 'black'),
"Background color of selected odd tabs."),
('tabs.fg.selected.even',
SettingValue(typ.QtColor(), '${tabs.fg.selected.odd}'),
"Foreground color of selected even tabs."),
('tabs.bg.selected.even',
SettingValue(typ.QtColor(), '${tabs.bg.selected.odd}'),
"Background color of selected even tabs."),
('tabs.bg.bar',
SettingValue(typ.QtColor(), '#555555'),
"Background color of the tab bar."),
('tabs.indicator.start',
SettingValue(typ.QtColor(), '#0000aa'),
"Color gradient start for the tab indicator."),
('tabs.indicator.stop',
SettingValue(typ.QtColor(), '#00aa00'),
"Color gradient end for the tab indicator."),
('tabs.indicator.error',
SettingValue(typ.QtColor(), '#ff0000'),
"Color for the tab indicator on errors.."),
('tabs.indicator.system',
SettingValue(typ.ColorSystem(), 'rgb'),
"Color gradient interpolation system for the tab indicator."),
('hints.fg',
SettingValue(typ.QssColor(), 'black'),
"Font color for hints."),
('hints.bg',
SettingValue(typ.QssColor(), 'qlineargradient(x1:0, y1:0, x2:0, '
'y2:1, stop:0 rgba(255, 247, 133, 0.8), '
'stop:1 rgba(255, 197, 66, 0.8))'),
"Background color for hints. Note that you can use a `rgba(...)` "
"value for transparency."),
('hints.fg.match',
SettingValue(typ.QssColor(), 'green'),
"Font color for the matched part of hints."),
('downloads.bg.bar',
SettingValue(typ.QssColor(), 'black'),
"Background color for the download bar."),
('downloads.fg.start',
SettingValue(typ.QtColor(), 'white'),
"Color gradient start for download text."),
('downloads.bg.start',
SettingValue(typ.QtColor(), '#0000aa'),
"Color gradient start for download backgrounds."),
('downloads.fg.stop',
SettingValue(typ.QtColor(), '${downloads.fg.start}'),
"Color gradient end for download text."),
('downloads.bg.stop',
SettingValue(typ.QtColor(), '#00aa00'),
"Color gradient stop for download backgrounds."),
('downloads.fg.system',
SettingValue(typ.ColorSystem(), 'rgb'),
"Color gradient interpolation system for download text."),
('downloads.bg.system',
SettingValue(typ.ColorSystem(), 'rgb'),
"Color gradient interpolation system for download backgrounds."),
('downloads.fg.error',
SettingValue(typ.QtColor(), 'white'),
"Foreground color for downloads with errors."),
('downloads.bg.error',
SettingValue(typ.QtColor(), 'red'),
"Background color for downloads with errors."),
('webpage.bg',
SettingValue(typ.QtColor(none_ok=True), 'white'),
"Background color for webpages if unset (or empty to use the "
"theme's color)"),
('keyhint.fg',
SettingValue(typ.QssColor(), '#FFFFFF'),
"Text color for the keyhint widget."),
('keyhint.fg.suffix',
SettingValue(typ.CssColor(), '#FFFF00'),
"Highlight color for keys to complete the current keychain"),
('keyhint.bg',
SettingValue(typ.QssColor(), 'rgba(0, 0, 0, 80%)'),
"Background color of the keyhint widget."),
('messages.fg.error',
SettingValue(typ.QssColor(), 'white'),
"Foreground color of an error message."),
('messages.bg.error',
SettingValue(typ.QssColor(), 'red'),
"Background color of an error message."),
('messages.border.error',
SettingValue(typ.QssColor(), '#bb0000'),
"Border color of an error message."),
('messages.fg.warning',
SettingValue(typ.QssColor(), 'white'),
"Foreground color a warning message."),
('messages.bg.warning',
SettingValue(typ.QssColor(), 'darkorange'),
"Background color of a warning message."),
('messages.border.warning',
SettingValue(typ.QssColor(), '#d47300'),
"Border color of an error message."),
('messages.fg.info',
SettingValue(typ.QssColor(), 'white'),
"Foreground color an info message."),
('messages.bg.info',
SettingValue(typ.QssColor(), 'black'),
"Background color of an info message."),
('messages.border.info',
SettingValue(typ.QssColor(), '#333333'),
"Border color of an info message."),
('prompts.fg',
SettingValue(typ.QssColor(), 'white'),
"Foreground color for prompts."),
('prompts.bg',
SettingValue(typ.QssColor(), 'darkblue'),
"Background color for prompts."),
('prompts.selected.bg',
SettingValue(typ.QssColor(), '#308cc6'),
"Background color for the selected item in filename prompts."),
readonly=readonly
)),
('fonts', sect.KeyValue(
('_monospace',
SettingValue(typ.Font(), 'xos4 Terminus, Terminus, Monospace, '
'"DejaVu Sans Mono", Monaco, '
'"Bitstream Vera Sans Mono", "Andale Mono", '
'"Courier New", Courier, "Liberation Mono", '
'monospace, Fixed, Consolas, Terminal'),
"Default monospace fonts."),
('completion',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used in the completion widget."),
('completion.category',
SettingValue(typ.Font(), 'bold ${completion}'),
"Font used in the completion categories."),
('tabbar',
SettingValue(typ.QtFont(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used in the tab bar."),
('statusbar',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used in the statusbar."),
('downloads',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used for the downloadbar."),
('hints',
SettingValue(typ.Font(), 'bold 13px ${_monospace}'),
"Font used for the hints."),
('debug-console',
SettingValue(typ.QtFont(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used for the debugging console."),
('web-family-standard',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for standard fonts."),
('web-family-fixed',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for fixed fonts."),
('web-family-serif',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for serif fonts."),
('web-family-sans-serif',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for sans-serif fonts."),
('web-family-cursive',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for cursive fonts."),
('web-family-fantasy',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for fantasy fonts."),
# Defaults for web-size-* from WebEngineSettings::initDefaults in
# qtwebengine/src/core/web_engine_settings.cpp and
# QWebSettings::QWebSettings() in
# qtwebkit/Source/WebKit/qt/Api/qwebsettings.cpp
('web-size-minimum',
SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '0'),
"The hard minimum font size."),
# This is 0 as default on QtWebKit, and 6 on QtWebEngine - so let's
# just go for 6 here.
('web-size-minimum-logical',
SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '6'),
"The minimum logical font size that is applied when zooming "
"out."),
('web-size-default',
SettingValue(typ.Int(minval=1, maxval=MAXVALS['int']), '16'),
"The default font size for regular text."),
('web-size-default-fixed',
SettingValue(typ.Int(minval=1, maxval=MAXVALS['int']), '13'),
"The default font size for fixed-pitch text."),
('keyhint',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used in the keyhint widget."),
('messages.error',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used for error messages."),
('messages.warning',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used for warning messages."),
('messages.info',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used for info messages."),
('prompts',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' sans-serif'),
"Font used for prompts."),
readonly=readonly
)),
])
DATA = data(readonly=True)
KEY_FIRST_COMMENT = """
# vim: ft=conf
#
# In this config file, qutebrowser's key bindings are configured.
# The format looks like this:
#
# [keymode]
#
# command
#     keychain
#     keychain2
#     ...
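#
# For example, the default `open -t` binding in normal mode looks like this:
#
# [normal]
#
# open -t
#     ga
#     <Ctrl-T>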
#
# All blank lines and lines starting with '#' are ignored.
# Inline-comments are not permitted.
#
# keymode is a comma separated list of modes in which the key binding should be
# active. If keymode starts with !, the key binding is active in all modes
# except the listed modes.
#
# For special keys (can't be part of a keychain), enclose them in `<`...`>`.
# For modifiers, you can use either `-` or `+` as delimiters, and these names:
#
# * Control: `Control`, `Ctrl`
# * Meta: `Meta`, `Windows`, `Mod4`
# * Alt: `Alt`, `Mod1`
# * Shift: `Shift`
#
# For simple keys (no `<>`-signs), a capital letter means the key is pressed
# with Shift. For special keys (with `<>`-signs), you need to explicitly add
# `Shift-` to match a key pressed with shift.
#
# Note that default keybindings are always bound, and need to be explicitly
# unbound if you wish to remove them:
#
# <unbound>
#     keychain
#     keychain2
#     ...
"""
KEY_SECTION_DESC = {
'all': "Keybindings active in all modes.",
'normal': "Keybindings for normal mode.",
'insert': (
"Keybindings for insert mode.\n"
"Since normal keypresses are passed through, only special keys are "
"supported in this mode.\n"
"Useful hidden commands to map in this section:\n\n"
" * `open-editor`: Open a texteditor with the focused field.\n"
" * `paste-primary`: Paste primary selection at cursor position."),
'hint': (
"Keybindings for hint mode.\n"
"Since normal keypresses are passed through, only special keys are "
"supported in this mode.\n"
"Useful hidden commands to map in this section:\n\n"
" * `follow-hint`: Follow the currently selected hint."),
'passthrough': (
"Keybindings for passthrough mode.\n"
"Since normal keypresses are passed through, only special keys are "
"supported in this mode."),
'command': (
"Keybindings for command mode.\n"
"Since normal keypresses are passed through, only special keys are "
"supported in this mode.\n"
"Useful hidden commands to map in this section:\n\n"
" * `command-history-prev`: Switch to previous command in history.\n"
" * `command-history-next`: Switch to next command in history.\n"
" * `completion-item-focus`: Select another item in completion.\n"
" * `command-accept`: Execute the command currently in the "
"commandline."),
'prompt': (
"Keybindings for prompts in the status line.\n"
"You can bind normal keys in this mode, but they will be only active "
"when a yes/no-prompt is asked. For other prompt modes, you can only "
"bind special keys.\n"
"Useful hidden commands to map in this section:\n\n"
" * `prompt-accept`: Confirm the entered value.\n"
" * `prompt-accept yes`: Answer yes to a yes/no question.\n"
" * `prompt-accept no`: Answer no to a yes/no question."),
'caret': (
""),
}
# Keys which are similar to Return and should be bound by default where Return
# is bound.
RETURN_KEYS = ['<Return>', '<Ctrl-M>', '<Ctrl-J>', '<Shift-Return>', '<Enter>',
'<Shift-Enter>']
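# Default key bindings: maps a key mode (or '!mode', meaning all modes except
# 'mode') to an ordered mapping of command -> list of default keychains.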
KEY_DATA = collections.OrderedDict([
('!normal', collections.OrderedDict([
('leave-mode', ['<Escape>', '<Ctrl-[>']),
])),
('normal', collections.OrderedDict([
('clear-keychain ;; search ;; fullscreen --leave',
['<Escape>', '<Ctrl-[>']),
('set-cmd-text -s :open', ['o']),
('set-cmd-text :open {url:pretty}', ['go']),
('set-cmd-text -s :open -t', ['O']),
('set-cmd-text :open -t -i {url:pretty}', ['gO']),
('set-cmd-text -s :open -b', ['xo']),
('set-cmd-text :open -b -i {url:pretty}', ['xO']),
('set-cmd-text -s :open -w', ['wo']),
('set-cmd-text :open -w {url:pretty}', ['wO']),
('set-cmd-text /', ['/']),
('set-cmd-text ?', ['?']),
('set-cmd-text :', [':']),
('open -t', ['ga', '<Ctrl-T>']),
('open -w', ['<Ctrl-N>']),
('tab-close', ['d', '<Ctrl-W>']),
('tab-close -o', ['D']),
('tab-only', ['co']),
('tab-focus', ['T']),
('tab-move', ['gm']),
('tab-move -', ['gl']),
('tab-move +', ['gr']),
('tab-next', ['J', '<Ctrl-PgDown>']),
('tab-prev', ['K', '<Ctrl-PgUp>']),
('tab-clone', ['gC']),
('reload', ['r', '<F5>']),
('reload -f', ['R', '<Ctrl-F5>']),
('back', ['H', '<back>']),
('back -t', ['th']),
('back -w', ['wh']),
('forward', ['L', '<forward>']),
('forward -t', ['tl']),
('forward -w', ['wl']),
('fullscreen', ['<F11>']),
('hint', ['f']),
('hint all tab', ['F']),
('hint all window', ['wf']),
('hint all tab-bg', [';b']),
('hint all tab-fg', [';f']),
('hint all hover', [';h']),
('hint images', [';i']),
('hint images tab', [';I']),
('hint links fill :open {hint-url}', [';o']),
('hint links fill :open -t -i {hint-url}', [';O']),
('hint links yank', [';y']),
('hint links yank-primary', [';Y']),
('hint --rapid links tab-bg', [';r']),
('hint --rapid links window', [';R']),
('hint links download', [';d']),
('hint inputs', [';t']),
('scroll left', ['h']),
('scroll down', ['j']),
('scroll up', ['k']),
('scroll right', ['l']),
('undo', ['u', '<Ctrl-Shift-T>']),
('scroll-perc 0', ['gg']),
('scroll-perc', ['G']),
('search-next', ['n']),
('search-prev', ['N']),
('enter-mode insert', ['i']),
('enter-mode caret', ['v']),
('enter-mode set_mark', ['`']),
('enter-mode jump_mark', ["'"]),
('yank', ['yy']),
('yank -s', ['yY']),
('yank title', ['yt']),
('yank title -s', ['yT']),
('yank domain', ['yd']),
('yank domain -s', ['yD']),
('yank pretty-url', ['yp']),
('yank pretty-url -s', ['yP']),
('open -- {clipboard}', ['pp']),
('open -- {primary}', ['pP']),
('open -t -- {clipboard}', ['Pp']),
('open -t -- {primary}', ['PP']),
('open -w -- {clipboard}', ['wp']),
('open -w -- {primary}', ['wP']),
('quickmark-save', ['m']),
('set-cmd-text -s :quickmark-load', ['b']),
('set-cmd-text -s :quickmark-load -t', ['B']),
('set-cmd-text -s :quickmark-load -w', ['wb']),
('bookmark-add', ['M']),
('set-cmd-text -s :bookmark-load', ['gb']),
('set-cmd-text -s :bookmark-load -t', ['gB']),
('set-cmd-text -s :bookmark-load -w', ['wB']),
('save', ['sf']),
('set-cmd-text -s :set', ['ss']),
('set-cmd-text -s :set -t', ['sl']),
('set-cmd-text -s :bind', ['sk']),
('zoom-out', ['-']),
('zoom-in', ['+']),
('zoom', ['=']),
('navigate prev', ['[[']),
('navigate next', [']]']),
('navigate prev -t', ['{{']),
('navigate next -t', ['}}']),
('navigate up', ['gu']),
('navigate up -t', ['gU']),
('navigate increment', ['<Ctrl-A>']),
('navigate decrement', ['<Ctrl-X>']),
('inspector', ['wi']),
('download', ['gd']),
('download-cancel', ['ad']),
('download-clear', ['cd']),
('view-source', ['gf']),
('set-cmd-text -s :buffer', ['gt']),
('tab-focus last', ['<Ctrl-Tab>', '<Ctrl-6>', '<Ctrl-^>']),
('enter-mode passthrough', ['<Ctrl-V>']),
('quit', ['<Ctrl-Q>', 'ZQ']),
('wq', ['ZZ']),
('scroll-page 0 1', ['<Ctrl-F>']),
('scroll-page 0 -1', ['<Ctrl-B>']),
('scroll-page 0 0.5', ['<Ctrl-D>']),
('scroll-page 0 -0.5', ['<Ctrl-U>']),
('tab-focus 1', ['<Alt-1>', 'g0', 'g^']),
('tab-focus 2', ['<Alt-2>']),
('tab-focus 3', ['<Alt-3>']),
('tab-focus 4', ['<Alt-4>']),
('tab-focus 5', ['<Alt-5>']),
('tab-focus 6', ['<Alt-6>']),
('tab-focus 7', ['<Alt-7>']),
('tab-focus 8', ['<Alt-8>']),
('tab-focus -1', ['<Alt-9>', 'g$']),
('home', ['<Ctrl-h>']),
('stop', ['<Ctrl-s>']),
('print', ['<Ctrl-Alt-p>']),
('open qute://settings', ['Ss']),
('follow-selected', RETURN_KEYS),
('follow-selected -t', ['<Ctrl-Return>', '<Ctrl-Enter>']),
('repeat-command', ['.']),
('tab-pin', ['<Ctrl-p>']),
('record-macro', ['q']),
('run-macro', ['@']),
])),
('insert', collections.OrderedDict([
('open-editor', ['<Ctrl-E>']),
('insert-text {primary}', ['<Shift-Ins>']),
])),
('hint', collections.OrderedDict([
('follow-hint', RETURN_KEYS),
('hint --rapid links tab-bg', ['<Ctrl-R>']),
('hint links', ['<Ctrl-F>']),
('hint all tab-bg', ['<Ctrl-B>']),
])),
('passthrough', {}),
('command', collections.OrderedDict([
('command-history-prev', ['<Ctrl-P>']),
('command-history-next', ['<Ctrl-N>']),
('completion-item-focus prev', ['<Shift-Tab>', '<Up>']),
('completion-item-focus next', ['<Tab>', '<Down>']),
('completion-item-focus next-category', ['<Ctrl-Tab>']),
('completion-item-focus prev-category', ['<Ctrl-Shift-Tab>']),
('completion-item-del', ['<Ctrl-D>']),
('command-accept', RETURN_KEYS),
])),
('prompt', collections.OrderedDict([
('prompt-accept', RETURN_KEYS),
('prompt-accept yes', ['y']),
('prompt-accept no', ['n']),
('prompt-open-download', ['<Ctrl-X>']),
('prompt-item-focus prev', ['<Shift-Tab>', '<Up>']),
('prompt-item-focus next', ['<Tab>', '<Down>']),
])),
('command,prompt', collections.OrderedDict([
('rl-backward-char', ['<Ctrl-B>']),
('rl-forward-char', ['<Ctrl-F>']),
('rl-backward-word', ['<Alt-B>']),
('rl-forward-word', ['<Alt-F>']),
('rl-beginning-of-line', ['<Ctrl-A>']),
('rl-end-of-line', ['<Ctrl-E>']),
('rl-unix-line-discard', ['<Ctrl-U>']),
('rl-kill-line', ['<Ctrl-K>']),
('rl-kill-word', ['<Alt-D>']),
('rl-unix-word-rubout', ['<Ctrl-W>']),
('rl-backward-kill-word', ['<Alt-Backspace>']),
('rl-yank', ['<Ctrl-Y>']),
('rl-delete-char', ['<Ctrl-?>']),
('rl-backward-delete-char', ['<Ctrl-H>']),
])),
('caret', collections.OrderedDict([
('toggle-selection', ['v', '<Space>']),
('drop-selection', ['<Ctrl-Space>']),
('enter-mode normal', ['c']),
('move-to-next-line', ['j']),
('move-to-prev-line', ['k']),
('move-to-next-char', ['l']),
('move-to-prev-char', ['h']),
('move-to-end-of-word', ['e']),
('move-to-next-word', ['w']),
('move-to-prev-word', ['b']),
('move-to-start-of-next-block', [']']),
('move-to-start-of-prev-block', ['[']),
('move-to-end-of-next-block', ['}']),
('move-to-end-of-prev-block', ['{']),
('move-to-start-of-line', ['0']),
('move-to-end-of-line', ['$']),
('move-to-start-of-document', ['gg']),
('move-to-end-of-document', ['G']),
('yank selection -s', ['Y']),
('yank selection', ['y'] + RETURN_KEYS),
('scroll left', ['H']),
('scroll down', ['J']),
('scroll up', ['K']),
('scroll right', ['L']),
])),
])
# A list of (regex, replacement) tuples of changed key commands.
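# These substitutions are applied to commands read from existing keys.conf
# files, so bindings that still use old command names keep working.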
CHANGED_KEY_COMMANDS = [
(re.compile(r'^open -([twb]) about:blank$'), r'open -\1'),
(re.compile(r'^download-page$'), r'download'),
(re.compile(r'^cancel-download$'), r'download-cancel'),
(re.compile(r"""^search (''|"")$"""),
r'clear-keychain ;; search ;; fullscreen --leave'),
(re.compile(r'^search$'),
r'clear-keychain ;; search ;; fullscreen --leave'),
(re.compile(r'^clear-keychain ;; search$'),
r'clear-keychain ;; search ;; fullscreen --leave'),
(re.compile(r"""^set-cmd-text ['"](.*) ['"]$"""), r'set-cmd-text -s \1'),
(re.compile(r"""^set-cmd-text ['"](.*)['"]$"""), r'set-cmd-text \1'),
(re.compile(r"^hint links rapid$"), r'hint --rapid links tab-bg'),
(re.compile(r"^hint links rapid-win$"), r'hint --rapid links window'),
(re.compile(r'^scroll -50 0$'), r'scroll left'),
(re.compile(r'^scroll 0 50$'), r'scroll down'),
(re.compile(r'^scroll 0 -50$'), r'scroll up'),
(re.compile(r'^scroll 50 0$'), r'scroll right'),
(re.compile(r'^scroll ([-\d]+ [-\d]+)$'), r'scroll-px \1'),
(re.compile(r'^search *;; *clear-keychain$'),
r'clear-keychain ;; search ;; fullscreen --leave'),
(re.compile(r'^clear-keychain *;; *leave-mode$'), r'leave-mode'),
(re.compile(r'^download-remove --all$'), r'download-clear'),
(re.compile(r'^hint links fill "([^"]*)"$'), r'hint links fill \1'),
(re.compile(r'^yank -t(\S+)'), r'yank title -\1'),
(re.compile(r'^yank -t'), r'yank title'),
(re.compile(r'^yank -d(\S+)'), r'yank domain -\1'),
(re.compile(r'^yank -d'), r'yank domain'),
(re.compile(r'^yank -p(\S+)'), r'yank pretty-url -\1'),
(re.compile(r'^yank -p'), r'yank pretty-url'),
(re.compile(r'^yank-selected -p'), r'yank selection -s'),
(re.compile(r'^yank-selected'), r'yank selection'),
(re.compile(r'^paste$'), r'open -- {clipboard}'),
(re.compile(r'^paste -s$'), r'open -- {primary}'),
(re.compile(r'^paste -([twb])$'), r'open -\1 -- {clipboard}'),
(re.compile(r'^paste -([twb])s$'), r'open -\1 -- {primary}'),
(re.compile(r'^paste -s([twb])$'), r'open -\1 -- {primary}'),
(re.compile(r'^completion-item-next'), r'completion-item-focus next'),
(re.compile(r'^completion-item-prev'), r'completion-item-focus prev'),
(re.compile(r'^open {clipboard}$'), r'open -- {clipboard}'),
(re.compile(r'^open -([twb]) {clipboard}$'), r'open -\1 -- {clipboard}'),
(re.compile(r'^open {primary}$'), r'open -- {primary}'),
(re.compile(r'^open -([twb]) {primary}$'), r'open -\1 -- {primary}'),
(re.compile(r'^paste-primary$'), r'insert-text {primary}'),
(re.compile(r'^set-cmd-text -s :search$'), r'set-cmd-text /'),
(re.compile(r'^set-cmd-text -s :search -r$'), r'set-cmd-text ?'),
(re.compile(r'^set-cmd-text -s :$'), r'set-cmd-text :'),
(re.compile(r'^set-cmd-text -s :set keybind$'), r'set-cmd-text -s :bind'),
(re.compile(r'^prompt-yes$'), r'prompt-accept yes'),
(re.compile(r'^prompt-no$'), r'prompt-accept no'),
(re.compile(r'^tab-close -l$'), r'tab-close --prev'),
(re.compile(r'^tab-close --left$'), r'tab-close --prev'),
(re.compile(r'^tab-close -r$'), r'tab-close --next'),
(re.compile(r'^tab-close --right$'), r'tab-close --next'),
(re.compile(r'^tab-only -l$'), r'tab-only --prev'),
(re.compile(r'^tab-only --left$'), r'tab-only --prev'),
(re.compile(r'^tab-only -r$'), r'tab-only --next'),
(re.compile(r'^tab-only --right$'), r'tab-only --next'),
]
|
gpl-3.0
| -5,174,265,861,828,115,000
| 40.019491
| 79
| 0.522313
| false
| 4.154584
| false
| false
| false
|
cuauv/software
|
shm_tools/shmlog/util/logbenchmark.py
|
1
|
1703
|
#!/usr/bin/env python2
from shm_tools.shmlog.parser import LogParser
import argparse
import sys
from datetime import datetime
from time import sleep, mktime, time
import os
import struct
'''
Utility for benchmarking log file access
Jeff Heidel 2013
'''
GROUP = 0xFFFF
END_STBL = 0xFFFFFFFFFFFFFFFF
ap = argparse.ArgumentParser(description='Benchmark a log file playback.')
ap.add_argument('log file', type=str, help='filename of the log file')
args = vars(ap.parse_args())
filename = args['log file']
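# Open the file once purely as an existence/readability check; the actual
# parsing below is done by LogParser, which takes the filename.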
try:
login = open(filename, 'r')
except IOError:
print "Input filename " + filename + " not found."
sys.exit(0)
st = time()
try:
parse = LogParser(filename, parse_file_end=False)
except:
print "There was a problem with the log header."
sys.exit(0)
et = time() - st
print "Log header parse took %.2f ms" % (et * 1e3)
#Starting log benchmark
st = time()
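# Accumulators: total parse time, per-variable-type parse times, and the
# number of log slices parsed.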
total_time = 0
all_type_times = {}
slices = 0
#Continue parsing and copying until an error occurs!
while True:
logslice = parse.parse_one_slice(benchmark=True)
if logslice is None:
break
(dt, vc, parse_time, type_times) = logslice
total_time += parse_time
for k in type_times.keys():
if k not in all_type_times:
all_type_times[k] = type_times[k]
else:
all_type_times[k] = all_type_times[k] + type_times[k]
slices += 1
et = time() - st
print "Complete log parse took %.2f sec" % et
print "Parsed %d slices" % slices
print "%15s %11s" % ("Var Type", "Time")
print "-"*28
for (k,v) in zip(all_type_times.keys(), all_type_times.values()):
print "%15s: %7.2f sec" % (k.__name__, v)
print "-"*28
print "Log benchmark complete"
|
bsd-3-clause
| -3,123,619,079,962,118,700
| 21.706667
| 74
| 0.660012
| false
| 3.13628
| false
| false
| false
|
hayd/leanpub
|
leanpub/watcher.py
|
1
|
1473
|
from watchdog.observers import Observer
from watchdog.watchmedo import observe_with
from leanpub.shellcommandtrick import ShellCommandTrick
def pandoc_cmd(book):
"""Create the command to convert the files (listed in `book`)
into a pdf. This is wrapped with echos that the build has started and
is complete."""
with open(book + ".txt") as f:
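        # Book.txt / Sample.txt list one source file per line; join them into a
        # single space-separated argument string for the pandoc command line.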
return ('echo "Starting build of {book}.pdf"'
" && pandoc {files} "
"-f markdown-smart --table-of-contents --top-level-division=chapter -o {book}.pdf"
' && echo " {book}.pdf created."'
).format(book=book,
files=f.read().replace("\n", " "))
try:
MAKE_BOOK = pandoc_cmd("Book")
except IOError:
print("Can't find Book.txt in directory.")
exit(1)
try:
MAKE_SAMPLE = pandoc_cmd("Sample")
except IOError:
# Sample.txt is optional.
MAKE_SAMPLE = ""
# TODO watch images
PATTERNS = ["*.markdown", "*.md", "Book.txt", "Sample.txt"]
DIRECTORIES = "."
RECURSIVE = False
TIMEOUT = 1.0
def watch():
"""Watch for changes to the markdown files, and build the book and the
sample pdf upon each change."""
handler = ShellCommandTrick(shell_command=MAKE_BOOK + " && " + MAKE_SAMPLE,
patterns=PATTERNS,
terminate_on_event=True)
observer = Observer(timeout=TIMEOUT)
observe_with(observer, handler, DIRECTORIES, RECURSIVE)
|
mit
| -9,163,009,473,004,781,000
| 31.021739
| 98
| 0.60964
| false
| 3.64604
| false
| false
| false
|
bsmithgall/wexplorer
|
wexplorer/explorer/models.py
|
1
|
2184
|
# -*- coding: utf-8 -*-
from wexplorer.database import (
Column,
db,
Model
)
from wexplorer.extensions import bcrypt
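# Schema overview: a Company has one-to-many relationships to CompanyContact
# and Contract (via the company_id foreign key); FileUploadPassword stores a
# bcrypt hash of the upload password; LastUpdated holds a single timestamp,
# presumably the time of the last data import.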
class FileUploadPassword(Model):
__tablename__ = 'file_upload_password'
password = Column(db.String(128), nullable=False, primary_key=True)
def __init__(self, password):
if password:
self.password = bcrypt.generate_password_hash(password)
else:
raise Exception('File Upload Password must be supplied')
class LastUpdated(Model):
__tablename__ = 'last_updated'
last_updated = Column(db.DateTime, primary_key=True)
def __init__(self, last_updated):
self.last_updated = last_updated
class Company(Model):
__tablename__ = 'company'
row_id = Column(db.Integer)
company_id = Column(db.String(32), primary_key=True)
company = Column(db.String(255))
bus_type = Column(db.String(255))
company_contacts = db.relationship('CompanyContact', backref='company', lazy='joined')
contracts = db.relationship('Contract', backref='company', lazy='joined')
class CompanyContact(Model):
__tablename__ = 'company_contact'
row_id = Column(db.Integer)
company_contact_id = Column(db.String(32), primary_key=True)
contact_name = Column(db.String(255))
address_1 = Column(db.String(255))
address_2 = Column(db.String(255))
phone_number = Column(db.String(255))
fax_number = Column(db.String(255))
email = Column(db.String(255))
fin = Column(db.String(255))
company_id = Column(db.String(32), db.ForeignKey('company.company_id'))
class Contract(Model):
__tablename__ = 'contract'
row_id = Column(db.Integer)
contract_id = Column(db.String(32), primary_key=True)
description = Column(db.Text)
notes = Column(db.Text)
county = Column(db.String(255))
type_of_contract = Column(db.String(255))
pa = Column(db.String(255))
expiration = Column(db.DateTime)
contract_number = Column(db.String(255))
contract_sub_number = Column(db.Integer)
controller_number = Column(db.Integer)
commcode = Column(db.Integer)
company_id = Column(db.String(32), db.ForeignKey('company.company_id'))
|
bsd-3-clause
| 3,477,530,918,630,866,400
| 33.125
| 90
| 0.665293
| false
| 3.461173
| false
| false
| false
|
nelhage/taktician
|
python/scripts/allmoves.py
|
1
|
1177
|
import tak.ptn
import sqlite3
import os.path
import collections
import traceback
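# Count how often each move occurs across every game of the configured SIZE in
# the games database, then compare the observed moves against the moves
# generated by tak.enumerate_moves(SIZE).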
SIZE = 5
GAMES_DIR = os.path.join(os.path.dirname(__file__), "../../games")
DB = sqlite3.connect(os.path.join(GAMES_DIR, "games.db"))
cur = DB.cursor()
cur.execute('select day, id from games where size = ?', (SIZE,))
corpus = collections.Counter()
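# Walk the query results one row at a time, parse each game's PTN file and
# count every move; games that fail to parse are reported and skipped.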
while True:
row = cur.fetchone()
if not row:
break
day, id = None, None
try:
day, id = row
text = open(os.path.join(GAMES_DIR, day, str(id) + ".ptn")).read()
ptn = tak.ptn.PTN.parse(text)
for m in ptn.moves:
corpus[m] += 1
except Exception as e:
print("{0}/{1}: {2}".format(day, id, e))
traceback.print_exc()
continue
all_moves = set(tak.enumerate_moves(SIZE))
seen_moves = set(corpus.keys())
total = sum(corpus.values())
print("observed {0} unique moves".format(len(corpus)))
print("failed to generate: ", [tak.ptn.format_move(m) for m in seen_moves - all_moves])
print("did not observe: ", [tak.ptn.format_move(m) for m in all_moves - seen_moves])
for k, v in sorted(corpus.items(), key=lambda p:-p[1])[:50]:
print("{0:6.2f}% ({2:6d}) {1}".format(100*v/total, tak.ptn.format_move(k), v))
|
mit
| -6,197,537,536,854,740,000
| 27.707317
| 87
| 0.643161
| false
| 2.75
| false
| false
| false
|
Clinical-Genomics/scout
|
tests/adapter/mongo/test_user_handling.py
|
1
|
4378
|
from scout.build.user import build_user
def test_delete_user(adapter):
institutes = ["test-1", "test-2"]
## GIVEN an adapter with two users
for i, ins in enumerate(institutes, 1):
user_info = {
"email": "clark.kent{}@mail.com".format(i),
"id": "clke0" + str(i),
"location": "here",
"name": "Clark Kent",
"institutes": [ins],
}
user_obj = build_user(user_info)
user_obj = adapter.add_user(user_obj)
assert sum(1 for user in adapter.users()) == 2
## WHEN deleting a user
adapter.delete_user(email="clark.kent1@mail.com")
    ## THEN assert that there is only one user left
assert sum(1 for user in adapter.users()) == 1
def test_update_user(real_adapter):
adapter = real_adapter
## GIVEN an adapter with a user
user_info = {
"email": "clark.kent@mail.com",
"location": "here",
"name": "Clark Kent",
"institutes": ["test-1"],
}
user_obj = build_user(user_info)
user_obj = adapter.add_user(user_obj)
assert user_obj["institutes"] == ["test-1"]
## WHEN updating a user
user_info["institutes"].append("test-2")
user_obj = build_user(user_info)
adapter.update_user(user_obj)
    ## THEN assert that the user's institutes were updated
updated_user = adapter.user_collection.find_one()
assert set(updated_user["institutes"]) == set(["test-1", "test-2"])
def test_insert_user(adapter):
user_info = {
"email": "clark.kent@mail.com",
"location": "here",
"name": "Clark Kent",
"institutes": ["test-1"],
}
user_obj = build_user(user_info)
    ## GIVEN an empty adapter
assert adapter.user_collection.find_one() is None
## WHEN inserting a user
user_obj = adapter.add_user(user_obj)
## THEN assert that the user is in the database
assert adapter.user_collection.find_one()
def test_get_users_institute(adapter):
institutes = ["test-1", "test-2"]
## GIVEN an adapter with multiple users
for i, ins in enumerate(institutes, 1):
user_info = {
"email": "clark.kent{}@mail.com".format(i),
"id": "clke0" + str(i),
"location": "here",
"name": "Clark Kent",
"institutes": [ins],
}
user_obj = build_user(user_info)
user_obj = adapter.add_user(user_obj)
    ## WHEN fetching the users of one institute
users = adapter.users(institute=institutes[0])
    ## THEN assert that only that institute's user is fetched
assert sum(1 for user in users) == 1
def test_get_users(adapter):
institutes = ["test-1", "test-2"]
## GIVEN an adapter with multiple users
for i, ins in enumerate(institutes, 1):
user_info = {
"email": "clark.kent{}@mail.com".format(i),
"id": "clke0" + str(i),
"location": "here",
"name": "Clark Kent",
"institutes": [ins],
}
user_obj = build_user(user_info)
user_obj = adapter.add_user(user_obj)
## WHEN fetching all users
users = adapter.users()
## THEN assert that both users are fetched
assert sum(1 for user in users) == len(institutes)
def test_get_user_id(adapter):
user_info = {
"email": "clark.kent@mail.com",
"id": "clke01",
"location": "here",
"name": "Clark Kent",
"institutes": ["test-1"],
}
user_obj = build_user(user_info)
user_obj = adapter.add_user(user_obj)
## WHEN fetching the user with email
user = adapter.user(user_id="clke01")
## THEN assert that the user is fetched
assert user
def test_get_user_email(adapter):
user_info = {
"email": "clark.kent@mail.com",
"id": "clke01",
"location": "here",
"name": "Clark Kent",
"institutes": ["test-1"],
}
user_obj = build_user(user_info)
user_obj = adapter.add_user(user_obj)
## WHEN fetching the user with email
user = adapter.user(email="clark.kent@mail.com")
## THEN assert that the user is fetched
assert user
def test_get_nonexisting_user(adapter):
## GIVEN an empty adapter
assert adapter.user_collection.find_one() is None
## WHEN fetching a non existing user
user_obj = adapter.user(email="john.doe@mail.com")
## THEN assert the user is None
assert user_obj is None
|
bsd-3-clause
| 870,829,471,759,422,700
| 28.986301
| 71
| 0.584513
| false
| 3.319181
| true
| false
| false
|
diver-in-sky/django-threaded-multihost
|
threaded_multihost/test_app/settings.py
|
1
|
3063
|
# Django settings for mytest project.
from os.path import normpath, join, dirname
#ROOT : the django project root
ROOT = lambda *base : normpath(join(dirname(__file__), *base)).replace('\\','/')
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'threaded.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$%b)@1zxin5gh19vsj(@nn=hm-!31ejy4gyc*391@-(odwsf+u'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
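    # The middleware this test project exists to exercise.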
'threaded_multihost.middleware.ThreadLocalMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# ROOT('templates')
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'test_app.model_tests',
)
|
bsd-3-clause
| 6,086,891,033,615,040,000
| 34.206897
| 101
| 0.709435
| false
| 3.52069
| false
| false
| false
|
tsuru/healthcheck-as-a-service
|
tests/test_api.py
|
1
|
9751
|
# Copyright 2015 healthcheck-as-a-service authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
import json
import mock
import inspect
import os
from healthcheck import api, backends
from . import managers
class APITestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = api.app.test_client()
cls.manager = managers.FakeManager()
api.get_manager = lambda: cls.manager
def setUp(self):
self.manager.new("hc")
def tearDown(self):
self.manager.remove("hc")
def test_add_url(self):
resp = self.api.post(
"/resources/hc/url",
data=json.dumps({"url": "http://bla.com"})
)
self.assertEqual(201, resp.status_code)
self.assertIn(
{"url": "http://bla.com", "expected_string": None, "comment": ""},
self.manager.healthchecks["hc"]["urls"]
)
def test_add_url_expected_string(self):
resp = self.api.post(
"/resources/hc/url",
data=json.dumps({"url": "http://blabla.com",
"expected_string": "WORKING"})
)
self.assertEqual(201, resp.status_code)
self.assertIn(
{"url": "http://blabla.com", "expected_string": "WORKING", "comment": ""},
self.manager.healthchecks["hc"]["urls"]
)
def test_add_url_comment(self):
resp = self.api.post(
"/resources/hc/url",
data=json.dumps({"url": "http://blabla.com", "comment": "ble"})
)
self.assertEqual(201, resp.status_code)
self.assertIn(
{"url": "http://blabla.com", "expected_string": None, "comment": "ble"},
self.manager.healthchecks["hc"]["urls"]
)
def test_add_url_bad_request(self):
resp = self.api.post(
"/resources/hc/url",
)
self.assertEqual(400, resp.status_code)
self.assertEqual(resp.data, 'url is required')
resp = self.api.post(
"/resources/hc/url",
data=json.dumps({})
)
self.assertEqual(400, resp.status_code)
self.assertEqual(resp.data, 'url is required')
def test_list_urls(self):
self.manager.add_url("hc", "http://bla.com")
resp = self.api.get(
"/resources/hc/url",
)
self.assertEqual(200, resp.status_code)
self.assertIn(
"http://bla.com",
resp.data
)
def test_remove_url(self):
self.manager.add_url("hc", "http://bla.com/")
resp = self.api.delete("/resources/hc/url",
data=json.dumps({"url": "http://bla.com/"}))
self.assertEqual(204, resp.status_code)
self.assertNotIn(
"http://bla.com/",
self.manager.healthchecks["hc"]["urls"]
)
def test_remove_url_no_data(self):
resp = self.api.delete("/resources/hc/url")
self.assertEqual(400, resp.status_code)
self.assertEqual("url is required", resp.data)
def test_remove_url_invalid_data(self):
resp = self.api.delete("/resources/hc/url", data={})
self.assertEqual(400, resp.status_code)
self.assertEqual("url is required", resp.data)
def test_remove_url_invalid_url(self):
resp = self.api.delete("/resources/hc/url",
data=json.dumps({"url": "http://url-not-exist.com/"}))
self.assertEqual(404, resp.status_code)
self.assertEqual("URL not found.", resp.data)
def test_add_watcher(self):
resp = self.api.post(
"/resources/hc/watcher",
data=json.dumps({"watcher": "watcher@watcher.com"})
)
self.assertEqual(201, resp.status_code)
self.assertIn(
"watcher@watcher.com",
self.manager.healthchecks["hc"]["users"]
)
def test_add_watcher_with_password(self):
resp = self.api.post(
"/resources/hc/watcher",
data=json.dumps({
"watcher": "watcher@watcher.com",
"password": "teste",
})
)
self.assertEqual(201, resp.status_code)
self.assertIn(
"watcher@watcher.com",
self.manager.healthchecks["hc"]["users"]
)
def test_add_watcher_bad_request(self):
resp = self.api.post("/resources/hc/watcher")
self.assertEqual(400, resp.status_code)
self.assertEqual(resp.data, "watcher is required")
resp = self.api.post("/resources/hc/watcher", data=json.dumps({}))
self.assertEqual(400, resp.status_code)
self.assertEqual(resp.data, "watcher is required")
def test_list_watchers(self):
self.manager.add_watcher("hc", "test@test.com")
resp = self.api.get(
"/resources/hc/watcher",
)
self.assertEqual(200, resp.status_code)
self.assertIn(
"test@test.com",
resp.data
)
def test_new(self):
resp = self.api.post(
"/resources",
data={"name": "other"}
)
self.assertEqual(201, resp.status_code)
self.assertIn("other", self.manager.healthchecks)
def test_bind_unit(self):
resp = self.api.post("/resources/name/bind")
self.assertEqual(201, resp.status_code)
def test_bind_app(self):
resp = self.api.post("/resources/name/bind-app")
self.assertEqual(200, resp.status_code)
def test_unbind_unit(self):
resp = self.api.delete("/resources/name/bind")
self.assertEqual(200, resp.status_code)
def test_unbind_app(self):
resp = self.api.delete("/resources/name/bind-app")
self.assertEqual(200, resp.status_code)
def test_remove(self):
self.manager.new("blabla")
resp = self.api.delete("/resources/blabla")
self.assertEqual(204, resp.status_code)
self.assertNotIn("blabla", self.manager.healthchecks)
def test_remove_watcher_compat(self):
self.manager.add_watcher("hc", "watcher@watcher.com")
resp = self.api.delete("/resources/hc/XanythingX/watcher/watcher@watcher.com")
self.assertEqual(204, resp.status_code)
self.assertNotIn(
"watcher@watcher.com",
self.manager.healthchecks["hc"]["users"]
)
def test_remove_watcher(self):
self.manager.add_watcher("hc", "watcher@watcher.com")
resp = self.api.delete("/resources/hc/watcher/watcher@watcher.com")
self.assertEqual(204, resp.status_code)
self.assertNotIn(
"watcher@watcher.com",
self.manager.healthchecks["hc"]["users"]
)
def test_plugin(self):
resp = self.api.get("/plugin")
self.assertEqual(200, resp.status_code)
from healthcheck import plugin
expected_source = inspect.getsource(plugin)
self.assertEqual(expected_source, resp.data)
def test_add_group(self):
resp = self.api.post(
"/resources/hc/groups",
data=json.dumps({"group": "mygroup"})
)
self.assertEqual(201, resp.status_code)
self.assertIn(
"mygroup",
self.manager.healthchecks["hc"]["host_groups"]
)
def test_add_group_bad_request(self):
resp = self.api.post("/resources/hc/groups")
self.assertEqual(400, resp.status_code)
self.assertEqual(resp.data, "group is required")
resp = self.api.post("/resources/hc/groups", data=json.dumps({}))
self.assertEqual(400, resp.status_code)
self.assertEqual(resp.data, "group is required")
def test_list_service_groups(self):
resp = self.api.get(
"/resources/hc/servicegroups",
)
self.assertEqual(200, resp.status_code)
self.assertIn(
"mygroup",
resp.data
)
def test_list_service_groups_keyword(self):
resp = self.api.get(
"/resources/hc/servicegroups?keyword=my",
)
self.assertEqual(200, resp.status_code)
self.assertIn(
"mygroup",
resp.data
)
self.assertNotIn(
"anothergroup",
resp.data
)
def test_list_groups(self):
self.manager.add_group("hc", "mygroup")
resp = self.api.get(
"/resources/hc/groups",
)
self.assertEqual(200, resp.status_code)
self.assertIn(
"mygroup",
resp.data
)
def test_remove_group(self):
self.manager.add_group("hc", "mygroup")
resp = self.api.delete("/resources/hc/groups",
data=json.dumps({"group": "mygroup"}))
self.assertEqual(204, resp.status_code)
self.assertNotIn(
"mygroup",
self.manager.healthchecks["hc"]["host_groups"]
)
class GetManagerTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
reload(api)
@mock.patch("pyzabbix.ZabbixAPI")
def test_get_manager(self, zabbix_mock):
os.environ["ZABBIX_URL"] = ""
os.environ["ZABBIX_USER"] = ""
os.environ["ZABBIX_PASSWORD"] = ""
os.environ["ZABBIX_HOST"] = ""
os.environ["ZABBIX_HOST_GROUP"] = ""
manager = api.get_manager()
self.assertIsInstance(manager, backends.Zabbix)
@mock.patch("healthcheck.backends.Zabbix")
def test_get_manager_that_does_not_exist(self, zabbix_mock):
os.environ["API_MANAGER"] = "doesnotexist"
with self.assertRaises(ValueError):
api.get_manager()
|
bsd-3-clause
| -8,438,007,150,804,135,000
| 31.503333
| 86
| 0.56948
| false
| 3.777993
| true
| false
| false
|
smartsheet-platform/smartsheet-python-sdk
|
smartsheet/models/contact.py
|
1
|
2222
|
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2018 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from ..types import *
from ..util import serialize
from ..util import deserialize
class Contact(object):
"""Smartsheet Contact data model."""
def __init__(self, props=None, base_obj=None):
"""Initialize the Contact model."""
self._base = None
if base_obj is not None:
self._base = base_obj
self._email = String()
self._id_ = String()
self._name = String()
if props:
deserialize(self, props)
# requests package Response object
self.request_response = None
self.__initialized = True
def __getattr__(self, key):
if key == 'id':
return self.id_
else:
raise AttributeError(key)
def __setattr__(self, key, value):
if key == 'id':
self.id_ = value
else:
super(Contact, self).__setattr__(key, value)
@property
def email(self):
return self._email.value
@email.setter
def email(self, value):
self._email.value = value
@property
def id_(self):
return self._id_.value
@id_.setter
def id_(self, value):
self._id_.value = value
@property
def name(self):
return self._name.value
@name.setter
def name(self, value):
self._name.value = value
def to_dict(self):
return serialize(self)
def to_json(self):
return json.dumps(self.to_dict())
def __str__(self):
return self.to_json()
|
apache-2.0
| -6,362,989,200,886,816,000
| 23.966292
| 75
| 0.613411
| false
| 3.982079
| false
| false
| false
|
originaltebas/chmembers
|
migrations/versions/f7f3dbae07bb_.py
|
1
|
1302
|
"""empty message
Revision ID: f7f3dbae07bb
Revises: 1e4e2ceb943c
Create Date: 2019-06-05 17:52:42.855915
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'f7f3dbae07bb'
down_revision = '1e4e2ceb943c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('asistencias', 'asistio',
existing_type=mysql.TINYINT(display_width=1),
type_=sa.Boolean(),
existing_nullable=False)
op.alter_column('miembros', 'hoja_firmada',
existing_type=mysql.TINYINT(display_width=1),
type_=sa.Boolean(),
existing_nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('miembros', 'hoja_firmada',
existing_type=sa.Boolean(),
type_=mysql.TINYINT(display_width=1),
existing_nullable=True)
op.alter_column('asistencias', 'asistio',
existing_type=sa.Boolean(),
type_=mysql.TINYINT(display_width=1),
existing_nullable=False)
# ### end Alembic commands ###
|
mit
| -7,327,431,518,141,573,000
| 30
| 65
| 0.62212
| false
| 3.538043
| false
| false
| false
|
thirdkey-solutions/multisig-recovery
|
multisigrecovery/commands.py
|
1
|
5846
|
from multisigcore.providers.insight import InsightBatchService
from multisigcore.hierarchy import MasterKey
from .branch import Branch, AccountPubkeys
from .recovery import CachedRecovery
from .batch import Batch
import json
from pycoin.encoding import EncodingError
try:
from termcolor import colored
except ImportError:
	def colored(text, color=None): return text  # fallback: return the text unchanged when termcolor is unavailable
__all__ = ['address', 'create', 'validate', 'cosign', 'broadcast', 'ScriptInputError']
class ScriptInputError(Exception):
pass
def __get_insight(url):
insight = InsightBatchService(url)
try:
insight.get_blockchain_tip()
except Exception:
raise ScriptInputError('Insight node at %s not reachable' % url)
return insight
def __parse_key_sources(key_sources_string, register=None):
try:
strings = key_sources_string.split(',')
return [AccountPubkeys.parse_string_to_account_key_source(string, register_oracle_accounts_file=register) for string in strings]
except ValueError, err:
raise ScriptInputError(err.message)
def __get_template(string):
return getattr(Branch, '%s_account' % string)
def __check_source_strings(args):
def check_cc_last(sources_str):
for source_str in sources_str.split(',')[:-1]:
if 'digitaloracle' in source_str:
raise ScriptInputError('CryptoCorp API always has to be the last account key source\nChange sources order in --origin or --destination')
if args.origin != args.destination and 'digitaloracle' in args.destination and not args.register:
raise ScriptInputError('CryptoCorp API in destination branch but missing --register\nUse --register samples/account-registrations.json')
check_cc_last(args.destination)
check_cc_last(args.origin)
def __add_known_accounts(cached_recovery, known_accounts_file):
with open(known_accounts_file) as fp:
known_accounts = json.load(fp)
for account_number, indexes in known_accounts.items():
if indexes is not None and 'external_leafs' in indexes and 'internal_leafs' in indexes:
cached_recovery.add_known_account(account_number, external_leafs=indexes['external_leafs'], internal_leafs=indexes['internal_leafs'])
else:
cached_recovery.add_known_account(account_number)
############ create, cosign, broadcast methods below ###########################################
def address(args):
	"""Return the address at the specified path in a branch. Used to manually cross-check that you are working on the correct branch."""
#insight = __get_insight(args.insight)
origin_key_sources = __parse_key_sources(args.origin)
origin_branch = Branch(origin_key_sources, account_template=__get_template(args.origin_template), provider=None)
path = args.path.split('/')
if len(path) != 3 or sum([number.isdigit() for number in path]) != 3:
print "! --path must be in format 0/0/0, digits only"
else:
path = [int(digit) for digit in path]
account = origin_branch.account(int(path[0]))
print "Account %s, address %s/%s: %s" % (path[0], path[1], path[2], account.address(path[2], change=bool(path[1])))
def create(args):
insight = __get_insight(args.insight)
__check_source_strings(args)
# setup
origin_key_sources = __parse_key_sources(args.origin)
origin_branch = Branch(origin_key_sources, account_template=__get_template(args.origin_template), provider=insight)
destination_key_sources = __parse_key_sources(args.destination, register=args.register)
destination_branch = Branch(destination_key_sources, account_template=__get_template(args.destination_template), provider=insight)
cached_recovery = CachedRecovery(origin_branch, destination_branch, provider=insight)
if args.accounts:
__add_known_accounts(cached_recovery, args.accounts)
# recovery
cached_recovery.recover_origin_accounts()
cached_recovery.recover_destination_accounts()
cached_recovery.create_and_sign_txs()
print "Total to recover in this branch: %d" % cached_recovery.total_to_recover
if cached_recovery.total_to_recover:
cached_recovery.export_to_batch(args.save)
def validate(args):
try:
insight = __get_insight(args.insight)
except ScriptInputError:
raw_input("Insight node at " + args.insight + " not reachable. You can start this script again with --insight SOME_URL. Hit ENTER to continue with offline validation, or CTRL+Z to exit...")
insight = None
print ""
try:
batch = Batch.from_file(args.load)
batch.validate(provider=insight)
error = None
except ValueError as err:
print "Validation failed", err
error = err
print " ____"
print " |"
print " | Valid : ", "False, with error: " + str(error) if error else 'True'
print " | Transactions : ", len(batch.batchable_txs)
print " | Merkle root (calc) : ", batch.build_merkle_root()
print " | Merkle root (header) : ", batch.merkle_root
print " | Total out : ", batch.total_out, "satoshi", "-",batch.total_out/(100.0 * 10**6), "BTC"
if not error and insight:
print " | Total in : ", batch.total_in, "satoshi", "-",batch.total_in/(100.0 * 10**6), "BTC"
print " | Total fee : ", batch.total_fee, "satoshi", "-",batch.total_fee/(100.0 * 10**6), "BTC"
print " | Fee Percent : ", batch.total_fee * 100.00 / batch.total_out
print " |____"
print ""
def cosign(args):
try:
backup_mpk = MasterKey.from_key(args.private)
except EncodingError:
backup_mpk = MasterKey.from_seed_hex(args.private)
batch = Batch.from_file(args.load)
batch.validate() # todo - validation
original_merkle_root = batch.merkle_root
batch.sign(master_private_key=backup_mpk)
if batch.merkle_root != original_merkle_root:
batch.to_file(args.save)
else:
print "! All signatures failed: wrong private key used, or malformed batch"
def broadcast(args):
insight = __get_insight(args.insight)
batch = Batch.from_file(args.load)
batch.validate() # todo - validation
batch.broadcast(provider=insight)
|
bsd-2-clause
| 2,622,204,806,939,911,700
| 37.715232
| 191
| 0.712624
| false
| 3.173724
| false
| false
| false
|
miniworld-project/miniworld_core
|
miniworld/model/spatial/Roads.py
|
1
|
4215
|
# encoding: utf-8
from miniworld.model.singletons.Singletons import singletons
from collections import OrderedDict
import geojson
from .Road import Road
__author__ = "Patrick Lampe"
__email__ = "uni at lampep.de"
class Roads:
"""
Attributes
----------
list_of_roads : list
feature_coll_roads : FeatureCollection
geo_json : geojson
list_of_roads_with_quality_more_or_equal_than_one_for_car : list
list_of_roads_with_quality_more_or_equal_than_one_for_bike : list
"""
def __init__(self):
cursor = singletons.spatial_singleton.get_connection_to_database().cursor()
cursor.execute("SELECT source, target, car_rev, car, bike_rev, bike, foot FROM edges")
self.list_of_roads = [self.__convert_sql_line_to_road(line) for line in cursor.fetchall()]
self.feature_coll_roads = geojson.FeatureCollection(
[self.__get_geo_json_object_for_road(road) for road in self.list_of_roads])
self.geo_json = geojson.dumps(self.feature_coll_roads)
self.list_of_roads_with_quality_more_or_equal_than_one_for_car = [road for road in self.list_of_roads if
road.has_more_or_equal_qualitaty_for_car_then(
1)]
self.list_of_roads_with_quality_more_or_equal_than_one_for_bike = [road for road in self.list_of_roads if
road.has_more_or_equal_qualitaty_for_bike_then(
1)]
def get_geo_json(self):
"""
Returns
-------
geo_json
of all existing roads
"""
return self.geo_json
def get_list_of_roads_with_quality_more_or_equal_than_x_for_car(self, quality):
"""
Parameters
----------
quality : int
Returns
-------
list
"""
return [road for road in self.list_of_roads if road.has_more_or_equal_qualitaty_for_car_then(quality)]
def get_list_of_roads_with_quality_more_or_equal_than_x_for_bike(self, quality):
"""
Parameters
----------
quality : int
Returns
-------
list
"""
return [road for road in self.list_of_roads if road.has_more_or_equal_qualitaty_for_bike_then(quality)]
def get_list_of_next_roads_with_quality_restriction_for_cars(self, end_point, quality):
"""
Parameters
----------
        end_point : MapNode
        quality : int
Returns
-------
list
"""
return [road for road in self.list_of_roads if
road.is_road_direct_rechable_from_given_point_with_quality_restrictions_for_cars(end_point, quality)]
def get_list_of_next_roads_with_quality_restriction_for_bike(self, end_point, quality):
"""
Parameters
----------
        end_point : MapNode
        quality : int
Returns
-------
list
"""
return [road for road in self.list_of_roads if
road.is_road_direct_rechable_from_given_point_with_quality_restrictions_for_bike(end_point, quality)]
def __convert_sql_line_to_road(self, line):
return Road(line[0], line[1], line[2], line[3], line[4], line[5], line[6])
def __get_geo_json_object_for_road(self, road):
source = road.get_start_point()
target = road.get_end_point()
quality = road.get_car_quality()
return OrderedDict(
type="Feature",
geometry=OrderedDict(
type="LineString",
coordinates=[[float(source.get_lon()), float(source.get_lat())],
[float(target.get_lon()), float(target.get_lat())]]
),
properties=OrderedDict(
type=str(quality),
name="Coors Field",
amenity="Baseball Stadium"
)
)
|
mit
| 180,887,287,657,060,220
| 35.025641
| 122
| 0.515777
| false
| 3.817935
| false
| false
| false
|
laurybueno/MoniBus
|
mapa/urls.py
|
1
|
1112
|
"""mapa URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers
from cronotacografo.views import RegistroViewSet
router = routers.SimpleRouter()
router.register(r'registro', RegistroViewSet)
urlpatterns = [
url(r'^admin/', admin.site.urls),
    # API endpoints
url(r'^api/', include(router.urls)),
    # API authentication
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
|
agpl-3.0
| -3,757,937,932,243,510,300
| 31.647059
| 82
| 0.70991
| false
| 3.490566
| false
| false
| false
|
Linkid/fofix
|
fofix/core/Mod.py
|
1
|
2968
|
#####################################################################
# -*- coding: utf-8 -*- #
# #
# Frets on Fire #
# Copyright (C) 2006 Sami Kyöstilä                                  #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
import logging
import os
from fofix.core.Language import _
from fofix.core import Config
from fofix.core import Theme
log = logging.getLogger(__name__)
def _getModPath(engine):
return engine.resource.fileName("mods")
def init(engine):
# define configuration keys for all available mods
for m in getAvailableMods(engine):
Config.define("mods", "mod_" + m, bool, False, text = m, options = {False: _("Off"), True: _("On")})
# init all active mods
for m in getActiveMods(engine):
activateMod(engine, m)
def getAvailableMods(engine):
modPath = _getModPath(engine)
try:
dirList = os.listdir(modPath)
except OSError:
log.warning("Could not find mods directory")
return []
return [m for m in dirList if os.path.isdir(os.path.join(modPath, m)) and not m.startswith(".")]
def getActiveMods(engine):
mods = []
for mod in getAvailableMods(engine):
if engine.config.get("mods", "mod_" + mod):
mods.append(mod)
mods.sort()
return mods
def activateMod(engine, modName):
modPath = _getModPath(engine)
m = os.path.join(modPath, modName)
t = os.path.join(m, "theme.ini")
if os.path.isdir(m):
engine.resource.addDataPath(m)
if os.path.isfile(t):
theme = Config.load(t)
Theme.open(theme)
def deactivateMod(engine, modName):
modPath = _getModPath(engine)
m = os.path.join(modPath, modName)
engine.resource.removeDataPath(m)
|
gpl-2.0
| 1,096,748,204,278,025,300
| 37.545455
| 109
| 0.523248
| false
| 4.276657
| false
| false
| false
|
Urinx/Project_Euler_Answers
|
128.py
|
1
|
1330
|
#!/usr/bin/env python
#coding:utf-8
"""
Hexagonal tile differences
A hexagonal tile with number 1 is surrounded by a ring of six hexagonal tiles, starting at "12 o'clock" and numbering the tiles 2 to 7 in an anti-clockwise direction.
New rings are added in the same fashion, with the next rings being numbered 8 to 19, 20 to 37, 38 to 61, and so on. The diagram below shows the first three rings.
By finding the difference between tile n and each of its six neighbours we shall define PD(n) to be the number of those differences which are prime.
For example, working clockwise around tile 8 the differences are 12, 29, 11, 6, 1, and 13. So PD(8) = 3.
In the same way, the differences around tile 17 are 1, 17, 16, 1, 11, and 10, hence PD(17) = 2.
It can be shown that the maximum value of PD(n) is 3.
If all of the tiles for which PD(n) = 3 are listed in ascending order to form a sequence, the 10th tile would be 271.
Find the 2000th tile in this sequence.
"""
from projecteuler import is_prime
def tiles(L=2000):
n, c = 1, 1
while c <= L:
r = 6 * n
if is_prime(r-1):
if is_prime(r+1) and is_prime(2*r+5): c += 1
if is_prime(r+5) and is_prime(2*r-7): c += 1
n += 1
return n-1
n = tiles()
print 3*n*(n - 1) + 2 if is_prime(6*n+1) else 3*n*(n + 1) + 1
# 14516824220
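# Added worked example (not part of the original solution): the problem
# statement above quotes the differences around tile 8 as 12, 29, 11, 6, 1
# and 13, of which 29, 11 and 13 are prime, giving PD(8) = 3.  The check
# below verifies that arithmetic with plain trial division instead of
# projecteuler.is_prime.
def _trial_division_prime(m):
    if m < 2:
        return False
    return all(m % d for d in range(2, int(m ** 0.5) + 1))
assert sum(1 for d in (12, 29, 11, 6, 1, 13) if _trial_division_prime(d)) == 3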
|
gpl-2.0
| -5,135,474,069,018,303,000
| 40.59375
| 166
| 0.67218
| false
| 3.03653
| false
| false
| false
|
akx/shoop
|
shoop/core/models/_order_lines.py
|
1
|
5821
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals, with_statement
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from enumfields import Enum, EnumIntegerField
from jsonfield import JSONField
from shoop.core.fields import MoneyValueField, QuantityField, UnsavedForeignKey
from shoop.core.pricing import Priceful
from shoop.core.taxing import LineTax
from shoop.utils.money import Money
from shoop.utils.properties import MoneyProperty, MoneyPropped, PriceProperty
from ._base import ShoopModel
class OrderLineType(Enum):
PRODUCT = 1
SHIPPING = 2
PAYMENT = 3
DISCOUNT = 4
OTHER = 5
class Labels:
PRODUCT = _('product')
SHIPPING = _('shipping')
PAYMENT = _('payment')
DISCOUNT = _('discount')
OTHER = _('other')
class OrderLineManager(models.Manager):
def products(self): # pragma: no cover
return self.filter(type=OrderLineType.PRODUCT)
def shipping(self): # pragma: no cover
return self.filter(type=OrderLineType.SHIPPING)
def payment(self): # pragma: no cover
return self.filter(type=OrderLineType.PAYMENT)
def discounts(self):
return self.filter(type=OrderLineType.DISCOUNT)
def other(self): # pragma: no cover
return self.filter(type=OrderLineType.OTHER)
@python_2_unicode_compatible
class OrderLine(MoneyPropped, models.Model, Priceful):
order = UnsavedForeignKey("Order", related_name='lines', on_delete=models.PROTECT, verbose_name=_('order'))
product = UnsavedForeignKey(
"Product", blank=True, null=True, related_name="order_lines",
on_delete=models.PROTECT, verbose_name=_('product')
)
supplier = UnsavedForeignKey(
"Supplier", blank=True, null=True, related_name="order_lines",
on_delete=models.PROTECT, verbose_name=_('supplier')
)
parent_line = UnsavedForeignKey(
"self", related_name="child_lines", blank=True, null=True,
on_delete=models.PROTECT, verbose_name=_('parent line')
)
ordering = models.IntegerField(default=0, verbose_name=_('ordering'))
type = EnumIntegerField(OrderLineType, default=OrderLineType.PRODUCT, verbose_name=_('line type'))
sku = models.CharField(max_length=48, blank=True, verbose_name=_('line SKU'))
text = models.CharField(max_length=256, verbose_name=_('line text'))
accounting_identifier = models.CharField(max_length=32, blank=True, verbose_name=_('accounting identifier'))
require_verification = models.BooleanField(default=False, verbose_name=_('require verification'))
verified = models.BooleanField(default=False, verbose_name=_('verified'))
extra_data = JSONField(blank=True, null=True, verbose_name=_('extra data'))
# The following fields govern calculation of the prices
quantity = QuantityField(verbose_name=_('quantity'), default=1)
base_unit_price = PriceProperty('base_unit_price_value', 'order.currency', 'order.prices_include_tax')
discount_amount = PriceProperty('discount_amount_value', 'order.currency', 'order.prices_include_tax')
base_unit_price_value = MoneyValueField(verbose_name=_('unit price amount (undiscounted)'), default=0)
discount_amount_value = MoneyValueField(verbose_name=_('total amount of discount'), default=0)
objects = OrderLineManager()
class Meta:
verbose_name = _('order line')
verbose_name_plural = _('order lines')
def __str__(self):
return "%dx %s (%s)" % (self.quantity, self.text, self.get_type_display())
@property
def tax_amount(self):
"""
:rtype: shoop.utils.money.Money
"""
zero = Money(0, self.order.currency)
return sum((x.amount for x in self.taxes.all()), zero)
def save(self, *args, **kwargs):
if not self.sku:
self.sku = u""
if self.type == OrderLineType.PRODUCT and not self.product_id:
raise ValidationError("Product-type order line can not be saved without a set product")
if self.product_id and self.type != OrderLineType.PRODUCT:
raise ValidationError("Order line has product but is not of Product type")
if self.product_id and not self.supplier_id:
raise ValidationError("Order line has product but no supplier")
super(OrderLine, self).save(*args, **kwargs)
if self.product_id:
self.supplier.module.update_stock(self.product_id)
@python_2_unicode_compatible
class OrderLineTax(MoneyPropped, ShoopModel, LineTax):
order_line = models.ForeignKey(
OrderLine, related_name='taxes', on_delete=models.PROTECT,
verbose_name=_('order line'))
tax = models.ForeignKey(
"Tax", related_name="order_line_taxes",
on_delete=models.PROTECT, verbose_name=_('tax'))
name = models.CharField(max_length=200, verbose_name=_('tax name'))
amount = MoneyProperty('amount_value', 'order_line.order.currency')
base_amount = MoneyProperty('base_amount_value', 'order_line.order.currency')
amount_value = MoneyValueField(verbose_name=_('tax amount'))
base_amount_value = MoneyValueField(
verbose_name=_('base amount'),
help_text=_('Amount that this tax is calculated from'))
ordering = models.IntegerField(default=0, verbose_name=_('ordering'))
class Meta:
ordering = ["ordering"]
def __str__(self):
return "%s: %s on %s" % (self.name, self.amount, self.base_amount)
|
agpl-3.0
| -6,166,658,032,504,064,000
| 37.806667
| 112
| 0.68356
| false
| 3.837179
| false
| false
| false
|
sissaschool/xmlschema
|
tests/test_files.py
|
1
|
3234
|
#!/usr/bin/env python
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
This module runs tests on XSD or XML files provided as arguments.
"""
if __name__ == '__main__':
import unittest
import os
import argparse
from xmlschema import XMLSchema10, XMLSchema11
from xmlschema.testing import xsd_version_number, defuse_data, \
make_schema_test_class, make_validation_test_class
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('--version', dest='version', metavar='VERSION',
type=xsd_version_number, default='1.0',
help="XSD schema version to use for testing (default is 1.0).")
parser.add_argument('--inspect', action="store_true", default=False,
help="Inspect using an observed custom schema class.")
parser.add_argument('--defuse', metavar='(always, remote, never)',
type=defuse_data, default='remote',
help="Define when to use the defused XML data loaders. "
"Defuse remote data for default.")
parser.add_argument('--lxml', dest='lxml', action='store_true', default=False,
help='Check also with lxml.etree.XMLSchema (for XSD 1.0)')
parser.add_argument(
'files', metavar='[FILE ...]', nargs='*',
help='Input files. Each argument can be a file path or a glob pathname. '
'A "-" stands for standard input. If no arguments are given then processes '
'all the files included within the scope of the selected applications.'
)
args = parser.parse_args()
if args.version == '1.0':
schema_class = XMLSchema10
check_with_lxml = args.lxml
else:
schema_class = XMLSchema11
check_with_lxml = False
test_num = 1
test_args = argparse.Namespace(
errors=0, warnings=0, inspect=args.inspect, locations=(),
defuse=args.defuse, skip=False, debug=False
)
test_loader = unittest.TestLoader()
test_suite = unittest.TestSuite()
for test_file in args.files:
if not os.path.isfile(test_file):
continue
elif test_file.endswith('xsd'):
test_class = make_schema_test_class(
test_file, test_args, test_num, schema_class, check_with_lxml
)
test_num += 1
elif test_file.endswith('xml'):
test_class = make_validation_test_class(
test_file, test_args, test_num, schema_class, check_with_lxml
)
test_num += 1
else:
continue
print("Add test %r for file %r ..." % (test_class.__name__, test_file))
test_suite.addTest(test_loader.loadTestsFromTestCase(test_class))
if test_num == 1:
print("No XSD or XML file to test, exiting ...")
else:
runner = unittest.TextTestRunner()
runner.run(test_suite)
|
mit
| 6,779,808,727,647,150,000
| 38.439024
| 89
| 0.606988
| false
| 4.037453
| true
| false
| false
|
dwillis/dayspring
|
dayspring/settings.py
|
1
|
5226
|
# Django settings for dayspring project.
import os
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '=5xw*pqse(##*c*+h74^$(t!qa)7=5gx1gua=)8)us+_@t^j*$'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'dayspring.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'dayspring.wsgi.application'
TEMPLATE_DIRS = (os.path.join(PROJECT_PATH, 'templates'),)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'localflavor',
'dayspring',
'django.contrib.admin',
'swingtime',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'dayspring.db',
}
}
SITE_ID = 3
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
|
mit
| -976,203,766,647,926,500
| 29.747059
| 79
| 0.69977
| false
| 3.543051
| false
| false
| false
|
andresriancho/moto
|
tests/test_s3/test_server.py
|
1
|
1530
|
import sure # noqa
import moto.server as server
'''
Test the different server responses
'''
def test_s3_server_get():
backend = server.create_backend_app("s3")
test_client = backend.test_client()
res = test_client.get('/')
res.data.should.contain('ListAllMyBucketsResult')
def test_s3_server_bucket_create():
backend = server.create_backend_app("s3")
test_client = backend.test_client()
res = test_client.put('/', 'http://foobaz.localhost:5000/')
res.status_code.should.equal(200)
res = test_client.get('/')
res.data.should.contain('<Name>foobaz</Name>')
res = test_client.get('/', 'http://foobaz.localhost:5000/')
res.status_code.should.equal(200)
res.data.should.contain("ListBucketResult")
res = test_client.put('/bar', 'http://foobaz.localhost:5000/', data='test value')
res.status_code.should.equal(200)
res = test_client.get('/bar', 'http://foobaz.localhost:5000/')
res.status_code.should.equal(200)
res.data.should.equal("test value")
def test_s3_server_post_to_bucket():
backend = server.create_backend_app("s3")
test_client = backend.test_client()
res = test_client.put('/', 'http://tester.localhost:5000/')
res.status_code.should.equal(200)
test_client.post('/', "https://tester.localhost:5000/", data={
'key': 'the-key',
'file': 'nothing'
})
res = test_client.get('/the-key', 'http://tester.localhost:5000/')
res.status_code.should.equal(200)
res.data.should.equal("nothing")
|
apache-2.0
| 8,750,110,255,723,227,000
| 26.818182
| 85
| 0.649673
| false
| 3.269231
| true
| false
| false
|
cojacoo/testcases_echoRD
|
gen_test2211.py
|
1
|
4396
|
import numpy as np
import pandas as pd
import scipy as sp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os, sys
try:
import cPickle as pickle
except:
import pickle
#connect echoRD Tools
pathdir='../echoRD' #path to echoRD
lib_path = os.path.abspath(pathdir)
#sys.path.append(lib_path)
sys.path.append('/home/ka/ka_iwg/ka_oj4748/echoRD/echoRD')
import vG_conv as vG
from hydro_tools import plotparticles_t,hydroprofile,plotparticles_specht
# Prepare echoRD
#connect to echoRD
import run_echoRD as rE
#connect and load project
[dr,mc,mcp,pdyn,cinf,vG]=rE.loadconnect(pathdir='../',mcinif='mcini_gen2a',experimental=True)
mc = mcp.mcpick_out(mc,'gen_test2a.pickle')
runname='gen_test2211'
mc.advectref='Shipitalo'
mc.soilmatrix=pd.read_csv(mc.matrixbf, sep=' ')
mc.soilmatrix['m'] = np.fmax(1-1/mc.soilmatrix.n,0.1)
mc.md_macdepth=mc.md_depth[np.fmax(2,np.sum(np.ceil(mc.md_contact),axis=1).astype(int))]
mc.md_macdepth[mc.md_macdepth<=0.]=0.065
precTS=pd.read_csv(mc.precf, sep=',',skiprows=3)
precTS.tstart=60
precTS.tend=60+1800
precTS.total=0.01
precTS.intense=precTS.total/(precTS.tend-precTS.tstart)
#use modified routines for binned retention definitions
mc.part_sizefac=500
mc.gridcellA=mc.mgrid.vertfac*mc.mgrid.latfac
mc.particleA=abs(mc.gridcellA.values)/(2*mc.part_sizefac) #assume average ks at about 0.5 as reference of particle size
mc.particleD=2.*np.sqrt(mc.particleA/np.pi)
mc.particleV=3./4.*np.pi*(mc.particleD/2.)**3.
mc.particleV/=np.sqrt(abs(mc.gridcellA.values)) #assume grid size as 3rd dimension
mc.particleD/=np.sqrt(abs(mc.gridcellA.values))
mc.particlemass=dr.waterdensity(np.array(20),np.array(-9999))*mc.particleV #assume 20C as reference for particle mass
#DEBUG: a) we assume 2D=3D; b) change 20C to annual mean T?
mc=dr.ini_bins(mc)
mc=dr.mc_diffs(mc,np.max(np.max(mc.mxbin)))
[mc,particles,npart]=dr.particle_setup(mc)
#define bin assignment mode for infiltration particles
mc.LTEdef='instant'#'ks' #'instant' #'random'
mc.LTEmemory=mc.soilgrid.ravel()*0.
#new reference
mc.maccon=np.where(mc.macconnect.ravel()>0)[0] #index of all connected cells
mc.md_macdepth=np.abs(mc.md_macdepth)
mc.prects=False
#theta=mc.zgrid[:,1]*0.+0.273
#[mc,particles,npart]=rE.particle_setup_obs(theta,mc,vG,dr,pdyn)
[thS,npart]=pdyn.gridupdate_thS(particles.lat,particles.z,mc)
#[A,B]=plotparticles_t(particles,thS/100.,mc,vG,store=True)
# Run Model
mc.LTEpercentile=70 #new parameter
t_end=24.*3600.
saveDT=True
#1: MDA
#2: MED
#3: rand
infiltmeth='MDA'
#3: RWdiff
#4: Ediss
#exfiltmeth='RWdiff'
exfiltmeth='Ediss'
#5: film_uconst
#6: dynamic u
film=True
#7: maccoat1
#8: maccoat10
#9: maccoat100
macscale=1. #scale the macropore coating
clogswitch=False
infiltscale=False
#mc.dt=0.11
#mc.splitfac=5
#pdyn.part_diffusion_binned_pd(particles,npart,thS,mc)
#import profile
#%prun -D diff_pd_prof.prof pdyn.part_diffusion_binned_pd(particles,npart,thS,mc)
wdir='/beegfs/work/ka_oj4748/gen_tests'
drained=pd.DataFrame(np.array([]))
leftover=0
output=60. #mind to set also in TXstore.index definition
dummy=np.floor(t_end/output)
t=0.
ix=0
TSstore=np.zeros((int(dummy),mc.mgrid.cells[0],2))
try:
#unpickle:
with open(''.join([wdir,'/results/Z',runname,'_Mstat.pick']),'rb') as handle:
pickle_l = pickle.load(handle)
dummyx = pickle.loads(pickle_l)
particles = pickle.loads(dummyx[0])
[leftover,drained,t,TSstore,ix] = pickle.loads(dummyx[1])
ix+=1
print('resuming into stored run at t='+str(t)+'...')
except:
print('starting new run...')
#loop through plot cycles
for i in np.arange(dummy.astype(int))[ix:]:
plotparticles_specht(particles,mc,pdyn,vG,runname,t,i,saving=True,relative=False,wdir=wdir)
[particles,npart,thS,leftover,drained,t]=rE.CAOSpy_rundx1(i*output,(i+1)*output,mc,pdyn,cinf,precTS,particles,leftover,drained,6.,splitfac=4,prec_2D=False,maccoat=macscale,saveDT=saveDT,clogswitch=clogswitch,infilt_method=infiltmeth,exfilt_method=exfiltmeth,film=film,infiltscale=infiltscale)
TSstore[i,:,:]=rE.part_store(particles,mc)
#if i/5.==np.round(i/5.):
with open(''.join([wdir,'/results/Z',runname,'_Mstat.pick']),'wb') as handle:
pickle.dump(pickle.dumps([pickle.dumps(particles),pickle.dumps([leftover,drained,t,TSstore,i])]), handle, protocol=2)
|
gpl-3.0
| 2,470,535,713,098,664,000
| 30.177305
| 296
| 0.720428
| false
| 2.533718
| false
| false
| false
|
bkuczenski/lca-tools
|
antelope_utilities/flowables/create_synonyms.py
|
1
|
4679
|
import os
import json
import re
from antelope_catalog.providers.ecospold2 import EcospoldV2Archive
from antelope_catalog.providers.ilcd import grab_flow_name, IlcdLcia
from antelope_catalog.providers.xml_widgets import find_tag, find_tags, find_common, find_ns
from lcatools.flowdb.synlist import Flowables, InconsistentIndices, ConflictingCas
ECOSPOLD = os.path.join('/data', 'Dropbox', 'data', 'Ecoinvent', '3.2', 'current_Version_3.2_cutoff_lci_ecoSpold02.7z')
ES_FILE = '00009573-c174-463a-8ebf-183ec587ba0d_7cb72345-4338-4f2d-830f-65bba3530fdb.spold'
ELCD = os.path.join('/data', 'Dropbox', 'data', 'ELCD', 'ELCD3.2-a.zip')
SYNONYMS = os.path.join(os.path.dirname(__file__), 'synonyms.json')
def get_ecospold_exchanges(archive=ECOSPOLD, prefix='datasets', file=ES_FILE):
E = EcospoldV2Archive(archive, prefix=prefix)
o = E.objectify(file)
return find_tags(o, 'elementaryExchange')
def ilcd_flow_generator(archive=ELCD, **kwargs):
I = IlcdLcia(archive, **kwargs)
count = 0
for f in I.list_objects('Flow'):
o = I.objectify(f, dtype='Flow')
if o is not None:
yield o
count += 1
if count % 1000 == 0:
print('%d data sets completed' % count)
def _add_syn_if(syn, synset):
g = syn.strip()
if g != '' and g != 'PSM':
synset.add(syn)
def synonyms_from_ecospold_exchange(exch):
"""
Ecospold exchanges: synonyms are Name, CAS Number, and ', '-separated contents of synonym tags.
Care must be taken not to split on ',' as some chemical names include commas
:param exch:
:return: set of synonyms (stripped)
"""
syns = set()
name = str(exch['name'])
syns.add(name)
cas = exch.get('casNumber')
if cas is not None:
syns.add(cas)
synonym_tag = find_tags(exch, 'synonym')
if len(synonym_tag) == 1:
# parse the comma-separated list
if bool(re.search('etc\.', str(synonym_tag[0]))):
syns.add(str(synonym_tag[0]).strip())
else:
for x in str(synonym_tag[0]).split(', '):
_add_syn_if(x, syns)
else:
# multiple entries- allow embedded comma-space
for syn in synonym_tag:
_add_syn_if(str(syn), syns)
return name, syns
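# Added illustration (not part of the original module): as noted in the
# docstring above, synonym tags are split on ', ' rather than ',' so that
# commas embedded in chemical names survive.  A quick sanity check of that
# behaviour with a hypothetical entry:
assert '1,2-dichloroethane, ethylene dichloride'.split(', ') == \
    ['1,2-dichloroethane', 'ethylene dichloride']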
def synonyms_from_ilcd_flow(flow):
"""
ILCD flow files have long synonym blocks at the top. They also have a CAS number and a basename.
:param flow:
:return:
"""
ns = find_ns(flow.nsmap, 'Flow')
syns = set()
name = grab_flow_name(flow, ns=ns)
syns.add(name)
uid = str(find_common(flow, 'UUID')).strip()
syns.add(uid)
cas = str(find_tag(flow, 'CASNumber', ns=ns)).strip()
if cas != '':
syns.add(cas)
for syn in find_tags(flow, 'synonyms', ns='common'):
for x in str(syn).split(';'):
if x.strip() != '' and x.strip().lower() != 'wood':
syns.add(x.strip())
return name, syns, uid
cas_regex = re.compile('^[0-9]{,6}-[0-9]{2}-[0-9]$')
def _add_set(synlist, name, syns, xid):
try:
index = synlist.add_set(syns, merge=True, name=name)
except ConflictingCas:
index = synlist.new_set(syns, name=name)
except InconsistentIndices:
dups = synlist.find_indices(syns)
matches = []
for i in dups:
for j in syns:
if j in synlist[i]:
matches.append((j, i))
break
try:
index = synlist.merge_indices(dups)
print('Merged Inconsistent indices in ID %s, e.g.:' % xid)
for match in matches:
print(' [%s] = %d' % match)
except ConflictingCas:
# print('Conflicting CAS on merge.. creating new group')
index = synlist.new_set(syns, name=name)
return index
def create_new_synonym_list():
"""
This just makes a SynList and populates it, first with ecoinvent, then with ILCD, and saves it to disk
:return:
"""
synonyms = Flowables()
# first, ecoinvent
exchs = get_ecospold_exchanges()
for exch in exchs:
name, syns = synonyms_from_ecospold_exchange(exch)
_add_set(synonyms, name, syns, exch.get('id'))
# next, ILCD - but hold off for now
for flow in ilcd_flow_generator():
name, syns, uid = synonyms_from_ilcd_flow(flow)
_add_set(synonyms, name, syns, uid)
with open(SYNONYMS, 'w') as fp:
json.dump(synonyms.serialize(), fp)
print('Wrote synonym file to %s' % SYNONYMS)
return synonyms
def load_synonyms(file=SYNONYMS):
with open(file) as fp:
return Flowables.from_json(json.load(fp))
|
gpl-2.0
| -5,978,112,579,486,739,000
| 30.829932
| 119
| 0.602265
| false
| 3.048208
| false
| false
| false
|
dylanfried/yesterday-tomorrow
|
OperationOrganism.py
|
1
|
7841
|
from copy import copy
import random
from helpers import shift
class OperationOrganism:
length = 150
def __init__(self,genome=None,target=None,population=None):
self.population = population
if genome:
self.genome = genome[:]
else:
self.genome = [(random.sample([1,2,3,4,5,6] if i < self.length/2 else [2,3,4,4,4,4,4],1)[0],random.randint(-20,20)) for i in range(self.length)]
def random_genome(self,length):
return
def mutate(self,gene_range,mutate_max):
''' Return a mutated organism '''
c = self.copy()
for i in range(len(c.genome)):
if random.random() < 0.02:
# New random gene replacement
c.genome[i] = (random.sample([1,2,3,4,5,6] if i < self.length/2 else [2,3,4,4,4,4,4],1)[0],random.randint(-20,20))
elif random.random() < 0.02:
# Permute just the operand
c.genome[i] = (c.genome[i][0],c.genome[i][1] + random.randint(-mutate_max,mutate_max))
return c
def crossover(self,organism):
''' Return an organism that is a crossover between this organism and the provided organism '''
c1 = self.copy()
c2 = organism.copy()
for i in range(min(len(c1.genome),len(c2.genome))):
if random.random() < 0.3:
c1.genome[i] = organism.genome[i]
c2.genome[i] = self.genome[i]
return [c1,c2]
def onepointcrossover(self,organism):
inflection_point = random.randint(0,len(organism.genome)-1)
c1 = self.copy()
c2 = organism.copy()
genome1 = c1.genome[:inflection_point] + c2.genome[inflection_point:]
genome2 = c2.genome[:inflection_point] + c1.genome[inflection_point:]
c1.genome = genome1
c2.genome = genome2
return [c1,c2]
def calculate_fitness(self,target,other_genomes=None):
''' Calculate the fitness of this organism '''
# First, must resolve
result = self.resolve(target[0],target[1])
final_pattern = target[1]
p_common = 0
p_correct = 0
p_common = float(len([1 for item in result if item in final_pattern]))/float(max(len(result),len(final_pattern)))
for idx,item in enumerate(result):
if idx < len(final_pattern) and item == final_pattern[idx]:
p_correct += 1
p_correct = float(p_correct)/float(max(len(result),len(final_pattern)))
self.fitness = 1.0 - 0.5*(p_common + p_correct)
self.fitness_level = self.length-1
return
result_path = self.resolve(target[0],target[1],record_path=True)
final_pattern = target[1]
self.fitness = 1
for level,result in enumerate(result_path):
p_common = 0
p_correct = 0
p_common = float(len([1 for item in result if item in final_pattern]))/float(max(len(result),len(final_pattern)))
for idx,item in enumerate(result):
if idx < len(final_pattern) and item == final_pattern[idx]:
p_correct += 1
p_correct = float(p_correct)/float(max(len(result),len(final_pattern)))
fitness = 1.0 - 0.5*(p_common + p_correct)
if fitness < self.fitness:
self.fitness = fitness
self.fitness_level = level
def copy(self):
c = OperationOrganism(genome=self.genome)
return c
def resolve(self,start_pattern,final_pattern,record_path=False):
result = start_pattern[:]
if record_path:
path = [result[:]]
for operation_tuple in self.genome:
operation = operation_tuple[0]
operand = operation_tuple[1]
if operation == 1:
# no op
pass
elif operation == 2:
# add
#index = random.randint(0,len(final_pattern)-1)
index = operand % len(final_pattern)
result[index:index] = [final_pattern[index]]
elif operation == 3 and len(result) > 0:
# delete
#index = random.randint(0,len(result)-1)
index = operand % len(result)
del result[index]
elif operation == 4 and len(result) > 0:
# mutate
#index = random.randint(0,min(len(result)-1,len(final_pattern)-1))
index = operand % min(len(result),len(final_pattern))
result[index] = final_pattern[index]
elif operation == 5 and len(result) > 0 and operand != 0:
# rotation
amount = (operand/abs(operand)) * (operand % len(result))
result = shift(result,amount)
elif operation == 6 and len(result) > 0:
# exchange
index1 = operand % len(result)
index2 = (operand+1)%len(result)
result[index1],result[index2] = result[index2],result[index1]
elif operation == 7 and len(result) > 0 and operand != 0:
# incorrect rotation
# Only rotate incorrect notes
notes_to_shift_positions = []
notes_to_shift = []
for i in range(len(result)):
if i >= len(final_pattern) or final_pattern[i] != result[i]:
# This note should be shifted
notes_to_shift_positions.append(i)
notes_to_shift.append(result[i])
# Now do the actual shifting of the notes
amount = (operand/abs(operand)) * (operand % len(notes_to_shift))
notes_to_shift = shift(notes_to_shift,amount)
for i in range(len(notes_to_shift)):
result[notes_to_shift_positions[i]] = notes_to_shift[i]
elif operation == 8 and len(result) > 0:
# incorrect exchange
# Only exchange incorrect notes
found = False
for i in range(len(result)):
index1 = (operand+i) % len(result)
if index1 >= len(final_pattern) or final_pattern[index1] != result[index1]:
found = True
break
if found:
found = False
for i in range(len(result)):
index2 = (index1+i+1) % len(result)
if index2 >= len(final_pattern) or final_pattern[index2] != result[index2]:
found = True
break
if found:
result[index1],result[index2] = result[index2],result[index1]
if record_path:
path.append(result[:])
if record_path:
return path
else:
return result
def best_path(self,start_pattern,final_pattern,condense=[1]):
#condense = [i for i in condense for j in range(i)]
to_return = []
result_path = self.resolve(start_pattern,final_pattern,record_path=True)
for j in range(len(condense)):
print "J",j
c = condense[j]
print len(condense),self.fitness_level+1,int((self.fitness_level+1)/len(condense))
start = int((self.fitness_level+1)/len(condense))*j
stop = int((self.fitness_level+1)/len(condense))*(j+1)
print "start",start,"stop",stop
for i in range(start,stop,c):
print "i",i
to_return += result_path[i] + [(0,2,[])]
to_return += result_path[-1] + [(0,2,[])]
#to_return += final_pattern
return to_return
|
apache-2.0
| 6,161,296,623,014,132,000
| 43.050562
| 156
| 0.523275
| false
| 3.879762
| false
| false
| false
|
abelfunctions/abelfunctions
|
abelfunctions/riemann_theta/deprecated/riemanntheta.py
|
1
|
31845
|
"""
Computing Riemann Theta Functions
This module implements the algorithms for computing Riemann theta
functions and their derivatives featured in the paper *"Computing
Riemann Theta Functions"* by Deconinck, Heil, Bobenko, van Hoeij, and
Schmies [CRTF].
**DEFINITION OF THE RIEMANN THETA FUNCTION:**
Let `g` be a positive integer, the *genus* of the Riemann theta
function. Let `H_g` denote the Siegel upper half space of dimension
`g(g+1)/2` over `\CC` , that is the space of symmetric complex
matrices whose imaginary parts are positive definite. When `g = 1`,
this is just the complex upper half plane.
The Riemann theta function `\theta : \CC^g \times H_g \to \CC` is
defined by the infinite series
.. math::
\theta( z | \Omega ) = \sum_{ n \in \ZZ^g } e^{ 2 \pi i \left( \tfrac{1}{2} n \cdot \Omega n + n \cdot z \right) }
It is holomorphic in both `z` and `\Omega`. It is quasiperiodic in `z`
with respect to the lattice `\{ M + \Omega N | M,N \in \ZZ^g \}`,
meaning that `\theta(z|\Omega)` is periodic upon translation of `z` by
vectors in `\ZZ^g` and periodic up to a multiplicative exponential
factor upon translation of `z` by vectors in `\Omega \ZZ^g`. As a
consequence, `\theta(z | \Omega)` has exponential growth in the
imaginary parts of `z`.
When `g=1`, the Riemann theta function is the third Jacobi theta
function.
.. math::
\theta( z | \Omega) = \theta_3(\pi z | \Omega) = 1 + 2 \sum_{n=1}^\infty e^{i \pi \Omega n^2} \cos(2 \pi n z)
Riemann theta functions are the fundamental building blocks for
Abelian functions, which generalize the classical elliptic functions
to multiple variables. Like elliptic functions, Abelian functions and
consequently Riemann theta functions arise in many applications such
as integrable partial differential equations, algebraic geometry, and
optimization.
For more information about the basic facts of and definitions
associated with Riemann theta functions, see the Digital Library of
Mathematics Functions ``http://dlmf.nist.gov/21``.
**ALGORITHM:**
The algorithm in [CRTF] is based on the observation that the
exponential growth of `\theta` can be factored out of the sum. Thus,
we only need to find an approximation for the oscillatory part. The
derivation is omitted here but the key observation is to write `z = x
+ i y` and `\Omega = X + i Y` where `x`, `y`, `X`, and `Y` are real
vectors and matrices. With the exponential growth part factored out
of the sum, the goal is to find the integral points `n \in \ZZ^g` such
that the sum over these points is within `O(\epsilon)` accuracy of the
infinite sum, for a given `z \in \CC^g` and numerical accuracy
`\epsilon`.
By default we use the uniform approximation formulas which use the
same integral points for all `z` for a fixed `\Omega`. This can be
changed by setting ``uniform=False``. This is ill-advised if you need
to compute the Riemann theta function for a fixed `\Omega` for many
different `z`.
**REFERENCES:**
- [CRTF] Computing Riemann Theta Functions. Bernard Deconinck, Matthias
Heil, Alexander Bobenko, Mark van Hoeij and Markus Schmies. Mathematics
of Computation 73 (2004) 1417-1442. The paper is available at
http://www.amath.washington.edu/~bernard/papers/pdfs/computingtheta.pdf.
Accompanying Maple code is available at
http://www.math.fsu.edu/~hoeij/RiemannTheta/
- Digital Library of Mathematics Functions - Riemann Theta Functions ( http://dlmf.nist.gov/21 ).
**AUTHORS:**
- Chris Swierczewski (2011-11): major overhaul to match notation of
[CRTF], numerous bug fixes, documentation, doctests, symbolic
evaluation
- Grady Williams (2012-2013)
"""
import numpy as np
import scipy as sp
import scipy.linalg as la
import riemanntheta_cy
from scipy.special import gamma, gammaincc, gammainccinv,gammaincinv
from scipy.optimize import fsolve
import time
from lattice_reduction import lattice_reduce
#from riemanntheta_omegas import RiemannThetaOmegas
gpu_capable = True
try:
from riemanntheta_cuda import RiemannThetaCuda
except ImportError:
gpu_capable = False
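# A minimal illustrative sketch (hypothetical helper, not used elsewhere in
# this module): the module docstring defines theta(z | Omega) as an infinite
# sum over integer vectors n. For genus g = 1 that definition can be
# approximated by brute-force truncation of the sum to |n| <= N. This is only
# meant to make the formula concrete; RiemannTheta_Function below implements
# the efficient, error-controlled algorithm of [CRTF], and value_at_point()
# reconstructs theta as exp(u) * v from the factored exponential and
# oscillatory parts.
def _naive_theta_genus1(z, omega, N=30):
    """Brute-force partial sum of theta(z | omega) for g = 1 (sketch only)."""
    # omega: complex number with positive imaginary part; z: complex number
    return sum(np.exp(2.0j * np.pi * (0.5 * n * omega * n + n * z))
               for n in range(-N, N + 1))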
class RiemannTheta_Function(object):
r"""
Creates an instance of the Riemann theta function parameterized by a
Riemann matrix ``Omega``, directional derivative ``derivs``, and
derivative evaluation accuracy radius. See module level documentation
for more information about the Riemann theta function.
The Riemann theta function `\theta : \CC^g \times H_g \to \CC` is defined
by the infinite series
.. math::
\theta( z | \Omega ) = \sum_{ n \in \ZZ^g } e^{ 2 \pi i \left( \tfrac{1}{2} \langle \Omega n, n \rangle + \langle z, n \rangle \right) }
The precision of Riemann theta function evaluation is determined by
the precision of the base ring.
As shown in [CRTF], `n` th order derivatives introduce polynomial growth in
the oscillatory part of the Riemann theta approximations thus making a
global approximation formula impossible. Therefore, one must specify
a ``deriv_accuracy_radius`` of guaranteed accuracy when computing
derivatives of `\theta(z | \Omega)`.
INPUT:
- ``Omega`` -- a Riemann matrix (symmetric with positive definite imaginary part)
- ``deriv`` -- (default: ``[]``) a list of `g`-tuples representing a directional derivative of `\theta`. A list of `n` lists represents an `n`th order derivative.
    - ``uniform`` -- (default: ``True``) a uniform approximation allows the accurate computation of the Riemann theta function without having to recompute the integer points over which to take the finite sum. See [CRTF] for a more in-depth definition.
- ``deriv_accuracy_radius`` -- (default: 5) the guaranteed radius of accuracy in computing derivatives of theta. This parameter is necessary due to the polynomial growth of the non-doubly exponential part of theta
OUTPUT:
- ``Function_RiemannTheta`` -- a Riemann theta function parameterized by the Riemann matrix `\Omega`, derivatives ``deriv``, whether or not to use a uniform approximation, and derivative accuracy radius ``deriv_accuracy_radius``.
.. note::
For now, only second order derivatives are implemented. Approximation
formulas are derived in [CRTF]. It is not exactly clear how to
generalize these formulas. In most applications, second order
        derivatives are sufficient.
"""
def __init__(self, uniform=True, deriv_accuracy_radius=5, tileheight = 32, tilewidth = 16):
"""
Defines parameters in constructed class instance.
"""
self.uniform = uniform
self.deriv_accuracy_radius = deriv_accuracy_radius
# cache radii, intpoints, and inverses
self._rad = None
self._intpoints = None
self._Omega = None
self._Yinv = None
self._T = None
self._Tinv = None
self._prec = 1e-8
if (gpu_capable):
self.parRiemann = RiemannThetaCuda(tileheight, tilewidth)
def lattice(self):
r"""
        Compute the complex lattice corresponding to the Riemann matrix.
.. note::
Not yet implemented.
"""
raise NotImplementedError()
def genus(self):
r"""
The genus of the algebraic curve from which the Riemann matrix is
calculated. If $\Omega$ is not block decomposable then this is just
the dimension of the matrix.
.. note::
            Block decomposability detection is difficult and not yet
implemented. Currently, ``self.genus()`` just returns the size
of the matrix.
"""
        raise NotImplementedError()
def find_int_points(self,g, c, R, T,start):
r"""
Recursive helper function for computing the integer points needed in
each coordinate direction.
INPUT:
- ``g`` -- the genus. recursively used to determine integer
points along each axis.
- ``c`` -- center of integer point computation. `0 \in \CC^g`
is used when using the uniform approximation.
- ``R`` -- the radius of the ellipsoid along the current axis.
- ``start`` -- the starting integer point for each recursion
along each axis.
OUTPUT:
- ``intpoints`` -- (list) a list of all of the integer points
inside the bounding ellipsoid along a single axis
        .. todo::
Recursion can be memory intensive in Python. For genus `g<30`
this is a reasonable computation but can be sped up by
writing a loop instead.
"""
a_ = c[g] - R/(np.sqrt(np.pi)*T[g,g])
b_ = c[g] + R/(np.sqrt(np.pi)*T[g,g])
        # cast to plain ints so the range() calls below accept the bounds
        a = int(np.ceil(a_))
        b = int(np.floor(b_))
# check if we reached the edge of the ellipsoid
if not a <= b: return np.array([])
# last dimension reached: append points
if g == 0:
points = np.array([])
for i in range(a, b+1):
#Note that this algorithm works backwards on the coordinates,
#the last coordinate found is x1 if our coordinates are {x1,x2, ... xn}
points = np.append(np.append([i],start), points)
return points
#
# compute new shifts, radii, start, and recurse
#
newg = g-1
newT = T[:(newg+1),:(newg+1)]
newTinv = la.inv(newT)
pts = []
for n in range(a, b+1):
chat = c[:newg+1]
that = T[:newg+1,g]
newc = (chat.T - (np.dot(newTinv, that)*(n - c[g]))).T
newR = np.sqrt(R**2 - np.pi*(T[g,g] * (n - c[g]))**2) # XXX
newstart = np.append([n],start)
newpts = self.find_int_points(newg,newc,newR,newT,newstart)
pts = np.append(pts,newpts)
return pts
def integer_points(self, Yinv, T, z, g, R):
"""
The set, `U_R`, of the integral points needed to compute Riemann
theta at the complex point $z$ to the numerical precision given
        by the Riemann matrix base field precision.
The set `U_R` of [CRTF], (21).
.. math::
\left\{ n \in \ZZ^g : \pi ( n - c )^{t} \cdot Y \cdot
(n - c ) < R^2, |c_j| < 1/2, j=1,\ldots,g \right\}
Since `Y` is positive definite it has Cholesky decomposition
`Y = T^t T`. Letting `\Lambda` be the lattice of vectors
`v(n), n \in ZZ^g` of the form `v(n)=\sqrt{\pi} T (n + [[ Y^{-1} n]])`,
we have that
.. math::
S_R = \left\{ v(n) \in \Lambda : || v(n) || < R \right\} .
        Note that since the integer points are only required for the
        oscillatory part of Riemann theta, all of these points are near the point
`0 \in \CC^g`. Additionally, if ``uniform == True`` then the set of
integer points is independent of the input points `z \in \CC^g`.
.. note::
To actually compute `U_R` one needs to compute the convex hull of
`2^{g}` bounding ellipsoids. Since this is computationally
expensive, an ellipsoid centered at `0 \in \CC^g` with large
radius is computed instead. This can cause accuracy issues with
ill-conditioned Riemann matrices, that is, those that produce
            long and narrow bounding ellipsoids. See [CRTF] Section ### for
more information.
INPUTS:
- ``Yinv`` -- the inverse of the imaginary part of the Riemann matrix
`\Omega`
- ``T`` -- the Cholesky decomposition of the imaginary part of the
Riemann matrix `\Omega`
- ``z`` -- the point `z \in \CC` at which to compute `\theta(z|\Omega)`
- ``R`` -- the first ellipsoid semi-axis length as computed by ``self.radius()``
"""
# g = Yinv.shape[0]
pi = np.pi
z = np.array(z).reshape((g,1))
x = z.real
y = z.imag
# determine center of ellipsoid.
if self.uniform:
c = np.zeros((g,1))
intc = np.zeros((g,1))
leftc = np.zeros((g,1))
else:
c = Yinv * y
intc = c.round()
leftc = c - intc
int_points = self.find_int_points(g-1,leftc,R,T,[])
return int_points
def radius(self, T, prec, deriv=[]):
r"""
Calculate the radius `R` to compute the value of the theta function
to within `2^{-P + 1}` bits of precision where `P` is the
real / complex precision given by the input matrix. Used primarily
by ``RiemannTheta.integer_points()``.
`R` is the radius of [CRTF] Theorems 2, 4, and 6.
Input
-----
- ``T`` -- the Cholesky decomposition of the imaginary part of the
Riemann matrix `\Omega`
- ``prec`` -- the desired precision of the computation
- ``deriv`` -- (list) (default=``[]``) the derivative, if given.
Radius increases as order of derivative increases.
"""
Pi = np.pi
I = 1.0j
g = np.float64(T.shape[0])
# compute the length of the shortest lattice vector
#U = qflll(T)
A = lattice_reduce(T)
r = min(la.norm(A[:,i]) for i in range(int(g)))
normTinv = la.norm(la.inv(T))
# solve for the radius using:
# * Theorem 3 of [CRTF] (no derivative)
# * Theorem 5 of [CRTF] (first order derivative)
# * Theorem 7 of [CRTF] (second order derivative)
if len(deriv) == 0:
eps = prec
lhs = eps * (2.0/g) * (r/2.0)**g * gamma(g/2.0)
ins = gammainccinv(g/2.0,lhs)
R = np.sqrt(ins) + r/2.0
rad = max( R, (np.sqrt(2*g)+r)/2.0)
elif len(deriv) == 1:
# solve for left-hand side
L = self.deriv_accuracy_radius
normderiv = la.norm(np.array(deriv[0]))
eps = prec
lhs = (eps * (r/2.0)**g) / (np.sqrt(Pi)*g*normderiv*normTinv)
# define right-hand-side function involving the incomplete gamma
# function
def rhs(ins):
"""
Right-hand side function for computing the bounding ellipsoid
radius given a desired maximum error bound for the first
derivative of the Riemann theta function.
"""
return gamma((g+1)/2)*gammaincc((g+1)/2, ins) + \
np.sqrt(Pi)*normTinv*L * gamma(g/2)*gammaincc(g/2, ins) - \
float(lhs)
# define lower bound (guess) and attempt to solve for the radius
lbnd = np.sqrt(g+2 + np.sqrt(g**2+8)) + r
try:
ins = fsolve(rhs, float(lbnd))[0]
except RuntimeWarning:
# fsolve had trouble finding the solution. We try
# a larger initial guess since the radius increases
# as desired precision increases
try:
ins = fsolve(rhs, float(2*lbnd))[0]
except RuntimeWarning:
raise ValueError, "Could not find an accurate bound for the radius. Consider using higher precision."
# solve for radius
R = np.sqrt(ins) + r/2.0
rad = max(R,lbnd)
elif len(deriv) == 2:
# solve for left-hand side
L = self.deriv_accuracy_radius
prodnormderiv = np.prod([la.norm(d) for d in deriv])
eps = prec
lhs = (eps*(r/2.0)**g) / (2*Pi*g*prodnormderiv*normTinv**2)
# define right-hand-side function involving the incomplete gamma
# function
def rhs(ins):
"""
Right-hand side function for computing the bounding ellipsoid
radius given a desired maximum error bound for the second
derivative of the Riemann theta function.
"""
return gamma((g+2)/2)*gammaincc((g+2)/2, ins) + \
2*np.sqrt(Pi)*normTinv*L * \
gamma((g+1)/2)*gammaincc((g+1)/2,ins) + \
Pi*normTinv**2*L**2 * \
gamma(g/2)*gammaincc(g/2,ins) - float(lhs)
# define lower bound (guess) and attempt to solve for the radius
lbnd = np.sqrt(g+4 + np.sqrt(g**2+16)) + r
try:
ins = fsolve(rhs, float(lbnd))[0]
except RuntimeWarning:
# fsolve had trouble finding the solution. We try
# a larger initial guess since the radius increases
# as desired precision increases
try:
ins = fsolve(rhs, float(2*lbnd))[0]
except RuntimeWarning:
raise ValueError, "Could not find an accurate bound for the radius. Consider using higher precision."
# solve for radius
R = np.sqrt(ins) + r/2.0
rad = max(R,lbnd)
else:
            # can't compute higher-order derivatives yet
            raise NotImplementedError("Ellipsoid radius only implemented for first and second derivatives.")
return rad
"""
    Performs simple re-caching of matrices and also prepares them for GPU processing if necessary.
Input
-----
Omega - the Riemann matrix
X - The real part of Omega
Y - The imaginary part of Omega
Yinv - The inverse of Y
T - The Cholesky Decomposition of Y
g - The genus of the Riemann theta function
prec - The desired precision
deriv - the set of derivatives to compute (Possibly an empty set)
Tinv - The inverse of T
Output
-----
Data structures ready for GPU computation.
"""
def recache(self, Omega, X, Y, Yinv, T, g, prec, deriv, Tinv, batch):
recache_omega = not np.array_equal(self._Omega, Omega)
recache_prec = self._prec != prec
#Check if we've already computed the uniform radius and intpoints for this Omega/Precision
if (recache_omega or recache_prec):
#If not recompute the integer summation set.
self._prec = prec
self._rad = self.radius(T, prec, deriv=deriv)
origin = [0]*g
self._intpoints = self.integer_points(Yinv, T, origin,
g, self._rad)
#If gpu_capable is set to true and batch is set to true then the data structures need to
#be loaded onto the GPU for computation. This code loads them onto the GPU and compiles
#the pyCuda functions.
if (gpu_capable and batch):
self.parRiemann.cache_intpoints(self._intpoints)
#Check if the gpu functions depending on the genus and Omega need to be compiled/recompiled
if (self._Omega is None or not g == self._Omega.shape[0] or self.parRiemann.g is None):
self.parRiemann.compile(g)
self.parRiemann.cache_omega_real(X)
self.parRiemann.cache_omega_imag(Yinv, T)
#Check if the gpu functions depending only on Omega need to be recompiled
else:
#Check if the gpu functions depending on the real part of Omega need to be recompiled
if (not np.array_equal(self._Omega.real, Omega.real)):
self.parRiemann.cache_omega_real(X)
#Check if the gpu functions depending on the imaginary part of Omega need to be recompiled
if (not np.array_equal(self._Omega.imag, Omega.imag)):
self.parRiemann.cache_omega_imag(Yinv, T)
self._Omega = Omega
"""
Handles calls to the GPU.
Input
-----
Z - the set of points to compute theta(z, Omega) at.
deriv - The derivatives to compute (possibly an empty list)
gpu_max - The maximum number of points to compute on the GPU at once
length - The number of points we're computing. (ie. length == |Z|)
Output
-------
u - A list of the exponential growth terms of theta (or deriv(theta)) for each z in Z
    v - A list of the approximations of the infinite sum of theta (or deriv(theta)) for each z in Z
"""
def gpu_process(self, Z, deriv, gpu_max, length):
v = np.array([])
u = np.array([])
#divide the set z into as many partitions as necessary
num_partitions = (length-1)//(gpu_max) + 1
for i in range(0, num_partitions):
#determine the starting and stopping points of the partition
p_start = (i)*gpu_max
p_stop = min(length, (i+1)*gpu_max)
if (len(deriv) > 0):
v_p = self.parRiemann.compute_v_with_derivs(Z[p_start: p_stop, :], deriv)
else:
v_p = self.parRiemann.compute_v_without_derivs(Z[p_start: p_stop, :])
u_p = self.parRiemann.compute_u()
u = np.concatenate((u, u_p))
v = np.concatenate((v, v_p))
return u,v
"""
Computes the exponential and oscillatory part of the Riemann theta function. Or the directional
derivative of theta.
Input
-----
z - The point (or set of points) to compute the Riemann Theta function at. Note that if z is a set of
    points the variable "batch" must be set to True. If z is a single point it should be in the form of a
1-d numpy array, if z is a set of points it should be a list or 1-d numpy array of 1-d numpy arrays.
Omega - The Riemann matrix
batch - A variable that indicates whether or not a batch of points is being computed.
prec - The desired digits of precision to compute theta up to. Note that precision is limited to double
    precision, which is about 15 decimal digits.
gpu - Indicates whether or not to do batch computations on a GPU, the default is set to yes if the proper
pyCuda libraries are installed and no otherwise.
gpu_max - The maximum number of points to be computed on a GPU at once.
Output
------
u - A list of the exponential growth terms of theta (or deriv(theta)) for each z in Z
    v - A list of the approximations of the infinite sum of theta (or deriv(theta)) for each z in Z
"""
def exp_and_osc_at_point(self, z, Omega, batch = False, prec=1e-12, deriv=[], gpu=gpu_capable, gpu_max = 500000):
g = Omega.shape[0]
pi = np.pi
#Process all of the matrices into numpy matrices
X = np.array(Omega.real)
Y = np.array(Omega.imag)
Yinv = np.array(la.inv(Y))
T = np.array(la.cholesky(Y))
Tinv = np.array(la.inv(T))
deriv = np.array(deriv)
#Do recacheing if necessary
self.recache(Omega, X, Y, Yinv, T, g, prec, deriv, Tinv, batch)
# extract real and imaginary parts of input z
length = 1
if batch:
length = len(z)
z = np.array(z).reshape((length, g))
# compute integer points: check for uniform approximation
if self.uniform:
R = self._rad
S = self._intpoints
elif(batch):
            raise Exception("Can't compute pointwise approximation for multiple points at once.\nUse uniform approximation or call the function separately for each point.")
else:
R = self.radius(T, prec, deriv=deriv)
            S = self.integer_points(Yinv, T, z, g, R)
#Compute oscillatory and exponential terms
if gpu and batch and (length > gpu_max):
u,v = self.gpu_process(z, deriv, gpu_max, length)
elif gpu and batch and len(deriv) > 0:
v = self.parRiemann.compute_v_with_derivs(z, deriv)
elif gpu and batch:
v = self.parRiemann.compute_v_without_derivs(z)
elif (len(deriv) > 0):
v = riemanntheta_cy.finite_sum_derivatives(X, Yinv, T, z, S, deriv, g, batch)
else:
v = riemanntheta_cy.finite_sum(X, Yinv, T, z, S, g, batch)
if (length > gpu_max and gpu):
#u already computed
pass
elif (gpu and batch):
u = self.parRiemann.compute_u()
elif (batch):
K = len(z)
u = np.zeros(K)
for i in range(K):
w = np.array([z[i,:].imag])
val = np.pi*np.dot(w, np.dot(Yinv,w.T)).item(0,0)
u[i] = val
else:
u = np.pi*np.dot(z.imag,np.dot(Yinv,z.imag.T)).item(0,0)
return u,v
def exponential_part(self, *args, **kwds):
return self.exp_and_osc_at_point(*args, **kwds)[0]
def oscillatory_part(self, *args, **kwds):
return self.exp_and_osc_at_point(*args, **kwds)[1]
"""
TODO: Add documentation
"""
def characteristic(self, chars, z, Omega, deriv = [], prec=1e-8):
val = 0
z = np.matrix(z).T
alpha, beta = np.matrix(chars[0]).T, np.matrix(chars[1]).T
z_tilde = z + np.dot(Omega,alpha) + beta
if len(deriv) == 0:
u,v = self.exp_and_osc_at_point(z_tilde, Omega)
quadratic_term = np.dot(alpha.T, np.dot(Omega,alpha))[0,0]
exp_shift = 2*np.pi*1.0j*(.5*quadratic_term + np.dot(alpha.T, (z + beta)))
theta_val = np.exp(u + exp_shift)*v
elif len(deriv) == 1:
d = deriv[0]
scalar_term = np.exp(2*np.pi*1.0j*(.5*np.dot(alpha.T, np.dot(Omega, alpha)) + np.dot(alpha.T, (z + beta))))
alpha_part = 2*np.pi*1.0j*alpha
theta_eval = self.value_at_point(z_tilde, Omega, prec=prec)
term1 = np.dot(theta_eval*alpha_part.T, d)
term2 = self.value_at_point(z_tilde, Omega, prec=prec, deriv=d)
theta_val = scalar_term*(term1 + term2)
elif len(deriv) == 2:
d1,d2 = np.matrix(deriv[0]).T, np.matrix(deriv[1]).T
scalar_term = np.exp(2*np.pi*1.0j*(.5*np.dot(alpha.T, np.dot(Omega, alpha))[0,0] + np.dot(alpha.T, (z + beta))[0,0]))
#Compute the non-theta hessian
g = Omega.shape[0]
non_theta_hess = np.zeros((g, g), dtype = np.complex128)
theta_eval = self.value_at_point(z_tilde, Omega, prec=prec)
theta_grad = np.zeros(g, dtype=np.complex128)
for i in range(g):
partial = np.zeros(g)
partial[i] = 1.0
theta_grad[i] = self.value_at_point(z_tilde, Omega, prec = prec, deriv = partial)
for n in range(g):
for k in range(g):
non_theta_hess[n,k] = 2*np.pi*1.j*alpha[k,0] * (2*np.pi*1.j*theta_eval*alpha[n,0] + theta_grad[n]) + (2*np.pi*1.j*theta_grad[k]*alpha[n,0])
term1 = np.dot(d1.T, np.dot(non_theta_hess, d2))[0,0]
term2 = self.value_at_point(z_tilde, Omega, prec=prec, deriv=deriv)
theta_val = scalar_term*(term1 + term2)
else:
return NotImplementedError()
return theta_val
r"""
Returns the value of `\theta(z,\Omega)` at a point `z` or set of points if batch is True.
"""
def value_at_point(self, z, Omega, prec=1e-8, deriv=[], gpu=gpu_capable, batch=False):
exp_part, osc_part = self.exp_and_osc_at_point(z, Omega, prec=prec,
deriv=deriv, gpu=gpu,batch=batch)
return np.exp(exp_part) * osc_part
def __call__(self, z, Omega, prec=1e-8, deriv=[], gpu=gpu_capable, batch=False):
r"""
Returns the value of `\theta(z,\Omega)` at a point `z`. Lazy evaluation
is done if the input contains symbolic variables. If batch is set to true
then the functions expects a list/numpy array as input and returns a numpy array as output
"""
return self.value_at_point(z, Omega, prec=prec, deriv=deriv, gpu=gpu, batch=batch)
# declaration of Riemann theta
RiemannTheta = RiemannTheta_Function()
if __name__=="__main__":
print "=== Riemann Theta ==="
theta = RiemannTheta
z = np.array([0,0])
Omega = np.matrix([[1.0j,-0.5],[-0.5,1.0j]])
print "Test #1:"
print theta.value_at_point(z, Omega, batch = False)
print "1.1654 - 1.9522e-15*I"
print
print "Test #2:"
z1 = np.array([1.0j,1.0j])
print theta.value_at_point(z1,Omega)
print "-438.94 + 0.00056160*I"
print
print "Batch Test"
z0 = np.array([0, 0])
z1 = np.array([1.0j,1.0j])
z2 = np.array([.5 + .5j, .5 + .5j])
z3 = np.array([0 + .5j, .33 + .8j])
z4 = np.array([.345 + .768j, -44 - .76j])
print theta.value_at_point([z0,z1,z2,z3,z4],Omega, batch=True)
print
if (gpu_capable):
a = np.random.rand(10)
b = np.random.rand(10)
c = max(b)
b = 1.j*b/(1.0*c)
a = a + b
print a.size
a = a.reshape(5,2)
start1 = time.clock()
print theta.value_at_point(a, Omega, batch=True, prec=1e-12)
print("GPU time to perform calculation: " + str(time.clock() - start1))
start2 = time.clock()
print theta.value_at_point(a, Omega, gpu=False, batch=True,prec=1e-12)
print("CPU time to do same calculation: " + str(time.clock() - start2))
print
print "Derivative Tests:"
print "Calculating directional derivatives at z = [i, 0]"
print
y = np.array([1.0j, 0])
print "For [[1,0]]:"
print theta.value_at_point(y, Omega, deriv = [[1,0]])
print "0 - 146.49i"
print
print "For [[1,0] , [0,1]]: "
print theta.value_at_point(y, Omega, deriv = [[1,0], [0,1]])
print "0 + 0i"
print
print "For [[0,1], [1,0]]: "
print theta.value_at_point(y, Omega, deriv = [[0,1], [1,0]])
print "0 + 0i"
print
print "For [[1,0],[1,0],[1,1]]:"
print theta.value_at_point(y, Omega, deriv = [[1,0], [1,0], [1,1]])
print "0 + 7400.39i"
print
print "For [[1,1],[1,1],[1,1],[1,1]]: "
print theta.value_at_point(y, Omega, deriv = [[1,1],[1,1],[1,1],[1,1]])
print "41743.92 + 0i"
print
print ("GPU Derivative Test")
l = []
for x in range(5):
l.append(y)
#print theta.value_at_point(l, Omega, deriv = [[1,1],[1,1],[1,1],[1,1]], batch=True)
print "Theta w/ Characteristic Test"
z = np.array([1.j,0])
Omega = np.matrix([[1.0j,-0.5],[-0.5,1.0j]])
deriv = [[1,0],[1,0]]
chars = [[0,0],[0,0]]
print theta.characteristic(chars, z, Omega, deriv)
print "Test #3"
import pylab as p
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
print "\tCalculating theta..."
fig1 = plt.figure()
ax = fig1.add_subplot(1,1,1)
SIZE = 128
x = np.linspace(0,1,SIZE)
y = np.linspace(0,5,SIZE)
X,Y = p.meshgrid(x,y)
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0] for z in Z], Omega, batch=True)
Z = (V.reshape(SIZE,SIZE)).imag
print "\tPlotting..."
ax.contourf(X,Y,Z,7,antialiased=True)
fig1.show()
print "\tCalculating theta..."
fig2 = plt.figure()
ax = fig2.add_subplot(1,1,1)
SIZE = 512
x = np.linspace(-7,7,SIZE)
y = np.linspace(-7,7,SIZE)
X,Y = p.meshgrid(x,y)
    Z = X + Y*1.j
Z = Z.flatten()
w = np.array([[1.j]])
print w
U,V = theta.exp_and_osc_at_point(Z, w, batch = True)
print theta._intpoints
Z = (V.reshape(SIZE,SIZE)).real
print "\tPlotting..."
ax.contourf(X,Y,Z,7,antialiased=True)
fig2.show()
|
mit
| -6,444,361,531,170,360,000
| 37.882784
| 250
| 0.583859
| false
| 3.479187
| false
| false
| false
|
clovertrail/cloudinit-bis
|
cloudinit/config/cc_ntp.py
|
1
|
4063
|
# vi: ts=4 expandtab
#
# Copyright (C) 2016 Canonical Ltd.
#
# Author: Ryan Harper <ryan.harper@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
NTP
---
**Summary:** enable and configure ntp
Handle ntp configuration. If ntp is not installed on the system and ntp
configuration is specified, ntp will be installed. If there is a default ntp
config file in the image or one is present in the distro's ntp package, it will
be copied to ``/etc/ntp.conf.dist`` before any changes are made. A list of ntp
pools and ntp servers can be provided under the ``ntp`` config key. If no ntp
servers or pools are provided, 4 pools will be used in the format
``{0-3}.{distro}.pool.ntp.org``.
**Internal name:** ``cc_ntp``
**Module frequency:** per instance
**Supported distros:** centos, debian, fedora, opensuse, ubuntu
**Config keys**::
ntp:
pools:
- 0.company.pool.ntp.org
- 1.company.pool.ntp.org
- ntp.myorg.org
servers:
- my.ntp.server.local
- ntp.ubuntu.com
- 192.168.23.2
"""
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import templater
from cloudinit import type_utils
from cloudinit import util
import os
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
NTP_CONF = '/etc/ntp.conf'
NR_POOL_SERVERS = 4
distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu']
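# A minimal sketch of the 'ntp' configuration dictionary consumed by handle()
# below, using the keys documented in the module docstring (the host names are
# hypothetical examples taken from that docstring, not defaults applied by
# this module):
EXAMPLE_NTP_CFG = {
    'ntp': {
        'pools': ['0.company.pool.ntp.org', '1.company.pool.ntp.org'],
        'servers': ['my.ntp.server.local', '192.168.23.2'],
    },
}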
def handle(name, cfg, cloud, log, _args):
"""
Enable and configure ntp
ntp:
pools: ['0.{{distro}}.pool.ntp.org', '1.{{distro}}.pool.ntp.org']
servers: ['192.168.2.1']
"""
ntp_cfg = cfg.get('ntp', {})
if not isinstance(ntp_cfg, (dict)):
        raise RuntimeError("'ntp' key existed in config,"
                           " but not a dictionary type,"
                           " is a %s instead" % type_utils.obj_name(ntp_cfg))
if 'ntp' not in cfg:
LOG.debug("Skipping module named %s,"
"not present or disabled by cfg", name)
return True
install_ntp(cloud.distro.install_packages, packages=['ntp'],
check_exe="ntpd")
rename_ntp_conf()
write_ntp_config_template(ntp_cfg, cloud)
def install_ntp(install_func, packages=None, check_exe="ntpd"):
if util.which(check_exe):
return
if packages is None:
packages = ['ntp']
install_func(packages)
def rename_ntp_conf(config=NTP_CONF):
if os.path.exists(config):
util.rename(config, config + ".dist")
def generate_server_names(distro):
names = []
for x in range(0, NR_POOL_SERVERS):
name = "%d.%s.pool.ntp.org" % (x, distro)
names.append(name)
return names
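# For example, generate_server_names('ubuntu') returns
#   ['0.ubuntu.pool.ntp.org', '1.ubuntu.pool.ntp.org',
#    '2.ubuntu.pool.ntp.org', '3.ubuntu.pool.ntp.org'],
# which matches the '{0-3}.{distro}.pool.ntp.org' format described in the
# module docstring.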
def write_ntp_config_template(cfg, cloud):
servers = cfg.get('servers', [])
pools = cfg.get('pools', [])
if len(servers) == 0 and len(pools) == 0:
LOG.debug('Adding distro default ntp pool servers')
pools = generate_server_names(cloud.distro.name)
params = {
'servers': servers,
'pools': pools,
}
template_fn = cloud.get_template_filename('ntp.conf.%s' %
(cloud.distro.name))
if not template_fn:
template_fn = cloud.get_template_filename('ntp.conf')
if not template_fn:
        raise RuntimeError("No template found, "
                           "not rendering %s" % NTP_CONF)
templater.render_to_file(template_fn, NTP_CONF, params)
|
gpl-3.0
| -5,945,354,172,292,923,000
| 28.442029
| 79
| 0.624169
| false
| 3.657066
| true
| false
| false
|
lexotero/try-it
|
apps/congress/admin.py
|
1
|
1496
|
from django.contrib import admin
from apps.congress.models import Edition, Company, Speaker, Tag, Track, ActivityFormat, Activity
class TagAdmin(admin.ModelAdmin):
search_fields = ["name"]
class EditionAdmin(admin.ModelAdmin):
list_display = ["start", "end", "name", "description"]
search_fields = ["name", "description"]
class TrackAdmin(admin.ModelAdmin):
list_display = ["name", "description"]
search_fields = ["name", "description"]
class CompanyAdmin(admin.ModelAdmin):
list_display = ["name"]
search_fields = ["name", "description"]
class SpeakerAdmin(admin.ModelAdmin):
list_display = ["first_name", "last_name"]
list_filter = ["company"]
search_fields = ["first_name", "last_name"]
class ActivityFormatAdmin(admin.ModelAdmin):
list_display = ["name", "description"]
search_fields = ["name", "description"]
class ActivityAdmin(admin.ModelAdmin):
filter_horizontal = ["tags", "speakers", "companies"]
list_display = ["id", "title", "start", "end", "format", "track"]
list_filter = ["format", "tags", "edition", "track"]
search_fields = ["title", "description", "tags", "format", "speakers", "companies"]
admin.site.register(Tag, TagAdmin)
admin.site.register(Edition, EditionAdmin)
admin.site.register(Track, TrackAdmin)
admin.site.register(Company, CompanyAdmin)
admin.site.register(Speaker, SpeakerAdmin)
admin.site.register(ActivityFormat, ActivityFormatAdmin)
admin.site.register(Activity, ActivityAdmin)
|
apache-2.0
| -4,323,105,958,972,232,700
| 29.530612
| 96
| 0.701203
| false
| 3.596154
| false
| false
| false
|
miing/mci_migo
|
identityprovider/tests/openid_server/per_version/test_openid_teams.py
|
1
|
1669
|
from identityprovider.const import LAUNCHPAD_TEAMS_NS
from identityprovider.tests.helpers import OpenIDTestCase
class OpenIDTeamsTestCase(OpenIDTestCase):
def test(self):
# = Launchpad OpenID Teams Extension =
# The Launchpad OpenID server implements a custom team membership
# extension. This allows a relying party to query whether the user is
# a member of one or more teams.
# Now perform an OpenID authentication request, querying membership in
# four team names:
# * one that the user is a member of
# * one that does not exist
# * one that does exist but the user is not a member of
# * one that is actually the user's name
t = self.factory.make_team('ubuntu-team')
self.factory.add_account_to_team(self.account, t)
self.factory.make_team('launchpad-beta-testers')
teams = ('ubuntu-team,no-such-team,launchpad-beta-testers,%s' %
self.account.person.name)
response = self.do_openid_dance(self.claimed_id, teams=teams)
response = self.login(response)
# authorize sending team membership
response = self.yes_to_decide(response, teams=('ubuntu-team',))
info = self.complete_from_response(response)
self.assertEqual(info.status, 'success')
self.assertEqual(info.getSigned(LAUNCHPAD_TEAMS_NS, 'is_member'),
'ubuntu-team')
# The response reveals that the user is a member of the ubuntu-team.
# As specified, there is no difference in the response for non-existent
# teams and teams that the user is not a member of.
|
agpl-3.0
| -9,369,407,362,418,992
| 40.725
| 79
| 0.65728
| false
| 4.080685
| true
| false
| false
|
lixun910/pysal
|
pysal/viz/splot/_viz_esda_mpl.py
|
1
|
44623
|
import matplotlib.pyplot as plt
import matplotlib as mpl
import geopandas as gpd
import numpy as np
from pysal.lib.weights.contiguity import Queen
from pysal.lib.weights.spatial_lag import lag_spatial
import seaborn as sbn
from pysal.explore.esda.moran import (Moran_Local, Moran_Local_BV,
Moran, Moran_BV)
import warnings
from pysal.model.spreg import OLS
from matplotlib import patches, colors
from ._viz_utils import (mask_local_auto, moran_hot_cold_spots,
splot_colors)
"""
Lightweight visualizations for esda using Matplotlib and Geopandas
TODO
* geopandas plotting, change round shapes in legends to boxes
* prototype moran_facet using `seaborn.FacetGrid`
"""
__author__ = ("Stefanie Lumnitz <stefanie.lumitz@gmail.com>")
def _create_moran_fig_ax(ax, figsize):
"""
Creates matplotlib figure and axes instances
for plotting moran visualizations. Adds common viz design.
"""
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
ax.spines['left'].set_position(('axes', -0.05))
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position(('axes', -0.05))
ax.spines['top'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
return fig, ax
def moran_scatterplot(moran, zstandard=True, p=None, ax=None,
scatter_kwds=None, fitline_kwds=None):
"""
Moran Scatterplot
Parameters
----------
moran : esda.moran instance
Values of Moran's I Global, Bivariate and Local
Autocorrelation Statistics
zstandard : bool, optional
If True, Moran Scatterplot will show z-standardized attribute and
spatial lag values. Default =True.
p : float, optional
If given, the p-value threshold for significance
for Local Autocorrelation analysis. Points will be colored by
significance. By default it will not be colored.
Default =None.
ax : Matplotlib Axes instance, optional
If given, the Moran plot will be created inside this axis.
Default =None.
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline.
Default =None.
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import (Moran, Moran_BV,
... Moran_Local, Moran_Local_BV)
>>> from pysal.viz.splot.esda import moran_scatterplot
Load data and calculate weights
>>> link_to_data = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link_to_data)
>>> x = gdf['Suicids'].values
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
Calculate esda.moran Objects
>>> moran = Moran(y, w)
>>> moran_bv = Moran_BV(y, x, w)
>>> moran_loc = Moran_Local(y, w)
>>> moran_loc_bv = Moran_Local_BV(y, x, w)
Plot
>>> fig, axs = plt.subplots(2, 2, figsize=(10,10),
... subplot_kw={'aspect': 'equal'})
>>> moran_scatterplot(moran, p=0.05, ax=axs[0,0])
>>> moran_scatterplot(moran_loc, p=0.05, ax=axs[1,0])
>>> moran_scatterplot(moran_bv, p=0.05, ax=axs[0,1])
>>> moran_scatterplot(moran_loc_bv, p=0.05, ax=axs[1,1])
>>> plt.show()
"""
if isinstance(moran, Moran):
if p is not None:
warnings.warn('`p` is only used for plotting `esda.moran.Moran_Local`\n'
'or `Moran_Local_BV` objects')
fig, ax = _moran_global_scatterplot(moran=moran, zstandard=zstandard,
ax=ax, scatter_kwds=scatter_kwds,
fitline_kwds=fitline_kwds)
elif isinstance(moran, Moran_BV):
if p is not None:
warnings.warn('`p` is only used for plotting `esda.moran.Moran_Local`\n'
'or `Moran_Local_BV` objects')
fig, ax = _moran_bv_scatterplot(moran_bv=moran, ax=ax,
scatter_kwds=scatter_kwds,
fitline_kwds=fitline_kwds)
elif isinstance(moran, Moran_Local):
fig, ax = _moran_loc_scatterplot(moran_loc=moran, zstandard=zstandard,
ax=ax, p=p, scatter_kwds=scatter_kwds,
fitline_kwds=fitline_kwds)
elif isinstance(moran, Moran_Local_BV):
fig, ax = _moran_loc_bv_scatterplot(moran_loc_bv=moran, ax=ax,
p=p, scatter_kwds=scatter_kwds,
fitline_kwds=fitline_kwds)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
return fig, ax
def _moran_global_scatterplot(moran, zstandard=True, ax=None,
scatter_kwds=None, fitline_kwds=None):
"""
Global Moran's I Scatterplot.
Parameters
----------
moran : esda.moran.Moran instance
Values of Moran's I Global Autocorrelation Statistics
zstandard : bool, optional
If True, Moran Scatterplot will show z-standardized attribute and
spatial lag values. Default =True.
ax : Matplotlib Axes instance, optional
If given, the Moran plot will be created inside this axis.
Default =None.
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline.
Default =None.
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran
>>> from pysal.viz.splot.esda import moran_scatterplot
Load data and calculate weights
>>> link_to_data = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link_to_data)
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
Calculate Global Moran
>>> moran = Moran(y, w)
plot
>>> moran_scatterplot(moran)
>>> plt.show()
customize plot
>>> fig, ax = moran_scatterplot(moran, zstandard=False,
... fitline_kwds=dict(color='#4393c3'))
>>> ax.set_xlabel('Donations')
>>> plt.show()
"""
# to set default as an empty dictionary that is later filled with defaults
if scatter_kwds is None:
scatter_kwds = dict()
if fitline_kwds is None:
fitline_kwds = dict()
# define customization defaults
scatter_kwds.setdefault('alpha', 0.6)
scatter_kwds.setdefault('color', splot_colors['moran_base'])
scatter_kwds.setdefault('s', 40)
fitline_kwds.setdefault('alpha', 0.9)
fitline_kwds.setdefault('color', splot_colors['moran_fit'])
# get fig and ax
fig, ax = _create_moran_fig_ax(ax, figsize=(7, 7))
# set labels
ax.set_xlabel('Attribute')
ax.set_ylabel('Spatial Lag')
ax.set_title('Moran Scatterplot' +
' (' + str(round(moran.I, 2)) + ')')
# plot and set standards
if zstandard is True:
lag = lag_spatial(moran.w, moran.z)
fit = OLS(moran.z[:, None], lag[:, None])
# plot
ax.scatter(moran.z, lag, **scatter_kwds)
ax.plot(lag, fit.predy, **fitline_kwds)
# v- and hlines
ax.axvline(0, alpha=0.5, color='k', linestyle='--')
ax.axhline(0, alpha=0.5, color='k', linestyle='--')
else:
lag = lag_spatial(moran.w, moran.y)
b, a = np.polyfit(moran.y, lag, 1)
# plot
ax.scatter(moran.y, lag, **scatter_kwds)
ax.plot(moran.y, a + b*moran.y, **fitline_kwds)
# dashed vert at mean of the attribute
ax.vlines(moran.y.mean(), lag.min(), lag.max(), alpha=0.5,
linestyle='--')
# dashed horizontal at mean of lagged attribute
ax.hlines(lag.mean(), moran.y.min(), moran.y.max(), alpha=0.5,
linestyle='--')
return fig, ax
def plot_moran_simulation(moran, ax=None, fitline_kwds=None, **kwargs):
"""
Global Moran's I simulated reference distribution.
Parameters
----------
moran : esda.moran.Moran instance
Values of Moran's I Global Autocorrelation Statistics
ax : Matplotlib Axes instance, optional
If given, the Moran plot will be created inside this axis.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the
vertical moran fitline. Default =None.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the figure,
passed to seaborn.kdeplot.
Returns
-------
fig : Matplotlib Figure instance
Simulated reference distribution figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran
>>> from pysal.viz.splot.esda import plot_moran_simulation
Load data and calculate weights
>>> link_to_data = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link_to_data)
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
Calculate Global Moran
>>> moran = Moran(y, w)
plot
>>> plot_moran_simulation(moran)
>>> plt.show()
customize plot
>>> plot_moran_simulation(moran, fitline_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
# to set default as an empty dictionary that is later filled with defaults
if fitline_kwds is None:
fitline_kwds = dict()
figsize = kwargs.pop('figsize', (7, 7))
# get fig and ax
fig, ax = _create_moran_fig_ax(ax, figsize)
# plot distribution
shade = kwargs.pop('shade', True)
color = kwargs.pop('color', splot_colors['moran_base'])
sbn.kdeplot(moran.sim, shade=shade, color=color, ax=ax, **kwargs)
# customize plot
fitline_kwds.setdefault('color', splot_colors['moran_fit'])
ax.vlines(moran.I, 0, 1, **fitline_kwds)
ax.vlines(moran.EI, 0, 1)
ax.set_title('Reference Distribution')
ax.set_xlabel('Moran I: ' + str(round(moran.I, 2)))
return fig, ax
def plot_moran(moran, zstandard=True, scatter_kwds=None,
fitline_kwds=None, **kwargs):
"""
Global Moran's I simulated reference distribution and scatterplot.
Parameters
----------
moran : esda.moran.Moran instance
Values of Moran's I Global Autocorrelation Statistics
zstandard : bool, optional
If True, Moran Scatterplot will show z-standardized attribute and
spatial lag values. Default =True.
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline
and vertical fitline. Default =None.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the figure,
        passed to seaborn.kdeplot.
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot and reference distribution figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran
>>> from pysal.viz.splot.esda import plot_moran
Load data and calculate weights
>>> link_to_data = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link_to_data)
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
Calculate Global Moran
>>> moran = Moran(y, w)
plot
>>> plot_moran(moran)
>>> plt.show()
customize plot
>>> plot_moran(moran, zstandard=False,
... fitline_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
figsize = kwargs.pop('figsize', (10, 4))
fig, axs = plt.subplots(1, 2, figsize=figsize,
subplot_kw={'aspect': 'equal'})
plot_moran_simulation(moran, ax=axs[0], fitline_kwds=fitline_kwds, **kwargs)
moran_scatterplot(moran, zstandard=zstandard, ax=axs[1],
scatter_kwds=scatter_kwds, fitline_kwds=fitline_kwds)
axs[0].set(aspect="auto")
axs[1].set(aspect="auto")
return fig, axs
def _moran_bv_scatterplot(moran_bv, ax=None, scatter_kwds=None, fitline_kwds=None):
"""
Bivariate Moran Scatterplot.
Parameters
----------
moran_bv : esda.moran.Moran_BV instance
Values of Bivariate Moran's I Autocorrelation Statistics
ax : Matplotlib Axes instance, optional
If given, the Moran plot will be created inside this axis.
Default =None.
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline.
Default =None.
Returns
-------
fig : Matplotlib Figure instance
Bivariate moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran_BV
>>> from pysal.viz.splot.esda import moran_scatterplot
Load data and calculate weights
>>> link_to_data = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link_to_data)
>>> x = gdf['Suicids'].values
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
Calculate Bivariate Moran
>>> moran_bv = Moran_BV(x, y, w)
plot
>>> moran_scatterplot(moran_bv)
>>> plt.show()
customize plot
>>> moran_scatterplot(moran_bv,
... fitline_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
# to set default as an empty dictionary that is later filled with defaults
if scatter_kwds is None:
scatter_kwds = dict()
if fitline_kwds is None:
fitline_kwds = dict()
# define customization
scatter_kwds.setdefault('alpha', 0.6)
scatter_kwds.setdefault('color', splot_colors['moran_base'])
scatter_kwds.setdefault('s', 40)
fitline_kwds.setdefault('alpha', 0.9)
fitline_kwds.setdefault('color', splot_colors['moran_fit'])
# get fig and ax
fig, ax = _create_moran_fig_ax(ax, figsize=(7,7))
# set labels
ax.set_xlabel('Attribute X')
ax.set_ylabel('Spatial Lag of Y')
ax.set_title('Bivariate Moran Scatterplot' +
' (' + str(round(moran_bv.I, 2)) + ')')
# plot and set standards
lag = lag_spatial(moran_bv.w, moran_bv.zy)
fit = OLS(moran_bv.zy[:, None], lag[:, None])
# plot
ax.scatter(moran_bv.zx, lag, **scatter_kwds)
ax.plot(lag, fit.predy, **fitline_kwds)
# v- and hlines
ax.axvline(0, alpha=0.5, color='k', linestyle='--')
ax.axhline(0, alpha=0.5, color='k', linestyle='--')
return fig, ax
def plot_moran_bv_simulation(moran_bv, ax=None, fitline_kwds=None, **kwargs):
"""
Bivariate Moran's I simulated reference distribution.
Parameters
----------
moran_bv : esda.moran.Moran_BV instance
Values of Bivariate Moran's I Autocorrelation Statistics
ax : Matplotlib Axes instance, optional
If given, the Moran plot will be created inside this axis.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the
vertical moran fitline. Default =None.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the figure,
        passed to seaborn.kdeplot.
Returns
-------
fig : Matplotlib Figure instance
Bivariate moran reference distribution figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran_BV
>>> from pysal.viz.splot.esda import plot_moran_bv_simulation
Load data and calculate weights
>>> link_to_data = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link_to_data)
>>> x = gdf['Suicids'].values
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
Calculate Bivariate Moran
>>> moran_bv = Moran_BV(x, y, w)
plot
>>> plot_moran_bv_simulation(moran_bv)
>>> plt.show()
customize plot
>>> plot_moran_bv_simulation(moran_bv,
... fitline_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
# to set default as an empty dictionary that is later filled with defaults
if fitline_kwds is None:
fitline_kwds = dict()
figsize = kwargs.pop('figsize', (7, 7))
# get fig and ax
fig, ax = _create_moran_fig_ax(ax, figsize)
# plot distribution
shade = kwargs.pop('shade', True)
color = kwargs.pop('color', splot_colors['moran_base'])
sbn.kdeplot(moran_bv.sim, shade=shade, color=color, ax=ax, **kwargs)
# customize plot
fitline_kwds.setdefault('color', splot_colors['moran_fit'])
ax.vlines(moran_bv.I, 0, 1, **fitline_kwds)
ax.vlines(moran_bv.EI_sim, 0, 1)
ax.set_title('Reference Distribution')
ax.set_xlabel('Bivariate Moran I: ' + str(round(moran_bv.I, 2)))
return fig, ax
def plot_moran_bv(moran_bv, scatter_kwds=None, fitline_kwds=None, **kwargs):
"""
Bivariate Moran's I simulated reference distribution and scatterplot.
Parameters
----------
moran_bv : esda.moran.Moran_BV instance
Values of Bivariate Moran's I Autocorrelation Statistics
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline
and vertical fitline. Default =None.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the figure,
        passed to seaborn.kdeplot.
Returns
-------
fig : Matplotlib Figure instance
Bivariate moran scatterplot and reference distribution figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran_BV
>>> from pysal.viz.splot.esda import plot_moran_bv
Load data and calculate weights
>>> link_to_data = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link_to_data)
>>> x = gdf['Suicids'].values
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
Calculate Bivariate Moran
>>> moran_bv = Moran_BV(x, y, w)
plot
>>> plot_moran_bv(moran_bv)
>>> plt.show()
customize plot
>>> plot_moran_bv(moran_bv, fitline_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
figsize = kwargs.pop('figsize', (10, 4))
fig, axs = plt.subplots(1, 2, figsize=figsize,
subplot_kw={'aspect': 'equal'})
plot_moran_bv_simulation(moran_bv, ax=axs[0], fitline_kwds=fitline_kwds,
**kwargs)
moran_scatterplot(moran_bv, ax=axs[1],scatter_kwds=scatter_kwds,
fitline_kwds=fitline_kwds)
axs[0].set(aspect="auto")
axs[1].set(aspect="auto")
return fig, axs
def _moran_loc_scatterplot(moran_loc, zstandard=True, p=None,
ax=None, scatter_kwds=None, fitline_kwds=None):
"""
Moran Scatterplot with option of coloring of Local Moran Statistics
Parameters
----------
moran_loc : esda.moran.Moran_Local instance
Values of Moran's I Local Autocorrelation Statistics
p : float, optional
If given, the p-value threshold for significance. Points will
be colored by significance. By default it will not be colored.
Default =None.
ax : Matplotlib Axes instance, optional
If given, the Moran plot will be created inside this axis.
Default =None.
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline.
Default =None.
Returns
-------
fig : Matplotlib Figure instance
Moran Local scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> import geopandas as gpd
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> from pysal.explore.esda.moran import Moran_Local
>>> from pysal.viz.splot.esda import moran_scatterplot
Load data and calculate Moran Local statistics
>>> link = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link)
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
>>> m = Moran_Local(y, w)
plot
>>> moran_scatterplot(m)
>>> plt.show()
customize plot
>>> moran_scatterplot(m, p=0.05,
... fitline_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
# to set default as an empty dictionary that is later filled with defaults
if scatter_kwds is None:
scatter_kwds = dict()
if fitline_kwds is None:
fitline_kwds = dict()
if p is not None:
if not isinstance(moran_loc, Moran_Local):
raise ValueError("`moran_loc` is not a\n " +
"esda.moran.Moran_Local instance")
if 'color' in scatter_kwds or 'c' in scatter_kwds or 'cmap' in scatter_kwds:
warnings.warn('To change the color use cmap with a colormap of 5,\n' +
' color defines the LISA category')
# colors
spots = moran_hot_cold_spots(moran_loc, p)
hmap = colors.ListedColormap(['#bababa', '#d7191c', '#abd9e9',
'#2c7bb6', '#fdae61'])
# define customization
scatter_kwds.setdefault('alpha', 0.6)
scatter_kwds.setdefault('s', 40)
fitline_kwds.setdefault('alpha', 0.9)
# get fig and ax
fig, ax = _create_moran_fig_ax(ax, figsize=(7,7))
# set labels
ax.set_xlabel('Attribute')
ax.set_ylabel('Spatial Lag')
ax.set_title('Moran Local Scatterplot')
# plot and set standards
if zstandard is True:
lag = lag_spatial(moran_loc.w, moran_loc.z)
fit = OLS(moran_loc.z[:, None], lag[:, None])
# v- and hlines
ax.axvline(0, alpha=0.5, color='k', linestyle='--')
ax.axhline(0, alpha=0.5, color='k', linestyle='--')
if p is not None:
fitline_kwds.setdefault('color', 'k')
scatter_kwds.setdefault('cmap', hmap)
scatter_kwds.setdefault('c', spots)
ax.plot(lag, fit.predy, **fitline_kwds)
            ax.scatter(moran_loc.z, lag, **scatter_kwds)
else:
scatter_kwds.setdefault('color', splot_colors['moran_base'])
fitline_kwds.setdefault('color', splot_colors['moran_fit'])
ax.plot(lag, fit.predy, **fitline_kwds)
            ax.scatter(moran_loc.z, lag, **scatter_kwds)
else:
lag = lag_spatial(moran_loc.w, moran_loc.y)
b, a = np.polyfit(moran_loc.y, lag, 1)
# dashed vert at mean of the attribute
ax.vlines(moran_loc.y.mean(), lag.min(), lag.max(), alpha=0.5,
linestyle='--')
# dashed horizontal at mean of lagged attribute
ax.hlines(lag.mean(), moran_loc.y.min(), moran_loc.y.max(), alpha=0.5,
linestyle='--')
if p is not None:
fitline_kwds.setdefault('color', 'k')
scatter_kwds.setdefault('cmap', hmap)
scatter_kwds.setdefault('c', spots)
ax.plot(moran_loc.y, a + b*moran_loc.y, **fitline_kwds)
ax.scatter(moran_loc.y, lag, **scatter_kwds)
else:
scatter_kwds.setdefault('c', splot_colors['moran_base'])
fitline_kwds.setdefault('color', splot_colors['moran_fit'])
ax.plot(moran_loc.y, a + b*moran_loc.y, **fitline_kwds)
ax.scatter(moran_loc.y, lag, **scatter_kwds)
return fig, ax
def lisa_cluster(moran_loc, gdf, p=0.05, ax=None,
legend=True, legend_kwds=None, **kwargs):
"""
Create a LISA Cluster map
Parameters
----------
moran_loc : esda.moran.Moran_Local or Moran_Local_BV instance
Values of Moran's Local Autocorrelation Statistic
gdf : geopandas dataframe instance
The Dataframe containing information to plot. Note that `gdf` will be
modified, so calling functions should use a copy of the user
provided `gdf`. (either using gdf.assign() or gdf.copy())
p : float, optional
The p-value threshold for significance. Points will
be colored by significance.
ax : matplotlib Axes instance, optional
Axes in which to plot the figure in multiple Axes layout.
Default = None
legend : boolean, optional
If True, legend for maps will be depicted. Default = True
legend_kwds : dict, optional
Dictionary to control legend formatting options. Example:
``legend_kwds={'loc': 'upper left', 'bbox_to_anchor': (0.92, 1.05)}``
Default = None
**kwargs : keyword arguments, optional
Keywords designing and passed to geopandas.GeoDataFrame.plot().
Returns
-------
    fig : matplotlib Figure instance
Figure of LISA cluster map
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran_Local
>>> from pysal.viz.splot.esda import lisa_cluster
Data preparation and statistical analysis
>>> link = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link)
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
>>> moran_loc = Moran_Local(y, w)
Plotting
>>> fig = lisa_cluster(moran_loc, gdf)
>>> plt.show()
"""
# retrieve colors5 and labels from mask_local_auto
_, colors5, _, labels = mask_local_auto(moran_loc, p=p)
# define ListedColormap
hmap = colors.ListedColormap(colors5)
if ax is None:
figsize = kwargs.pop('figsize', None)
fig, ax = plt.subplots(1, figsize=figsize)
else:
fig = ax.get_figure()
gdf.assign(cl=labels).plot(column='cl', categorical=True,
k=2, cmap=hmap, linewidth=0.1, ax=ax,
edgecolor='white', legend=legend,
legend_kwds=legend_kwds, **kwargs)
ax.set_axis_off()
ax.set_aspect('equal')
return fig, ax
def plot_local_autocorrelation(moran_loc, gdf, attribute, p=0.05,
region_column=None, mask=None,
mask_color='#636363', quadrant=None,
legend=True, scheme='Quantiles',
cmap='YlGnBu', figsize=(15, 4),
scatter_kwds=None, fitline_kwds=None):
'''
    Produce three-plot visualisation of Moran Scatterplot, LISA cluster
and Choropleth maps, with Local Moran region and quadrant masking
Parameters
----------
moran_loc : esda.moran.Moran_Local or Moran_Local_BV instance
Values of Moran's Local Autocorrelation Statistic
gdf : geopandas dataframe
The Dataframe containing information to plot the two maps.
attribute : str
Column name of attribute which should be depicted in Choropleth map.
p : float, optional
The p-value threshold for significance. Points and polygons will
be colored by significance. Default = 0.05.
region_column: string, optional
Column name containing mask region of interest. Default = None
mask: str, optional
Identifier or name of the region to highlight. Default = None
mask_color: str, optional
Color of mask. Default = '#636363'
quadrant : int, optional
Quadrant 1-4 in scatterplot masking values in LISA cluster and
Choropleth maps. Default = None
figsize: tuple, optional
W, h of figure. Default = (15,4)
legend: boolean, optional
If True, legend for maps will be depicted. Default = True
scheme: str, optional
Name of PySAL classifier to be used. Default = 'Quantiles'
cmap: str, optional
Name of matplotlib colormap used for plotting the Choropleth.
Default = 'YlGnBu'
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline
in the scatterplot. Default =None.
Returns
-------
fig : Matplotlib figure instance
Moran Scatterplot, LISA cluster map and Choropleth.
axs : list of Matplotlib axes
        List of Matplotlib Axes plotted.
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran_Local
>>> from pysal.viz.splot.esda import plot_local_autocorrelation
Data preparation and analysis
>>> link = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link)
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
>>> moran_loc = Moran_Local(y, w)
Plotting with quadrant mask and region mask
>>> fig = plot_local_autocorrelation(moran_loc, gdf, 'Donatns', p=0.05,
... region_column='Dprtmnt',
... mask=['Ain'], quadrant=1)
>>> plt.show()
'''
fig, axs = plt.subplots(1, 3, figsize=figsize,
subplot_kw={'aspect': 'equal'})
# Moran Scatterplot
moran_scatterplot(moran_loc, p=p, ax=axs[0],
scatter_kwds=scatter_kwds, fitline_kwds=fitline_kwds)
axs[0].set_aspect('auto')
# Lisa cluster map
# TODO: Fix legend_kwds: display boxes instead of points
lisa_cluster(moran_loc, gdf, p=p, ax=axs[1], legend=legend,
legend_kwds={'loc': 'upper left',
'bbox_to_anchor': (0.92, 1.05)})
axs[1].set_aspect('equal')
# Choropleth for attribute
gdf.plot(column=attribute, scheme=scheme, cmap=cmap,
legend=legend, legend_kwds={'loc': 'upper left',
'bbox_to_anchor': (0.92, 1.05)},
ax=axs[2], alpha=1)
axs[2].set_axis_off()
axs[2].set_aspect('equal')
# MASKING QUADRANT VALUES
if quadrant is not None:
# Quadrant masking in Scatterplot
mask_angles = {1: 0, 2: 90, 3: 180, 4: 270} # rectangle angles
# We don't want to change the axis data limits, so use the current ones
xmin, xmax = axs[0].get_xlim()
ymin, ymax = axs[0].get_ylim()
        # We are rotating, so we start from 0 degrees and
        # figure out the right dimensions for the rectangles for other angles
mask_width = {1: abs(xmax),
2: abs(ymax),
3: abs(xmin),
4: abs(ymin)}
mask_height = {1: abs(ymax),
2: abs(xmin),
3: abs(ymin),
4: abs(xmax)}
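        # For example, for quadrant 2 the rectangle is rotated by 90 degrees, so its
        # "width" (abs(ymax)) extends along the positive y axis and its "height"
        # (abs(xmin)) along the negative x axis; zorder=-1 keeps it behind the points.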
axs[0].add_patch(patches.Rectangle((0, 0), width=mask_width[quadrant],
height=mask_height[quadrant],
angle=mask_angles[quadrant],
color='#E5E5E5', zorder=-1, alpha=0.8))
# quadrant selection in maps
non_quadrant = ~(moran_loc.q == quadrant)
mask_quadrant = gdf[non_quadrant]
df_quadrant = gdf.iloc[~non_quadrant]
union2 = df_quadrant.unary_union.boundary
# LISA Cluster mask and cluster boundary
        with warnings.catch_warnings():  # temporarily suppress geopandas warning
warnings.filterwarnings('ignore', category=UserWarning)
mask_quadrant.plot(column=attribute, scheme=scheme, color='white',
ax=axs[1], alpha=0.7, zorder=1)
gpd.GeoSeries([union2]).plot(linewidth=1, ax=axs[1], color='#E5E5E5')
# CHOROPLETH MASK
        with warnings.catch_warnings():  # temporarily suppress geopandas warning
warnings.filterwarnings('ignore', category=UserWarning)
mask_quadrant.plot(column=attribute, scheme=scheme, color='white',
ax=axs[2], alpha=0.7, zorder=1)
gpd.GeoSeries([union2]).plot(linewidth=1, ax=axs[2], color='#E5E5E5')
# REGION MASKING
if region_column is not None:
# masking inside axs[0] or Moran Scatterplot
ix = gdf[region_column].isin(mask)
df_mask = gdf[ix]
x_mask = moran_loc.z[ix]
y_mask = lag_spatial(moran_loc.w, moran_loc.z)[ix]
axs[0].plot(x_mask, y_mask, color=mask_color, marker='o',
markersize=14, alpha=.8, linestyle="None", zorder=-1)
# masking inside axs[1] or Lisa cluster map
union = df_mask.unary_union.boundary
gpd.GeoSeries([union]).plot(linewidth=2, ax=axs[1], color=mask_color)
        # masking inside axs[2] or Choropleth
gpd.GeoSeries([union]).plot(linewidth=2, ax=axs[2], color=mask_color)
return fig, axs
def _moran_loc_bv_scatterplot(moran_loc_bv, p=None,
ax=None, scatter_kwds=None, fitline_kwds=None):
"""
    Moran Bivariate Scatterplot with the option of coloring points by Local Moran statistics
Parameters
----------
    moran_loc_bv : esda.moran.Moran_Local_BV instance
Values of Moran's I Local Autocorrelation Statistics
p : float, optional
If given, the p-value threshold for significance. Points will
be colored by significance. By default it will not be colored.
Default =None.
ax : Matplotlib Axes instance, optional
If given, the Moran plot will be created inside this axis.
Default =None.
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline.
Default =None.
Returns
-------
fig : Matplotlib Figure instance
Bivariate Moran Local scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> import geopandas as gpd
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> from pysal.explore.esda.moran import Moran_Local_BV
>>> from pysal.viz.splot.esda import moran_scatterplot
Load data and calculate Moran Local statistics
>>> link = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link)
>>> x = gdf['Suicids'].values
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
>>> m = Moran_Local_BV(x, y, w)
Plot
>>> moran_scatterplot(m)
>>> plt.show()
Customize plot
>>> moran_scatterplot(m, p=0.05,
    ...                       fitline_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
# to set default as an empty dictionary that is later filled with defaults
if scatter_kwds is None:
scatter_kwds = dict()
if fitline_kwds is None:
fitline_kwds = dict()
if p is not None:
if not isinstance(moran_loc_bv, Moran_Local_BV):
raise ValueError("`moran_loc_bv` is not a\n" +
"esda.moran.Moran_Local_BV instance")
if 'color' in scatter_kwds or 'cmap' in scatter_kwds:
warnings.warn("To change the color use cmap with a colormap of 5,\n" +
"c defines the LISA category, color will interfere with c")
# colors
spots_bv = moran_hot_cold_spots(moran_loc_bv, p)
hmap = colors.ListedColormap(['#bababa', '#d7191c', '#abd9e9',
'#2c7bb6', '#fdae61'])
# define customization
scatter_kwds.setdefault('alpha', 0.6)
scatter_kwds.setdefault('s', 40)
fitline_kwds.setdefault('alpha', 0.9)
# get fig and ax
fig, ax = _create_moran_fig_ax(ax, figsize=(7,7))
# set labels
ax.set_xlabel('Attribute')
ax.set_ylabel('Spatial Lag')
ax.set_title('Moran BV Local Scatterplot')
# plot and set standards
lag = lag_spatial(moran_loc_bv.w, moran_loc_bv.zy)
fit = OLS(moran_loc_bv.zy[:, None], lag[:, None])
# v- and hlines
ax.axvline(0, alpha=0.5, color='k', linestyle='--')
ax.axhline(0, alpha=0.5, color='k', linestyle='--')
if p is not None:
fitline_kwds.setdefault('color', 'k')
scatter_kwds.setdefault('cmap', hmap)
scatter_kwds.setdefault('c', spots_bv)
ax.plot(lag, fit.predy, **fitline_kwds)
ax.scatter(moran_loc_bv.zx, fit.predy,
**scatter_kwds)
else:
scatter_kwds.setdefault('color', splot_colors['moran_base'])
fitline_kwds.setdefault('color', splot_colors['moran_fit'])
ax.plot(lag, fit.predy, **fitline_kwds)
ax.scatter(moran_loc_bv.zy, fit.predy, **scatter_kwds)
return fig, ax
def moran_facet(moran_matrix, figsize=(16,12),
scatter_bv_kwds=None, fitline_bv_kwds=None,
scatter_glob_kwds=dict(color='#737373'), fitline_glob_kwds=None):
"""
Moran Facet visualization.
Includes BV Morans and Global Morans on the diagonal.
Parameters
----------
moran_matrix : esda.moran.Moran_BV_matrix instance
Dictionary of Moran_BV objects
figsize : tuple, optional
W, h of figure. Default =(16,12)
scatter_bv_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points of
off-diagonal Moran_BV plots.
Default =None.
fitline_bv_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline of
off-diagonal Moran_BV plots.
Default =None.
scatter_glob_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points of
diagonal Moran plots.
Default =None.
fitline_glob_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline of
diagonal Moran plots.
Default =None.
Returns
-------
fig : Matplotlib Figure instance
Bivariate Moran Local scatterplot figure
axarr : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> import pysal.lib as lp
>>> import numpy as np
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran_BV_matrix
>>> from pysal.viz.splot.esda import moran_facet
Load data and calculate Moran Local statistics
>>> f = gpd.read_file(lp.examples.get_path("sids2.dbf"))
>>> varnames = ['SIDR74', 'SIDR79', 'NWR74', 'NWR79']
>>> vars = [np.array(f[var]) for var in varnames]
>>> w = lp.io.open(lp.examples.get_path("sids2.gal")).read()
>>> moran_matrix = Moran_BV_matrix(vars, w, varnames = varnames)
Plot
>>> fig, axarr = moran_facet(moran_matrix)
>>> plt.show()
Customize plot
>>> fig, axarr = moran_facet(moran_matrix,
... fitline_bv_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
nrows = int(np.sqrt(len(moran_matrix))) + 1
ncols = nrows
fig, axarr = plt.subplots(nrows, ncols, figsize=figsize,
sharey=True, sharex=True)
fig.suptitle('Moran Facet')
for row in range(nrows):
for col in range(ncols):
if row == col:
global_m = Moran(moran_matrix[row, (row+1) % 4].zy,
moran_matrix[row, (row+1) % 4].w)
_moran_global_scatterplot(global_m, ax= axarr[row,col],
scatter_kwds=scatter_glob_kwds,
fitline_kwds=fitline_glob_kwds)
axarr[row, col].set_facecolor('#d9d9d9')
else:
_moran_bv_scatterplot(moran_matrix[row,col],
ax=axarr[row,col],
scatter_kwds=scatter_bv_kwds,
fitline_kwds=fitline_bv_kwds)
axarr[row, col].spines['bottom'].set_visible(False)
axarr[row, col].spines['left'].set_visible(False)
if row == nrows - 1:
axarr[row, col].set_xlabel(str(
moran_matrix[(col+1)%4, col].varnames['x']).format(col))
axarr[row, col].spines['bottom'].set_visible(True)
else:
axarr[row, col].set_xlabel('')
if col == 0:
axarr[row, col].set_ylabel(('Spatial Lag of '+str(
moran_matrix[row, (row+1)%4].varnames['y'])).format(row))
axarr[row, col].spines['left'].set_visible(True)
else:
axarr[row, col].set_ylabel('')
axarr[row, col].set_title('')
plt.tight_layout()
return fig, axarr
|
bsd-3-clause
| 2,072,672,532,014,773,200
| 33.943618
| 85
| 0.595164
| false
| 3.581588
| false
| false
| false
|
rrrrrr8/vnpy
|
vnpy/api/lbank/test.py
|
1
|
1536
|
# encoding: UTF-8
from six.moves import input
from time import time
from vnlbank import LbankRestApi, LbankWebsocketApi
API_KEY = '132a36ce-ad1c-409a-b48c-09b7877ae49b'
SECRET_KEY = '319320BF875297E7F4050E1195B880E8'
#----------------------------------------------------------------------
def restTest():
""""""
    # Create the API object and initialise it
api = LbankRestApi()
api.init(API_KEY, SECRET_KEY)
api.start(1)
    # Test requests
#api.addReq('GET', '/currencyPairs.do', {}, api.onData)
#api.addReq('GET', '/accuracy.do', {}, api.onData)
#api.addReq('GET', '/ticker.do', {'symbol': 'eth_btc'}, api.onData)
#api.addReq('GET', '/depth.do', {'symbol': 'eth_btc', 'size': '5'}, api.onData)
#api.addReq('post', '/user_info.do', {}, api.onData)
req = {
'symbol': 'sc_btc',
'current_page': '1',
'page_length': '50'
}
api.addReq('POST', '/orders_info_no_deal.do', req, api.onData)
    # Block, waiting for user input
input()
#----------------------------------------------------------------------
def wsTest():
""""""
ws = LbankWebsocketApi()
ws.start()
channels = [
'lh_sub_spot_eth_btc_depth_20',
'lh_sub_spot_eth_btc_trades',
'lh_sub_spot_eth_btc_ticker'
]
for channel in channels:
req = {
'event': 'addChannel',
'channel': channel
}
ws.sendReq(req)
    # Block, waiting for user input
input()
if __name__ == '__main__':
restTest()
#wsTest()
|
mit
| 5,003,571,435,686,934,000
| 21.522388
| 83
| 0.479443
| false
| 3.016
| false
| false
| false
|
crossroadchurch/paul
|
openlp/plugins/songs/forms/editsongform.py
|
1
|
60260
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`~openlp.plugins.songs.forms.editsongform` module contains the form
used to edit songs.
"""
import logging
import re
import os
import shutil
from PyQt4 import QtCore, QtGui
from openlp.core.common import Registry, RegistryProperties, AppLocation, UiStrings, check_directory_exists, translate
from openlp.core.lib import FileDialog, PluginStatus, MediaType, create_separated_list
from openlp.core.lib.ui import set_case_insensitive_completer, critical_error_message_box, find_and_set_in_combo_box
from openlp.plugins.songs.lib import VerseType, clean_song
from openlp.plugins.songs.lib.db import Book, Song, Author, AuthorType, Topic, MediaFile
from openlp.plugins.songs.lib.ui import SongStrings
from openlp.plugins.songs.lib.openlyricsxml import SongXML
from openlp.plugins.songs.forms.editsongdialog import Ui_EditSongDialog
from openlp.plugins.songs.forms.editverseform import EditVerseForm
from openlp.plugins.songs.forms.editversechordsform import EditVerseChordsForm
from openlp.plugins.songs.forms.mediafilesform import MediaFilesForm
from openlp.plugins.songs.lib.chords import Chords
log = logging.getLogger(__name__)
class EditSongForm(QtGui.QDialog, Ui_EditSongDialog, RegistryProperties):
"""
Class to manage the editing of a song
"""
log.info('%s EditSongForm loaded', __name__)
def __init__(self, media_item, parent, manager):
"""
Constructor
"""
super(EditSongForm, self).__init__(parent)
self.media_item = media_item
self.song = None
# can this be automated?
self.width = 400
self.setupUi(self)
# Connecting signals and slots
self.song_key_edit.currentIndexChanged.connect(self.on_key_or_transpose_change)
self.transpose_edit.valueChanged.connect(self.on_key_or_transpose_change)
self.author_add_button.clicked.connect(self.on_author_add_button_clicked)
self.author_edit_button.clicked.connect(self.on_author_edit_button_clicked)
self.author_remove_button.clicked.connect(self.on_author_remove_button_clicked)
self.authors_list_view.itemClicked.connect(self.on_authors_list_view_clicked)
self.topic_add_button.clicked.connect(self.on_topic_add_button_clicked)
self.topic_remove_button.clicked.connect(self.on_topic_remove_button_clicked)
self.topics_list_view.itemClicked.connect(self.on_topic_list_view_clicked)
self.copyright_insert_button.clicked.connect(self.on_copyright_insert_button_triggered)
self.verse_add_button.clicked.connect(self.on_verse_add_button_clicked)
self.verse_list_widget.doubleClicked.connect(self.on_verse_edit_all_chords_button_clicked)
self.verse_edit_chords_button.clicked.connect(self.on_verse_edit_chords_button_clicked)
self.verse_edit_all_chords_button.clicked.connect(self.on_verse_edit_all_chords_button_clicked)
self.verse_delete_button.clicked.connect(self.on_verse_delete_button_clicked)
self.verse_list_widget.itemClicked.connect(self.on_verse_list_view_clicked)
self.verse_order_edit.textChanged.connect(self.on_verse_order_text_changed)
self.theme_add_button.clicked.connect(self.theme_manager.on_add_theme)
self.maintenance_button.clicked.connect(self.on_maintenance_button_clicked)
self.from_file_button.clicked.connect(self.on_audio_add_from_file_button_clicked)
self.from_media_button.clicked.connect(self.on_audio_add_from_media_button_clicked)
self.audio_remove_button.clicked.connect(self.on_audio_remove_button_clicked)
self.audio_remove_all_button.clicked.connect(self.on_audio_remove_all_button_clicked)
Registry().register_function('theme_update_list', self.load_themes)
self.preview_button = QtGui.QPushButton()
self.preview_button.setObjectName('preview_button')
self.preview_button.setText(UiStrings().SaveAndPreview)
self.button_box.addButton(self.preview_button, QtGui.QDialogButtonBox.ActionRole)
self.button_box.clicked.connect(self.on_preview)
# Create other objects and forms
self.manager = manager
self.verse_form = EditVerseForm(self)
self.verse_chords_form = EditVerseChordsForm(self)
self.media_form = MediaFilesForm(self)
self.initialise()
self.authors_list_view.setSortingEnabled(False)
self.authors_list_view.setAlternatingRowColors(True)
self.topics_list_view.setSortingEnabled(False)
self.topics_list_view.setAlternatingRowColors(True)
self.audio_list_widget.setAlternatingRowColors(True)
self.find_verse_split = re.compile('---\[\]---\n', re.UNICODE)
self.whitespace = re.compile(r'\W+', re.UNICODE)
self.find_tags = re.compile(u'\{/?\w+\}', re.UNICODE)
def _load_objects(self, cls, combo, cache):
"""
Generically load a set of objects into a cache and a combobox.
"""
objects = self.manager.get_all_objects(cls, order_by_ref=cls.name)
combo.clear()
combo.addItem('')
for obj in objects:
row = combo.count()
combo.addItem(obj.name)
cache.append(obj.name)
combo.setItemData(row, obj.id)
set_case_insensitive_completer(cache, combo)
def _add_author_to_list(self, author, author_type):
"""
Add an author to the author list.
"""
author_item = QtGui.QListWidgetItem(author.get_display_name(author_type))
author_item.setData(QtCore.Qt.UserRole, (author.id, author_type))
self.authors_list_view.addItem(author_item)
def _extract_verse_order(self, verse_order):
"""
Split out the verse order
:param verse_order: The starting verse order
:return: revised order
"""
order = []
order_names = str(verse_order).split()
for item in order_names:
if len(item) == 1:
verse_index = VerseType.from_translated_tag(item, None)
if verse_index is not None:
order.append(VerseType.tags[verse_index] + '1')
else:
# it matches no verses anyway
order.append('')
else:
verse_index = VerseType.from_translated_tag(item[0], None)
if verse_index is None:
# it matches no verses anyway
order.append('')
else:
verse_tag = VerseType.tags[verse_index]
verse_num = item[1:].lower()
order.append(verse_tag + verse_num)
return order
def _validate_verse_list(self, verse_order, verse_count):
"""
Check the verse order list has valid verses
:param verse_order: Verse order
:param verse_count: number of verses
        :return: True if all verses in the order are valid, otherwise False
"""
verses = []
invalid_verses = []
verse_names = []
order_names = str(verse_order).split()
order = self._extract_verse_order(verse_order)
for index in range(verse_count):
verse = self.verse_list_widget.item(index, 0)
verse = verse.data(QtCore.Qt.UserRole)
if verse not in verse_names:
verses.append(verse)
verse_names.append('%s%s' % (VerseType.translated_tag(verse[0]), verse[1:]))
for count, item in enumerate(order):
if item not in verses:
invalid_verses.append(order_names[count])
if invalid_verses:
valid = create_separated_list(verse_names)
if len(invalid_verses) > 1:
                msg = translate('SongsPlugin.EditSongForm', 'There are no verses corresponding to "%(invalid)s". '
                                'Valid entries are %(valid)s.\nPlease enter the verses separated by spaces.') % \
                    {'invalid': ', '.join(invalid_verses), 'valid': valid}
            else:
                msg = translate('SongsPlugin.EditSongForm', 'There is no verse corresponding to "%(invalid)s". '
                                'Valid entries are %(valid)s.\nPlease enter the verses separated by spaces.') % \
                    {'invalid': invalid_verses[0], 'valid': valid}
critical_error_message_box(title=translate('SongsPlugin.EditSongForm', 'Invalid Verse Order'),
message=msg)
return len(invalid_verses) == 0
def _validate_song(self):
"""
Check the validity of the song.
"""
# This checks data in the form *not* self.song. self.song is still
# None at this point.
log.debug('Validate Song')
# Lets be nice and assume the data is correct.
if not self.title_edit.text():
self.song_tab_widget.setCurrentIndex(0)
self.title_edit.setFocus()
critical_error_message_box(
message=translate('SongsPlugin.EditSongForm', 'You need to type in a song title.'))
return False
if self.verse_list_widget.rowCount() == 0:
self.song_tab_widget.setCurrentIndex(0)
self.verse_list_widget.setFocus()
critical_error_message_box(
message=translate('SongsPlugin.EditSongForm', 'You need to type in at least one verse.'))
return False
if(''.join(self.chords_lyrics_list).find('@') != -1) and (self.song_key_edit.currentIndex() == -1):
# Song has chords but no key
            critical_error_message_box(
                message=translate('SongsPlugin.EditSongForm', 'You need to choose a key for the song.'))
return False
if self.authors_list_view.count() == 0:
self.song_tab_widget.setCurrentIndex(1)
self.authors_list_view.setFocus()
critical_error_message_box(message=translate('SongsPlugin.EditSongForm',
'You need to have an author for this song.'))
return False
if self.verse_order_edit.text():
result = self._validate_verse_list(self.verse_order_edit.text(), self.verse_list_widget.rowCount())
if not result:
return False
text = self.song_book_combo_box.currentText()
if self.song_book_combo_box.findText(text, QtCore.Qt.MatchExactly) < 0:
if QtGui.QMessageBox.question(
self, translate('SongsPlugin.EditSongForm', 'Add Book'),
translate('SongsPlugin.EditSongForm', 'This song book does not exist, do you want to add it?'),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.Yes) == QtGui.QMessageBox.Yes:
book = Book.populate(name=text, publisher='')
self.manager.save_object(book)
else:
return False
# Validate tags (lp#1199639)
misplaced_tags = []
verse_tags = []
for i in range(self.verse_list_widget.rowCount()):
item = self.verse_list_widget.item(i, 0)
tags = self.find_tags.findall(item.text())
field = item.data(QtCore.Qt.UserRole)
verse_tags.append(field)
if not self._validate_tags(tags):
misplaced_tags.append('%s %s' % (VerseType.translated_name(field[0]), field[1:]))
if misplaced_tags:
critical_error_message_box(
message=translate('SongsPlugin.EditSongForm',
'There are misplaced formatting tags in the following verses:\n\n%s\n\n'
'Please correct these tags before continuing.' % ', '.join(misplaced_tags)))
return False
for tag in verse_tags:
if verse_tags.count(tag) > 26:
# lp#1310523: OpenLyrics allows only a-z variants of one verse:
# http://openlyrics.info/dataformat.html#verse-name
critical_error_message_box(message=translate(
'SongsPlugin.EditSongForm', 'You have %(count)s verses named %(name)s %(number)s. '
'You can have at most 26 verses with the same name' %
{'count': verse_tags.count(tag),
'name': VerseType.translated_name(tag[0]),
'number': tag[1:]}))
return False
return True
def _validate_tags(self, tags, first_time=True):
"""
Validates a list of tags
        Deletes the first matching tag pair that appears side by side in the list
        and calls itself recursively with the shortened tag list.
If there is any misplaced tag in the list, either the length of the tag list is not even,
or the function won't find any tag pairs side by side.
If there is no misplaced tag, the length of the list will be zero on any recursive run.
:param tags: A list of tags
:return: True if the function can't find any mismatched tags. Else False.
"""
if first_time:
fixed_tags = []
for i in range(len(tags)):
if tags[i] != '{br}':
fixed_tags.append(tags[i])
tags = fixed_tags
if len(tags) == 0:
return True
if len(tags) % 2 != 0:
return False
for i in range(len(tags)-1):
if tags[i+1] == "{/" + tags[i][1:]:
del tags[i:i+2]
return self._validate_tags(tags, False)
return False
def _process_lyrics(self):
"""
Process the lyric data entered by the user into the OpenLP XML format.
"""
# This method must only be run after the self.song = Song() assignment.
log.debug('_processLyrics')
sxml = None
try:
sxml = SongXML()
multiple = []
for i in range(self.verse_list_widget.rowCount()):
item = self.verse_list_widget.item(i, 0)
verse_id = item.data(QtCore.Qt.UserRole)
verse_tag = verse_id[0]
verse_num = verse_id[1:]
sxml.add_verse_to_lyrics(verse_tag, verse_num, item.text())
if verse_num > '1' and verse_tag not in multiple:
multiple.append(verse_tag)
self.song.lyrics = str(sxml.extract_xml(), 'utf-8')
for verse in multiple:
self.song.verse_order = re.sub('([' + verse.upper() + verse.lower() + '])(\W|$)',
r'\g<1>1\2', self.song.verse_order)
except:
log.exception('Problem processing song Lyrics \n%s', sxml.dump_xml())
raise
def _process_chords(self):
"""
Process the chords data entered by the user into the OpenLP XML format.
"""
# This method must only be run after the self.song = Song() assignment.
log.debug('_processChords')
sxml = None
try:
sxml = SongXML()
for row in self.chords_lyrics_list:
for match in row.split('---['):
for count, parts in enumerate(match.split(']---\n')):
if count == 0:
# Processing verse tag
if len(parts) == 0:
continue
# handling carefully user inputted versetags
separator = parts.find(':')
if separator >= 0:
verse_name = parts[0:separator].strip()
verse_num = parts[separator + 1:].strip()
else:
verse_name = parts
verse_num = '1'
verse_index = VerseType.from_loose_input(verse_name)
verse_tag = VerseType.tags[verse_index]
# Later we need to handle v1a as well.
regex = re.compile(r'\D*(\d+)\D*')
match = regex.match(verse_num)
if match:
verse_num = match.group(1)
else:
verse_num = '1'
verse_def = '%s%s' % (verse_tag, verse_num)
else:
# Processing lyrics
if parts.endswith('\n'):
parts = parts.rstrip('\n')
previous_line = '¬¬DONE¬¬'
section_text = ''
for line in parts.split('\n'):
if previous_line == '¬¬DONE¬¬':
if line.rstrip().endswith('@'):
previous_line = line
elif line.startswith('['):
# Break line
section_text += line + '\n'
else:
# Lyrics line
section_text += line.replace("#", "") + '\n'
else:
# Previous line was chords...
if line.rstrip().endswith('@'):
# Two successive lines of chords.
section_text += Chords.parseLinesToXml(previous_line.replace('@', ''), '', self.song.song_key) + '\n'
previous_line = line
elif line.startswith('['):
# Break line following chords
section_text += Chords.parseLinesToXml(previous_line.replace('@', ''), '', self.song.song_key) + '\n'
section_text += line + '\n'
previous_line = '¬¬DONE¬¬'
elif line.replace(" ", "") == '':
# Spacer line following Chords
section_text += Chords.parseLinesToXml(previous_line.replace('@', ''), '', self.song.song_key) + '\n'
section_text += '\n'
previous_line = '¬¬DONE¬¬'
else:
# These are lyrics corresponding to previous chords
section_text += Chords.parseLinesToXml(previous_line.replace('@', ''), line, self.song.song_key) + '\n'
previous_line = '¬¬DONE¬¬'
if not previous_line == '¬¬DONE¬¬':
# Process final line of chords stored in previous_line; no corresponding lyrics
section_text += Chords.parseLinesToXml(previous_line.replace('@', ''), '', self.song.song_key)
if section_text.endswith('\n'):
section_text = section_text.rstrip('\n')
sxml.add_verse_to_lyrics(verse_tag, verse_num, section_text)
self.song.chords = str(sxml.extract_xml(), 'utf-8')
except:
log.exception('Problem processing song chords \n%s', sxml.dump_xml())
raise
def keyPressEvent(self, event):
"""
Re-implement the keyPressEvent to react on Return/Enter keys. When some combo boxes have focus we do not want
dialog's default action be triggered but instead our own.
:param event: A QtGui.QKeyEvent event.
"""
if event.key() in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
if self.authors_combo_box.hasFocus() and self.authors_combo_box.currentText():
self.on_author_add_button_clicked()
return
if self.topics_combo_box.hasFocus() and self.topics_combo_box.currentText():
self.on_topic_add_button_clicked()
return
QtGui.QDialog.keyPressEvent(self, event)
def initialise(self):
"""
Set up the form for when it is displayed.
"""
self.verse_edit_chords_button.setEnabled(False)
self.verse_delete_button.setEnabled(False)
self.author_edit_button.setEnabled(False)
self.author_remove_button.setEnabled(False)
self.topic_remove_button.setEnabled(False)
def load_authors(self):
"""
Load the authors from the database into the combobox.
"""
authors = self.manager.get_all_objects(Author, order_by_ref=Author.display_name)
self.authors_combo_box.clear()
self.authors_combo_box.addItem('')
self.authors = []
for author in authors:
row = self.authors_combo_box.count()
self.authors_combo_box.addItem(author.display_name)
self.authors_combo_box.setItemData(row, author.id)
self.authors.append(author.display_name)
set_case_insensitive_completer(self.authors, self.authors_combo_box)
# Types
self.author_types_combo_box.clear()
# Don't iterate over the dictionary to give them this specific order
for author_type in AuthorType.SortedTypes:
self.author_types_combo_box.addItem(AuthorType.Types[author_type], author_type)
def load_topics(self):
"""
Load the topics into the combobox.
"""
self.topics = []
self._load_objects(Topic, self.topics_combo_box, self.topics)
def load_books(self):
"""
Load the song books into the combobox
"""
self.books = []
self._load_objects(Book, self.song_book_combo_box, self.books)
def load_themes(self, theme_list):
"""
Load the themes into a combobox.
"""
self.theme_combo_box.clear()
self.theme_combo_box.addItem('')
self.themes = theme_list
self.theme_combo_box.addItems(theme_list)
set_case_insensitive_completer(self.themes, self.theme_combo_box)
def load_media_files(self):
"""
Load the media files into a combobox.
"""
self.from_media_button.setVisible(False)
for plugin in self.plugin_manager.plugins:
if plugin.name == 'media' and plugin.status == PluginStatus.Active:
self.from_media_button.setVisible(True)
self.media_form.populate_files(plugin.media_item.get_list(MediaType.Audio))
break
def new_song(self):
"""
Blank the edit form out in preparation for a new song.
"""
log.debug('New Song')
self.song = None
self.initialise()
self.song_tab_widget.setCurrentIndex(0)
self.title_edit.clear()
self.alternative_edit.clear()
self.copyright_edit.clear()
self.verse_order_edit.clear()
self.song_key_edit.setCurrentIndex(-1)
self.transpose_edit.setValue(0)
self.comments_edit.clear()
self.ccli_number_edit.clear()
self.verse_list_widget.clear()
self.verse_list_widget.setRowCount(0)
self.authors_list_view.clear()
self.topics_list_view.clear()
self.audio_list_widget.clear()
self.title_edit.setFocus()
self.song_book_number_edit.clear()
self.load_authors()
self.load_topics()
self.load_books()
self.load_media_files()
self.theme_combo_box.setEditText('')
self.theme_combo_box.setCurrentIndex(0)
        # it's a new song, so previewing is not possible
self.preview_button.setVisible(False)
self.chords_lyrics_list = []
def load_song(self, song_id, preview=False):
"""
Loads a song.
:param song_id: The song id (int).
:param preview: Should be ``True`` if the song is also previewed (boolean).
"""
log.debug('Load Song')
self.initialise()
self.song_tab_widget.setCurrentIndex(0)
self.load_authors()
self.load_topics()
self.load_books()
self.load_media_files()
self.song = self.manager.get_object(Song, song_id)
self.title_edit.setText(self.song.title)
self.alternative_edit.setText(
self.song.alternate_title if self.song.alternate_title else '')
self.song_key_edit.setCurrentIndex(
self.song_key_edit.findText(self.song.song_key) if self.song.song_key else -1)
self.transpose_edit.setValue(
self.song.transpose_by if self.song.transpose_by else 0)
if self.song.song_book_id != 0:
book_name = self.manager.get_object(Book, self.song.song_book_id)
find_and_set_in_combo_box(self.song_book_combo_box, str(book_name.name))
else:
self.song_book_combo_box.setEditText('')
self.song_book_combo_box.setCurrentIndex(0)
if self.song.theme_name:
find_and_set_in_combo_box(self.theme_combo_box, str(self.song.theme_name))
else:
# Clear the theme combo box in case it was previously set (bug #1212801)
self.theme_combo_box.setEditText('')
self.theme_combo_box.setCurrentIndex(0)
self.copyright_edit.setText(self.song.copyright if self.song.copyright else '')
self.comments_edit.setPlainText(self.song.comments if self.song.comments else '')
self.ccli_number_edit.setText(self.song.ccli_number if self.song.ccli_number else '')
self.song_book_number_edit.setText(self.song.song_number if self.song.song_number else '')
# lazy xml migration for now
self.verse_list_widget.clear()
self.verse_list_widget.setRowCount(0)
verse_tags_translated = False
if self.song.lyrics.startswith('<?xml version='):
song_xml = SongXML()
verse_list = song_xml.get_verses(self.song.lyrics)
for count, verse in enumerate(verse_list):
self.verse_list_widget.setRowCount(self.verse_list_widget.rowCount() + 1)
# This silently migrates from localized verse type markup.
# If we trusted the database, this would be unnecessary.
verse_tag = verse[0]['type']
index = None
if len(verse_tag) > 1:
index = VerseType.from_translated_string(verse_tag)
if index is None:
index = VerseType.from_string(verse_tag, None)
else:
verse_tags_translated = True
if index is None:
index = VerseType.from_tag(verse_tag)
verse[0]['type'] = VerseType.tags[index]
if verse[0]['label'] == '':
verse[0]['label'] = '1'
verse_def = '%s%s' % (verse[0]['type'], verse[0]['label'])
item = QtGui.QTableWidgetItem(verse[1])
item.setData(QtCore.Qt.UserRole, verse_def)
self.verse_list_widget.setItem(count, 0, item)
else:
verses = self.song.lyrics.split('\n\n')
for count, verse in enumerate(verses):
self.verse_list_widget.setRowCount(self.verse_list_widget.rowCount() + 1)
item = QtGui.QTableWidgetItem(verse)
verse_def = '%s%s' % (VerseType.tags[VerseType.Verse], str(count + 1))
item.setData(QtCore.Qt.UserRole, verse_def)
self.verse_list_widget.setItem(count, 0, item)
if self.song.verse_order:
# we translate verse order
translated = []
for verse_def in self.song.verse_order.split():
verse_index = None
if verse_tags_translated:
verse_index = VerseType.from_translated_tag(verse_def[0], None)
if verse_index is None:
verse_index = VerseType.from_tag(verse_def[0])
verse_tag = VerseType.translated_tags[verse_index].upper()
translated.append('%s%s' % (verse_tag, verse_def[1:]))
self.verse_order_edit.setText(' '.join(translated))
else:
self.verse_order_edit.setText('')
self.tag_rows()
# clear the results
self.authors_list_view.clear()
for author_song in self.song.authors_songs:
self._add_author_to_list(author_song.author, author_song.author_type)
# clear the results
self.topics_list_view.clear()
for topic in self.song.topics:
topic_name = QtGui.QListWidgetItem(str(topic.name))
topic_name.setData(QtCore.Qt.UserRole, topic.id)
self.topics_list_view.addItem(topic_name)
self.audio_list_widget.clear()
for media in self.song.media_files:
media_file = QtGui.QListWidgetItem(os.path.split(media.file_name)[1])
media_file.setData(QtCore.Qt.UserRole, media.file_name)
self.audio_list_widget.addItem(media_file)
self.title_edit.setFocus()
# Hide or show the preview button.
self.preview_button.setVisible(preview)
# Check if all verse tags are used.
self.on_verse_order_text_changed(self.verse_order_edit.text())
# Process chords XML
if self.song.chords:
song_2_xml = SongXML()
verse_chords_xml = song_2_xml.get_verses(self.song.chords)
self.chords_lyrics_list = []
for count, verse in enumerate(verse_chords_xml):
# This silently migrates from localized verse type markup.
# If we trusted the database, this would be unnecessary.
verse_tag = verse[0]['type']
index = None
if len(verse_tag) > 1:
index = VerseType.from_translated_string(verse_tag)
if index is None:
index = VerseType.from_string(verse_tag, None)
else:
verse_tags_translated = True
if index is None:
index = VerseType.from_tag(verse_tag)
verse[0]['type'] = VerseType.tags[index]
if verse[0]['label'] == '':
verse[0]['label'] = '1'
verse_tag = VerseType.translated_name(verse[0]['type'])
self.chords_lyrics_item = '---[%s:%s]---\n' % (verse_tag, verse[0]['label'])
for line in verse[1].split('\n'):
if line == '':
self.chords_lyrics_item += '\n'
else:
parsed_line = Chords.parseXmlToLines(line)
if not parsed_line[0].strip() == '':
self.chords_lyrics_item += parsed_line[0]
self.chords_lyrics_item += '@\n'
if not parsed_line[1].replace('#', '').strip() == '':
self.chords_lyrics_item += parsed_line[1]
self.chords_lyrics_item += '\n'
if self.chords_lyrics_item.endswith('\n'):
self.chords_lyrics_item = self.chords_lyrics_item.rstrip('\n')
self.chords_lyrics_list.append(self.chords_lyrics_item)
else:
# Only have lyrics for this song, so load them into the list...
self.chords_lyrics_list = []
for row in range(self.verse_list_widget.rowCount()):
item = self.verse_list_widget.item(row, 0)
field = item.data(QtCore.Qt.UserRole)
verse_tag = VerseType.translated_name(field[0])
verse_num = field[1:]
self.chords_lyrics_item = '---[%s:%s]---\n' % (verse_tag, verse_num)
self.chords_lyrics_item += item.text()
self.chords_lyrics_list.append(self.chords_lyrics_item)
def tag_rows(self):
"""
Tag the Song List rows based on the verse list
"""
row_label = []
for row in range(self.verse_list_widget.rowCount()):
item = self.verse_list_widget.item(row, 0)
verse_def = item.data(QtCore.Qt.UserRole)
verse_tag = VerseType.translated_tag(verse_def[0])
row_def = '%s%s' % (verse_tag, verse_def[1:])
row_label.append(row_def)
self.verse_list_widget.setVerticalHeaderLabels(row_label)
self.verse_list_widget.resizeRowsToContents()
self.verse_list_widget.repaint()
def on_author_add_button_clicked(self):
"""
Add the author to the list of authors associated with this song when the button is clicked.
"""
item = int(self.authors_combo_box.currentIndex())
        # Also remove commas from author names. Songs that have authors with commas in
        # their names are re-added to the database when a service plan containing them is loaded
text = self.authors_combo_box.currentText().strip(' \r\n\t').replace(',','')
author_type = self.author_types_combo_box.itemData(self.author_types_combo_box.currentIndex())
# This if statement is for OS X, which doesn't seem to work well with
# the QCompleter auto-completion class. See bug #812628.
if text in self.authors:
# Index 0 is a blank string, so add 1
item = self.authors.index(text) + 1
if item == 0 and text:
if QtGui.QMessageBox.question(
self,
translate('SongsPlugin.EditSongForm', 'Add Author'),
translate('SongsPlugin.EditSongForm', 'This author does not exist, do you want to add them?'),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.Yes) == QtGui.QMessageBox.Yes:
if text.find(' ') == -1:
author = Author.populate(first_name='', last_name='', display_name=text)
else:
author = Author.populate(first_name=text.rsplit(' ', 1)[0], last_name=text.rsplit(' ', 1)[1],
display_name=text)
self.manager.save_object(author)
self._add_author_to_list(author, author_type)
self.load_authors()
self.authors_combo_box.setCurrentIndex(0)
else:
return
elif item > 0:
item_id = (self.authors_combo_box.itemData(item))
author = self.manager.get_object(Author, item_id)
if self.authors_list_view.findItems(author.get_display_name(author_type), QtCore.Qt.MatchExactly):
critical_error_message_box(
message=translate('SongsPlugin.EditSongForm', 'This author is already in the list.'))
else:
self._add_author_to_list(author, author_type)
self.authors_combo_box.setCurrentIndex(0)
else:
QtGui.QMessageBox.warning(
self, UiStrings().NISs,
translate('SongsPlugin.EditSongForm', 'You have not selected a valid author. Either select an author '
'from the list, or type in a new author and click the "Add Author to Song" button to add '
'the new author.'))
def on_authors_list_view_clicked(self):
"""
Run a set of actions when an author in the list is selected (mainly enable the delete button).
"""
count = self.authors_list_view.count()
if count > 0:
self.author_edit_button.setEnabled(True)
if count > 1:
# There must be at least one author
self.author_remove_button.setEnabled(True)
def on_author_edit_button_clicked(self):
"""
Show a dialog to change the type of an author when the edit button is clicked
"""
self.author_edit_button.setEnabled(False)
item = self.authors_list_view.currentItem()
author_id, author_type = item.data(QtCore.Qt.UserRole)
choice, ok = QtGui.QInputDialog.getItem(self, translate('SongsPlugin.EditSongForm', 'Edit Author Type'),
translate('SongsPlugin.EditSongForm', 'Choose type for this author'),
AuthorType.TranslatedTypes,
current=AuthorType.SortedTypes.index(author_type),
editable=False)
if not ok:
return
author = self.manager.get_object(Author, author_id)
author_type = AuthorType.from_translated_text(choice)
item.setData(QtCore.Qt.UserRole, (author_id, author_type))
item.setText(author.get_display_name(author_type))
def on_author_remove_button_clicked(self):
"""
Remove the author from the list when the delete button is clicked.
"""
if self.authors_list_view.count() <= 2:
self.author_remove_button.setEnabled(False)
item = self.authors_list_view.currentItem()
row = self.authors_list_view.row(item)
self.authors_list_view.takeItem(row)
def on_topic_add_button_clicked(self):
item = int(self.topics_combo_box.currentIndex())
text = self.topics_combo_box.currentText()
if item == 0 and text:
if QtGui.QMessageBox.question(
self, translate('SongsPlugin.EditSongForm', 'Add Topic'),
translate('SongsPlugin.EditSongForm', 'This topic does not exist, do you want to add it?'),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.Yes) == QtGui.QMessageBox.Yes:
topic = Topic.populate(name=text)
self.manager.save_object(topic)
topic_item = QtGui.QListWidgetItem(str(topic.name))
topic_item.setData(QtCore.Qt.UserRole, topic.id)
self.topics_list_view.addItem(topic_item)
self.load_topics()
self.topics_combo_box.setCurrentIndex(0)
else:
return
elif item > 0:
item_id = (self.topics_combo_box.itemData(item))
topic = self.manager.get_object(Topic, item_id)
if self.topics_list_view.findItems(str(topic.name), QtCore.Qt.MatchExactly):
critical_error_message_box(
message=translate('SongsPlugin.EditSongForm', 'This topic is already in the list.'))
else:
topic_item = QtGui.QListWidgetItem(str(topic.name))
topic_item.setData(QtCore.Qt.UserRole, topic.id)
self.topics_list_view.addItem(topic_item)
self.topics_combo_box.setCurrentIndex(0)
else:
QtGui.QMessageBox.warning(
self, UiStrings().NISs,
translate('SongsPlugin.EditSongForm', 'You have not selected a valid topic. Either select a topic '
'from the list, or type in a new topic and click the "Add Topic to Song" button to add the '
'new topic.'))
def on_topic_list_view_clicked(self):
self.topic_remove_button.setEnabled(True)
def on_topic_remove_button_clicked(self):
self.topic_remove_button.setEnabled(False)
item = self.topics_list_view.currentItem()
row = self.topics_list_view.row(item)
self.topics_list_view.takeItem(row)
def on_verse_list_view_clicked(self):
self.verse_edit_chords_button.setEnabled(True)
self.verse_delete_button.setEnabled(True)
def on_verse_add_button_clicked(self):
self.verse_chords_form.set_verse('', True)
if self.verse_chords_form.exec_():
after_text, verse_tag, verse_num = self.verse_chords_form.get_verse()
verse_def = '%s%s' % (verse_tag, verse_num)
verse_list_def = '---[%s:%s]---\n' % (VerseType.translated_name(verse_tag), verse_num)
self.chords_lyrics_list.append(verse_list_def + after_text)
lyric_text = ''
for line in after_text.split('\n'):
if not line.rstrip().endswith('@'):
# Add on next lyric line, removing any chord padding (#)
lyric_text += line.replace("#", "") + '\n'
if lyric_text.endswith('\n'):
lyric_text = lyric_text.rstrip('\n')
item = QtGui.QTableWidgetItem(lyric_text)
item.setData(QtCore.Qt.UserRole, verse_def)
item.setText(lyric_text)
self.verse_list_widget.setRowCount(self.verse_list_widget.rowCount() + 1)
self.verse_list_widget.setItem(self.verse_list_widget.rowCount() - 1, 0, item)
self.tag_rows()
# Check if all verse tags are used.
self.on_verse_order_text_changed(self.verse_order_edit.text())
def on_verse_edit_chords_button_clicked(self):
item = self.verse_list_widget.currentItem()
temp_text = '\n'.join(self.chords_lyrics_list[self.verse_list_widget.currentRow()].split('\n')[1:])
if temp_text:
verse_id = item.data(QtCore.Qt.UserRole)
self.verse_chords_form.set_verse(temp_text, True, verse_id)
if self.verse_chords_form.exec_():
after_text, verse_tag, verse_num = self.verse_chords_form.get_verse()
verse_def = '%s%s' % (verse_tag, verse_num)
verse_list_def = '---[%s:%s]---\n' % (VerseType.translated_name(verse_tag), verse_num)
self.chords_lyrics_list[self.verse_list_widget.currentRow()] = verse_list_def + after_text
lyric_text = ''
for line in after_text.split('\n'):
if not line.rstrip().endswith('@'):
# Add on next lyric line, removing any chord padding (#)
lyric_text += line.replace("#", "") + '\n'
if lyric_text.endswith('\n'):
lyric_text = lyric_text.rstrip('\n')
item.setData(QtCore.Qt.UserRole, verse_def)
item.setText(lyric_text)
# number of lines has changed, repaint the list moving the data
if len(temp_text.split('\n')) != len(lyric_text.split('\n')):
temp_list = []
temp_ids = []
for row in range(self.verse_list_widget.rowCount()):
item = self.verse_list_widget.item(row, 0)
temp_list.append(item.text())
temp_ids.append(item.data(QtCore.Qt.UserRole))
self.verse_list_widget.clear()
for row, entry in enumerate(temp_list):
item = QtGui.QTableWidgetItem(entry, 0)
item.setData(QtCore.Qt.UserRole, temp_ids[row])
self.verse_list_widget.setItem(row, 0, item)
self.tag_rows()
# Check if all verse tags are used.
self.on_verse_order_text_changed(self.verse_order_edit.text())
def on_verse_edit_all_chords_button_clicked(self):
"""
Verse edit all chords button (save) pressed
:return:
"""
if not self.chords_lyrics_list == []:
verse_list = ''
for row in self.chords_lyrics_list:
verse_list += row + '\n'
self.verse_chords_form.set_verse(verse_list)
else:
verse_list = ''
if self.verse_list_widget.rowCount() > 0:
for row in range(self.verse_list_widget.rowCount()):
item = self.verse_list_widget.item(row, 0)
field = item.data(QtCore.Qt.UserRole)
verse_tag = VerseType.translated_name(field[0])
verse_num = field[1:]
verse_list += '---[%s:%s]---\n' % (verse_tag, verse_num)
verse_list += item.text()
verse_list += '\n'
self.verse_chords_form.set_verse(verse_list)
else:
self.verse_chords_form.set_verse('')
if not self.verse_chords_form.exec_():
return
verse_chords_list = self.verse_chords_form.get_all_verses()
verse_chords_list = str(verse_chords_list.replace('\r\n', '\n'))
# Update temporary storage of chords and lyrics information (update self.song and database in
# save_song method. Also strip out chord lines and # characters and update verse_list_widget.
self.chords_lyrics_list = []
self.verse_list_widget.clear()
self.verse_list_widget.setRowCount(0)
for row in self.find_verse_split.split(verse_chords_list):
for match in row.split('---['):
chords_lyrics_item = ''
for count, parts in enumerate(match.split(']---\n')):
if count == 0:
# Processing verse tag
if len(parts) == 0:
continue
# handling carefully user inputted versetags
separator = parts.find(':')
if separator >= 0:
verse_name = parts[0:separator].strip()
verse_num = parts[separator + 1:].strip()
else:
verse_name = parts
verse_num = '1'
verse_index = VerseType.from_loose_input(verse_name)
verse_tag = VerseType.tags[verse_index]
# Later we need to handle v1a as well.
regex = re.compile(r'\D*(\d+)\D*')
match = regex.match(verse_num)
if match:
verse_num = match.group(1)
else:
verse_num = '1'
verse_def = '%s%s' % (verse_tag, verse_num)
verse_list_def = '---[%s:%s]---\n' % (VerseType.translated_name(verse_tag), verse_num)
else:
# Processing lyrics
if parts.endswith('\n'):
parts = parts.rstrip('\n')
lyric_parts = ''
for line in parts.split('\n'):
if not line.rstrip().endswith('@'):
# Add on next lyric line, removing any chord padding (#)
lyric_parts += line.replace("#", "") + '\n'
if lyric_parts.endswith('\n'):
lyric_parts = lyric_parts.rstrip('\n')
item = QtGui.QTableWidgetItem(lyric_parts)
item.setData(QtCore.Qt.UserRole, verse_def)
self.verse_list_widget.setRowCount(self.verse_list_widget.rowCount() + 1)
self.verse_list_widget.setItem(self.verse_list_widget.rowCount() - 1, 0, item)
self.chords_lyrics_list.append(verse_list_def + parts)
self.tag_rows()
self.verse_edit_chords_button.setEnabled(False)
self.verse_delete_button.setEnabled(False)
# Check if all verse tags are used.
self.on_verse_order_text_changed(self.verse_order_edit.text())
def on_verse_delete_button_clicked(self):
"""
Verse Delete button pressed
"""
index = self.verse_list_widget.currentRow()
self.verse_list_widget.removeRow(index)
del self.chords_lyrics_list[index]
if not self.verse_list_widget.selectedItems():
self.verse_edit_chords_button.setEnabled(False)
self.verse_delete_button.setEnabled(False)
def on_verse_order_text_changed(self, text):
"""
        Checks if the verse order is complete or missing. Shows an error message according to the state of the verse
        order.
:param text: The text of the verse order edit (ignored).
"""
# Extract all verses which were used in the order.
verses_in_order = self._extract_verse_order(self.verse_order_edit.text())
# Find the verses which were not used in the order.
verses_not_used = []
for index in range(self.verse_list_widget.rowCount()):
verse = self.verse_list_widget.item(index, 0)
verse = verse.data(QtCore.Qt.UserRole)
if verse not in verses_in_order:
verses_not_used.append(verse)
# Set the label text.
label_text = ''
# No verse order was entered.
if not verses_in_order:
label_text = self.no_verse_order_entered_warning
# The verse order does not contain all verses.
elif verses_not_used:
label_text = self.not_all_verses_used_warning
self.warning_label.setText(label_text)
def on_copyright_insert_button_triggered(self):
"""
Copyright insert button pressed
"""
text = self.copyright_edit.text()
pos = self.copyright_edit.cursorPosition()
sign = SongStrings.CopyrightSymbol
text = text[:pos] + sign + text[pos:]
self.copyright_edit.setText(text)
self.copyright_edit.setFocus()
self.copyright_edit.setCursorPosition(pos + len(sign))
def on_maintenance_button_clicked(self):
"""
Maintenance button pressed
"""
temp_song_book = None
item = int(self.song_book_combo_box.currentIndex())
text = self.song_book_combo_box.currentText()
if item == 0 and text:
temp_song_book = text
self.media_item.song_maintenance_form.exec_(True)
self.load_authors()
self.load_books()
self.load_topics()
if temp_song_book:
self.song_book_combo_box.setEditText(temp_song_book)
def on_preview(self, button):
"""
Save and Preview button clicked.
        The song is valid, so ask the plugin to add it to the preview.
:param button: A button (QPushButton).
"""
log.debug('onPreview')
if button.objectName() == 'preview_button':
self.save_song(True)
Registry().execute('songs_preview')
def on_audio_add_from_file_button_clicked(self):
"""
Loads file(s) from the filesystem.
"""
filters = '%s (*)' % UiStrings().AllFiles
file_names = FileDialog.getOpenFileNames(self, translate('SongsPlugin.EditSongForm', 'Open File(s)'), '',
filters)
for filename in file_names:
item = QtGui.QListWidgetItem(os.path.split(str(filename))[1])
item.setData(QtCore.Qt.UserRole, filename)
self.audio_list_widget.addItem(item)
def on_audio_add_from_media_button_clicked(self):
"""
Loads file(s) from the media plugin.
"""
if self.media_form.exec_():
for filename in self.media_form.get_selected_files():
item = QtGui.QListWidgetItem(os.path.split(str(filename))[1])
item.setData(QtCore.Qt.UserRole, filename)
self.audio_list_widget.addItem(item)
def on_audio_remove_button_clicked(self):
"""
Removes a file from the list.
"""
row = self.audio_list_widget.currentRow()
if row == -1:
return
self.audio_list_widget.takeItem(row)
def on_audio_remove_all_button_clicked(self):
"""
Removes all files from the list.
"""
self.audio_list_widget.clear()
def on_up_button_clicked(self):
"""
Moves a file up when the user clicks the up button on the audio tab.
"""
row = self.audio_list_widget.currentRow()
if row <= 0:
return
item = self.audio_list_widget.takeItem(row)
self.audio_list_widget.insertItem(row - 1, item)
self.audio_list_widget.setCurrentRow(row - 1)
def on_down_button_clicked(self):
"""
Moves a file down when the user clicks the up button on the audio tab.
"""
row = self.audio_list_widget.currentRow()
if row == -1 or row > self.audio_list_widget.count() - 1:
return
item = self.audio_list_widget.takeItem(row)
self.audio_list_widget.insertItem(row + 1, item)
self.audio_list_widget.setCurrentRow(row + 1)
def on_key_or_transpose_change(self):
"""
        Updates the transposed key display when the user updates the song key or transpose amount.
"""
if (self.song_key_edit.currentIndex() > -1) and (self.transpose_edit.value() != 0):
self.transposed_key_label.setText('Transposed to: ' + Chords.key_list[
(self.song_key_edit.currentIndex() + self.transpose_edit.value()) % 12])
else:
self.transposed_key_label.setText('')
def clear_caches(self):
"""
Free up auto-completion memory on dialog exit
"""
log.debug('SongEditForm.clearCaches')
self.authors = []
self.themes = []
self.books = []
self.topics = []
def reject(self):
"""
Exit Dialog and do not save
"""
log.debug('SongEditForm.reject')
self.clear_caches()
QtGui.QDialog.reject(self)
def accept(self):
"""
Exit Dialog and save song if valid
"""
log.debug('SongEditForm.accept')
self.clear_caches()
if self._validate_song():
self.save_song()
self.song = None
QtGui.QDialog.accept(self)
def save_song(self, preview=False):
"""
Get all the data from the widgets on the form, and then save it to the database. The form has been validated
and all reference items (Authors, Books and Topics) have been saved before this function is called.
:param preview: Should be ``True`` if the song is also previewed (boolean).
"""
# The Song() assignment. No database calls should be made while a
# Song() is in a partially complete state.
if not self.song:
self.song = Song()
self.song.title = self.title_edit.text()
self.song.alternate_title = self.alternative_edit.text()
self.song.song_key = self.song_key_edit.currentText()
self.song.transpose_by = self.transpose_edit.value()
self.song.copyright = self.copyright_edit.text()
# Values will be set when cleaning the song.
self.song.search_title = ''
self.song.search_lyrics = ''
self.song.verse_order = ''
self.song.comments = self.comments_edit.toPlainText()
order_text = self.verse_order_edit.text()
order = []
for item in order_text.split():
verse_tag = VerseType.tags[VerseType.from_translated_tag(item[0])]
verse_num = item[1:].lower()
order.append('%s%s' % (verse_tag, verse_num))
self.song.verse_order = ' '.join(order)
self.song.ccli_number = self.ccli_number_edit.text()
self.song.song_number = self.song_book_number_edit.text()
book_name = self.song_book_combo_box.currentText()
if book_name:
self.song.book = self.manager.get_object_filtered(Book, Book.name == book_name)
else:
self.song.book = None
theme_name = self.theme_combo_box.currentText()
if theme_name:
self.song.theme_name = theme_name
else:
self.song.theme_name = None
self._process_lyrics()
self._process_chords()
self.song.authors_songs = []
for row in range(self.authors_list_view.count()):
item = self.authors_list_view.item(row)
self.song.add_author(self.manager.get_object(Author, item.data(QtCore.Qt.UserRole)[0]),
item.data(QtCore.Qt.UserRole)[1])
self.song.topics = []
for row in range(self.topics_list_view.count()):
item = self.topics_list_view.item(row)
topic_id = (item.data(QtCore.Qt.UserRole))
topic = self.manager.get_object(Topic, topic_id)
if topic is not None:
self.song.topics.append(topic)
# Save the song here because we need a valid id for the audio files.
clean_song(self.manager, self.song)
self.manager.save_object(self.song)
audio_files = [a.file_name for a in self.song.media_files]
log.debug(audio_files)
save_path = os.path.join(AppLocation.get_section_data_path(self.media_item.plugin.name), 'audio',
str(self.song.id))
check_directory_exists(save_path)
self.song.media_files = []
files = []
for row in range(self.audio_list_widget.count()):
item = self.audio_list_widget.item(row)
filename = item.data(QtCore.Qt.UserRole)
if not filename.startswith(save_path):
old_file, filename = filename, os.path.join(save_path, os.path.split(filename)[1])
shutil.copyfile(old_file, filename)
files.append(filename)
media_file = MediaFile()
media_file.file_name = filename
media_file.type = 'audio'
media_file.weight = row
self.song.media_files.append(media_file)
for audio in audio_files:
if audio not in files:
try:
os.remove(audio)
except:
log.exception('Could not remove file: %s', audio)
if not files:
try:
os.rmdir(save_path)
except OSError:
log.exception('Could not remove directory: %s', save_path)
clean_song(self.manager, self.song)
self.manager.save_object(self.song)
self.media_item.auto_select_id = self.song.id
|
gpl-2.0
| -344,718,849,953,748,350
| 46.429921
| 143
| 0.552394
| false
| 4.012523
| false
| false
| false
|
zhuwbigdata/hadoop-admin-utils
|
ambari-utils/python2/getConfig2.py
|
1
|
5465
|
#!/usr/bin/python
# Get a handle to the API client
import ssl
import sys
import pprint
import argparse
import json
import requests
SERVICE_TYPE_MAP = {
'zookeeper': 'ZOOKEEPER',
'hdfs': 'HDFS',
'hbase': 'HBASE',
'yarn': 'YARN',
'oozie': 'OOZIE',
'kafka': 'KAFKA',
}
SERVICE_ROLE_TYPE_MAP = {
'zookeeper_server': 'SERVER',
'namenode': 'NAMENODE',
'resourcemanager': 'RESOURCEMANAGER',
'oozie_server': 'OOZIE_SERVER',
'hbase_restserver': 'HBASERESTSERVER',
'kafka_broker': 'KAFKA_BROKER',
}
CONFIG_KEY_VALUE_MAP = {
'NAME_NODE': None,
'NAME_NODE_PORT': '8020',
'JOB_TRACKER': None,
'RESOURCEMANAGER_ADDRESS': '8032',
'OOZIE_URL': None,
'OOZIE_HTTP_PORT': '11000',
'OOZIE_HTTPS_PORT': '11443',
'OOZIE_USE_SSL': 'false',
'ZOOKEEPER_QUORUM': None,
'ZOOKEEPER_PORT': '2181',
'HBASE_REST_IP': None,
'HBASE_REST_PORT': '20550',
'KAFKA_BROKER': None,
'KAFKA_SECURITY_PROTOCOL': 'PLAINTEXT',
}
CONFIG_PROPERTY_MAP = {
'zk_client_port': 'clientPort',
'hdf_nn_ns': 'dfs_federation_namenode_nameservice',
'hdf_nn_port': 'namenode_port',
'yarn_rm_address': 'yarn_resourcemanager_addres',
'oozie_http_port': 'oozie_http_port',
'oozie_https_port': 'oozie_https_port',
'oozie_use_ssl': 'oozie_use_ssl',
'oozie_load_balancer': 'oozie_load_balancer',
'hbase_rs_port': 'hbase_restserver_port',
'hbase_rs_host': 'hbase_restserver_host',
'kafka_client_security_protocol': 'security.inter.broker.protocol',
}
AMBARI_DOMAIN='172.16.95.169'
AMBARI_PORT='8080'
AMBARI_USER_ID='raj_ops'
AMBARI_USER_PW='raj_ops'
def ambariREST( restAPI ) :
url = "http://"+AMBARI_DOMAIN+":"+AMBARI_PORT+restAPI
r= requests.get(url, auth=(AMBARI_USER_ID, AMBARI_USER_PW))
return(json.loads(r.text));
def rmREST( restAPI ) :
url = "http://"+RM_DOMAIN+":"+RM_PORT+restAPI
r=requests.get(url)
return(json.loads(r.text));
def getClusterVersionAndName() :
json_data = ambariREST("/api/v1/clusters")
cname = json_data["items"][0]["Clusters"]["cluster_name"]
cversion =json_data["items"][0]["Clusters"]["version"]
return cname, cversion, json_data;
def getAmbariHosts() :
restAPI = "/api/v1/hosts"
json_data = ambariREST(restAPI)
return(json_data);
def getConfigGroups() :
restAPI = "/api/v1/clusters/"+CLUSTER_NAME+"/config_groups"
json_data = ambariREST(restAPI)
return(json_data);
def getServiceConfigTypes() :
restAPI = "/api/v1/clusters/"+CLUSTER_NAME+"/configurations"
json_data = ambariREST(restAPI)
return(json_data);
def getServiceActualConfigurations() :
restAPI = "/api/v1/clusters/"+CLUSTER_NAME
json_data = ambariREST(restAPI)
return(json_data);
def getStackVersions() :
restAPI = "/api/v1/clusters/"+CLUSTER_NAME+"/stack_versions/"
json_data = ambariREST(restAPI)
return(json_data);
def getServices( SERVICE) :
restAPI = "/api/v1/clusters/"+CLUSTER_NAME+"/services/"+SERVICE
json_data = ambariREST(restAPI)
return(json_data);
def getResourceManagerInfo() :
restAPI = "/ws/v1/cluster/info"
json_data = rmREST(restAPI)
return(json_data);
def getResourceManagerMetrics() :
restAPI = "/ws/v1/cluster/metrics"
json_data = rmREST(restAPI)
return(json_data);
def getRMschedulerInfo() :
restAPI = "/ws/v1/cluster/scheduler"
json_data = rmREST(restAPI)
return(json_data);
def getAppsSummary() :
restAPI = "/ws/v1/cluster/apps"
json_data = rmREST(restAPI)
return(json_data);
def getAppsStatistics() :
restAPI = "/ws/v1/cluster/appstatictics"
json_data = rmREST(restAPI)
return(json_data);
def getNodesSummary() :
restAPI = "/ws/v1/cluster/nodes"
json_data = rmREST(restAPI)
return(json_data);
def main(cm_fqhn, cm_user_name, cm_user_password, cm_cluster_name, cm_tls_enabled, cm_tls_cafile):
print cm_fqhn, cm_user_name, cm_user_password, cm_cluster_name, cm_tls_enabled, cm_tls_cafile
if cm_tls_enabled == 'false':
print getClusterVersionAndName()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='get configuration from Cloudera Manager API')
parser.add_argument('--cm_fqhn', required=True,
help='Cloudera Manager FQHN')
parser.add_argument('--cm_user_name', required=True,
help='Cloudera Manager User Name')
parser.add_argument('--cm_user_password', required=True,
help='Cloudera Manager User Password')
parser.add_argument('--cm_cluster_name', required=True,
help='Cloudera Manager Cluster Name')
parser.add_argument('--cm_tls_enabled', required=True,
help='Cloudera Manager TLS enabled')
parser.add_argument('--cm_tls_cafile', required=False,
help='Cloudera Manager TLS CA file location')
args = parser.parse_args()
main(cm_fqhn = args.cm_fqhn,
cm_user_name = args.cm_user_name,
cm_user_password = args.cm_user_password,
cm_cluster_name = args.cm_cluster_name,
cm_tls_enabled = args.cm_tls_enabled,
cm_tls_cafile = args.cm_tls_cafile)
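# Example invocation (hypothetical host and credentials, shown only to illustrate the
# required arguments defined above):
#   python getConfig2.py --cm_fqhn cm.example.com --cm_user_name admin \
#       --cm_user_password admin --cm_cluster_name cluster1 --cm_tls_enabled false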
|
apache-2.0
| -7,083,016,760,662,906,000
| 30.959064
| 116
| 0.613358
| false
| 3.115735
| true
| false
| false
|
cpn18/track-chart
|
gps-pi/nmea.py
|
1
|
1791
|
#!/usr/bin/python3
"""
NMEA Utils
"""
def tpv_to_json(report):
if report is None:
return {"class": "TPV", "mode": 0}
tpv = {
'class': report['class'],
'mode': report['mode'],
}
for field in ['device', 'status', 'time', 'altHAE', 'altMSL', 'alt',
'climb', 'datum', 'depth', 'dgpsAge', 'dgpsSta',
'epc', 'epd', 'eph', 'eps', 'ept', 'epx', 'epy', 'epv',
'geoidSep', 'lat', 'leapseconds', 'lon', 'track', 'magtrack',
'magvar', 'speed', 'ecefx', 'ecefy', 'ecefz', 'ecefpAcc',
'ecefvx', 'ecefvy', 'ecefvz', 'exefvAcc', 'sep', 'relD',
'relE', 'relN', 'velD', 'velE', 'velN', 'wanglem', 'wangler',
'wanglet', 'wspeedr', 'wspeedt']:
if field in report:
tpv[field] = report[field]
return tpv
def sky_to_json(report):
if report is None:
return {"class": "SKY", "satellites": []}
sky = {
'class': report['class'],
'satellites': [],
}
for field in ['device', 'time', 'gdop', 'hdop', 'pdop', 'tdop', 'vdop',
'xdop', 'ydop']:
if field in report:
sky[field] = report[field]
    for sat in report['satellites']:
prn = {
"PRN": sat['PRN'],
"used": sat['used'],
}
for field in ['az', 'el', 'ss', 'gnssid', 'svid', 'sigid',
'freqid', 'health']:
if field in sat:
prn[field] = sat[field]
sky['satellites'].append(prn)
return sky
def calc_used(sky):
num_sat = len(sky['satellites'])
num_used = 0
    for sat in sky['satellites']:
        if sat['used'] is True:
            num_used += 1
return (num_used, num_sat)
|
gpl-3.0
| 2,407,027,658,398,743,600
| 31.563636
| 75
| 0.47962
| false
| 2.936066
| false
| false
| false
|
PaulEcoffet/stonewallsgate
|
dunwallsgate/soundmanager.py
|
1
|
1920
|
import os.path
import pygame.mixer
import data
LOOP = -1
def load_music(music_ref):
"""
    Load a piece of music into memory but do not play it
    music_ref - The reference of the music
"""
music_path = data.get_sound_path(
os.path.join("music", music_ref + ".ogg"))
pygame.mixer.music.load(music_path)
def play_music(music_ref=None, loops=0, start=0.0):
"""
    Play the music `music_ref`, repeating it `loops` times,
    starting at second `start`.
    If loops = -1, the music is played indefinitely
"""
if music_ref:
load_music(music_ref)
pygame.mixer.music.play(loops, start)
def loop_music(music_ref=None):
"""
    Play the music `music_ref` in an infinite loop.
"""
play_music(music_ref, LOOP)
def stop_music(fadeout_time=0):
"""
    Stop the music that is currently playing.
    If fadeout_time > 0, the music fades out
    over `fadeout_time` ms.
"""
if fadeout_time > 0:
pygame.mixer.music.fadeout(fadeout_time)
else:
pygame.mixer.music.stop()
def toggle_music(fadeout_time=0):
"""
    Start the music if it is not playing, otherwise
    stop it
"""
if pygame.mixer.music.get_busy():
stop_music(fadeout_time)
else:
play_music()
def set_music_volume(volume):
"""
    Set the music volume
"""
pygame.mixer.music.set_volume(volume)
def get_music_volume():
"""
    Return the music volume
"""
return pygame.mixer.music.get_volume()
def play_sound(sound_ref, loops=0, maxtime=0, fade_ms=0):
"""
    Play the sound with the reference `sound_ref` and replay it
    `loops` times
"""
sound_path = data.get_sound_path(
os.path.join("sounds", sound_ref + ".ogg"))
sound = pygame.mixer.Sound(sound_path)
pygame.mixer.find_channel().play(sound, loops, maxtime, fade_ms)
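# Minimal usage sketch (not part of the original module; assumes pygame.mixer has been
# initialised elsewhere and that data.get_sound_path can resolve the hypothetical
# references "theme" and "click" to existing .ogg files):
#   loop_music("theme")             # play music/theme.ogg in an infinite loop
#   set_music_volume(0.5)
#   play_sound("click")             # play sounds/click.ogg once
#   stop_music(fadeout_time=2000)   # fade the music out over 2 seconds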
|
gpl-2.0
| -166,225,870,081,417,380
| 21.951807
| 68
| 0.626247
| false
| 2.830609
| false
| false
| false
|
Joergen/zamboni
|
sites/landfill/settings_base.py
|
1
|
5699
|
"""private_base will be populated from puppet and placed in this directory"""
import logging
import os
import dj_database_url
from lib.settings_base import CACHE_PREFIX, ES_INDEXES, KNOWN_PROXIES, LOGGING
from .. import splitstrip
import private_base as private
ENGAGE_ROBOTS = False
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = private.EMAIL_HOST
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = False
SESSION_COOKIE_SECURE = True
REDIRECT_SECRET_KEY = private.REDIRECT_SECRET_KEY
ADMINS = ()
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'mysql_pool'
DATABASES['default']['sa_pool_key'] = 'master'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave'] = dj_database_url.parse(private.DATABASES_SLAVE_URL)
DATABASES['slave']['ENGINE'] = 'mysql_pool'
DATABASES['slave']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave']['sa_pool_key'] = 'slave'
SERVICES_DATABASE = dj_database_url.parse(private.SERVICES_DATABASE_URL)
DATABASE_POOL_ARGS = {
'max_overflow': 10,
'pool_size': 5,
'recycle': 30
}
SLAVE_DATABASES = ['slave']
CACHES = {
'default': {
'BACKEND': 'caching.backends.memcached.MemcachedCache',
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'BACKEND': 'memcachepool.cache.UMemcacheCache',
'LOCATION': splitstrip(private.CACHES_DEFAULT_LOCATION),
'TIMEOUT': 500,
'KEY_PREFIX': CACHE_PREFIX,
},
}
SECRET_KEY = private.SECRET_KEY
LOG_LEVEL = logging.DEBUG
## Celery
BROKER_URL = private.BROKER_URL
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
NETAPP_STORAGE = private.NETAPP_STORAGE_ROOT + '/shared_storage'
MIRROR_STAGE_PATH = private.NETAPP_STORAGE_ROOT + '/public-staging'
GUARDED_ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/guarded-addons'
UPLOADS_PATH = NETAPP_STORAGE + '/uploads'
USERPICS_PATH = UPLOADS_PATH + '/userpics'
ADDON_ICONS_PATH = UPLOADS_PATH + '/addon_icons'
COLLECTIONS_ICON_PATH = UPLOADS_PATH + '/collection_icons'
IMAGEASSETS_PATH = UPLOADS_PATH + '/imageassets'
REVIEWER_ATTACHMENTS_PATH = UPLOADS_PATH + '/reviewer_attachment'
PREVIEWS_PATH = UPLOADS_PATH + '/previews'
SIGNED_APPS_PATH = NETAPP_STORAGE + '/signed_apps'
SIGNED_APPS_REVIEWER_PATH = NETAPP_STORAGE + '/signed_apps_reviewer'
PREVIEW_THUMBNAIL_PATH = PREVIEWS_PATH + '/thumbs/%s/%d.png'
PREVIEW_FULL_PATH = PREVIEWS_PATH + '/full/%s/%d.%s'
HERA = []
LOGGING['loggers'].update({
'z.task': { 'level': logging.DEBUG },
'z.hera': { 'level': logging.INFO },
'z.redis': { 'level': logging.DEBUG },
'z.pool': { 'level': logging.ERROR },
})
REDIS_BACKEND = private.REDIS_BACKENDS_CACHE
REDIS_BACKENDS = {
'cache': private.REDIS_BACKENDS_CACHE,
'cache_slave': private.REDIS_BACKENDS_CACHE_SLAVE,
'master': private.REDIS_BACKENDS_MASTER,
'slave': private.REDIS_BACKENDS_SLAVE,
}
CACHE_MACHINE_USE_REDIS = True
RECAPTCHA_PUBLIC_KEY = private.RECAPTCHA_PUBLIC_KEY
RECAPTCHA_PRIVATE_KEY = private.RECAPTCHA_PRIVATE_KEY
RECAPTCHA_URL = ('https://www.google.com/recaptcha/api/challenge?k=%s' % RECAPTCHA_PUBLIC_KEY)
TMP_PATH = os.path.join(NETAPP_STORAGE, 'tmp')
PACKAGER_PATH = os.path.join(TMP_PATH, 'packager')
ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/files'
PERF_THRESHOLD = 20
SPIDERMONKEY = '/usr/bin/tracemonkey'
# Remove DetectMobileMiddleware from middleware in production.
detect = 'mobility.middleware.DetectMobileMiddleware'
csp = 'csp.middleware.CSPMiddleware'
RESPONSYS_ID = private.RESPONSYS_ID
CRONJOB_LOCK_PREFIX = 'addons-landfill'
BUILDER_SECRET_KEY = private.BUILDER_SECRET_KEY
BUILDER_VERSIONS_URL = "https://builder-addons-dev.allizom.org/repackage/sdk-versions/"
ES_HOSTS = splitstrip(private.ES_HOSTS)
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = dict((k, '%s_landfill' % v) for k, v in ES_INDEXES.items())
BUILDER_UPGRADE_URL = "https://builder-addons-dev.allizom.org/repackage/rebuild/"
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
GRAPHITE_HOST = private.GRAPHITE_HOST
GRAPHITE_PORT = private.GRAPHITE_PORT
GRAPHITE_PREFIX = private.GRAPHITE_PREFIX
CEF_PRODUCT = STATSD_PREFIX
ES_TIMEOUT = 60
EXPOSE_VALIDATOR_TRACEBACKS = True
KNOWN_PROXIES += ['10.2.83.105',
'10.2.83.106',
'10.2.83.107',
'10.8.83.200',
'10.8.83.201',
'10.8.83.202',
'10.8.83.203',
'10.8.83.204',
'10.8.83.210',
'10.8.83.211',
'10.8.83.212',
'10.8.83.213',
'10.8.83.214',
'10.8.83.215',
'10.8.83.251',
'10.8.83.252',
'10.8.83.253',
]
NEW_FEATURES = True
PERF_TEST_URL = 'http://talos-addon-master1.amotest.scl1.mozilla.com/trigger/trigger.cgi'
REDIRECT_URL = 'https://outgoing.allizom.org/v1/'
CLEANCSS_BIN = 'cleancss'
UGLIFY_BIN = 'uglifyjs'
CELERYD_TASK_SOFT_TIME_LIMIT = 240
LESS_PREPROCESS = True
XSENDFILE_HEADER = 'X-Accel-Redirect'
METLOG_CONF = {
'plugins': {'cef': ('metlog_cef.cef_plugin:config_plugin', {})},
'sender': {
'class': 'metlog.senders.UdpSender',
'host': splitstrip(private.METLOG_CONF_SENDER_HOST),
'port': private.METLOG_CONF_SENDER_PORT,
},
}
USE_METLOG_FOR_CEF = True
USE_METLOG_FOR_TASTYPIE = True
ALLOW_SELF_REVIEWS = True
AES_KEYS = private.AES_KEYS
|
bsd-3-clause
| -8,507,801,712,565,959,000
| 28.225641
| 94
| 0.670819
| false
| 2.954381
| false
| false
| false
|
ddinsight/dd-streamworks
|
stream_worker/devmodule/production/vidnet/__init__.py
|
1
|
40613
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 AirPlug Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Not all aatLog items are included in Open DD.
You should ignore some items & tables in this module.
Items not relevant to Open DD are specified.
"""
__author__ = 'jaylee'
import json
import time
from decimal import Decimal
import worker
from worker import log
# traffic mode
class TrafficMode:
SYSTEM = 0
AAT_AGENT = 1
# logType
class aatLogType:
END = 0
START = 1
PERIOD = 2
USER_PAUSE = 3
USER_RESUME = 4
NET_CHANGE = 5
NET_PAUSE = 6
NET_RESUME = 7
NET_CHANGE_DURING_PAUSE = 8
BITRATE_CHANGE = 9
SEEK = 10
UNKNOWN = -1
class VidnetType:
VIDNET_TYPE_PLAY = 0
VIDNET_TYPE_NET_PAUSE = 1
VIDNET_TYPE_NET_RESUME = 2
VIDNET_TYPE_USER_PAUSE = 3
VIDNET_TYPE_USER_RESUME = 4
VIDNET_TYPE_UNKNOWN = 5
class ExceptionType:
NO_AATLOG = 1 # AAT
INVALID_LOG_PATTERN = 2 # live
INVALID_LOGTYPE_IN_VIDET = 3
TRAFFIC_OVERFLOW = 4 # vidnet
class NetworkType:
WIFI = 0
CELLULAR = 1
UNKNOWN = 2
#Following function is not related to Open DD. You should ignore it
def updateMcc(deviceID, plmnId):
try:
rcur = None
strSQL = ""
if plmnId.isdigit() == True and int(plmnId) > 0:
rcur = worker.dbmanager.allocDictCursor('myapmain')
strSQL = "UPDATE mdev SET plmnid = '%s' WHERE mosid = '%s' and (plmnid <= ' ' or plmnid is NULL)" % (plmnId, deviceID)
ret = rcur.execute(strSQL)
except Exception, e:
log.error("updateMcc: %s" % e)
log.error("updateMcc: [deviceID:%s, plmnid:%s, strSQL:%s]" % (deviceID, plmnId, strSQL))
finally:
if rcur <> None:
worker.dbmanager.freeCursor(rcur)
def updateVidLog(waveCursor, vLog, row):
try:
strSQL = ""
strSQL = """UPDATE vidsession_log
SET playTime = if(playTime >= %d, playTime - %d, playTime),
pauseTime = if(pauseTime >= %d, pauseTime - %d, pauseTime),
elapsedTime = if(elapsedTime >= %d, elapsedTime - %d, elapsedTime),
cellRxBytes = if(cellRxBytes >= %d, cellRxBytes - %d, cellRxBytes),
wfRxBytes = if(wfRxBytes >= %d, wfRxBytes - %d, wfRxBytes),
cellDuration = if(cellDuration >= %d, cellDuration - %d, cellDuration),
wfDuration = if(wfDuration >= %d, wfDuration -%d, wfDuration),
lstuptmp = unix_timestamp()
WHERE playSessionID = '%s' and tTM = %s """ % (vLog['playTime'], vLog['playTime'],
vLog['pauseTime'], vLog['pauseTime'],
vLog['elapsedTime'], vLog['elapsedTime'],
vLog['cellRxBytes'], vLog['cellRxBytes'],
vLog['wfRxBytes'], vLog['wfRxBytes'],
vLog['cellDuration'], vLog['cellDuration'],
vLog['wfDuration'], vLog['wfDuration'],
vLog['playSessionID'], row['nextTTM'])
waveCursor.execute(strSQL)
except Exception, e:
log.error("updateVidLog %s" % e)
log.error(vLog)
log.error(row)
if strSQL > "":
log.error("[SQL] %s" % strSQL)
raise e
def getVidLogStatic(vidLogDict, aatLog, appSessionId, netType):
vidLogDict['playSessionID'] = aatLog['playSessionId']
vidLogDict['tTM'] = Decimal(aatLog['tTM'])
vidLogDict['oid'] = aatLog.get('log_time', '')
vidLogDict['appSessionID'] = appSessionId
vidLogDict['logType'] = aatLog.get('agentLogType', -1)
vidLogDict['logStartTime'] = aatLog.get('agentLogStartTime', 0)
vidLogDict['logEndTime'] = aatLog.get('agentLogEndTime', 0)
vidLogDict['cellid'] = "%s_%s_%s" % (aatLog.get('confOperator', ''), aatLog.get('netCID', ''), aatLog.get('netLAC', ''))
vidLogDict['ntype'] = netType
vidLogDict['abrMode'] = aatLog.get('abrMode', '')
if aatLog.has_key('requestBR'):
vidLogDict['curBitrate'] = aatLog.get('liveCurrentTSBitrate', 0)
vidLogDict['reqBitrate'] = aatLog['requestBR']
else:
vidLogDict['curBitrate'] = aatLog.get('reqBitrate', 0)
vidLogDict['reqBitrate'] = aatLog.get('liveCurrentTSBitrate', 0)
vidLogDict['bbCount'] = aatLog.get('bbCount', 0)
vidLogDict['netCellState'] = aatLog.get('netCellState', '')
vidLogDict['bufferState'] = aatLog.get('playBufferState', '0')
vidLogDict['cellSysRxBytes'] = aatLog.get('trafficSystemMoRxBytes', 0)
vidLogDict['wfSysRxBytes'] = aatLog.get('trafficSystemWFRxBytes', 0)
vidLogDict['playEndState'] = aatLog.get('playEndState', '')
strNetActive = aatLog.get('netActiveNetwork', '')
if strNetActive.upper().find('WIFI') > 0:
strtokens = strNetActive.split('|')
if len(strtokens) > 5:
vidLogDict['ssid'] = strtokens[3]
vidLogDict['bssid'] = strtokens[4]
def vidupdate(waveCursor, aatLog, row):
try:
vidDict = {}
vidLogDict = {}
#get some values to use
cellid = "%s_%s_%s" % (aatLog.get('confOperator', ''), aatLog.get('netCID', ''), aatLog.get('netLAC', ''))
psmode = int(aatLog['playServiceMode'])
logType = aatLog.get('agentLogType', -1)
if aatLog.get('netActiveNetwork', '').find('WIFI') >= 0:
netType = '0'
elif aatLog.get('netActiveNetwork', '').find('mobile') >= 0:
netType = '1'
else:
netType = '2'
batteryStart = 0
batteryEnd = 0
batteryValid = 0
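        # batteryInfo format (inferred from the parsing below): one or two records
        # separated by '|'; each record has at least five '/'-separated fields, where
        # field index 3 holds the total battery level and field index 4 the current level.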
if aatLog.has_key('batteryInfo'):
btList = aatLog['batteryInfo'].split('|')
if len(btList) == 2:
if len(btList[0].split('/')) >= 5 and len(btList[1].split('/')) >= 5:
nTotLevel = float(btList[0].split('/')[3])
nBatLevel = float(btList[0].split('/')[4])
batteryStart = (nBatLevel/nTotLevel)*100
nTotLevel = float(btList[1].split('/')[3])
nBatLevel = float(btList[1].split('/')[4])
batteryEnd = (nBatLevel/nTotLevel)*100
if btList[1].split('/')[1] == 'DISCHARGING': #All batteryInfo reporting log must be 'DISCHARGING' except first.
batteryValid = 1
else:
batteryValid = 0
elif len(btList) == 1:
if len(btList[0].split('/')) >= 5:
nTotLevel = float(btList[0].split('/')[3])
nBatLevel = float(btList[0].split('/')[4])
batteryStart = (nBatLevel/nTotLevel)*100
batteryEnd = batteryStart
batteryValid = 0
#get appSessionID
appSessionId = ''
strSQL = """SELECT sessionID FROM appsession WHERE androidID = '%s' and pkgnm = '%s' and sID = %d
and (startTime - 5) <= %d and startTime > 0 and (endTime > %d or statAppss > '0') ORDER BY sessionID DESC LIMIT 1""" % (aatLog['deviceID'],
aatLog['pkgName'], aatLog['sID'], aatLog['agentLogStartTime'], aatLog['agentLogStartTime'])
ret = waveCursor.execute(strSQL)
if ret > 0:
aarow = waveCursor.fetchone()
if aarow['sessionID'] > '':
appSessionId = aarow['sessionID']
#vidsession_log values
getVidLogStatic(vidLogDict, aatLog, appSessionId, netType)
#initialize if as-is record has no valid value.
if row['androidID'] == '': vidDict['androidID'] = aatLog.get('deviceID', '')
if row['vID'] == '': vidDict['vID'] = aatLog.get('vID', '')
if row['sID'] == 0: vidDict['sID'] = aatLog.get('sID', 0)
if row['verCode'] == 0: vidDict['verCode'] = aatLog.get('verCode', 0)
if row['osVer'] == '': vidDict['osVer'] = aatLog.get('osVer', '')
if row['brand'] == '': vidDict['brand'] = aatLog.get('brand', '')
if row['model'] == '': vidDict['model'] = aatLog.get('model', '')
if row['cellIdSt'] == '' and len(cellid) > 6: vidDict['cellIdSt'] = cellid
if row['cellIdEnd'] == '' and len(cellid) > 6: vidDict['cellIdEnd'] = cellid
if row['bMao'] < 0: vidDict['bMao'] = int(aatLog.get('agentAatOnOff', -1))
if row['bAnsAllow'] < 0: vidDict['bAnsAllow'] = int(aatLog.get('agentAllowAns', -1))
if row['bCellAllow'] < 0: vidDict['bCellAllow'] = int(aatLog.get('agentAllowMobile', -1))
if row['ansMode'] == '': vidDict['ansMode'] = aatLog.get('agentAnsMode', -1)
if row['agentUserSetup'] == '': vidDict['agentUserSetup'] = aatLog.get('agentUserSetup', '')
#if row['startLogType'] == '': vidDict['ansMode'] = aatLog.get('agentAnsMode', -1)
if row['hostName'] == '': vidDict['hostName'] = aatLog.get('playHost', '')
if row['originName'] == '': vidDict['originName'] = aatLog.get('playOrigin', '')
if row['contentID'] == '': vidDict['contentID'] = aatLog.get('playContentId', '')
if row['playServiceMode'] <= 0: vidDict['playServiceMode'] = aatLog.get('playServiceMode', 0)
if row['contentSize'] == 0:
if psmode == 1:
vidDict['contentSize'] = aatLog.get('vodContentSize', 0)
elif psmode == 4:
vidDict['contentSize'] = aatLog.get('audContentSize', 0)
elif psmode == 5:
vidDict['contentSize'] = aatLog.get('adnContentSize', 0)
if row['contentDuration'] == 0:
if psmode == 1:
vidDict['contentDuration'] = aatLog.get('vodContentDuration', 0)
elif psmode == 4:
vidDict['contentDuration'] = aatLog.get('audContentDuration', 0)
if row['contentBitrate'] == 0 and psmode in [2,3]:
vidDict['contentBitrate'] = aatLog.get('liveCurrentTSBitrate', 0)
#if row['channelName'] == '': vidDict['channelName'] = aatLog.get('playTitle', '').encode('utf-8')
if row['channelName'] == '': vidDict['channelName'] = aatLog.get('playTitle', '')
if row['pkgnm'] == '': vidDict['pkgnm'] = aatLog.get('pkgName', '')
if row['apppkgnm'] == '' or row['appvercd'] == '':
if(aatLog.has_key('playAppPackageName')):
appPkgs = aatLog['playAppPackageName'].split('/')
if len(appPkgs) >= 2:
vidDict['apppkgnm'] = appPkgs[0]
vidDict['appvercd'] = appPkgs[1]
if row['connectedNetCnt'] == 0:
if aatLog.has_key('netConnectedNetworkCount'):
vidDict['connectedNetCnt']=aatLog['netConnectedNetworkCount']
elif aatLog.has_key('netConnectivityCount'):
vidDict['connectedNetCnt']=aatLog['netConnectivityCount']
if row['abrBitrateList'] == '': vidDict['abrBitrateList'] = aatLog.get('playBitrateList', '')
if row['abrUserSelBR'] == '': vidDict['abrUserSelBR'] = aatLog.get('userSelectBitrate', '')
if psmode == 5:
if row['vidnetType'] == 0: vidDict['vidnetType'] = aatLog.get('adnStartCode', 0)
if row['adnMode'] == '' or (row['adnMode'] <> 'BB' and aatLog.get('adnMode', '') == 'BB') :
vidDict['adnMode'] = aatLog.get('adnMode', '')
if row['adnRangeStart'] == 0: vidDict['adnRangeStart'] = aatLog.get('adnContentRangeStart', 0)
if row['adnDownSize'] < aatLog.get('adnDownloadSize', 0):
vidDict['adnDownSize'] = aatLog.get('adnDownloadSize', 0)
if row['contentDuration'] < aatLog.get('adnDownloadTime', 0):
vidDict['contentDuration'] = aatLog.get('adnDownloadTime', 0)
if row['adnContentID'] == 0: vidDict['adnContentID'] = aatLog.get('adnContentID', 0)
vidDict['cellSysRxBytes'] = row['cellSysRxBytes'] + aatLog.get('trafficSystemMoRxBytes', 0)
vidDict['wfSysRxBytes'] = row['wfSysRxBytes'] + aatLog.get('trafficSystemWFRxBytes', 0)
# process attributes depending on log-order
if aatLog['tTM'] > row['maxTTM']: #The log is the last of this playSession
if len(cellid) > 6:
vidDict['cellIdEnd'] = cellid
vidDict['endLogType'] = logType
vidDict['vidnetEndTime'] = aatLog.get('agentLogEndTime', 0)
vidDict['vidnetDuration'] = vidDict['vidnetEndTime'] - row['vidnetStartTime']
if aatLog.get('playPlayingTime', 0) > row['playTime']:
vidDict['playTime'] = aatLog['playPlayingTime']
if aatLog.get('playSeekCount', 0) > row['seekCnt']:
vidDict['seekCnt'] = aatLog['playSeekCount']
if aatLog.get('playSeekForwardCount', 0) > row['ffCnt']:
vidDict['ffCnt'] = aatLog['playSeekForwardCount']
if aatLog.get('playSeekRewindCount', 0) > row['rwCnt']:
vidDict['rwCnt'] = aatLog['playSeekRewindCount']
if aatLog.has_key('netConnectedNetworkCount'):
if row['connectedNetCnt'] < aatLog['netConnectedNetworkCount']:
vidDict['connectedNetCnt']=aatLog['netConnectedNetworkCount']
elif aatLog.has_key('netConnectivityCount'):
if row['connectedNetCnt'] < aatLog['netConnectivityCount']:
vidDict['connectedNetCnt']=aatLog['netConnectivityCount']
if psmode in [1, 4, 5]:
vidDict['pauseCnt'] = aatLog.get('playBufferingCount', 0)
vidDict['resumeCnt'] = aatLog.get('playResumeCount', 0)
if aatLog.get('playAccBufferingTime', 0) > row['pauseTime']:
vidDict['pauseTime'] = aatLog.get('playAccBufferingTime', 0)
if aatLog.get('playMaxBufferingTime', 0) > row['maxPauseTime']:
vidDict['maxPauseTime'] = aatLog.get('playMaxBufferingTime', 0)
if aatLog.get('trafficAgentMoBytes', 0) > row['cellRxBytes']:
vidDict['cellRxBytes'] = aatLog['trafficAgentMoBytes']
if aatLog.get('trafficAgentWFBytes', 0) > row['wfRxBytes']:
vidDict['wfRxBytes'] = aatLog['trafficAgentWFBytes']
vidDict['cellAvgTP'] = round(aatLog.get('trafficAgentMoAveBW',0), 4)
vidDict['wfAvgTP'] = round(aatLog.get('trafficAgentWFAveBW',0), 4)
if vidDict['cellAvgTP'] > 0:
vidDict['cellDuration'] = int((aatLog.get('trafficAgentMoBytes', 0)*8) / (aatLog['trafficAgentMoAveBW']*1000000))
if vidDict['wfAvgTP'] > 0:
vidDict['wfDuration'] = int((aatLog.get('trafficAgentWFBytes', 0)*8) / (aatLog['trafficAgentWFAveBW']*1000000))
vidDict['batteryEnd'] = batteryEnd
#get appSessionID for vidsession
strSQL = """SELECT sessionID FROM appsession WHERE androidID = '%s' and pkgnm = '%s' and sID = %d
and startTime < %d and startTime > 0 and ((endTime+5) > %d or statAppss > '0') ORDER BY sessionID DESC LIMIT 1""" % (aatLog['deviceID'],
aatLog['pkgName'], aatLog['sID'], aatLog['agentLogEndTime'], aatLog['agentLogEndTime'])
ret = waveCursor.execute(strSQL)
if ret > 0:
aarow = waveCursor.fetchone()
if aarow['sessionID'] > '':
vidDict['appSessionIDEnd'] = aarow['sessionID']
#vidsession_log values
if aatLog.get('playPlayingTime', 0) > row['playTime']:
vidLogDict['playTime'] = aatLog.get('playPlayingTime', 0) - row['playTime']
else:
vidLogDict['playTime'] = 0
if aatLog.get('playAccBufferingTime', 0) > row['pauseTime']:
vidLogDict['pauseTime'] = aatLog.get('playAccBufferingTime', 0) - row['pauseTime']
else:
vidLogDict['pauseTime'] = 0
if aatLog.get('playPreparingTime', 0) > row['elapsedTime']:
vidLogDict['elapsedTime'] = aatLog.get('playPreparingTime', 0) - row['elapsedTime']
else:
vidLogDict['elapsedTime'] = 0
if aatLog.get('trafficAgentMoBytes', 0) > row['cellRxBytes']:
vidLogDict['cellRxBytes'] = aatLog.get('trafficAgentMoBytes', 0) - row['cellRxBytes']
else:
vidLogDict['cellRxBytes'] = 0
if aatLog.get('trafficAgentWFBytes', 0) > row['wfRxBytes']:
vidLogDict['wfRxBytes'] = aatLog.get('trafficAgentWFBytes', 0) - row['wfRxBytes']
else:
vidLogDict['wfRxBytes'] = 0
if vidDict['cellAvgTP'] > 0 and vidDict['cellDuration'] > row['cellDuration']:
vidLogDict['cellDuration'] = vidDict['cellDuration'] - row['cellDuration']
else:
vidLogDict['cellDuration'] = 0
if vidDict['wfAvgTP'] > 0 and vidDict['wfDuration'] > row['wfDuration']:
vidLogDict['wfDuration'] = vidDict['wfDuration'] - row['wfDuration']
else:
vidLogDict['wfDuration'] = 0
elif row['bpsid'] == '': # The log is the first of this playSession
if len(cellid) > 6:
vidDict['cellIdSt'] = cellid
vidDict['startLogType'] = logType
vidDict['vidnetStartTime'] = aatLog.get('agentLogStartTime', 0)
vidDict['vidnetDuration'] = row['vidnetEndTime'] - vidDict['vidnetStartTime']
vidDict['batteryStart'] = batteryStart
if appSessionId > '':
vidDict['appSessionIDSt'] = appSessionId
#vidsession_log values
vidLogDict['playTime'] = aatLog.get('playPlayingTime', 0)
vidLogDict['pauseTime'] = aatLog.get('playAccBufferingTime', 0)
vidLogDict['elapsedTime'] = aatLog.get('playPreparingTime', 0)
vidLogDict['cellRxBytes'] = aatLog.get('trafficAgentMoBytes', 0)
vidLogDict['wfRxBytes'] = aatLog.get('trafficAgentWFBytes', 0)
if round(aatLog.get('trafficAgentMoAveBW',0), 4) > 0:
vidLogDict['cellDuration'] = int((aatLog.get('trafficAgentMoBytes', 0)*8) / (aatLog['trafficAgentMoAveBW']*1000000))
else:
vidLogDict['cellDuration'] = 0
if round(aatLog.get('trafficAgentWFAveBW',0), 4) > 0:
vidLogDict['wfDuration'] = int((aatLog.get('trafficAgentWFBytes', 0)*8) / (aatLog['trafficAgentWFAveBW']*1000000))
else:
vidLogDict['wfDuration'] = 0
updateVidLog(waveCursor, vidLogDict, row)
else: # The log is middle of this playSession
if aatLog.get('playPlayingTime', 0) > row['mPlayTime']:
vidLogDict['playTime'] = aatLog.get('playPlayingTime', 0) - row['mPlayTime']
else:
vidLogDict['playTime'] = 0
if aatLog.get('playAccBufferingTime', 0) > row['mPauseTime']:
vidLogDict['pauseTime'] = aatLog.get('playAccBufferingTime', 0) - row['mPauseTime']
else:
vidLogDict['pauseTime'] = 0
if aatLog.get('playPreparingTime', 0) > row['mElapsedTime']:
vidLogDict['elapsedTime'] = aatLog.get('playPreparingTime', 0) - row['mElapsedTime']
else:
vidLogDict['elapsedTime'] = 0
if aatLog.get('trafficAgentMoBytes', 0) > row['mCellBytes']:
vidLogDict['cellRxBytes'] = aatLog.get('trafficAgentMoBytes', 0) - row['mCellBytes']
else:
vidLogDict['cellRxBytes'] = 0
if aatLog.get('trafficAgentWFBytes', 0) > row['mWFBytes']:
vidLogDict['wfRxBytes'] = aatLog.get('trafficAgentWFBytes', 0) - row['mWFBytes']
else:
vidLogDict['wfRxBytes'] = 0
vidLogDict['cellDuration'] = 0
vidLogDict['wfDuration'] = 0
if round(aatLog.get('trafficAgentMoAveBW',0), 4) > 0:
tempdur = int((aatLog.get('trafficAgentMoBytes', 0)*8) / (aatLog['trafficAgentMoAveBW']*1000000))
if tempdur > row['mCellDur']:
vidLogDict['cellDuration'] = tempdur - int(row['mCellDur'])
if round(aatLog.get('trafficAgentWFAveBW',0), 4) > 0:
tempdur = int((aatLog.get('trafficAgentWFBytes', 0)*8) / (aatLog['trafficAgentWFAveBW']*1000000))
if tempdur > row['mWFDur']:
vidLogDict['wfDuration'] = tempdur - int(row['mWFDur'])
updateVidLog(waveCursor, vidLogDict, row)
# process independent attributes not depending on log-order
if psmode in [2, 3]:
if logType == 6:
vidDict['pauseCnt'] = row['pauseCnt'] + 1
elif logType == 7:
vidDict['resumeCnt'] = row['resumeCnt'] + 1
if logType in [5, 8]:
if netType == '0': #WIFI
vidDict['netW2CTransferCnt'] = row['netW2CTransferCnt'] + 1
elif netType == '1': #mobile
vidDict['netC2WTransferCnt'] = row['netC2WTransferCnt'] + 1
if row['batteryValid'] == '1' and batteryValid == 0:
vidDict['batteryValid'] = '0'
if aatLog.get('netCellState', -1) > 0 and row['netAllowCell'] == '1': #The log not allow cell, as-is fully allowed.
vidDict['netAllowCell'] = '2'
elif aatLog.get('netCellState', -1) == 0 and row['netAllowCell'] == '0': #The log allow cell, as-is not allowed at all.
vidDict['netAllowCell'] = '2'
vidDict['bbCount'] = row['bbCount'] + aatLog.get('bbCount', 0)
if row['elapsedTime'] == 0 and aatLog.get('playPreparingTime', 0) > 0:
vidDict['elapsedTime'] = aatLog['playPreparingTime']
elif row['mLogType'] == 10 and row['mBufferState'] == '2' and aatLog.get('playPreparingTime', 0) > 0:
vidDict['elapsedTime'] = row['elapsedTime'] + aatLog['playPreparingTime']
#insert tables
vidDict['playSessionID'] = row['playSessionID']
updateVidnet(waveCursor, vidDict)
insertVidnetLog(waveCursor, vidLogDict)
except Exception, e:
log.error("vidupdate %s" % e)
log.error(aatLog)
log.error(vidDict)
raise e
def vidcreate(waveCursor, aatLog):
try:
vidDict = {}
vidLogDict = {}
#get some values to use
cellid = "%s_%s_%s" % (aatLog.get('confOperator', ''), aatLog.get('netCID', ''), aatLog.get('netLAC', ''))
psmode = int(aatLog['playServiceMode'])
logType = aatLog.get('agentLogType', -1)
if aatLog.get('netActiveNetwork', '').find('WIFI') >= 0:
netType = '0'
elif aatLog.get('netActiveNetwork', '').find('mobile') >= 0:
netType = '1'
else:
netType = '2'
vidDict['playSessionID'] = aatLog['playSessionId']
vidDict['androidID'] = aatLog.get('deviceID', '')
vidDict['vID'] = aatLog.get('vID', '')
vidDict['sID'] = aatLog.get('sID', 0)
vidDict['verCode'] = aatLog.get('verCode', 0)
vidDict['osVer'] = aatLog.get('osVer', '')
vidDict['brand'] = aatLog.get('brand', '')
vidDict['model'] = aatLog.get('model', '')
vidDict['cellIdSt'] = cellid
vidDict['cellIdEnd'] = cellid
vidDict['bMao'] = int(aatLog.get('agentAatOnOff', -1))
vidDict['bAnsAllow'] = int(aatLog.get('agentAllowAns', -1))
vidDict['bCellAllow'] = int(aatLog.get('agentAllowMobile', -1))
vidDict['ansMode'] = aatLog.get('agentAnsMode', -1)
vidDict['agentUserSetup'] = aatLog.get('agentUserSetup', '')
#vidDict['ansMode'] = aatLog.get('agentAnsMode', -1)
vidDict['hostName'] = aatLog.get('playHost', '')
vidDict['originName'] = aatLog.get('playOrigin', '')
vidDict['contentID'] = aatLog.get('playContentId', '')
vidDict['playServiceMode'] = psmode
if psmode == 1:
vidDict['contentSize'] = aatLog.get('vodContentSize', 0)
elif psmode == 4:
vidDict['contentSize'] = aatLog.get('audContentSize', 0)
elif psmode == 5:
vidDict['contentSize'] = aatLog.get('adnContentSize', 0)
else:
vidDict['contentSize'] = 0
if psmode == 1:
vidDict['contentDuration'] = aatLog.get('vodContentDuration', 0)
elif psmode == 4:
vidDict['contentDuration'] = aatLog.get('audContentDuration', 0)
elif psmode == 5:
vidDict['contentDuration'] = aatLog.get('adnDownloadTime', 0)
else:
vidDict['contentDuration'] = 0
if psmode in [2,3]:
vidDict['contentBitrate'] = aatLog.get('liveCurrentTSBitrate', 0)
else:
vidDict['contentBitrate'] = 0
#vidDict['channelName'] = aatLog.get('playTitle', '').encode('utf-8')
vidDict['channelName'] = aatLog.get('playTitle', '')
vidDict['pkgnm'] = aatLog.get('pkgName', '')
vidDict['apppkgnm'] = ""
vidDict['appvercd'] = ""
if(aatLog.has_key('playAppPackageName')):
appPkgs = aatLog['playAppPackageName'].split('/')
if len(appPkgs) >= 2:
vidDict['apppkgnm'] = appPkgs[0]
vidDict['appvercd'] = appPkgs[1]
if aatLog.has_key('netConnectedNetworkCount'):
vidDict['connectedNetCnt']=aatLog['netConnectedNetworkCount']
elif aatLog.has_key('netConnectivityCount'):
vidDict['connectedNetCnt']=aatLog['netConnectivityCount']
else:
vidDict['connectedNetCnt']=0
vidDict['abrBitrateList'] = aatLog.get('playBitrateList', '')
vidDict['abrUserSelBR'] = aatLog.get('userSelectBitrate', '')
if psmode == 5:
vidDict['vidnetType'] = aatLog.get('adnStartCode', 0)
vidDict['adnMode'] = aatLog.get('adnMode', '')
vidDict['adnRangeStart'] = aatLog.get('adnContentRangeStart', 0)
vidDict['adnDownSize'] = aatLog.get('adnDownloadSize', 0)
vidDict['adnContentID'] = aatLog.get('adnContentID', 0)
vidDict['startLogType'] = logType
vidDict['endLogType'] = logType
vidDict['vidnetStartTime'] = aatLog.get('agentLogStartTime', 0)
vidDict['vidnetEndTime'] = aatLog.get('agentLogEndTime', 0)
vidDict['vidnetDuration'] = vidDict['vidnetEndTime'] - vidDict['vidnetStartTime']
# process independent attributes not depending on log-order
vidDict['pauseCnt'] = 0
vidDict['resumeCnt'] = 0
vidDict['netW2CTransferCnt'] = 0
vidDict['netC2WTransferCnt'] = 0
if psmode in [2, 3]:
if logType == 6:
vidDict['pauseCnt'] = 1
elif logType == 7:
vidDict['resumeCnt'] = 1
elif psmode in [1, 4, 5]:
vidDict['pauseCnt'] = aatLog.get('playBufferingCount', 0)
vidDict['resumeCnt'] = aatLog.get('playResumeCount', 0)
if logType in [5, 8]:
if netType == '0': #WIFI
vidDict['netW2CTransferCnt'] = 1
elif netType == '1': #mobile
vidDict['netC2WTransferCnt'] = 1
vidDict['playTime'] = aatLog.get('playPlayingTime', 0)
vidDict['seekCnt'] = aatLog.get('playSeekCount', 0)
vidDict['ffCnt'] = aatLog.get('playSeekForwardCount', 0)
vidDict['rwCnt'] = aatLog.get('playSeekRewindCount', 0)
vidDict['pauseTime'] = aatLog.get('playAccBufferingTime', 0)
vidDict['maxPauseTime'] = aatLog.get('playMaxBufferingTime', 0)
vidDict['cellRxBytes'] = aatLog.get('trafficAgentMoBytes', 0)
vidDict['wfRxBytes'] = aatLog.get('trafficAgentWFBytes', 0)
vidDict['cellAvgTP'] = round(aatLog.get('trafficAgentMoAveBW',0), 4)
vidDict['wfAvgTP'] = round(aatLog.get('trafficAgentWFAveBW',0), 4)
vidDict['cellDuration'] = 0
vidDict['wfDuration'] = 0
vidDict['cellSysRxBytes'] = aatLog.get('trafficSystemMoRxBytes', 0)
vidDict['wfSysRxBytes'] = aatLog.get('trafficSystemWFRxBytes', 0)
if vidDict['cellAvgTP'] > 0:
vidDict['cellDuration'] = int((aatLog.get('trafficAgentMoBytes', 0)*8) / (aatLog['trafficAgentMoAveBW']*1000000))
if vidDict['wfAvgTP'] > 0:
vidDict['wfDuration'] = int((aatLog.get('trafficAgentWFBytes', 0)*8) / (aatLog['trafficAgentWFAveBW']*1000000))
batteryStart = 0
batteryEnd = 0
batteryValid = '0'
if aatLog.has_key('batteryInfo'):
btList = aatLog['batteryInfo'].split('|')
if len(btList) == 2:
if len(btList[0].split('/')) >= 5 and len(btList[1].split('/')) >= 5:
nTotLevel = float(btList[0].split('/')[3])
nBatLevel = float(btList[0].split('/')[4])
batteryStart = (nBatLevel/nTotLevel)*100
nTotLevel = float(btList[1].split('/')[3])
nBatLevel = float(btList[1].split('/')[4])
batteryEnd = (nBatLevel/nTotLevel)*100
if btList[1].split('/')[1] == 'DISCHARGING': #All batteryInfo reporting log must be 'DISCHARGING' except first.
batteryValid = 1
else:
batteryValid = 0
elif len(btList) == 1:
if len(btList[0].split('/')) >= 5:
nTotLevel = float(btList[0].split('/')[3])
nBatLevel = float(btList[0].split('/')[4])
batteryStart = (nBatLevel/nTotLevel)*100
batteryEnd = batteryStart
batteryValid = 0
vidDict['batteryStart'] = batteryStart
vidDict['batteryEnd'] = batteryEnd
vidDict['batteryValid'] = str(batteryValid)
if aatLog.get('netCellState', -1) > 0:
vidDict['netAllowCell'] = '0'
elif aatLog.get('netCellState', -1) == 0:
vidDict['netAllowCell'] = '1'
vidDict['bbCount'] = aatLog.get('bbCount', 0)
vidDict['elapsedTime'] = aatLog.get('playPreparingTime', 0)
#get appSessionID
vidDict['appSessionIDSt'] = ''
vidDict['appSessionIDEnd'] = ''
strSQL = """SELECT MAX(1) as ord, MAX(sessionID) as sessionID FROM appsession WHERE androidID = '%s' and pkgnm = '%s' and sID = %d
and (startTime - 5) <= %d and startTime > 0 and (endTime > %d or statAppss > '0')
UNION ALL
SELECT MAX(2), MAX(sessionID) FROM appsession WHERE androidID = '%s' and pkgnm = '%s' and sID = %d
and startTime < %d and startTime > 0 and ((endTime + 5) > %d or statAppss > '0')
""" % (aatLog['deviceID'], aatLog['pkgName'], aatLog['sID'], aatLog['agentLogStartTime'], aatLog['agentLogStartTime'],
aatLog['deviceID'], aatLog['pkgName'], aatLog['sID'], aatLog['agentLogEndTime'], aatLog['agentLogEndTime'])
ret = waveCursor.execute(strSQL)
if ret > 0:
aarows = waveCursor.fetchall()
for r in aarows:
if r['sessionID'] > '' and r['sessionID'] <> None:
if r['ord'] == 1:
vidDict['appSessionIDSt'] = r['sessionID']
elif r['ord'] == 2:
vidDict['appSessionIDEnd'] = r['sessionID']
#vidsession_log values
getVidLogStatic(vidLogDict, aatLog, vidDict['appSessionIDSt'], netType)
vidLogDict['playTime'] = aatLog.get('playPlayingTime', 0)
vidLogDict['pauseTime'] = aatLog.get('playAccBufferingTime', 0)
vidLogDict['elapsedTime'] = aatLog.get('playPreparingTime', 0)
vidLogDict['cellRxBytes'] = aatLog.get('trafficAgentMoBytes', 0)
vidLogDict['wfRxBytes'] = aatLog.get('trafficAgentWFBytes', 0)
vidLogDict['cellSysRxBytes'] = aatLog.get('trafficSystemMoRxBytes', 0)
vidLogDict['wfSysRxBytes'] = aatLog.get('trafficSystemWFRxBytes', 0)
vidLogDict['cellDuration'] = 0
vidLogDict['wfDuration'] = 0
if round(aatLog.get('trafficAgentMoAveBW',0), 4) > 0:
vidLogDict['cellDuration'] = int((aatLog.get('trafficAgentMoBytes', 0)*8) / (aatLog['trafficAgentMoAveBW']*1000000))
if round(aatLog.get('trafficAgentWFAveBW',0), 4) > 0:
vidLogDict['wfDuration'] = int((aatLog.get('trafficAgentWFBytes', 0)*8) / (aatLog['trafficAgentWFAveBW']*1000000))
#insert tables
insertVidnet(waveCursor, vidDict)
insertVidnetLog(waveCursor, vidLogDict)
except Exception, e:
log.error("vidcreate %s" % e)
log.error(aatLog)
raise e
#Following function is not related to Open DD. You should ignore it
def getBBinfo(rBBList, aatLog):
try:
if aatLog.has_key('bbCount') == False or aatLog.has_key('bbList') == False:
return
BBcount = aatLog['bbCount']
if BBcount == 0:
return
elif BBcount > 40:
BBcount = 40
if isinstance(aatLog['bbList'], list):
bblst = aatLog['bbList'][0:BBcount]
else:
bblst = aatLog['bbList'].strip('[ ]').split(',')
bblst = bblst[0:BBcount]
bblst = bblst[0:BBcount]
bbdict = {}
for bbItem in bblst:
if bbItem.find('|') < 0: continue
bbElm = bbItem.strip(" u'\"").split('|')
bbdict['psid'] = aatLog['playSessionId']
bbdict['bb'] = list(bbElm)
rBBList.append(bbdict.copy())
except Exception, e:
log.error("getBBinfo error:%s" % e)
log.error(aatLog)
raise e
#Following function is not related to Open DD. You should ignore it
def insertBBSQL(waveCursor, bbList):
try:
strLst = []
for bbElm in bbList:
if len(bbElm['bb']) == 8 and bbElm['bb'][7] == 'e':
strValue = "('%s', %s, %s, '%s', '%s', %s, %s, %s, unix_timestamp())" % (bbElm['psid'], bbElm['bb'][0], bbElm['bb'][2], \
'{0:02d}'.format(int(bbElm['bb'][1])), '{0:02d}'.format(int(bbElm['bb'][3])), bbElm['bb'][4], bbElm['bb'][5], bbElm['bb'][6])
elif len(bbElm['bb']) == 6:
strValue = "('%s', %s, %s, '%s', '%s', %s, %s, NULL, unix_timestamp())" % (bbElm['psid'], bbElm['bb'][0], bbElm['bb'][2], \
'{0:02d}'.format(int(bbElm['bb'][1])), '{0:02d}'.format(int(bbElm['bb'][3])), bbElm['bb'][4], bbElm['bb'][5])
else:
log.warn("BBList format error:")
log.warn(bbElm)
continue
strLst.append(strValue)
if len(strLst) > 0:
sql = """insert into vidsession_bb (playSessionID, stTime, endTime, stCode, endCode, trWF, trCell, stBBTime, lstuptmp)
values %s
on duplicate key update endTime = values(endTime), stCode = values(stCode), trWF = values(trWF), trCell = values(trCell), stBBTime = values(stBBTime), lstuptmp = unix_timestamp()
""" % ', '.join(strLst)
ret = waveCursor.execute(sql)
if ret == 0:
log.warn("insertBBSQL no record affected [%s]" % sql)
except Exception, e:
log.error("insertBBSQL error:%s" % e)
log.error(bbList)
raise e
#Following function is not related to Open DD. You should ignore it
def getNetConn(rstNetList, aatLog):
try:
if aatLog.has_key('netConnectivityList') == False or len(aatLog['netConnectivityList']) == 0:
return
try:
# log.info(type(aatLog['netConnectivityList']))
# log.info(aatLog['netConnectivityList'])
if type(aatLog['netConnectivityList']) == list:
netlst = aatLog['netConnectivityList']
else:
netlst = json.loads(aatLog['netConnectivityList'])
except Exception, e:
netlst = aatLog['netConnectivityList'].strip('[ ]').split(',')
for netItem in netlst:
if netItem == None:
break
if netItem.find('|') < 0:
if len(netItem) > 0:
continue
else:
break
netElm = netItem.strip(" u'\"").replace("||", "|").split('|')
netDict = {}
if netElm[2].find('WIFI') >= 0:
if len(netElm) == 7:
netDict['playSessionID'] = aatLog['playSessionId']
netDict['stTime'] = netElm[0]
netDict['ntype'] = 'w'
netDict['bssid'] = netElm[4]
netDict['ssid'] = netElm[3].replace("'", "''")
netDict['traffic'] = netElm[6]
rstNetList.append(netDict.copy())
else:
if len(netElm) == 5:
netDict['playSessionID'] = aatLog['playSessionId']
netDict['stTime'] = netElm[0]
netDict['ntype'] = 'm'
netDict['traffic'] = netElm[4]
rstNetList.append(netDict.copy())
except Exception, e:
log.error("getNetconnInfo error:[%s]%s" % (type(e), e))
log.error(aatLog)
raise e
#Following function is not related to Open DD. You should ignore it
def insertNetInfo(waveCursor, netList):
try:
strLst = []
for netElm in netList:
if netElm['ntype'] == 'w':
strValue = "('%s', %s, '%s', '%s', '%s', %s, unix_timestamp())" % \
(netElm['playSessionID'], netElm['stTime'], netElm['ntype'], \
netElm['bssid'], netElm['ssid'], netElm['traffic'])
else:
strValue = "('%s', %s, '%s', NULL, NULL, %s, unix_timestamp())" % \
(netElm['playSessionID'], netElm['stTime'], netElm['ntype'], netElm['traffic'])
strLst.append(strValue)
if len(strLst) > 0:
sql = """insert into vidsession_net (playSessionID, stTime, ntype, bssid, ssid, traffic, lstuptmp)
values %s
on duplicate key update ntype = values(ntype), bssid = values(bssid), ssid = values(ssid), traffic = values(traffic), lstuptmp = unix_timestamp()
""" % ', '.join(strLst)
ret = waveCursor.execute(sql)
if ret == 0:
log.warn("insertNetInfo no record affected [%s]" % sql)
except Exception, e:
log.error("insertNetInfo error:%s" % e)
log.error(strLst)
raise e
def insertVidnet(waveCursor, vidDict):
if vidDict == None:
log.warn('vidnetDict is null')
return False
cols = vidDict.keys()
vals = vidDict.values()
try:
slist = []
for v in vals:
if type(v) == str or type(v) == unicode:
slist.append("'" + v.replace("'", "''") + "'")
else:
slist.append(str(v))
sql = """insert into vidsession (%s, lstuptmp) values (%s, unix_timestamp())""" % (",".join(cols), unicode(",", "utf-8").join(slist))
waveCursor.execute(sql)
except Exception, e:
log.error("INSERT VIDNET ERROR:%s" % e)
log.error(vidDict)
raise e
def insertVidnetLog(waveCursor, vidLogDict):
if vidLogDict == None:
log.warn('vidLogDict is null')
return False
cols = vidLogDict.keys()
vals = vidLogDict.values()
try:
sql = """insert into vidsession_log (%s, lstuptmp) values (%s, unix_timestamp())""" % (",".join(cols), ",".join(["'" + str(val).replace("'", "''") + "'" for val in vals]))
waveCursor.execute(sql)
except Exception, e:
log.error("INSERT vidsession_log ERROR:%s" % e)
log.error(vidLogDict)
raise e
def updateVidnet(waveCursor, vidDict):
if vidDict == None:
log.warn('updateVidnet : vidDict is null')
return False
playSessionID = vidDict.pop('playSessionID')
cols = vidDict.keys()
vals = vidDict.values()
try:
slist = []
for key in vidDict:
if type(vidDict[key]) == str or type(vidDict[key]) == unicode:
s = "%s = '%s'" % (key, vidDict[key].replace("'", "''"))
else:
s = "%s = %s" % (key, str(vidDict[key]))
slist.append(s)
slist.append("lstuptmp = unix_timestamp()")
#sql = "UPDATE vidsession SET %s WHERE playSessionID = '%s'" % (unicode(',', 'utf-8').join(map(lambda key:"%s='%s'" % (key, unicode(vidDict[key], 'utf-8').replace("'", "''")), vidDict)), playSessionID)
sql = "UPDATE vidsession SET %s WHERE playSessionID = '%s'" % (unicode(',', 'utf-8').join(slist), playSessionID)
waveCursor.execute(sql)
except Exception, e:
log.error("update vidsession ERROR:%s, playSessionID:%s" % (e, playSessionID))
log.error(vidDict)
raise e
###############################################################################################################
######################## PROCESS By ONE AATLOG routine ############################################
###############################################################################################################
#
#Open DD processing only include following items in aatLog
#The others should be ignored.
#
# 'log_time', 'abrMode', 'agentAatOnOff',
# 'agentLogEndTime', 'agentLogStartTime', 'agentLogType', 'bbCount',
# 'bbList', 'brand', 'confOperator', 'deviceID', 'liveCurrentTSBitrate', 'model', 'netActiveNetwork', 'netCellState',
# 'netCID', 'netLAC', 'numTotalHits', 'osVer', 'pkgName', 'playAccBufferingTime',
# 'playAppPackageName', 'playContentId', 'playHost',
# 'playOrigin', 'playPlayingTime', 'playPreparingTime',
# 'playServiceMode', 'playSessionId', 'playTitle', 'requestBR', 'sID',
# 'trafficAgentMoAveBW', 'trafficAgentMoBytes', 'trafficAgentWFAveBW', 'trafficAgentWFBytes', 'trafficSystemMoRxBytes',
# 'trafficSystemWFRxBytes', 'playEndState', 'tTM', 'verCode', 'vID'
#
###############################################################################################################
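# Illustrative sketch only -- OPEN_DD_AATLOG_KEYS and filter_open_dd_items are not part of
# the original module. It shows one way to restrict an incoming aatLog dict to the Open DD
# item list documented above before handing it to ProcessAATLog.handler; the key tuple
# simply mirrors the comment block.
OPEN_DD_AATLOG_KEYS = (
    'log_time', 'abrMode', 'agentAatOnOff',
    'agentLogEndTime', 'agentLogStartTime', 'agentLogType', 'bbCount',
    'bbList', 'brand', 'confOperator', 'deviceID', 'liveCurrentTSBitrate', 'model',
    'netActiveNetwork', 'netCellState', 'netCID', 'netLAC', 'numTotalHits', 'osVer',
    'pkgName', 'playAccBufferingTime', 'playAppPackageName', 'playContentId', 'playHost',
    'playOrigin', 'playPlayingTime', 'playPreparingTime', 'playServiceMode',
    'playSessionId', 'playTitle', 'requestBR', 'sID',
    'trafficAgentMoAveBW', 'trafficAgentMoBytes', 'trafficAgentWFAveBW',
    'trafficAgentWFBytes', 'trafficSystemMoRxBytes', 'trafficSystemWFRxBytes',
    'playEndState', 'tTM', 'verCode', 'vID',
)
def filter_open_dd_items(aatLog):
    """Return a copy of aatLog containing only the Open DD relevant items."""
    return dict((k, v) for k, v in aatLog.items() if k in OPEN_DD_AATLOG_KEYS)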
class ProcessAATLog(object):
# for debug temp
OW_TASK_SUBSCRIBE_EVENTS = ['evtPlayerLog']
# OW_TASK_SUBSCRIBE_EVENTS = []
# for debug temp
OW_TASK_PUBLISH_EVENTS = []
OW_USE_HASHING = False
OW_HASH_KEY = None
OW_NUM_WORKER = 16
def publishEvent(self, event, params):
# THIS METHOD WILL BE OVERRIDE
# DO NOT EDIT THIS METHOD
pass
def handler(self, aatLog):
try:
waveCursor = None
#update apmain.mdev's plmnid for hoppin case
if aatLog.get('confOperator', '') > '':
updateMcc(aatLog.get('deviceID', ''), aatLog['confOperator'])
#in case of Off Log, process only End(0) log
if int(aatLog.get('agentAatOnOff', -1)) == 0:
if aatLog.get('agentLogType', -1) <> 0:
return
curAnID = aatLog.get('deviceID').strip(" u'")
curPkgname = aatLog.get('pkgName').strip(" u'")
curPsID = aatLog.get('playSessionId', '').strip(" u'")
curTTM = Decimal(aatLog.get('tTM', 0.0))
curEndTm = int(aatLog.get('agentLogEndTime', 0))
waveCursor = worker.dbmanager.allocDictCursor('myapwave')
waveCursor.execute("START TRANSACTION")
strSQL= None
strSQL = """SELECT a.*, IFNULL(b.psid, '') AS bpsid, b.*, e.* FROM
(SELECT m.*, MAX(n.tTM) AS maxTTM, MAX(IF(n.tTM = %.3f, 1, 0) ) AS bExist
FROM vidsession m LEFT OUTER JOIN vidsession_log n ON m.playSessionID = n.playSessionID
WHERE m.playSessionID = '%s') a LEFT OUTER JOIN
(SELECT playSessionID AS psid, MAX(tTM) AS lstTTM,
SUBSTR(MAX(CONCAT(RPAD(tTM, 14, '0'), logType)), 15) AS mLogType,
SUBSTR(MAX(CONCAT(RPAD(tTM, 14, '0'), logEndTime)), 15) AS mLogEndTime,
SUBSTR(MAX(CONCAT(RPAD(tTM, 14, '0'), IFNULL(bufferState, '0'))), 15) AS mBufferState,
SUM(playTime) AS mPlayTime, SUM(pauseTime) AS mPauseTime,
SUM(elapsedTime) AS mElapsedTime, SUM(cellRxBytes) AS mCellBytes, SUM(wfRxBytes) AS mWFBytes,
SUM(cellDuration) AS mCellDur, SUM(wfDuration) AS mWFDur
FROM vidsession_log
WHERE playSessionID = '%s' AND tTM < %.3f) b
ON a.playSessionID = b.psid
LEFT OUTER JOIN
(SELECT playSessionID AS psid, MIN(tTM) AS nextTTM
FROM vidsession_log
WHERE playSessionID = '%s' AND tTM > %.3f ) e
ON a.playSessionID = e.psid
""" % (curTTM, curPsID, curPsID, curTTM, curPsID, curTTM)
ret = waveCursor.execute(strSQL)
if ret > 0:
row = waveCursor.fetchone()
if row['playSessionID'] <> None:
if row['bExist'] == 1:
return
else:
vidupdate(waveCursor, aatLog, row)
else:
vidcreate(waveCursor, aatLog)
else: # Insert new playsession
vidcreate(waveCursor, aatLog)
# get BB, BW
#Following code is not related to Open DD. You should ignore it.
#### BEGIN - IGNORE
logSubList = []
getBBinfo(logSubList , aatLog)
insertBBSQL(waveCursor, logSubList)
logSubList = []
getNetConn(logSubList, aatLog)
insertNetInfo(waveCursor, logSubList)
#### END - IGNORE
waveCursor.execute("COMMIT")
except Exception, e:
log.error("processAATLOG : %s" % e)
log.error(aatLog)
if strSQL <> None:
log.error(strSQL)
if waveCursor <> None:
waveCursor.execute("ROLLBACK")
if str(e).find('Deadlock')> 0:
log.error("processAAATLog raise e")
raise e
finally:
if waveCursor <> None:
worker.dbmanager.freeCursor(waveCursor)
|
apache-2.0
| -2,276,425,715,459,409,000
| 38.855741
| 211
| 0.637161
| false
| 2.839871
| false
| false
| false
|
whitesmith/hawkpost
|
humans/migrations/0007_notification.py
|
1
|
1205
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-27 15:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth', '0007_alter_validators_add_error_messages'),
('humans', '0006_user_server_signed'),
]
operations = [
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=150)),
('body', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('sent_at', models.DateTimeField(null=True)),
('send_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='auth.Group')),
],
options={
'verbose_name': 'Notification',
'verbose_name_plural': 'Notifications',
},
),
]
|
mit
| 1,414,934,465,553,693,700
| 35.515152
| 132
| 0.570124
| false
| 4.16955
| false
| false
| false
|
shivupa/pyci
|
methods/misc/asci_old2.py
|
1
|
6642
|
import scipy as sp
import scipy.linalg as spla
import scipy.sparse.linalg as splinalg
import numpy as np
from functools import reduce
import pyscf
import itertools
import h5py
from pyscf import gto, scf, ao2mo, fci
import pyscf.tools as pt
import copy
import matplotlib.pyplot as plt
from utils import *
#############
# INPUT
#############
#TODO: implement function that finds particles/holes based on set operations (will be easier with aocc,bocc lists of indices instead of docc,aocc(single),bocc(single)
np.set_printoptions(precision=4,suppress=True)
mol = gto.M(
atom = [['O', (0.000000000000, -0.143225816552, 0.000000000000)],
['H', (1.638036840407, 1.136548822547, -0.000000000000)],
['H', (-1.638036840407, 1.136548822547, -0.000000000000)]],
basis = 'STO-3G',
verbose = 1,
unit='b',
symmetry=True
)
Na,Nb = mol.nelec #nelec is a tuple with (N_alpha, N_beta)
nao=mol.nao_nr()
s = mol.intor('cint1e_ovlp_sph')
t = mol.intor('cint1e_kin_sph')
v = mol.intor('cint1e_nuc_sph')
h=t+v
printroots=4
#############
# FUNCTIONS
#############
""" TODO: remove this?def create_PYSCF_fcidump():
myhf = scf.RHF(mol)
E = myhf.kernel()
c = myhf.mo_coeff
h1e = reduce(np.dot, (c.T, myhf.get_hcore(), c))
eri = ao2mo.kernel(mol, c)
pt.fcidump.from_integrals('fcidump.txt', h1e, eri, c.shape[1],mol.nelectron, ms=0)
cisolver = fci.FCI(mol, myhf.mo_coeff)
print('E(HF) = %.12f, E(FCI) = %.12f' % (E,(cisolver.kernel()[0] + mol.energy_nuc())))
"""
def amplitude(det,excitation):
return 0.1
#############
# INITIALIZE
#############
myhf = scf.RHF(mol)
E = myhf.kernel()
c = myhf.mo_coeff
#if you change the sign of these two orbitals, the hamiltonian matrix elements agree with those from GAMESS
#c.T[2]*=-1
#c.T[5]*=-1
cisolver = fci.FCI(mol, c)
#print('PYSCF E(FCI) = %.12f' % (cisolver.kernel()[0] + mol.energy_nuc()))
efci = cisolver.kernel(nroots=printroots)[0] + mol.energy_nuc()
h1e = reduce(np.dot, (c.T, myhf.get_hcore(), c))
eri = ao2mo.kernel(mol, c)
cdets = 25
tdets = 50
threshold = 1e-13 #threshold for hii and hij
#use eri[idx2(i,j),idx2(k,l)] to get (ij|kl) chemists' notation 2e- ints
#make full 4-index eris in MO basis (only for testing idx2)
#eri_mo = ao2mo.restore(1, eri, nao)
#eri in AO basis
#eri_ao = mol.intor('cint2e_sph')
#eri_ao = eri_ao.reshape([nao,nao,nao,nao])
#print h1e
#print eri
#print np.shape(h1e),np.shape(eri)
#print mol.nelectron, np.shape(h1e)[0]*2
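# Note (assumption, added for clarity): idx2 is imported from utils via the wildcard
# import above; it is presumably the standard triangular compound index used with the
# symmetric eri storage returned by ao2mo.kernel, i.e. equivalent to:
#   def idx2(i, j):
#       return i * (i + 1) // 2 + j if i >= j else j * (j + 1) // 2 + i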
num_orbs=2*nao
num_occ = mol.nelectron
num_virt = num_orbs - num_occ
#bitstring = "1"*num_occ
#bitstring += "0"*num_virt
#print(bitstring)
#starting_amplitude =1.0
#original_detdict = {bitstring:starting_amplitude}
H_core = np.array((cdets,cdets))
H_target = np.array((tdets,tdets))
#generate all determinants
fulldetlist_sets=gen_dets_sets(nao,Na,Nb)
ndets=len(fulldetlist_sets)
#start with HF determinant
original_detdict = {fulldetlist_sets[0]:1.0}
#lists for csr sparse storage of hamiltonian
#if this is just for storage (and not diagonalization) then we can use a dict instead (or store as upper half of sparse matrix)
hrow=[]
hcol=[]
hval=[]
for i in range(ndets):
idet=fulldetlist_sets[i]
hii = calc_hii_sets(idet,h1e,eri)
if abs(hii)>threshold: #we probably don't need this
hrow.append(i)
hcol.append(i)
hval.append(hii)
for j in range(i+1,ndets):
jdet=fulldetlist_sets[j]
nexc_ij = n_excit_sets(idet,jdet)
if nexc_ij in (1,2):
if nexc_ij==1:
hij = calc_hij_single_sets(idet,jdet,h1e,eri)
else:
hij = calc_hij_double_sets(idet,jdet,h1e,eri)
if abs(hij)>threshold:
hrow.append(i)
hrow.append(j)
hcol.append(j)
hcol.append(i)
hval.append(hij)
hval.append(hij)
fullham=sp.sparse.csr_matrix((hval,(hrow,hcol)),shape=(ndets,ndets))
#hamiltonian_heatmap(fullham);
#print(len(fulldetlist_sets))
eig_vals,eig_vecs = sp.sparse.linalg.eigsh(fullham,k=2*printroots)
eig_vals_sorted = np.array(sorted(eig_vals)[:printroots]) + mol.energy_nuc()
eig_vals_gamess = [-75.0129802245,
-74.7364625517,
-74.6886742417,
-74.6531877287]
print("first {:} pyci eigvals vs PYSCF eigvals".format(printroots))
for i,j in zip(eig_vals_sorted, efci):
print(i,j)
#############
# MAIN LOOP
#############
# a^dagger_i a_j |psi>
temp_detdict = {}
temp_double_detdict = {}
new_detdict = copy.deepcopy(original_detdict)
#print(temp_detdict)
for det in original_detdict:
occ_index = []
virt_index = []
count = 0
for i in det:
if i == "1":
occ_index.append(count)
else:
virt_index.append(count)
count +=1
#print(occ_index)
#print(virt_index)
for i in occ_index:
for j in virt_index:
temp_det = list(det)
temp_det[i] = "0"
temp_det[j] = "1"
temp_det = ''.join(temp_det)
temp_detdict[temp_det] = 0.1
#print temp_det, temp_amplitude
for k in occ_index:
for l in virt_index:
if k>i and l>j:
temp_double_det = list(det)
temp_double_det[i] = "0"
temp_double_det[j] = "1"
temp_double_det[k] = "0"
temp_double_det[l] = "1"
temp_double_det = ''.join(temp_double_det)
temp_double_detdict[temp_double_det] = 0.3
for i in temp_detdict:
try:
new_detdict[i] += temp_detdict[i]
    except KeyError:
new_detdict.update({i:temp_detdict[i]})
for i in temp_double_detdict:
try:
new_detdict[i] += temp_double_detdict[i]
    except KeyError:
new_detdict.update({i:temp_double_detdict[i]})
#new_detdict.update(temp_double_detdict)
#detdict = {}
#new_detdict.update(original_detdict)
#print("shiv",len(temp_detdict))
#print("shiv",len(temp_double_detdict))
#for i in new_detdict:
#print(i, new_detdict[i])
#print(sorted(new_detdict.items(), key=lambda x: x[1]))
#print(len(new_detdict))
#one of these agrees with gamess and one does not
#print("d_a_b_single(('1111100','1110110'),('1111100','1111100'))")
#d_a_b_single(('1111100','1110110'),('1111100','1111100'))
#print("d_a_b_single(('1111100','1011110'),('1111100','1110110'))")
#print(d_a_b_single(('1111100','1011110'),('1111100','1110110')))
#print("d_a_b_single(('1111100','1110011'),('1111100','1111001'))")
#print(d_a_b_single(('1111100','1110011'),('1111100','1111001')))
|
gpl-3.0
| 6,862,469,402,105,148,000
| 30.932692
| 166
| 0.606896
| false
| 2.703297
| false
| false
| false
|
dvl/cdzforever.net
|
cdzforever/apps/manga/migrations/0002_auto__add_field_pagina_image.py
|
1
|
2062
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Pagina.image'
db.add_column(u'manga_pagina', 'image',
self.gf('django.db.models.fields.files.ImageField')(default=None, max_length=100),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Pagina.image'
db.delete_column(u'manga_pagina', 'image')
models = {
u'manga.capitulo': {
'Meta': {'ordering': "('num', 'titulo')", 'object_name': 'Capitulo'},
'id': ('django_pg.models.fields.uuid.UUIDField', [], {u'auto_add': "u'uuid:uuid4'", 'primary_key': 'True'}),
'num': ('django.db.models.fields.IntegerField', [], {}),
'serie': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['manga.Serie']"}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '90'})
},
u'manga.pagina': {
'Meta': {'ordering': "('num',)", 'object_name': 'Pagina'},
'capitulo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['manga.Capitulo']"}),
'id': ('django_pg.models.fields.uuid.UUIDField', [], {u'auto_add': "u'uuid:uuid4'", 'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'num': ('django.db.models.fields.IntegerField', [], {})
},
u'manga.serie': {
'Meta': {'ordering': "('nome',)", 'object_name': 'Serie'},
'capitulos': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django_pg.models.fields.uuid.UUIDField', [], {u'auto_add': "u'uuid:uuid4'", 'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '90'})
}
}
complete_apps = ['manga']
|
mit
| 1,814,648,615,557,720,600
| 44.844444
| 120
| 0.549467
| false
| 3.419569
| false
| false
| false
|
vyzyv/university
|
python/Zestaw8/8_3.py
|
1
|
1027
|
from random import uniform
def calc_pi(n=100, r=1):
"""Function calculating approximation of pi number.
Based on Monte Carlo algorithm
Arguments are:
n - number of random numbers generated by uniform distribution
r - radius of a circle"""
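    #points are sampled uniformly in the 2r x 2r square, so the fraction landing inside the
    #inscribed circle tends to (pi*r^2)/(2r)^2 = pi/4; multiplying by 4 gives the estimate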
circle_counter = 0
for i in range(n):
x, y = uniform(0,2*r), uniform(0,2*r)
#check whether point is inside the circle
if ((x-r)**2 + (y-r)**2) < r**2:
circle_counter += 1
return 4*circle_counter / n
if __name__ == '__main__':
print('Pi approximation with n=%i and r=%f' %(10, 1), calc_pi(10, 1))
print('Pi approximation with n=%i and r=%f' %(100, 1), calc_pi(100, 1))
print('Pi approximation with n=%i and r=%f' %(1000, 1), calc_pi(1000, 1))
print('Pi approximation with n=%i and r=%f' %(10000, 1), calc_pi(10000, 1))
print('Pi approximation with n=%i and r=%f' %(100000, 1), calc_pi(100000, 1))
print('Pi approximation with n=%i and r=%f' %(1000000, 1), calc_pi(1000000, 1))
|
apache-2.0
| 4,753,075,106,202,006,000
| 34.413793
| 83
| 0.594937
| false
| 3.291667
| false
| false
| false
|
dougwig/a10-neutron-lbaas
|
a10_neutron_lbaas/db/models/scaling_group.py
|
1
|
10831
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sqlalchemy as sa
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import inspect
from sqlalchemy.orm import backref, relationship
from a10_neutron_lbaas.db import model_base as models
LOG = logging.getLogger(__name__)
class A10ScalingGroup(models.A10Base):
"""A10 Scaling Group - container of switch and workers"""
__tablename__ = u'a10_scaling_groups'
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
tenant_id = sa.Column(sa.String(255), nullable=True)
name = sa.Column(sa.String(255), nullable=True)
description = sa.Column(sa.String(255), nullable=True)
scaling_policy_id = sa.Column(sa.String(36),
sa.ForeignKey('a10_scaling_policies.id'),
nullable=True)
scaling_policy = relationship('A10ScalingPolicy', backref='scaling_groups')
switches = relationship('A10ScalingGroupSwitch')
workers = relationship('A10ScalingGroupWorker')
members = relationship('A10ScalingGroupMember', backref='scaling_group')
__mapper_args__ = {
'polymorphic_identity': __tablename__
}
class A10ScalingGroupBinding(models.A10Base):
__tablename__ = u'a10_scaling_group_bindings'
id = sa.Column(sa.String(36),
primary_key=True,
nullable=False,
default=models._uuid_str)
scaling_group_id = sa.Column(sa.String(36),
sa.ForeignKey('a10_scaling_groups.id'),
nullable=False)
scaling_group = relationship(A10ScalingGroup, backref='bindings')
lbaas_loadbalancer_id = sa.Column(sa.String(36),
unique=True,
nullable=False)
class A10ScalingGroupMember(models.A10Base):
"""A10 Scaling Group Member - switch/worker depending on 'role'"""
__tablename__ = "a10_scaling_group_members"
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
type = sa.Column(sa.String(50), nullable=False)
scaling_group_id = sa.Column(sa.String(36),
sa.ForeignKey('a10_scaling_groups.id'),
nullable=False)
tenant_id = sa.Column(sa.String(255), nullable=True)
name = sa.Column(sa.String(255), nullable=True)
description = sa.Column(sa.String(255), nullable=True)
host = sa.Column(sa.String(255), nullable=False)
api_version = sa.Column(sa.String(12), nullable=False)
username = sa.Column(sa.String(255), nullable=False)
password = sa.Column(sa.String(255), nullable=False)
protocol = sa.Column(sa.String(255), nullable=False)
port = sa.Column(sa.Integer, nullable=False)
nova_instance_id = sa.Column(sa.String(36), nullable=False)
__mapper_args__ = {
'polymorphic_identity': __tablename__,
'polymorphic_on': type
}
def add_virtual_server(self, neutron_id, **kwargs):
vs = A10ScalingGroupMemberVirtualServer.create(
neutron_id=neutron_id,
**kwargs)
self.virtual_servers.append(vs)
return vs
def get_virtual_server(self, neutron_id):
return inspect(self).session.\
query(A10ScalingGroupMemberVirtualServer).\
filter_by(member_id=self.id, neutron_id=neutron_id).\
first()
def delete_virtual_server(self, neutron_id):
vs = self.get_virtual_server(neutron_id)
if vs:
inspect(self).session.delete(vs)
class A10ScalingGroupWorker(A10ScalingGroupMember):
__tablename__ = "a10_scaling_group_workers"
id = sa.Column(sa.String(36),
sa.ForeignKey(u'a10_scaling_group_members.id'),
primary_key=True,
default=models._uuid_str,
nullable=False)
__mapper_args__ = {
'polymorphic_identity': __tablename__,
}
class A10ScalingGroupSwitch(A10ScalingGroupMember):
__tablename__ = "a10_scaling_group_switches"
id = sa.Column(sa.String(36),
sa.ForeignKey(u'a10_scaling_group_members.id'),
primary_key=True,
default=models._uuid_str,
nullable=False)
__mapper_args__ = {
'polymorphic_identity': __tablename__,
}
class A10ScalingGroupMemberVirtualServer(models.A10Base):
__tablename__ = "a10_scaling_group_member_virtual_servers"
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
member_id = sa.Column(sa.String(36),
sa.ForeignKey(u'a10_scaling_group_members.id'),
nullable=False)
member = relationship('A10ScalingGroupMember',
backref=backref('virtual_servers', cascade='all, delete-orphan'))
neutron_id = sa.Column(sa.String(36),
nullable=False)
ip_address = sa.Column(sa.String(50), nullable=False)
interface_ip_address = sa.Column(sa.String(50), nullable=True)
sflow_uuid = sa.Column(sa.String(36), nullable=False)
def add_port(self, port, **kwargs):
vs = A10ScalingGroupMemberVirtualServerPort.create(
port=port,
**kwargs)
self.ports.append(vs)
return vs
def get_port(self, port):
return inspect(self).session.\
query(A10ScalingGroupMemberVirtualServerPort).\
filter_by(virtual_server_id=self.id, port=port).\
first()
def delete_port(self, port):
port = self.get_port(port)
if port:
inspect(self).session.delete(port)
class A10ScalingGroupMemberVirtualServerPort(models.A10Base):
__tablename__ = "a10_scaling_group_member_virtual_server_ports"
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
virtual_server_id = sa.Column(sa.String(36),
sa.ForeignKey(u'a10_scaling_group_member_virtual_servers.id'),
nullable=False)
virtual_server = relationship('A10ScalingGroupMemberVirtualServer',
backref=backref('ports', cascade='all, delete-orphan'))
port = sa.Column(sa.Integer,
nullable=False)
protocol = sa.Column(sa.String(255), nullable=False)
sflow_uuid = sa.Column(sa.String(36), nullable=False)
class A10ScalingPolicy(models.A10Base):
__tablename__ = "a10_scaling_policies"
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
tenant_id = sa.Column(sa.String(255), nullable=True)
name = sa.Column(sa.String(255), nullable=True)
description = sa.Column(sa.String(255), nullable=True)
cooldown = sa.Column(sa.Integer, nullable=False)
min_instances = sa.Column(sa.Integer, nullable=False)
max_instances = sa.Column(sa.Integer, nullable=True)
reactions = relationship('A10ScalingPolicyReaction',
order_by="A10ScalingPolicyReaction.position",
collection_class=ordering_list('position'),
backref='policy')
def scaling_group_ids(self):
return [sg.id for sg in self.scaling_groups]
class A10ScalingPolicyReaction(models.A10Base):
__tablename__ = "a10_scaling_policy_reactions"
# A surrogate key is required by ordering_list
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
scaling_policy_id = sa.Column(sa.String(36),
sa.ForeignKey('a10_scaling_policies.id'),
nullable=False)
position = sa.Column(sa.Integer,
nullable=False)
alarm_id = sa.Column(sa.String(36),
sa.ForeignKey('a10_scaling_alarms.id'),
nullable=False)
action_id = sa.Column(sa.String(36),
sa.ForeignKey('a10_scaling_actions.id'),
nullable=False)
alarm = relationship('A10ScalingAlarm', backref='reactions')
action = relationship('A10ScalingAction', backref='reactions')
class A10ScalingAlarm(models.A10Base):
__tablename__ = "a10_scaling_alarms"
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
tenant_id = sa.Column(sa.String(255), nullable=True)
name = sa.Column(sa.String(255), nullable=True)
description = sa.Column(sa.String(255), nullable=True)
aggregation = sa.Column(sa.String(50), nullable=False)
measurement = sa.Column(sa.String(50), nullable=False)
operator = sa.Column(sa.String(50), nullable=False)
threshold = sa.Column(sa.Float(), nullable=False)
unit = sa.Column(sa.String(50), nullable=False)
period = sa.Column(sa.Integer, nullable=False)
period_unit = sa.Column(sa.String(50), nullable=False)
def scaling_group_ids(self):
return set(x
for reaction in self.reactions
for x in reaction.policy.scaling_group_ids())
class A10ScalingAction(models.A10Base):
__tablename__ = "a10_scaling_actions"
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
tenant_id = sa.Column(sa.String(255), nullable=True)
name = sa.Column(sa.String(255), nullable=True)
description = sa.Column(sa.String(255), nullable=True)
action = sa.Column(sa.String(50), nullable=False)
amount = sa.Column(sa.Integer)
def scaling_group_ids(self):
return set(x
for reaction in self.reactions
for x in reaction.policy.scaling_group_ids())
|
apache-2.0
| -8,164,595,207,262,769,000
| 36.348276
| 96
| 0.599391
| false
| 3.945719
| false
| false
| false
|
ppolewicz/ant-colony
|
antcolony/simulator.py
|
1
|
1673
|
from stats import QueenStats
from ant_move import AntStartMove
from edge import DummyEdgeEnd
class Simulator(object):
def __init__(self, reality, simulation_class, reality_processors):
self.reality = reality
self.simulation_class = simulation_class
self.reality_processors = reality_processors
def simulate(self, queen, amount_of_ants, stats_saver):
ant_classes = queen.spawn_ants(amount_of_ants)
ants = [ant_class(self.reality.environment_parameters) for ant_class in ant_classes]
anthills = self.reality.world.get_anthills()
antmoves = list(self.get_start_antmoves(ants, anthills))
for reality_processor in self.reality_processors:
reality_processor.set_ant_count(len(ants))
antmoves.extend(self.reality_processors)
stats = QueenStats(self.reality, len(ants), stats_saver)
simulation = self.simulation_class(self.reality, antmoves, stats)
return simulation
def get_results(self, simulation):
ticks = simulation.ticks
stats = simulation.stats
elapsed_time = self.reality.world.elapsed_time
return elapsed_time, ticks, stats
def reset(self):
self.reality.world.reset()
for reality_processor in self.reality_processors:
reality_processor.reset()
def get_start_antmoves(self, ants, anthills):
""" iterator """
counter = 0
number_of_anthills = len(anthills)
anthills = list(anthills)
for ant in ants:
anthill = anthills[counter % number_of_anthills]
yield AntStartMove(ant, DummyEdgeEnd(anthill))
counter += 1
|
bsd-3-clause
| 257,776,731,439,886,800
| 41.897436
| 92
| 0.663479
| false
| 3.685022
| false
| false
| false
|
sixu05202004/newsmeme
|
newsmeme/newsmeme/views/account.py
|
1
|
6900
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import uuid
from flask import Module, flash, request, g, current_app, \
abort, redirect, url_for, session, jsonify
from flask.ext.mail import Message
from flask.ext.babel import gettext as _
from flask.ext.principal import identity_changed, Identity, AnonymousIdentity
from newsmeme.forms import ChangePasswordForm, EditAccountForm, \
DeleteAccountForm, LoginForm, SignupForm, RecoverPasswordForm
from newsmeme.models import User
from newsmeme.helpers import render_template
from newsmeme.extensions import db, mail
from newsmeme.permissions import auth
account = Module(__name__)
@account.route("/login/", methods=("GET", "POST"))
def login():
form = LoginForm(login=request.args.get("login", None),
next=request.args.get("next", None))
# TBD: ensure "next" field is passed properly
if form.validate_on_submit():
user, authenticated = \
User.query.authenticate(form.login.data,
form.password.data)
if user and authenticated:
session.permanent = form.remember.data
identity_changed.send(current_app._get_current_object(),
identity=Identity(user.id))
# check if openid has been passed in
openid = session.pop('openid', None)
if openid:
user.openid = openid
db.session.commit()
flash(_("Your OpenID has been attached to your account. "
"You can now sign in with your OpenID."), "success")
else:
flash(
_("Welcome back, %(name)s", name=user.username), "success")
next_url = form.next.data
if not next_url or next_url == request.path:
next_url = url_for('user.posts', username=user.username)
return redirect(next_url)
else:
flash(_("Sorry, invalid login"), "error")
return render_template("account/login.html", form=form)
@account.route("/signup/", methods=("GET", "POST"))
def signup():
form = SignupForm(next=request.args.get("next"))
if form.validate_on_submit():
user = User()
form.populate_obj(user)
db.session.add(user)
db.session.commit()
identity_changed.send(current_app._get_current_object(),
identity=Identity(user.id))
flash(_("Welcome, %(name)s", name=user.username), "success")
next_url = form.next.data
if not next_url or next_url == request.path:
next_url = url_for('user.posts', username=user.username)
return redirect(next_url)
return render_template("account/signup.html", form=form)
@account.route("/logout/")
def logout():
flash(_("You are now logged out"), "success")
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
return redirect(url_for('frontend.index'))
@account.route("/forgotpass/", methods=("GET", "POST"))
def forgot_password():
form = RecoverPasswordForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
flash(_("Please see your email for instructions on "
"how to access your account"), "success")
user.activation_key = str(uuid.uuid4())
db.session.commit()
body = render_template("emails/recover_password.html",
user=user)
message = Message(subject=_("Recover your password"),
body=body,
sender=current_app.config.get(
'DEFAULT_MAIL_SENDER'),
recipients=[user.email])
mail.send(message)
return redirect(url_for("frontend.index"))
else:
flash(_("Sorry, no user found for that email address"), "error")
return render_template("account/recover_password.html", form=form)
@account.route("/changepass/", methods=("GET", "POST"))
def change_password():
user = None
if g.user:
user = g.user
elif 'activation_key' in request.values:
user = User.query.filter_by(
activation_key=request.values['activation_key']).first()
if user is None:
abort(403)
form = ChangePasswordForm(activation_key=user.activation_key)
if form.validate_on_submit():
user.password = form.password.data
user.activation_key = None
db.session.commit()
flash(_("Your password has been changed, "
"please log in again"), "success")
        # After the password is changed successfully, force the user to log out
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
return redirect(url_for("account.login"))
return render_template("account/change_password.html", form=form)
@account.route("/edit/", methods=("GET", "POST"))
@auth.require(401)
def edit():
form = EditAccountForm(g.user)
if form.validate_on_submit():
form.populate_obj(g.user)
db.session.commit()
flash(_("Your account has been updated"), "success")
return redirect(url_for("frontend.index"))
return render_template("account/edit_account.html", form=form)
@account.route("/delete/", methods=("GET", "POST"))
@auth.require(401)
def delete():
# confirm password & recaptcha
form = DeleteAccountForm()
if form.validate_on_submit():
db.session.delete(g.user)
db.session.commit()
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
flash(_("Your account has been deleted"), "success")
return redirect(url_for("frontend.index"))
return render_template("account/delete_account.html", form=form)
@account.route("/follow/<int:user_id>/", methods=("POST",))
@auth.require(401)
def follow(user_id):
user = User.query.get_or_404(user_id)
g.user.follow(user)
db.session.commit()
body = render_template("emails/followed.html",
user=user)
mail.send_message(subject=_("%s is now following you" % g.user.username),
body=body,
sender=current_app.config.get('DEFAULT_MAIL_SENDER'),
recipients=[user.email])
return jsonify(success=True,
reload=True)
@account.route("/unfollow/<int:user_id>/", methods=("POST",))
@auth.require(401)
def unfollow(user_id):
user = User.query.get_or_404(user_id)
g.user.unfollow(user)
db.session.commit()
return jsonify(success=True,
reload=True)
|
bsd-3-clause
| 1,973,843,528,547,501,600
| 26.733871
| 79
| 0.589852
| false
| 4.079478
| false
| false
| false
|
vwc/agita
|
src/vwcollective.simplecontact/vwcollective/simplecontact/browser/contactfolderview.py
|
1
|
2856
|
from zope.interface import implements, Interface
from Acquisition import aq_inner
from Products.Five import BrowserView
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from vwcollective.simplecontact.interfaces import IContactFolder
from vwcollective.simplecontact.interfaces import ISimpleContact
from vwcollective.simplecontact.interfaces import IPreviewTagProvider
from vwcollective.simplecontact import simplecontactMessageFactory as _
class ContactFolderView(BrowserView):
"""
ContactFolder browser view
"""
template = ViewPageTemplateFile('contactfolderview.pt')
def __call__(self):
return self.template()
@property
def portal_catalog(self):
return getToolByName(self.context, 'portal_catalog')
@property
def portal(self):
return getToolByName(self.context, 'portal_url').getPortalObject()
def has_subfolders(self):
"""Test if we have subfolders"""
return len(self.contained_contactfolders()) > 0
def contained_contactfolders(self):
"""Query the catalog for contained ContactFolders in order to decide
wether to show a catagory preview or the simplecontacts directly"""
context = aq_inner(self.context)
return [dict(title=cf.Title,
description=cf.Description,
url=cf.getURL(),
preview_tag=IPreviewTagProvider(cf.getObject()).tag,
image=cf.getObject().image,
)
for cf in self.portal_catalog(object_provides=IContactFolder.__identifier__,
path=dict(query='/'.join(context.getPhysicalPath()),
depth=1),
review_state='published',)
]
def contained_contacts(self):
"""List objects of type SimpleContact"""
context = aq_inner(self.context)
return [dict(title=c.Title,
url=c.getURL(),
profession=c.getObject().profession,
position=c.getObject().position,
email=c.getObject().email,
phone=c.getObject().phone,
image=c.getObject().image,
file=c.getObject().vita,
)
for c in self.portal_catalog(object_provides=ISimpleContact.__identifier__,
path=dict(query='/'.join(context.getPhysicalPath()),
depth=1),
sort_on='getObjPositionInParent',
review_state='published',)
]
|
mit
| -2,913,841,775,823,524,000
| 41
| 98
| 0.569678
| false
| 4.898799
| false
| false
| false
|
RoyShulman/openstack-project
|
cinder_functions.py
|
1
|
2028
|
from cinderclient.v2 import client as cinderClient
import easygui
class Cinder:
def __init__(self, keystone_session):
self.cinder_client = cinderClient.Client(session=keystone_session)
def create_volume(self, instance_name):
"""
        Create an empty block volume for an instance. The volume will be named INSTANCE_NAME + "Volume".
        :param instance_name: Name of the instance the volume will be added to
"""
try:
name = instance_name + "Volume"
            self.cinder_client.volumes.create(size=1000, name=name)
except Exception, e:
easygui.msgbox("Something went wrong, please try again")
finally:
return
def list_volumes(self):
"""
List all available volumes
:return: all available volumes
"""
try:
            return self.cinder_client.volumes.list()
except Exception, e:
print e
easygui.msgbox("Something went wrong, please try again")
return
def get_volume_id(self, volume_name):
"""
Return the volume ID of a given volume name
:param volume_name: Name of the volume
        :return: string of the unique volume ID
"""
try:
for volume in self.list_volumes():
if volume.name == volume_name:
return volume.id
except Exception, e:
print e
easygui.msgbox("Something went wrong please try again")
return
def attach_volume(self, instance_id, instance_name):
"""
Attach a volume to an instance
:param instance_id: Unique ID of the instance
:param instance_name: Name of the instance
"""
volume_id = self.get_volume_id(instance_name + "Volume")
try:
self.cinder_client.volumes.attach(volume_id, instance_id)
except Exception, e:
print e
easygui.msgbox("Something went wrong please try again")
return
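# Example usage (illustrative sketch only; `sess` stands for an existing keystoneauth session
# and the instance name/ID below are placeholders, not values defined in this module):
#   cinder = Cinder(sess)
#   cinder.create_volume("my-instance")
#   cinder.attach_volume(instance_id="<nova-instance-uuid>", instance_name="my-instance")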
|
mit
| -8,199,254,290,680,970,000
| 32.245902
| 98
| 0.582347
| false
| 4.506667
| false
| false
| false
|
mtik00/yamicache
|
setup.py
|
1
|
1167
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = []
setup_requirements = ['pytest-runner']
test_requirements = ['pytest']
setup(
name='yamicache',
version='0.6.0',
description="Yet another in-memory caching package",
long_description=readme + '\n\n' + history,
author="Timothy McFadden",
author_email='tim@timandjamie.com',
url='https://github.com/mtik00/yamicache',
packages=find_packages(include=['yamicache']),
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=True,
keywords='yamicache',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
|
mit
| 1,442,082,234,181,209,600
| 26.785714
| 56
| 0.652099
| false
| 3.601852
| true
| true
| false
|
fosfataza/protwis
|
common/diagrams_arrestin.py
|
1
|
24369
|
from common.diagrams import Diagram
from common.definitions import ARRESTIN_SEGMENTS
from residue.models import Residue
from residue.models import ResidueGenericNumber
from residue.models import ResidueNumberingScheme
from django.utils.safestring import mark_safe
from math import cos, sin, pi, floor, sqrt
from datetime import datetime
from collections import OrderedDict
class DrawArrestinPlot(Diagram):
def __init__(self, residue_list, protein_class, protein_name, nobuttons = None):
self.nobuttons = 'arrestin'
self.type = 'snakeplot'
self.receptorId = protein_name
self.family = protein_class
self.output = ''
# residueType = 'sp'
# FIXME DO PUREIMAGE
# $pureImage = isset($_GET['pureimage']) && $_GET['pureimage'] == 'TRUE' ? TRUE : FALSE;
# get sequence, baldwin, and bw information of this receptor
self.sequence = residue_list
self.segments = {}
self.segments_full = OrderedDict()
i = 0
for r in self.sequence:
if r.protein_segment:
segment = str(r.protein_segment.slug)
            elif r.segment_slug: # from family alignment
segment = str(r.segment_slug)
if segment not in self.segments:
self.segments[segment] = []
self.segments_full[segment] = r.protein_segment
label = ''
displaylabel = ''
if r.generic_number:
label = r.generic_number.label
elif hasattr(r, 'family_generic_number'):
label = r.family_generic_number
if r.display_generic_number: displaylabel = r.display_generic_number.label
displaylabel = r.amino_acid + str(r.sequence_number) + " \n " + displaylabel
if hasattr(r, 'frequency'):
displaylabel = displaylabel + "\n" + r.frequency
self.segments[segment].append([r.sequence_number,r.amino_acid,label,displaylabel])
i += 1
# for helix_num in range(1,2): #FIX for missing generic numbers
# rs = self.segments['H5']
# for i in range(0,len(rs)):
# if not rs[i][2]:
# if i+1<len(rs): #if there is a next one
# if rs[i+1][2]: #if it has generic number
# number = str(int(rs[i+1][2].split('x')[1])-1)
# rs[i][2] = str(helix_num) + "x" + number
# print(rs[i][2])
self.helixWidth = 75 # Width of helix
self.resNumPerRow = 4 # Residue number per row in helix
self.angleDeg = 22.0 # Angle size of each helix turn
self.residue_radius = 12 # Radius of the residue circle
# svg image padding offset
self.offsetX = -40 # -200
self.offsetY = 0 # -50
# margin between two helixes
self.margin = 0
# highest and lowest bound of this svg
self.high = 0
self.low = 0
# keep track of max Y positions of intra/extra loops
self.maxY = {'bottom': 0, 'top': 0}
self.maxX = {'left': 0, 'right': 0}
# helices length
# helicesLength = Svg::getSnakePlotHelicesLength($baldwin, $helixWidth, $angleDeg) #FIXME
# top and bottom residue coords in each helix
self.TBCoords = {}
self.output = ""
self.traceoutput = ""
self.helixoutput = ""
self.count = 1
self.count_sheet = 0
for s in ARRESTIN_SEGMENTS['Full']:
if self.segments_full[s].category == 'helix':
self.helixoutput += self.drawSnakePlotHelix(s)
self.count += 1
if self.segments_full[s].category == 'sheet':
self.helixoutput += self.drawSnakePlotSheet(s)
self.count += 1
self.count_sheet += 1
self.count = 0
for s in ARRESTIN_SEGMENTS['Full']:
if self.segments_full[s].category == 'loop' and s != 's19c':
#pass
try:
self.drawSnakePlotLoop(s)
except:
print(s)
else:
self.count += 1
self.drawSnakePlotTerminals()
def __str__(self):
self.output = "<g id=snake transform='translate(0, " + str(-self.low+ self.offsetY) + ")'>" + self.traceoutput+self.output+self.helixoutput+self.drawToolTip() + "</g>"; #for resizing height
return mark_safe(self.create(self.output,self.maxX['right']+40,self.high-self.low+self.offsetY*2,"snakeplot", self.nobuttons))
def drawSnakePlotHelix(self, segment):
rs = self.segments[segment]
helix_num = self.count
self.TBCoords[helix_num] = {}
if helix_num % 2 != 0: rs.reverse() # reverse direction for even helix because they go from inside to outside
output_residues = []
res_num = len(self.segments[segment])
output_residue_in = ''
output_residue_out = ''
output_trace = ''
startX = self.helixWidth + 40 + self.offsetX + (self.margin + self.helixWidth) * (helix_num - 1) - (self.count_sheet*20)
startY = self.offsetY
row_length = 3
row_pos = 0
row = 0
prevGeneric = '0.0.0'
bulgeX = 0
bulgeY = 0
bulge = 0
skip = 0
indentX = -self.residue_radius+3
indentY = 3
for i in range(0,res_num):
prevGeneric_number = prevGeneric.split('.')[2]
currGeneric_number = rs[i][2].split('.')[2]
if ((helix_num%2==0 and prevGeneric_number+'1'==currGeneric_number) or (helix_num%2!=0 and str(int(prevGeneric_number)-1)+'1'==currGeneric_number)) and i!=0:
bulge = 1
if row_pos==0: # if first in row, use space for bulge
bulgeY = 5
bulgeX = 7
else:
bulgeY = 5
bulgeX = 5
row_length+=1
elif i!=0 and ((helix_num%2!=0 and int(prevGeneric_number)-1!= int(currGeneric_number)) or (helix_num%2==0 and int(prevGeneric_number)+1!= int(currGeneric_number))):
skip = 1
if row_pos!=0 and row_pos+1<row_length:
nextX =round(startX-(row_pos+1)*self.residue_radius*1.5+indentX+bulgeX)
nextY = round(startY+row*self.residue_radius*2.4+(row_pos+1)*self.residue_radius*0.5+indentY+bulgeY)
output_trace += "<line x1="+str(prevX)+" y1="+str(prevY)+" x2="+str(nextX)+" y2="+str(nextY)+" stroke='grey' fill='none' stroke-width='1' stroke-dasharray='1,1' />"
row_pos +=1
elif row_pos+1==row_length:
row+=1
row_pos=0
row_length = 3 if row_length == 4 else 4
else:
row_pos +=1
# move left as you go down a row
x = round(startX-row_pos*self.residue_radius*1.6+indentX+bulgeX)
# Move down with right amount
y = round(startY+row*self.residue_radius*2.4+row_pos*self.residue_radius*0.5+indentY+bulgeY)
output_residue = self.DrawResidue(x,y,rs[i][1], rs[i][0], rs[i][3], self.residue_radius)
if x<self.maxX['left']: self.maxX['left'] = x
if x>self.maxX['right']: self.maxX['right'] = x
row_pos += 1
if bulge==1:
if row_pos==1: # if first in row, use space for bulge
bulgeY = -3
bulgeX = 10
else:
bulgeY = -3
bulgeX = 7
rs[i][2] = prevGeneric # make it the prev one, to catch missing ones correctly
bulge = 0
if row_length==3:
output_residue_in += output_residue
else:
output_residue_out += output_residue
output_residues.append(output_residue)
if i==0: self.TBCoords[helix_num]['top'] = [x,y]
if i==res_num-1: self.TBCoords[helix_num]['bottom'] = [x,y]
if (row_pos==1 and row!=0) or (skip==1 and row_pos==2): # if need for trace
if row_length==3: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-21)+" "+str(y-8)+" T"+str(x)+" "+str(y)
if row_length>=4: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-24)+" "+str(y-7)+" T"+str(x)+" "+str(y)
output_trace += "<path d='" + points + "' stroke='grey' fill='none' stroke-width='2' />"
# alternate between 4 and 3 res per row
if row_length>3 and row_pos>=row_length:
row_length=3
row_pos = 0
row += 1
bulgeX = 0
bulgeY = 0
indentX = -self.residue_radius+3
indentY = 3
elif row_length==3 and row_pos>=3:
row_length=4
row_pos = 0
row += 1
bulgeX = 0
bulgeY = 0
indentX = 0
indentY = 0
skip = 0
prevX = x
prevY = y
prevGeneric = rs[i][2]
temp = ''
if helix_num%2!=0: output_residues.reverse()
for res in output_residues:
temp += res
return output_trace+temp
def drawSnakePlotSheet(self, segment):
rs = self.segments[segment]
helix_num = self.count
self.TBCoords[helix_num] = {}
if helix_num%2!=0: rs.reverse() # reverse direction for even helix because they go from inside to outside
output_residues = []
res_num = len(self.segments[segment])
output_residue_in = ''
output_residue_out = ''
output_trace = ''
startX = 10+self.offsetX+(self.margin+self.helixWidth)*(helix_num-1)-(self.count_sheet*10)
startY = self.offsetY
row_length = 3
row_pos = 0
row = 0
prevGeneric = '0.0.0'
bulgeX = 0
bulgeY = 0
bulge = 0
skip = 0
indentX = -self.residue_radius+3
indentY = 3
for i in range(0,res_num):
prevGeneric_number = prevGeneric.split('.')[2]
currGeneric_number = rs[i][2].split('.')[2]
if (helix_num%2==0 and prevGeneric_number+'1'==currGeneric_number) or (helix_num%2!=0 and str(int(prevGeneric_number)-1)+'1'==currGeneric_number):
bulge = 1
if row_pos==0: # if first in row, use space for bulge
bulgeY = 5
bulgeX = 7
else:
bulgeY = 5
bulgeX = 5
row_length+=1
elif i!=0 and ((helix_num%2!=0 and int(prevGeneric_number)-1!= int(currGeneric_number)) or (helix_num%2==0 and int(prevGeneric_number)+1!= int(currGeneric_number))):
skip = 1
if row_pos!=0 and row_pos+1<row_length:
nextX =round(startX-(row_pos+1)*self.residue_radius*1.5+indentX+bulgeX)
nextY = round(startY+row*self.residue_radius*2.4+(row_pos+1)*self.residue_radius*0.5+indentY+bulgeY)
#output_trace += "<line x1="+str(prevX)+" y1="+str(prevY)+" x2="+str(nextX)+" y2="+str(nextY)+" stroke='grey' fill='none' stroke-width='1' stroke-dasharray='1,1' />"
row_pos +=1
elif row_pos+1==row_length:
row+=1
row_pos=0
row_length = 3 if row_length == 4 else 4
else:
row_pos +=1
# move left as you go down a row
x = round(startX) #+indentX+bulgeX
# Move down with right amount
y = round(startY+i*self.residue_radius*1.5)
output_residue = self.DrawResidueSquare(x,y,rs[i][1], rs[i][0], rs[i][3], self.residue_radius)
if x<self.maxX['left']: self.maxX['left'] = x
if x>self.maxX['right']: self.maxX['right'] = x
row_pos += 1
if bulge==1:
if row_pos==1: # if first in row, use space for bulge
bulgeY = -3
bulgeX = 10
else:
bulgeY = -3
bulgeX = 7
rs[i][2] = prevGeneric # make it the prev one, to catch missing ones correctly
bulge = 0
if row_length==3:
output_residue_in += output_residue
else:
output_residue_out += output_residue
output_residues.append(output_residue)
if i==0: self.TBCoords[helix_num]['top'] = [x,y]
if i==res_num-1: self.TBCoords[helix_num]['bottom'] = [x,y]
if (row_pos==1 and row!=0) or (skip==1 and row_pos==2): # if need for trace
if row_length==3: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-21)+" "+str(y-8)+" T"+str(x)+" "+str(y)
if row_length>=4: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-24)+" "+str(y-7)+" T"+str(x)+" "+str(y)
# output_trace += "<path d='" + points + "' stroke='grey' fill='none' stroke-width='2' />"
# alternate between 4 and 3 res per row
if row_length>3 and row_pos>=row_length:
row_length=3
row_pos = 0
row += 1
bulgeX = 0
bulgeY = 0
indentX = -self.residue_radius+3
indentY = 3
elif row_length==3 and row_pos>=3:
row_length=4
row_pos = 0
row += 1
bulgeX = 0
bulgeY = 0
indentX = 0
indentY = 0
skip = 0
prevX = x
prevY = y
prevGeneric = rs[i][2]
temp = ''
if helix_num%2!=0: output_residues.reverse()
for res in output_residues:
temp += res
return output_trace+temp
def drawSnakePlotLoop(self, segment):
y_offset = 20
font_size = 12
font_family = 'courier'
bezier_pull = 90
name = segment
x_at_max_y = 0
rs = self.segments[segment] # get residues
if self.count % 2 == 0:
position = 'bottom'
orientation = 1
else:
position = 'top'
orientation = -1
# what happens here?
if self.count not in self.TBCoords:
return 0
# Get positions of two linking residues from each helix
x1 = self.TBCoords[self.count][position][0]
y1 = self.TBCoords[self.count][position][1]
x2 = self.TBCoords[self.count + 1][position][0]
y2 = self.TBCoords[self.count + 1][position][1]
boxX = (x1+x2)/2 # midway between
if position=='top':
boxY = min(y1,y2)-y_offset # over helix
y_indent = -1*bezier_pull
if position=='bottom':
boxY = max(y1, y2) + y_offset # over helix
y_indent = bezier_pull
points = str(x1)+","+str(y1)+" "+str(boxX)+","+str(boxY)+" "+str(x2)+","+str(y2)
points2 = "M "+str(x1)+" "+str(y1)+" Q"+str(boxX)+" "+str(boxY+y_indent)+" "+str(x2)+" "+str(y2)
# Getting midpoint of Bezier curve http://www.svgbasics.com/curves.html
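        # For a quadratic Bezier with endpoints P0=(x1,y1), P2=(x2,y2) and control point
        # P1=(boxX, boxY+y_indent), de Casteljau at t=0.5 gives the on-curve midpoint
        # F = ((P0+P1)/2 + (P1+P2)/2)/2, which is what Dx/Ex/Fx and Dy/Ey/Fy compute below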
Dx = ((x1+boxX)/2)
Ex = ((x2+boxX)/2)
Fx = (Dx+Ex)/2
Dy = ((y1+boxY+y_indent)/2)
Ey = ((y2+boxY+y_indent)/2)
Fy = (Dy+Ey)/2
y_indent = y_indent*len(rs)/5 # get an approx need for y_indent for size of loop
super_loop_long_length = 40
between_residues = 18
length_of_residues_in_loop = len(rs)*between_residues-self.residue_radius
length = self.lengthbezier([x1,y1],[boxX,boxY+y_indent],[x2,y2],0.001)
if len(rs)<super_loop_long_length:
tries = 0 # adjust size
while abs(length-length_of_residues_in_loop-70)>5:
# print(abs(length-length_of_residues_in_loop+100),length,length_of_residues_in_loop,tries)
if length-length_of_residues_in_loop-70>5:
y_indent *=0.9
else:
y_indent *=1.1
length = self.lengthbezier([x1,y1],[boxX,boxY+y_indent],[x2,y2],0.001)
tries += 1
if tries>100:
break
pos = (length-length_of_residues_in_loop)/2 # get start pos
prev_where = [x1, y1]
# make rounded arc
points2 = "M "+str(x1)+" "+str(y1)+" Q"+str(boxX)+" "+str(boxY+y_indent)+" "+str(x2)+" "+str(y2)
labelbox = self.wherebezier([x1,y1],[boxX,boxY+y_indent],[x2,y2],0.001,length/2)
labelbox[1][1] += orientation*40
self.output += "<path class='"+name+"' d='" + points2 + "' stroke='black' fill='none' stroke-width='2' />"
max_y = y1
for i in range(0,len(rs)):
r = rs[i]
where = self.wherebezier([x1,y1],[boxX,boxY+y_indent],[x2,y2],0.001,pos)
self.output += self.DrawResidue(where[1][0],where[1][1],r[1], r[0], r[3], self.residue_radius-1,name)
pos += between_residues
if where[1][1]>self.high: self.high = where[1][1]
if where[1][1]<self.low: self.low = where[1][1]
prev_where = where[1][0],where[1][1]
if orientation==-1:
if where[1][1]<self.maxY[position]: self.maxY[position] = where[1][1]
else:
if where[1][1]>self.maxY[position]: self.maxY[position] = where[1][1]
if orientation==-1:
if where[1][1]<max_y:
max_y = where[1][1]
x_at_max_y = where[1][0]
else:
if where[1][1]>max_y:
max_y = where[1][1]
x_at_max_y = where[1][0]
x_at_max_y = where[1][0]
if orientation == 1:
max_y = max_y+25
else:
max_y = max_y-20
self.output += "<rect onclick='toggleLoop(\"."+name+"\",\"long\");' class='"+name+"' x="+str(x_at_max_y-24)+" y="+str(max_y-13)+" rx=5 ry=5 width='55' height='20' stroke='black' fill='white' stroke-width='1' style2='fill:red;stroke:black;stroke-width:5;opacity:0.5'/>"
self.output += str("<text onclick='toggleLoop(\"."+name+"\",\"long\");' class='"+name+"' x="+str(x_at_max_y)+" y="+str(max_y)+" text-anchor='middle' font-size="+str(font_size)+" font-family='"+font_family+"'>"+name+"</text>")
def drawSnakePlotTerminals(self):
y_offset = 50
font_size = 12
font_family = 'helvetica'
bezier_pull = 80
between_residues = 18
for name in ['ns1', 's19c']:
drawn_residues = []
if name not in self.segments: continue # continue if no terminus
rs = self.segments[name] # get residues
if name == 'ns1':
orientation = 1
# y_max = self.maxY['extra']-between_residues*4
position = 'bottom'
linked_helix = 1
y_max = self.TBCoords[linked_helix][position][1] + 200
x_max = self.maxX['right'] - 300
rs.reverse()
else:
orientation = 1
# y_max = self.maxY['intra']+between_residues*4
position = 'bottom'
linked_helix = 20
y_max = self.TBCoords[linked_helix][position][1] + 200
x_max = self.maxX['left'] - 300
x1 = self.TBCoords[linked_helix][position][0]
y1 = self.TBCoords[linked_helix][position][1]
# Get positions of two linking residues from each helix
x2 = x1 - 30
y2 = y1 + 80 * orientation
# Make line and box for short version
points = "M "+str(x1)+" "+str(y1)+" Q"+str(x1+30)+" "+str(y2)+" "+str(x2)+" "+str(y2)
self.output += "<path class='"+name+" short' d='" + points + "' stroke='black' fill='none' stroke-width='2' />"
self.output += "<rect class='"+name+" short segment' onclick='toggleLoop(\"."+name+"\",\"short\");' x="+str(x2-25)+" y="+str(y2-13)+" rx=5 ry=5 width='50' height='20' stroke='black' fill='white' stroke-width='1' style2='fill:red;stroke:black;stroke-width:5;opacity:0.5'/>"
self.output += str("<text class='"+name+" short segment' onclick='toggleLoop(\"."+name+"\",\"short\");' x="+str(x2)+" y="+str(y2)+" text-anchor='middle' font-size="+str(font_size)+" font-family='"+font_family+"'>"+name+"</text>")
x2 = x1-90*orientation
y2 = y_max
bezierX = x1+60*orientation
bezierY = (y_max+y1)/2+60*orientation
points = "M "+str(x1)+" "+str(y1)+" Q"+str(bezierX)+" "+str(bezierY)+" "+str(x2)+" "+str(y2)
pos = 40
length = self.lengthbezier([x1,y1],[bezierX,bezierY],[x2,y2],0.001)
bend = 0
distance_between_rows = 30
pos_bend = 0
bend_direction = -1 * orientation
for i in range(0,len(rs)):
r = rs[i]
if pos<length:
where = self.wherebezier([x1,y1],[bezierX,bezierY],[x2,y2],0.001,pos)
else:
if pos_bend==0 and bend!=0: #if first residue in line put in middle
where[1][0] = where[1][0]-between_residues*bend_direction
#where[1][0] = where[1][0]
where[1][1] = where[1][1]+orientation*distance_between_rows/2
elif pos_bend==between_residues and bend!=0: #if 2nd residue in line put in middle
#where[1][0] = where[1][0]-between_residues*bend_direction
where[1][0] = where[1][0]+between_residues*bend_direction
where[1][1] = where[1][1]+orientation*distance_between_rows/2
else:
where[1][0] = where[1][0]+between_residues*bend_direction
where[1][1] = where[1][1]
last_bend_x = where[1][0]
last_bend_y = where[1][1]
pos_bend += between_residues
if pos_bend>=abs(x2-x_max)-40: #no more bend left
pos_bend = 0
bend += 1
if bend_direction==1:
bend_direction = -1
elif bend_direction==-1:
bend_direction = 1
if i==0: self.output += "<line class='"+name+" long' x1="+str(x1)+" y1="+str(y1)+" x2="+str(where[1][0])+" y2="+str(where[1][1])+" stroke='black' fill='none' stroke-width='2' stroke-dasharray2='1,1' />"
if bend==0: labely = where[1][1]
drawn_residues.append(self.DrawResidue(where[1][0],where[1][1],r[1], r[0], rs[i][3], self.residue_radius-1,name+" long"))
pos += between_residues
if where[1][1]<self.low: self.low = where[1][1]
if where[1][1]>self.high: self.high = where[1][1]
if name=='s19c': drawn_residues = drawn_residues[::-1]
self.output += ''.join(drawn_residues)
self.output += "<rect onclick='toggleLoop(\"."+name+"\",\"long\");' class='"+name+" long segment' x="+str(self.TBCoords[linked_helix][position][0]-40*orientation-25)+" y="+str((labely+self.TBCoords[linked_helix][position][1])/2-13)+" rx=5 ry=5 width='50' height='20' stroke='black' fill='white' stroke-width='1' style2='fill:red;stroke:black;stroke-width:5;opacity:0.5'/>"
self.output += str("<text onclick='toggleLoop(\"."+name+"\",\"long\");' class='"+name+" long segment' x="+str(self.TBCoords[linked_helix][position][0]-40*orientation)+" y="+str((labely+self.TBCoords[linked_helix][position][1])/2)+" text-anchor='middle' font-size="+str(font_size)+" font-family='"+font_family+"'>"+name+"</text>")
|
apache-2.0
| -5,423,738,260,072,005,000
| 40.443878
| 384
| 0.507448
| false
| 3.370539
| false
| false
| false
|
consbio/python-databasin
|
tests/test_client.py
|
1
|
15329
|
from __future__ import absolute_import
import copy
import json
import zipfile
import pytest
import requests_mock
import six
from requests.models import Request
from databasin.client import Client
from databasin.exceptions import DatasetImportError
from .utils import make_api_key_callback
try:
from unittest import mock # Py3
except ImportError:
import mock # Py2
try:
import __builtin__ as builtins
except ImportError:
import builtins
LOGIN_URL = 'https://databasin.org/auth/api/login/'
@pytest.fixture()
def dataset_import_data():
return {
'id': 'a1b2c3',
'owner_id': 'user',
'private': False,
'title': 'Some Import',
'description': 'This dataset is a dataset.',
'create_date': '2015-11-17T22:42:06+00:00',
'modify_date': '2015-11-17T22:42:06+00:00',
'native': True,
'tags': ['one', 'two'],
'credits': None,
'failed': False,
'is_dataset_edit': False
}
@pytest.fixture()
def dataset_data():
return {
'id': 'a1b2c3',
'owner_id': 'user',
'private': False,
'title': 'Some Dataset',
'snippet': 'This dataset is...',
'create_date': '2015-11-17T22:42:06+00:00',
'modify_date': '2015-11-17T22:42:06+00:00',
'native': True,
'tags': ['one', 'two'],
'credits': None
}
@pytest.fixture
def import_job_data():
return {
'id': '1234',
'job_name': 'create_import_job',
'status': 'succeeded',
'progress': 100,
'message': json.dumps({'next_uri': '/datasets/import/a1b2c3/overview/'})
}
@pytest.fixture
def import_netcdf_job_data():
return {
'id': '1234',
'job_name': 'create_import_job',
'status': 'succeeded',
'progress': 100,
'message': json.dumps({'next_uri': '/datasets/a1b2c3/'})
}
@pytest.fixture
def finalize_job_data():
return {
'id': '1235',
'job_name': 'finalize_import_job',
'status': 'succeeded',
'progress': 100,
'message': json.dumps({'next_uri': '/datasets/a1b2c3/'})
}
@pytest.fixture
def tmp_file_data():
return {
'uuid': 'abcd',
'date': '2015-11-17T22:42:06+00:00',
'is_image': False,
'filename': '',
'url': 'https://example.com/file.txt'
}
def test_alternative_host():
c = Client('example.com:81')
assert c.base_url == 'https://example.com:81'
def test_https_referer():
"""Django requires all POST requests via HTTPS to have the Referer header set."""
c = Client()
r = c._session.prepare_request(Request('POST', LOGIN_URL))
c._session.get_adapter(LOGIN_URL).add_headers(r)
assert r.headers['Referer'] == LOGIN_URL
def test_login():
with requests_mock.mock() as m:
m.get('https://databasin.org/', cookies={'csrftoken': 'abcd'})
m.post(LOGIN_URL, cookies={'sessionid': 'asdf'})
c = Client()
c.login('foo', 'bar')
assert m.call_count == 2
def test_login_no_redirect():
with requests_mock.mock() as m:
m.get('https://databasin.org/redirect/')
m.get('https://databasin.org/', cookies={'csrftoken': 'abcd'})
m.get(LOGIN_URL, cookies={'csrftoken': 'abcd'})
m.post(
LOGIN_URL, headers={'Location': 'https://databasin.org/'}, cookies={'sessionid': 'asdf'}, status_code=302
)
c = Client()
c.login('foo', 'bar')
assert m.call_count == 2
assert not any(r.url for r in m.request_history if r.url == 'https://databasin.org/redirect/')
def test_import_lpk(import_job_data, dataset_data, dataset_import_data, finalize_job_data, tmp_file_data):
with requests_mock.mock() as m:
m.post('https://databasin.org/uploads/upload-temporary-file/', text=json.dumps({'uuid': 'abcd'}))
m.get('https://databasin.org/api/v1/uploads/temporary-files/abcd/', text=json.dumps(tmp_file_data))
m.post('https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1234/'})
m.get('https://databasin.org/api/v1/jobs/1234/', text=json.dumps(import_job_data))
m.get('https://databasin.org/api/v1/dataset_imports/a1b2c3/', text=json.dumps(dataset_import_data))
m.post('https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1235/'})
m.get('https://databasin.org/api/v1/jobs/1235/', text=json.dumps(finalize_job_data))
m.get('https://databasin.org/api/v1/datasets/a1b2c3/', text=json.dumps(dataset_data))
f = six.BytesIO()
with mock.patch.object(builtins, 'open', mock.Mock(return_value=f)) as open_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
dataset = c.import_lpk('test.lpk')
open_mock.assert_called_once_with('test.lpk', 'rb')
assert m.call_count == 7
assert dataset.id == 'a1b2c3'
request_data = json.loads(m.request_history[2].text)
assert request_data['job_name'] == 'create_import_job'
assert request_data['job_args']['file'] == 'abcd'
assert request_data['job_args']['dataset_type'] == 'ArcGIS_Native'
def test_import_lpk_with_api_key(import_job_data, dataset_data, dataset_import_data, finalize_job_data, tmp_file_data):
key = 'abcdef123456'
with requests_mock.mock() as m:
m.post(
'https://databasin.org/uploads/upload-temporary-file/',
text=make_api_key_callback(json.dumps({'uuid': 'abcd'}), key)
)
m.get(
'https://databasin.org/api/v1/uploads/temporary-files/abcd/',
text=make_api_key_callback(json.dumps(tmp_file_data), key)
)
m.post(
'https://databasin.org/api/v1/jobs/',
headers={'Location': '/api/v1/jobs/1234/'},
text=make_api_key_callback('', key)
)
m.get(
'https://databasin.org/api/v1/jobs/1234/',
text=make_api_key_callback(json.dumps(import_job_data), key)
)
m.get(
'https://databasin.org/api/v1/dataset_imports/a1b2c3/',
text=make_api_key_callback(json.dumps(dataset_import_data), key)
)
m.post(
'https://databasin.org/api/v1/jobs/',
headers={'Location': '/api/v1/jobs/1235/'},
text=make_api_key_callback('', key)
)
m.get('https://databasin.org/api/v1/jobs/1235/', text=make_api_key_callback(json.dumps(finalize_job_data), key))
m.get(
'https://databasin.org/api/v1/datasets/a1b2c3/',
text=make_api_key_callback(json.dumps(dataset_data), key)
)
f = six.BytesIO()
with mock.patch.object(builtins, 'open', mock.Mock(return_value=f)) as open_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
c.set_api_key('user', key)
dataset = c.import_lpk('test.lpk')
open_mock.assert_called_once_with('test.lpk', 'rb')
assert m.call_count == 7
assert dataset.id == 'a1b2c3'
def test_import_lpk_with_xml(import_job_data, dataset_data, dataset_import_data, finalize_job_data, tmp_file_data):
with requests_mock.mock() as m:
m.post('https://databasin.org/datasets/1234/import/metadata/')
m.post('https://databasin.org/uploads/upload-temporary-file/', text=json.dumps({'uuid': 'abcd'}))
m.get('https://databasin.org/api/v1/uploads/temporary-files/abcd/', text=json.dumps(tmp_file_data))
m.post('https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1234/'})
m.get('https://databasin.org/api/v1/jobs/1234/', text=json.dumps(import_job_data))
m.get('https://databasin.org/api/v1/dataset_imports/a1b2c3/', text=json.dumps(dataset_import_data))
m.post('https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1235/'})
m.get('https://databasin.org/api/v1/jobs/1235/', text=json.dumps(finalize_job_data))
m.get('https://databasin.org/api/v1/datasets/a1b2c3/', text=json.dumps(dataset_data))
f = mock.Mock()
f.read = mock.Mock(return_value='')
f.__enter__ = mock.Mock(return_value=f)
f.__exit__ = mock.Mock(return_value=f)
with mock.patch.object(builtins, 'open', mock.Mock(return_value=f)) as open_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
dataset = c.import_lpk('test.lpk', 'test.xml')
open_mock.assert_any_call('test.xml')
open_mock.assert_any_call('test.lpk', 'rb')
assert m.call_count == 8
assert dataset.id == 'a1b2c3'
request_data = json.loads(m.request_history[2].text)
assert request_data['job_name'] == 'create_import_job'
assert request_data['job_args']['file'] == 'abcd'
assert request_data['job_args']['dataset_type'] == 'ArcGIS_Native'
def test_import_netcdf_dataset_with_zip(import_netcdf_job_data, dataset_data, tmp_file_data):
with requests_mock.mock() as m:
m.post('https://databasin.org/uploads/upload-temporary-file/', text=json.dumps({'uuid': 'abcd'}))
m.get('https://databasin.org/api/v1/uploads/temporary-files/abcd/', text=json.dumps(tmp_file_data))
m.post('https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1234/'})
m.get('https://databasin.org/api/v1/jobs/1234/', text=json.dumps(import_netcdf_job_data))
m.get('https://databasin.org/api/v1/datasets/a1b2c3/', text=json.dumps(dataset_data))
f = six.BytesIO()
with zipfile.ZipFile(f, 'w') as zf:
zf.writestr('test.nc', '')
zf.writestr('style.json', '')
f.seek(0)
with mock.patch.object(builtins, 'open', mock.Mock(return_value=f)) as open_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
dataset = c.import_netcdf_dataset('test.zip')
open_mock.assert_called_once_with('test.zip', 'a+b')
assert m.call_count == 5
assert dataset.id == 'a1b2c3'
request_data = json.loads(m.request_history[2].text)
assert request_data['job_name'] == 'create_import_job'
assert request_data['job_args']['file'] == 'abcd'
assert request_data['job_args']['dataset_type'] == 'NetCDF_Native'
def test_import_netcdf_dataset_with_nc(import_netcdf_job_data, dataset_data, tmp_file_data):
with requests_mock.mock() as m:
m.post('https://databasin.org/uploads/upload-temporary-file/', text=json.dumps({'uuid': 'abcd'}))
m.get('https://databasin.org/api/v1/uploads/temporary-files/abcd/', text=json.dumps(tmp_file_data))
m.post('https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1234/'})
m.get('https://databasin.org/api/v1/jobs/1234/', text=json.dumps(import_netcdf_job_data))
m.get('https://databasin.org/api/v1/datasets/a1b2c3/', text=json.dumps(dataset_data))
with mock.patch.object(zipfile, 'ZipFile', mock.MagicMock()) as zf_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
dataset = c.import_netcdf_dataset('test.nc', style={'foo': 'bar'})
zf_mock().write.assert_called_once_with('test.nc', 'test.nc')
assert m.call_count == 5
assert dataset.id == 'a1b2c3'
request_data = json.loads(m.request_history[2].text)
assert request_data['job_name'] == 'create_import_job'
assert request_data['job_args']['file'] == 'abcd'
assert request_data['job_args']['dataset_type'] == 'NetCDF_Native'
def test_import_netcdf_dataset_with_api_key(import_netcdf_job_data, dataset_data, tmp_file_data):
key = 'abcde12345'
with requests_mock.mock() as m:
m.post(
'https://databasin.org/uploads/upload-temporary-file/',
text=make_api_key_callback(json.dumps({'uuid': 'abcd'}), key)
)
m.get(
'https://databasin.org/api/v1/uploads/temporary-files/abcd/',
text=make_api_key_callback(json.dumps(tmp_file_data), key)
)
m.post(
'https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1234/'},
text=make_api_key_callback('', key)
)
m.get(
'https://databasin.org/api/v1/jobs/1234/',
text=make_api_key_callback(json.dumps(import_netcdf_job_data), key)
)
m.get(
'https://databasin.org/api/v1/datasets/a1b2c3/',
text=make_api_key_callback(json.dumps(dataset_data), key)
)
with mock.patch.object(zipfile, 'ZipFile', mock.MagicMock()) as zf_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
c.set_api_key('user', key)
dataset = c.import_netcdf_dataset('test.nc', style={'foo': 'bar'})
zf_mock().write.assert_called_once_with('test.nc', 'test.nc')
assert m.call_count == 5
assert dataset.id == 'a1b2c3'
request_data = json.loads(m.request_history[2].text)
assert request_data['job_name'] == 'create_import_job'
assert request_data['job_args']['file'] == 'abcd'
assert request_data['job_args']['dataset_type'] == 'NetCDF_Native'
def test_import_netcdf_dataset_with_invalid_file():
c = Client()
with pytest.raises(ValueError):
c.import_netcdf_dataset('test.foo')
def test_import_netcdf_dataset_with_no_style():
f = six.BytesIO()
with zipfile.ZipFile(f, 'w') as zf:
zf.writestr('test.nc', '')
f.seek(0)
with mock.patch.object(builtins, 'open', mock.Mock(return_value=f)) as open_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
with pytest.raises(ValueError):
c.import_netcdf_dataset('test.zip')
def test_import_netcdf_dataset_incomplete(import_job_data, tmp_file_data, dataset_import_data):
import_job_data = copy.copy(import_job_data)
import_job_data['message'] = json.dumps({'next_uri': '/datasets/import/a1b2c3/overview/'})
with requests_mock.mock() as m:
m.post('https://databasin.org/uploads/upload-temporary-file/', text=json.dumps({'uuid': 'abcd'}))
m.get('https://databasin.org/api/v1/uploads/temporary-files/abcd/', text=json.dumps(tmp_file_data))
m.post('https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1234/'})
m.get('https://databasin.org/api/v1/jobs/1234/', text=json.dumps(import_job_data))
m.get('https://databasin.org/api/v1/dataset_imports/a1b2c3/', text=json.dumps(dataset_import_data))
m.delete('https://databasin.org/api/v1/dataset_imports/a1b2c3/')
f = six.BytesIO()
with zipfile.ZipFile(f, 'w') as zf:
zf.writestr('test.nc', '')
zf.writestr('style.json', '')
f.seek(0)
with mock.patch.object(builtins, 'open', mock.Mock(return_value=f)) as open_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
with pytest.raises(DatasetImportError):
c.import_netcdf_dataset('test.zip')
assert m.call_count == 6
|
bsd-3-clause
| -4,360,527,989,050,836,500
| 38.507732
| 120
| 0.591363
| false
| 3.192876
| true
| false
| false
|
ujdhesa/unisubs
|
unisubs_settings.py
|
1
|
3595
|
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from datetime import timedelta
from settings import *
from server_local_settings import *
DEBUG = False
ADMINS = (
('Craig Zheng', 'craig@pculture.org'),
('universalsubtitles-errors', 'universalsubtitles-errors@pculture.org')
)
if INSTALLATION == DEV:
ADMINS = (
('Evan', 'ehazlett@pculture.org'),
)
SITE_ID = 16
SITE_NAME = 'unisubsdev'
REDIS_DB = "3"
EMAIL_SUBJECT_PREFIX = '[usubs-dev]'
SENTRY_TESTING = True
SOLR_ROOT = '/usr/share/'
CELERY_TASK_RESULT_EXPIRES = timedelta(days=7)
elif INSTALLATION == STAGING:
SITE_ID = 17
SITE_NAME = 'unisubsstaging'
REDIS_DB = "2"
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
EMAIL_SUBJECT_PREFIX = '[usubs-staging]'
CELERY_TASK_RESULT_EXPIRES = timedelta(days=7)
elif INSTALLATION == PRODUCTION:
SITE_ID = 18
SITE_NAME = 'unisubs'
REDIS_DB = "1"
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
EMAIL_SUBJECT_PREFIX = '[usubs-production]'
COMPRESS_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
ADMINS = (
('universalsubtitles-errors', 'universalsubtitles-errors@pculture.org'),
)
# only send actual email on the production server
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
elif INSTALLATION == DEMO:
DEBUG = True
REDIS_DB = "4"
SENTRY_TESTING = True
elif INSTALLATION == LOCAL:
SITE_ID = 14
SITE_NAME = 'unisubsstaging'
ADMINS = (
('Evan', 'ehazlett@pculture.org'),
)
if INSTALLATION == STAGING or INSTALLATION == PRODUCTION or INSTALLATION == LOCAL:
DATABASE_ROUTERS = ['routers.UnisubsRouter']
AWS_STORAGE_BUCKET_NAME = DEFAULT_BUCKET
COMPRESS_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
COMPRESS_URL = STATIC_URL
SOLR_ROOT = '/usr/share/'
CELERYD_LOG_LEVEL = 'INFO'
CELERY_REDIRECT_STDOUTS = True
CELERY_REDIRECT_STDOUTS_LEVEL = 'INFO'
RECAPTCHA_PUBLIC = '6LftU8USAAAAADia-hmK1RTJyqXjFf_T5QzqLE9o'
IGNORE_REDIS = True
ALARM_EMAIL = FEEDBACK_EMAILS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': DATABASE_NAME,
'USER': DATABASE_USER,
'PASSWORD': DATABASE_PASSWORD,
'HOST': DATABASE_HOST,
'PORT': '3306'
}
}
DATABASES.update(uslogging_db)
USE_AMAZON_S3 = AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and DEFAULT_BUCKET
try:
from settings_local import *
except ImportError:
pass
if USE_AMAZON_S3:
AWS_BUCKET_NAME = AWS_STORAGE_BUCKET_NAME
COMPRESS_MEDIA = not DEBUG
STATIC_URL_BASE = STATIC_URL
if COMPRESS_MEDIA:
STATIC_URL += "%s/%s/" % (COMPRESS_OUTPUT_DIRNAME, LAST_COMMIT_GUID.split("/")[1])
ADMIN_MEDIA_PREFIX = "%sadmin/" % STATIC_URL_BASE
# the keyed cache apps need this:
CACHE_TIMEOUT = 60
CACHE_PREFIX = "unisubscache"
|
agpl-3.0
| 6,851,062,375,894,113,000
| 28.467213
| 86
| 0.691516
| false
| 3.159051
| false
| false
| false
|
chengsoonong/crowdastro
|
crowdastro/active_learning/random_sampler.py
|
1
|
2413
|
"""Learning with random sampling.
Pool-based. Binary class labels.
Matthew Alger
The Australian National University
2016
"""
import numpy
from .sampler import Sampler
class RandomSampler(Sampler):
"""Pool-based learning with random sampling."""
def sample_index(self):
"""Finds index of a random unlabelled point."""
unlabelled = self.labels.mask.nonzero()[0]
if len(unlabelled):
index = numpy.random.choice(unlabelled)
return index
return 0
def sample_indices(self, n):
"""Finds indices of n random unlabelled points."""
indices = set()
unlabelled = self.labels.mask.nonzero()[0]
if len(unlabelled) < n:
return unlabelled
while len(indices) < n:
index = numpy.random.choice(unlabelled)
indices.add(index)
return sorted(indices)
class BalancedSampler(RandomSampler):
"""Pool-based learning with balanced random sampling.
WARNING: This class can "peek" at the true labels!
"""
def sample_index(self):
"""Finds index of a random unlabelled point."""
unlabelled = self.labels.mask.nonzero()[0]
unlabelled_groundtruth = self.labels.data[unlabelled]
if len(unlabelled):
if numpy.random.random() < 0.5:
index = numpy.random.choice(
unlabelled[unlabelled_groundtruth == 1])
else:
index = numpy.random.choice(
unlabelled[unlabelled_groundtruth == 0])
return index
return 0
def sample_indices(self, n):
"""Finds indices of n random unlabelled points."""
indices = set()
unlabelled = self.labels.mask.nonzero()[0]
if len(unlabelled) < n:
return unlabelled
unlabelled_groundtruth = self.labels.data[unlabelled]
while len(indices) < n:
if ((numpy.random.random() < 0.5 and
len(unlabelled[unlabelled_groundtruth == 1]) > 0) or
len(unlabelled[unlabelled_groundtruth == 0]) == 0):
index = numpy.random.choice(
unlabelled[unlabelled_groundtruth == 1])
else:
index = numpy.random.choice(
unlabelled[unlabelled_groundtruth == 0])
indices.add(index)
return sorted(indices)
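# Illustrative sketch (not part of the original module): both samplers assume that the
# Sampler base class stores the labels as a numpy masked array in self.labels, with the
# masked entries forming the unlabelled pool. The core selection step then looks like:
def _random_sampling_demo():
    """Demonstrates the selection logic above on a tiny masked-array label set."""
    labels = numpy.ma.MaskedArray([1, 0, 1, 0, 1], mask=[True, True, True, False, False])
    unlabelled = labels.mask.nonzero()[0]   # indices of the unlabelled (masked) points
    return numpy.random.choice(unlabelled)  # what RandomSampler.sample_index returns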
|
mit
| 8,591,367,075,360,485,000
| 27.388235
| 72
| 0.581434
| false
| 4.110733
| false
| false
| false
|
0xF1/nessus_tools
|
mobile_devices_parser.py
|
1
|
2836
|
#!/usr/bin/env python
'''
mobile devices parser
Version 0.1
by Roy Firestein (roy@firestein.net)
Parse mobile devices audit plugin and export to CSV
'''
import os
import xml.dom.minidom
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", action="store", type="string", dest="file", help="Nessus file to parse")
parser.add_option("-o", "--output", action="store", type="string", dest="output", help="output file name")
(menu, args) = parser.parse_args()
devices = {"Android": [], "iPhone": [], "iPad": []}
def main():
nes_file = menu.file
report = xml.dom.minidom.parse(nes_file)
for el in report.getElementsByTagName('ReportItem'):
if el.getAttribute("pluginID") == "60035":
# find plugin_output element
output = get_plugin_output(el)
model = get_model(output)
version = get_version(output)
user = get_user(output)
serial = get_serial(output)
item = {"serial": serial, "version": version, "user": user}
if not item in devices[model]:
devices[model].append(item)
print "%s\t%s\t%s\t%s" %(model, version, user, serial)
if len(devices['iPhone']) > 0 or len(devices['iPad']) > 0 or len(devices['Android']) > 0:
save_csv(devices)
def save_csv(devices):
fh = open(menu.output, "w")
fh.write("Platform,User,Version,Serial\n")
for d in devices['iPhone']:
fh.write('"%s","%s","%s","%s"\n' %("iPhone", d['user'], d['version'], d['serial']))
for d in devices['iPad']:
fh.write('"%s","%s","%s","%s"\n' %("iPad", d['user'], d['version'], d['serial']))
for d in devices['Android']:
fh.write('"%s","%s","%s","%s"\n' %("Android", d['user'], d['version'], d['serial']))
fh.close()
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def get_plugin_output(el):
a = el.getElementsByTagName("plugin_output")[0]
return getText(a.childNodes)
def get_model(data):
for line in data.split("\n"):
if line.startswith("Model"):
return line.split(" ")[2]
return None
def get_version(data):
for line in data.split("\n"):
if line.startswith("Version"):
return line.split(" ")[2]
return None
def get_user(data):
for line in data.split("\n"):
if line.startswith("User"):
return line.split(" ")[2]
return None
def get_serial(data):
for line in data.split("\n"):
if line.startswith("Serial"):
return line.split(" ")[3]
return None
if __name__ == "__main__":
main()
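# Example invocation (file names are placeholders):
#   python mobile_devices_parser.py -f scan.nessus -o mobile_devices.csv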
|
apache-2.0
| -6,459,428,287,161,355,000
| 26.269231
| 107
| 0.554302
| false
| 3.580808
| false
| false
| false
|
tjsteele/battle-simulator
|
source/main.py
|
1
|
3135
|
import random, time
currPlayerHitToken = True
currOpponentHitToken = True
class GenerateMonster():
def __init__(self, name, health, gold, weapon, ac):
self.name = name
self.health = health
self.gold = gold
self.weapon = weapon
self.ac = ac
def checkMissPlayer(defender):
"""
Returns a boolean token: if player missed or not.
If defenders AC (Armor Class) is above players hit the token will evaluate to False,
and the player will respectively miss.
"""
global currPlayerHitToken
missChance = random.randrange(0, 25)
if missChance <= defender:
currPlayerHitToken = False
return currPlayerHitToken
else:
currPlayerHitToken = True
return currPlayerHitToken
def checkMissOpponent(defender):
"""
Returns a boolean token: if opponent missed or not.
If defenders AC (Armor Class) is above opponents hit, the token will evaluate to False,
and the opponent will respectively miss.
"""
global currOpponentHitToken
missChance = random.randrange(0, 25) # make this variable
if missChance <= defender:
currOpponentHitToken = False
return currOpponentHitToken
else:
        currOpponentHitToken = True
return currOpponentHitToken
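# With the default AC of 15, a roll drawn from range(0, 25) misses on values 0..15,
# which is 16 of the 25 possible outcomes, so each attack has roughly a 64% miss chance.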
def determineDamage(weapon, modifier, directed):
"""
    Returns the damage dealt by the attacker's weapon (or the string 'Miss!').
    Based on the attacker's weapon, inflictDamage is called against the opposing
    combatant and that combatant's HP is reduced by the rolled amount.
"""
if weapon == "fists" or weapon == "claws":
return inflictDamage(player, 2 * modifier, 6 * modifier)
elif weapon == "Iron Broadsword":
return inflictDamage(opponent, 100, 250)
return
def inflictDamage(inflicted, min, max):
"""
    Rolls damage in [min, max], subtracts it from the target's health and returns
    the amount dealt, or the string 'Miss!' when the roll comes up zero.
"""
damageInflicted = random.randrange(min, max+1)
if damageInflicted == 0:
return 'Miss!'
else:
inflicted.health-=damageInflicted
return damageInflicted
def getWinner(player, enemy):
"""
    Prints the winner of the match by comparing the combatants' remaining HP once
    one of them has dropped to zero or below.
"""
if player.health > enemy.health:
print player.name, 'wins!'
else:
print enemy.name, 'wins!'
def getHP(character):
return character.health
opponent = GenerateMonster('Goblin King', 1000, 100, 'fists', 15)
player = GenerateMonster('Paladin', 150, 200, 'Iron Broadsword', 15)
def main():
playerInitialHealth = player.health
opponentInitialHealth = opponent.health
while (opponent.health >= 0) and (player.health >= 0):
time.sleep(1)
if (currPlayerHitToken):
print "%s HP:" % player.name, getHP(player)
print "Damage to %s:" % opponent.name, determineDamage(player.weapon, 1, opponent.health)
else:
print '%s HP:' % player.name, getHP(player)
print '%s missed!' % player.name
time.sleep(1)
if(currOpponentHitToken):
print "%s HP:" % opponent.name, getHP(opponent)
print "Damage to: %s" % player.name, determineDamage(opponent.weapon, 1, player.health)
else:
print "%s HP:" % opponent.name, getHP(opponent)
print '%s missed!' % opponent.name
getWinner(player, opponent)
if __name__ == "__main__":
main()
|
mit
| -4,317,210,588,742,869,500
| 22.75
| 93
| 0.708453
| false
| 3.070519
| false
| false
| false
|
TC01/calcpkg
|
calcrepo/repos/ticalc.py
|
1
|
4723
|
import urllib
from calcrepo import info
from calcrepo import repo
name = "ticalc"
url = "http://www.ticalc.org/"
enabled = True
class TicalcRepository(repo.CalcRepository):
def formatDownloadUrl(self, url):
return "http://www.ticalc.org" + url
def updateRepoIndexes(self, verbose=False):
self.printd("Reading ticalc.org master index (this will take some time).")
# First read in the text (the only network process involved)
masterIndex = urllib.urlopen('http://www.ticalc.org/pub/master.index').read()
self.printd(" Read in ticalc.org master index.")
# Delete and open new indices
files = self.openIndex(self.index.fileIndex, "files index")
names = self.openIndex(self.index.nameIndex, "names index")
		if files is None or names is None:
			if files is not None:
				files.close()
			if names is not None:
				names.close()
			return
# Now, parse the enormous data and write index files
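		# The master index is a plain-text file made of blocks shaped like (illustrative layout):
		#   Index of /pub/<platform>/<category>
		#   <filename>          <title or description>
		# i.e. an "Index of <directory>" header followed by "<file> <name>" lines, which the
		# loop below splits between the files index and the names index.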
self.printd(" ")
masterIndex = masterIndex[39:]
directory = ""
while len(masterIndex) > 2:
line = masterIndex[:masterIndex.find('\n')]
masterIndex = masterIndex[masterIndex.find('\n') + 1:]
if line == "":
continue
if line[:9] == "Index of ":
dirData = line[9:]
directory = dirData[:dirData.find(" ")]
if verbose:
self.printd(" Caching " + line[9:])
else:
fileData = line[:line.find(" ")]
files.write(directory + '/' + fileData + '\n')
nameData = line[len(fileData)+1:].lstrip()
names.write(nameData + '\n')
# Close the indexes now
files.close()
names.close()
self.printd("Finished updating ticalc.org repo.\n")
def getFileInfo(self, fileUrl, fileName):
#Get the category path for the file
categoryPath = "http://www.ticalc.org/"
splitUrls = fileUrl.split('/')
for splitUrl in splitUrls:
if splitUrl != "" and (not "." in splitUrl):
categoryPath += splitUrl + '/'
#Now open the category page and extract the URL for the file info page
categoryPage = urllib.urlopen(categoryPath, "")
categoryData = categoryPage.read()
categoryPage.close()
index = categoryData.find(fileUrl) - 7
rIndex = categoryData.rfind('A HREF="', 0, index)
infoUrl = categoryData[rIndex + 9:]
infoUrl = "http://www.ticalc.org/" + infoUrl[:infoUrl.find('">')]
#Create a file info object
fileInfo = info.FileInfo(fileUrl, fileName, infoUrl, self.output)
infoPage = urllib.urlopen(infoUrl)
infoText = infoPage.read()
infoPage.close()
#Fill in all the data bits
fileInfo.description = self.getBaseFileData(infoText, "Description")
fileInfo.fileSize = self.getBaseFileData(infoText, "File Size")
fileInfo.fileDate = self.getBaseFileData(infoText, "File Date and Time", 47, 2)
fileInfo.documentation = self.getBaseFileData(infoText, "Documentation Included?")
fileInfo.sourceCode = self.getBaseFileData(infoText, "Source Code")
fileInfo.category = self.getFileCategory(infoText)
fileInfo.author = self.getFileAuthor(infoText)
fileInfo.downloads = self.getNumDownloads(infoText)
fileInfo.repository = self.name
#Print the file info object
fileInfo.printData(self.output)
return fileInfo
def getBaseFileData(self, fileInfo, data, index1 = 47, index2 = 1):
"""Function to initialize the simple data for file info"""
result = fileInfo[fileInfo.find(data):]
result = result[result.find("<FONT ") + index1:]
result = result[:result.find("</FONT>") - index2]
return result
def getFileCategory(self, fileInfo):
"""Function to get the file category for file info"""
category = fileInfo[fileInfo.find("Category"):]
category = category[category.find("<FONT ") + 47:]
category = category[category.find('">') + 2:]
category = category[:category.find("</A></B>") - 0]
return category
def getFileAuthor(self, fileInfo):
"""Function to get the file's author for file info, note that we are pretending that multiple authors do not exist here"""
author = fileInfo[fileInfo.find("Author"):]
author = author[author.find("<FONT ") + 47:]
author = author[author.find('<B>') + 3:]
authormail = author[author.find("mailto:") + 7:]
authormail = authormail[:authormail.find('"')]
author = author[:author.find("</B></A>") - 0]
author = author + " (" + authormail + ")"
return author
def getNumDownloads(self, fileInfo):
"""Function to get the number of times a file has been downloaded"""
downloads = fileInfo[fileInfo.find("FILE INFORMATION"):]
if -1 != fileInfo.find("not included in ranking"):
return "0"
downloads = downloads[:downloads.find(".<BR>")]
downloads = downloads[downloads.find("</A> with ") + len("</A> with "):]
return downloads
def getRepository():
"""Returns the relevant CalcRepository object for this repo file"""
global name, url
return TicalcRepository(name, url)
|
mit
| 8,673,408,777,575,601,000
| 34.780303
| 124
| 0.691086
| false
| 3.263994
| false
| false
| false
|
MayankGo/ec2-api
|
ec2api/api/__init__.py
|
1
|
26399
|
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Starting point for routing EC2 requests.
"""
import hashlib
import json
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import requests
import six
import webob
import webob.dec
import webob.exc
from ec2api.api import apirequest
from ec2api.api import ec2utils
from ec2api.api import faults
from ec2api import context
from ec2api import exception
from ec2api.i18n import _
from ec2api import wsgi
LOG = logging.getLogger(__name__)
ec2_opts = [
cfg.StrOpt('keystone_url',
default='http://localhost',
help='URL to get token from ec2 request.'),
cfg.StrOpt('keystone_sig_url',
default='$keystone_url/ec2-auth',
help='URL to validate signature/access key in ec2 request.'),
cfg.StrOpt('keystone_token_url',
default='$keystone_url/token-auth',
help='URL to validate token in ec2 request.'),
cfg.IntOpt('ec2_timestamp_expiry',
default=300,
help='Time in seconds before ec2 timestamp expires'),
]
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
CONF.import_opt('use_forwarded_for', 'ec2api.api.auth')
EMPTY_SHA256_HASH = (
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
# This is the buffer size used when calculating sha256 checksums.
# Experimenting with various buffer sizes showed that this value generally
# gave the best result (in terms of performance).
PAYLOAD_BUFFER = 1024 * 1024
# Fault Wrapper around all EC2 requests #
class FaultWrapper(wsgi.Middleware):
"""Calls the middleware stack, captures any exceptions into faults."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
except Exception:
LOG.exception(_("FaultWrapper cathes error"))
return faults.Fault(webob.exc.HTTPInternalServerError())
class RequestLogging(wsgi.Middleware):
"""Access-Log akin logging for all EC2 API requests."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
start = timeutils.utcnow()
rv = req.get_response(self.application)
self.log_request_completion(rv, req, start)
return rv
def log_request_completion(self, response, request, start):
apireq = request.environ.get('ec2.request', None)
if apireq:
action = apireq.action
else:
action = None
ctxt = request.environ.get('ec2api.context', None)
delta = timeutils.utcnow() - start
seconds = delta.seconds
microseconds = delta.microseconds
LOG.info(
"%s.%ss %s %s %s %s %s [%s] %s %s",
seconds,
microseconds,
request.remote_addr,
request.method,
"%s%s" % (request.script_name, request.path_info),
action,
response.status_int,
request.user_agent,
request.content_type,
response.content_type,
context=ctxt)
class InvalidCredentialsException(Exception):
def __init__(self, msg):
super(Exception, self).__init__()
self.msg = msg
class EC2KeystoneAuth(wsgi.Middleware):
"""Authenticate an EC2 request with keystone and convert to context."""
resourceIdMapping = {
'CreateVpc' : '*',
'CreateSubnet' : '*',
'CreateRouteTable' : '*',
'CreateRoute' : 'RouteTableId',
'CreateSecurityGroup' : '*',
'DeleteVpc' : 'VpcId',
'DeleteSubnet' : 'SubnetId',
'DeleteRouteTable' : 'RouteTableId',
'DeleteSecurityGroup' : 'GroupId',
'DeleteRoute' : 'RouteTableId',
'AssociateRouteTable' : 'SubnetId',
'DisassociateRouteTable' : 'AssociationId',
'AuthorizeSecurityGroupIngress' : 'GroupId',
'AuthorizeSecurityGroupEgress' : 'GroupId',
'RevokeSecurityGroupEgress' : 'GroupId',
'RevokeSecurityGroupIngress' : 'GroupId',
'DescribeVpcs' : '*',
'DescribeSubnets' : '*',
'DescribeRouteTables' : '*',
'DescribeSecurityGroups' : '*',
'AllocateAddress' : '',
'AssociateAddress' : '',
'DisassociateAddress' : '',
'ReleaseAddress' : '',
'DescribeAddresses' : '',
'CreateExtnetwork' : '',
'UpdateQuota' : '',
'ShowQuota' : ''
}
armappingdict = {
'CreateVpc': {
"action": "jrn:jcs:vpc:CreateVpc",
"resource": "jrn:jcs:vpc::Vpc:",
"implicit_allow": "False"
},
'DeleteVpc':
{
"action": "jrn:jcs:vpc:DeleteVpc",
"resource": "jrn:jcs:vpc::Vpc:",
"implicit_allow": "False"
},
'DescribeVpcs':
{
"action": "jrn:jcs:vpc:DescribeVpcs",
"resource": "jrn:jcs:vpc::Vpc:",
"implicit_allow": "False"
},
'CreateSubnet':
{
"action": "jrn:jcs:vpc:CreateSubnet",
"resource": "jrn:jcs:vpc::Subnet:",
"implicit_allow": "False"
},
'DeleteSubnet':
{
"action": "jrn:jcs:vpc:DeleteSubnet",
"resource": "jrn:jcs:vpc::Subnet:",
"implicit_allow": "False"
},
'DescribeSubnets':
{
"action": "jrn:jcs:vpc:DescribeSubnets",
"resource": "jrn:jcs:vpc::Subnet:",
"implicit_allow": "False"
},
'CreateRouteTable':
{
"action": "jrn:jcs:vpc:CreateRouteTable",
"resource": "jrn:jcs:vpc::RouteTable:",
"implicit_allow": "False"
},
'DeleteRouteTable':
{
"action": "jrn:jcs:vpc:DeleteRouteTable",
"resource": "jrn:jcs:vpc::RouteTable:",
"implicit_allow": "False"
},
'AssociateRouteTable':
{
"action": "jrn:jcs:vpc:AssociateRouteTable",
"resource": "jrn:jcs:vpc::Subnet:",
"implicit_allow": "False"
},
'DisassociateRouteTable':
{
"action": "jrn:jcs:vpc:DisassociateRouteTable",
"resource": "jrn:jcs:vpc::AssociatedRouteTable:",
"implicit_allow": "False"
},
'DescribeRouteTables':
{
"action": "jrn:jcs:vpc:DescribeRouteTables",
"resource": "jrn:jcs:vpc::RouteTable:",
"implicit_allow": "False"
},
'CreateRoute':
{
"action": "jrn:jcs:vpc:CreateRoute",
"resource": "jrn:jcs:vpc::RouteTable:",
"implicit_allow": "False"
},
'DeleteRoute':
{
"action": "jrn:jcs:vpc:DeleteRoute",
"resource": "jrn:jcs:vpc::RouteTable:",
"implicit_allow": "False"
},
'AllocateAddress': None,
'AssociateAddress': None,
'DisassociateAddress': None,
'ReleaseAddress': None,
'DescribeAddresses': None,
'CreateSecurityGroup':
{
"action": "jrn:jcs:vpc:CreateSecurityGroup",
"resource": "jrn:jcs:vpc::SecurityGroup:",
"implicit_allow": "False"
},
'DeleteSecurityGroup':
{
"action": "jrn:jcs:vpc:DeleteSecurityGroup",
"resource": "jrn:jcs:vpc::SecurityGroup:",
"implicit_allow": "False"
},
'DescribeSecurityGroups':
{
"action": "jrn:jcs:vpc:DescribeSecurityGroups",
"resource": "jrn:jcs:vpc::SecurityGroup:",
"implicit_allow": "False"
},
'AuthorizeSecurityGroupEgress':
{
"action": "jrn:jcs:vpc:AuthorizeSecurityGroupEgress",
"resource": "jrn:jcs:vpc::SecurityGroup:",
"implicit_allow": "False"
},
'AuthorizeSecurityGroupIngress':
{
"action": "jrn:jcs:vpc:AuthorizeSecurityGroupIngress",
"resource": "jrn:jcs:vpc::SecurityGroup:",
"implicit_allow": "False"
},
'RevokeSecurityGroupEgress':
{
"action": "jrn:jcs:vpc:RevokeSecurityGroupEgress",
"resource": "jrn:jcs:vpc::SecurityGroup:",
"implicit_allow": "False"
},
'RevokeSecurityGroupIngress':
{
"action": "jrn:jcs:vpc:RevokeSecurityGroupIngress",
"resource": "jrn:jcs:vpc::SecurityGroup:",
"implicit_allow": "False"
},
'CreateExtnetwork': None,
'UpdateQuota': None,
'ShowQuota' : None
}
def _get_signature(self, req):
"""Extract the signature from the request.
This can be a get/post variable or for version 4 also in a header
called 'Authorization'.
- params['Signature'] == version 0,1,2,3
- params['X-Amz-Signature'] == version 4
- header 'Authorization' == version 4
"""
sig = req.params.get('Signature') or req.params.get('X-Amz-Signature')
if sig is not None:
return sig
if 'Authorization' not in req.headers:
return None
auth_str = req.headers['Authorization']
if not auth_str.startswith('AWS4-HMAC-SHA256'):
return None
return auth_str.partition("Signature=")[2].split(',')[0]
def _get_access(self, req):
"""Extract the access key identifier.
For version 0/1/2/3 this is passed as the AccessKeyId parameter, for
version 4 it is either an X-Amz-Credential parameter or a Credential=
field in the 'Authorization' header string.
"""
access = req.params.get('JCSAccessKeyId')
if access is not None:
return access
cred_param = req.params.get('X-Amz-Credential')
if cred_param:
access = cred_param.split("/")[0]
if access is not None:
return access
if 'Authorization' not in req.headers:
return None
auth_str = req.headers['Authorization']
if not auth_str.startswith('AWS4-HMAC-SHA256'):
return None
cred_str = auth_str.partition("Credential=")[2].split(',')[0]
return cred_str.split("/")[0]
def _get_auth_token(self, req):
"""Extract the Auth token from the request
This is the header X-Auth-Token present in the request
"""
auth_token = None
auth_token = req.headers.get('X-Auth-Token')
return auth_token
def _get_resource_id(self, req, action):
resource = None
resourceId = None
resource = self.resourceIdMapping[action]
if '*' == resource:
resourceId = resource
elif '' == resource:
resourceId = resource
else:
resourceId = req.params.get(resource)
return resourceId
def _get_action_resource_mapping(self, req):
armvalue = None
action = req.params.get('Action')
try:
actiondict = self.armappingdict[action]
if actiondict == None:
# No mapping available. Pass an empty list.
armvalue = []
else:
# Create a new instance of the action resource mapping dictionary for subsequent
# modifications and pass it as a member of a list
armvalue = [dict(actiondict)]
except KeyError:
return armvalue
return armvalue
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
request_id = context.generate_request_id()
# NOTE(alevine) We need to calculate the hash here because
# subsequent access to request modifies the req.body so the hash
# calculation will yield invalid results.
headers = {'Content-Type': 'application/json'}
auth_token = self._get_auth_token(req)
if None == auth_token:
signature = self._get_signature(req)
if not signature:
msg = _("Signature not provided")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
access = self._get_access(req)
if not access:
msg = _("Access key not provided")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
if 'X-Amz-Signature' in req.params or 'Authorization' in req.headers:
params = {}
else:
# Make a copy of args for authentication and signature verification
params = dict(req.params)
# Not part of authentication args
params.pop('Signature', None)
#version = params.pop('Version')
action = req.params.get('Action')
arm = {}
arm = self._get_action_resource_mapping(req)
if None == arm:
msg = _("Action : " + action + " Not Found")
return faults.ec2_error_response(request_id, "ActionNotFound", msg,
status=404)
resourceId = None
resourceId = self._get_resource_id(req, action)
if None == resourceId:
msg = _("Action is : " + action + " and ResourceId Not Found")
return faults.ec2_error_response(request_id, "ResourceIdNotFound", msg,
status=404)
if '' != resourceId:
arm[0]['resource'] = arm[0].get('resource') + resourceId
if auth_token:
data = {}
iam_validation_url = CONF.keystone_token_url
headers['X-Auth-Token'] = auth_token
data['action_resource_list'] = arm
data = jsonutils.dumps(data)
else:
host = req.host.split(':')[0]
cred_dict = {
'access': access,
'action_resource_list': arm,
'body_hash': '',
'headers': {},
'host': host,
'signature': signature,
'verb': req.method,
'path': '/',
'params': params,
}
iam_validation_url = CONF.keystone_sig_url
if "ec2" in iam_validation_url:
creds = {'ec2Credentials': cred_dict}
else:
creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
data = jsonutils.dumps(creds)
verify = CONF.ssl_ca_file or not CONF.ssl_insecure
response = requests.request('POST', iam_validation_url, verify=verify,
data=data, headers=headers)
status_code = response.status_code
if status_code != 200:
LOG.error("Request headers - %s", str(headers))
LOG.error("Request params - %s", str(data))
LOG.error("Response headers - %s", str(response.headers))
LOG.error("Response content - %s", str(response._content))
msg = response.reason
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=status_code)
result = response.json()
try:
user_id = result['user_id']
project_id = result['account_id']
if auth_token:
token_id = auth_token
else:
token_id = result['token_id']
if not token_id or not project_id or not user_id:
raise KeyError
user_name = project_name = 'default'
roles = []
catalog = []
except (AttributeError, KeyError):
LOG.exception(_("Keystone failure"))
msg = _("Failure communicating with keystone")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For',
remote_address)
ctxt = context.RequestContext(user_id, project_id,
user_name=user_name,
project_name=project_name,
roles=roles,
auth_token=token_id,
remote_address=remote_address,
service_catalog=catalog,
api_version=req.params.get('Version'))
req.environ['ec2api.context'] = ctxt
return self.application
class Requestify(wsgi.Middleware):
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
non_args = ['Action', 'Signature', 'JCSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Version', 'Timestamp']
args = dict(req.params)
try:
expired = ec2utils.is_ec2_timestamp_expired(
req.params,
expires=CONF.ec2_timestamp_expiry)
if expired:
msg = _("Timestamp failed validation.")
LOG.exception(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
# Raise KeyError if omitted
action = req.params['Action']
# Fix bug lp:720157 for older (version 1) clients
version = req.params.get('SignatureVersion')
if version and int(version) == 1:
non_args.remove('SignatureMethod')
if 'SignatureMethod' in args:
args.pop('SignatureMethod')
for non_arg in non_args:
args.pop(non_arg, None)
except KeyError:
raise webob.exc.HTTPBadRequest()
except exception.InvalidRequest as err:
raise webob.exc.HTTPBadRequest(explanation=unicode(err))
LOG.debug('action: %s', action)
for key, value in args.items():
LOG.debug('arg: %(key)s\t\tval: %(value)s',
{'key': key, 'value': value})
# Success!
api_request = apirequest.APIRequest(
action, req.params['Version'], args)
req.environ['ec2.request'] = api_request
return self.application
def exception_to_ec2code(ex):
"""Helper to extract EC2 error code from exception.
For other than EC2 exceptions (those without ec2_code attribute),
use exception name.
"""
if hasattr(ex, 'ec2_code'):
code = ex.ec2_code
else:
code = type(ex).__name__
return code
def ec2_error_ex(ex, req, unexpected=False):
"""Return an EC2 error response.
Return an EC2 error response based on passed exception and log
the exception on an appropriate log level:
* DEBUG: expected errors
* ERROR: unexpected errors
All expected errors are treated as client errors and 4xx HTTP
status codes are always returned for them.
Unexpected 5xx errors may contain sensitive information,
suppress their messages for security.
"""
code = exception_to_ec2code(ex)
for status_name in ('code', 'status', 'status_code', 'http_status'):
status = getattr(ex, status_name, None)
if isinstance(status, int):
break
else:
status = 500
if unexpected:
log_fun = LOG.error
log_msg = _("Unexpected %(ex_name)s raised: %(ex_str)s")
exc_info = sys.exc_info()
else:
log_fun = LOG.debug
log_msg = _("%(ex_name)s raised: %(ex_str)s")
exc_info = None
context = req.environ['ec2api.context']
request_id = context.request_id
log_msg_args = {
'ex_name': type(ex).__name__,
'ex_str': unicode(ex)
}
log_fun(log_msg % log_msg_args, context=context, exc_info=exc_info)
if unexpected and status >= 500:
message = _('Unknown error occurred.')
elif getattr(ex, 'message', None):
message = unicode(ex.message)
elif ex.args and any(arg for arg in ex.args):
message = " ".join(map(unicode, ex.args))
else:
message = unicode(ex)
if unexpected:
# Log filtered environment for unexpected errors.
env = req.environ.copy()
for k in env.keys():
if not isinstance(env[k], six.string_types):
env.pop(k)
log_fun(_('Environment: %s') % jsonutils.dumps(env))
return faults.ec2_error_response(request_id, code, message, status=status)
class Executor(wsgi.Application):
"""Execute an EC2 API request.
Executes 'ec2.action', passing 'ec2api.context' and
'ec2.action_args' (all variables in WSGI environ.) Returns an XML
response, or a 400 upon failure.
"""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['ec2api.context']
api_request = req.environ['ec2.request']
try:
result = api_request.invoke(context)
except Exception as ex:
return ec2_error_ex(
ex, req, unexpected=not isinstance(ex, exception.EC2Exception))
else:
resp = webob.Response()
resp.status = 200
resp.headers['Content-Type'] = 'text/xml'
resp.body = str(result)
return resp
|
apache-2.0
| -3,200,540,836,945,918,000
| 38.638138
| 97
| 0.466609
| false
| 4.923349
| false
| false
| false
|
houmie/duelify
|
duelify/settings.py
|
1
|
12912
|
# Django settings for duelify project.
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
#LOGIN_ERROR_URL = '/error'
SOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'
SOCIAL_AUTH_ASSOCIATE_URL_NAME = 'socialauth_associate_complete'
SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/new-users-invited/'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/login-invited/'
SIGNUP_ERROR_URL = '/signup-error/'
LOGIN_ERROR_URL = '/signup-error/'
SOCIAL_AUTH_USER_MODEL = 'duelify_app.User'
TWITTER_CONSUMER_KEY = 'xxxx'
TWITTER_CONSUMER_SECRET = 'xxxx'
FACEBOOK_APP_ID = 'xxx'
FACEBOOK_API_SECRET = 'xxxx'
FACEBOOK_EXTENDED_PERMISSIONS = ['email', 'user_birthday', 'user_location']
#FACEBOOK_EXTRA_DATA = [('user_birthday', 'user_location')]
GOOGLE_OAUTH2_CLIENT_ID = 'xxxx'
GOOGLE_OAUTH2_CLIENT_SECRET = 'xxxx'
SOCIAL_AUTH_REDIRECT_IS_HTTPS = False
#SOCIAL_AUTH_RAISE_EXCEPTIONS = True
AUTH_USER_MODEL = 'duelify_app.User'
SITE_HOST = 'duelify.com:8000'
DEFAULT_FROM_EMAIL = 'info@duelify.com'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = '587'
EMAIL_HOST_USER = 'info@duelify.com'
EMAIL_HOST_PASSWORD = 'xxxxxx'
EMAIL_USE_TLS = True
SERVER_EMAIL = 'info@duelify.com'
EMAIL_SUBJECT_PREFIX = '[duelify]'
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
GEOIP_PATH = '/home/hooman/venuscloud/duelify-env/site/database/'
#GEOS_LIBRARY_PATH = '/opt/geos/lib/libgeos_c.so'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Hooman', 'xxx@xxx.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'duelifydb', # Or path to database file if using sqlite3.
'USER': 'django_user', # Not used with sqlite3.
'PASSWORD': 'houmie123', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
PIPELINE_YUGLIFY_BINARY = '/home/hooman/venuscloud/duelify-env/node_modules/yuglify/bin/yuglify'
PIPELINE_CLOSURE_BINARY = '/home/hooman/venuscloud/duelify-env/bin/closure'
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
#PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.closure.ClosureCompressor'
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_CSS = {
'duelify_css': {
'source_filenames': (
'duelify/duelify.css',
),
'output_filename': 'duelify/duelify.min.css',
},
'bootstrap_datepicker_css': {
'source_filenames': (
'bootstrap-datepicker/css/datepicker.css',
),
'output_filename': 'bootstrap-datepicker/css/datepicker.min.css',
},
'social_buttons_css': {
'source_filenames': (
'css-social-buttons/css/zocial.css',
),
'output_filename': 'css-social-buttons/css/zocial.min.css',
},
}
PIPELINE_JS = {
'duelify_js': {
'source_filenames': (
'duelify/duelify.js',
),
'output_filename': 'duelify/duelify.min.js',
},
'bootstrap_datepicker_js': {
'source_filenames': (
'bootstrap-datepicker/js/bootstrap-datepicker.js',
),
'output_filename': 'bootstrap-datepicker/js/bootstrap-datepicker.min.js',
},
'ajaxform_js': {
'source_filenames': (
'ajaxform/jquery.form.js',
),
'output_filename': 'ajaxform/jquery.form.min.js',
},
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['duelify.com', 'www.duelify.com', '54.225.168.25']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = '/home/hooman/venuscloud/duelify-env/site/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '/home/hooman/venuscloud/duelify-env/site/static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'/home/hooman/venuscloud/duelify-env/site/static_files/',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
#TINYMCE_JS_URL = STATIC_URL + 'tinymce/js/tinymce/tinymce.min.js'
#TINYMCE_JS_URL = '/home/hooman/venuscloud/duelify-env/site/static_files/tinymce/js/tinymce/tinymce.min.js'
#TINYMCE_JS_ROOT = STATIC_ROOT + 'tinymce/js/tinymce'
#TINYMCE_JS_ROOT = '/home/hooman/venuscloud/duelify-env/site/static_files/tinymce/js/tinymce'
TINYMCE_COMPRESSOR = True
TINYMCE_DEFAULT_CONFIG = {
# General options
'mode' : "textareas",
'theme' : "advanced",
'plugins' : "media,pagebreak,style,layer,table,save,advhr,advimage,advlink,emotions,iespell,inlinepopups,insertdatetime,preview,media,searchreplace,print,contextmenu,paste,directionality,fullscreen,noneditable,visualchars,nonbreaking,xhtmlxtras,template,wordcount,advlist,autosave",
# 'plugins': "spellchecker,directionality,paste,searchreplace",
# 'language': "{{ language }}",
# 'directionality': "{{ directionality }}",
# 'spellchecker_languages' : "{{ spellchecker_languages }}",
# 'spellchecker_rpc_url' : "{{ spellchecker_rpc_url }}",
'theme_advanced_buttons1_add' : "media",
'theme_advanced_buttons2_add' : "advimage",
# Theme options
'theme_advanced_buttons1' : "bold,italic,underline,strikethrough,|,justifyleft,justifycenter,justifyright,justifyfull,fontselect,fontsizeselect,", #fullscreen,code",
'theme_advanced_buttons2' : "bullist,numlist,|,outdent,indent,blockquote,|,undo,redo,|,link,unlink,|,forecolor,backcolor",
#'theme_advanced_buttons3' : "tablecontrols,|,hr,sub,sup,|,charmap",
'theme_advanced_toolbar_location' : "top",
'theme_advanced_toolbar_align' : "left",
'theme_advanced_statusbar_location' : "bottom",
'theme_advanced_resizing' : 'true',
#Example content CSS (should be your site CSS)
#content_css : "/css/style.css",
'template_external_list_url' : "lists/template_list.js",
'external_link_list_url' : "lists/link_list.js",
'external_image_list_url' : "lists/image_list.js",
'media_external_list_url' : "lists/media_list.js",
# Style formats
'style_formats' : [
{'title' : 'Bold text', 'inline' : 'strong'},
{'title' : 'Red text', 'inline' : 'span', 'styles' : {'color' : '#ff0000'}},
{'title' : 'Help', 'inline' : 'strong', 'classes' : 'help'},
{'title' : 'Table styles'},
{'title' : 'Table row 1', 'selector' : 'tr', 'classes' : 'tablerow'}
],
'width': '700',
'height': '400'
}
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'il8zx-az!ti=e-@m5u&q54q%_%aidnfj05jq4#c8ldax!h3mn2'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
AUTHENTICATION_BACKENDS = ('social_auth.backends.facebook.FacebookBackend',
'social_auth.backends.google.GoogleOAuth2Backend',
'social_auth.backends.twitter.TwitterBackend',
'django.contrib.auth.backends.ModelBackend',)
#TEMPLATE_CONTEXT_PROCESSORS = (
# "django.contrib.auth.context_processors.auth",
# "django.core.context_processors.debug",
# "django.core.context_processors.i18n",
# "django.core.context_processors.media",
# "django.core.context_processors.static",
# "django.core.context_processors.tz",
# "django.contrib.messages.context_processors.messages",
# "django.core.context_processors.request",
# 'social_auth.context_processors.social_auth_by_name_backends',
# 'social_auth.context_processors.social_auth_backends',
# 'social_auth.context_processors.social_auth_by_type_backends',
# 'social_auth.context_processors.social_auth_login_redirect',
#)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.gzip.GZipMiddleware',
'pipeline.middleware.MinifyHTMLMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'social_auth.middleware.SocialAuthExceptionMiddleware',
# Uncomment the next line for simple clickjacking protection:
#'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
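# python-social-auth runs these pipeline steps in order on every social login, ending with
# the project's own duelify_app.utils.social_media_save hook.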
SOCIAL_AUTH_PIPELINE = (
'social_auth.backends.pipeline.social.social_auth_user',
'social_auth.backends.pipeline.associate.associate_by_email',
'social_auth.backends.pipeline.user.get_username',
'social_auth.backends.pipeline.user.create_user',
'social_auth.backends.pipeline.social.associate_user',
'social_auth.backends.pipeline.social.load_extra_data',
'social_auth.backends.pipeline.user.update_user_details',
'duelify_app.utils.social_media_save',
)
ROOT_URLCONF = 'duelify.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'duelify.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'/home/hooman/venuscloud/duelify-env/site/templates/'
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
'widget_tweaks',
'tinymce',
'pipeline',
'south',
'django.contrib.sitemaps',
'social_auth',
'duelify_app',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
gpl-2.0
| -3,736,456,069,093,995,000
| 36.865103
| 286
| 0.679445
| false
| 3.324408
| false
| false
| false
|
uogbuji/pybibframe
|
lib/writer/exhibitexplorer.py
|
1
|
10035
|
'''
'''
import re
import sys
import logging
import itertools
#from datachef.ids import simple_hashstring
from amara3 import iri
from versa import SUBJECT, RELATIONSHIP, VALUE
BFV = 'http://bibframe.org/vocab/'
WORKCLASS = iri.absolutize('Work', BFV)
INSTANCECLASS = iri.absolutize('Instance', BFV)
TYPE_REL = iri.absolutize('type', BFV)
def process(source, work_sink, instance_sink, objects_sink, annotations_sink, logger=logging):
'''
Take an in-memory BIBFRAME model and emit it in Exhibit-based explorer ready form
'''
subobjs = subobjects(objects_sink)
anns = annotations(annotations_sink)
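    # NOTE: the ``coroutine`` decorator used below is assumed to be the usual helper that
    # primes a generator-based coroutine by advancing it to its first yield, so that
    # receive_items() can be driven with send() at the bottom of this function.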
@coroutine
def receive_items():
'''
Receives each resource bundle and processes it by creating an item
dict which is then forwarded to the sink
'''
ix = 1
while True:
workid = yield
#Extract the statements about the work
wstmts = source.match(workid)
rawid = u'_' + str(ix)
work_item = {
u'id': u'work' + rawid,
u'label': rawid,
#u'label': u'{0}, {1}'.format(row['TPNAML'], row['TPNAMF']),
u'type': u'WorkRecord',
}
#Instance starts with same as work, with leader added
instance_item = {
u'leader': leader,
}
instance_item.update(work_item)
instance_item[u'id'] = u'instance' + rawid
instance_item[u'type'] = u'InstanceRecord'
work_item[u'instance'] = u'instance' + rawid
for cf in rec.xml_select(u'ma:controlfield', prefixes=PREFIXES):
key = u'cftag_' + U(cf.xml_select(u'@tag'))
val = U(cf)
if list(cf.xml_select(u'ma:subfield', prefixes=PREFIXES)):
for sf in cf.xml_select(u'ma:subfield', prefixes=PREFIXES):
code = U(sf.xml_select(u'@code'))
sfval = U(sf)
#For now assume all leader fields are instance level
instance_item[key + code] = sfval
else:
#For now assume all leader fields are instance level
instance_item[key] = val
for df in rec.xml_select(u'ma:datafield', prefixes=PREFIXES):
code = U(df.xml_select(u'@tag'))
key = u'dftag_' + code
val = U(df)
if list(df.xml_select(u'ma:subfield', prefixes=PREFIXES)):
subfields = dict(( (U(sf.xml_select(u'@code')), U(sf)) for sf in df.xml_select(u'ma:subfield', prefixes=PREFIXES) ))
lookup = code
#See if any of the field codes represents a reference to an object which can be materialized
handled = False
if code in MATERIALIZE:
(subst, extra_props) = MATERIALIZE[code]
props = {u'marccode': code}
props.update(extra_props)
#props.update(other_properties)
props.update(subfields)
#work_item[FIELD_RENAMINGS.get(code, code)] = subid
subid = subobjs.add(props)
if code in INSTANCE_FIELDS:
instance_item.setdefault(subst, []).append(subid)
elif code in WORK_FIELDS:
work_item.setdefault(subst, []).append(subid)
handled = True
if code in MATERIALIZE_VIA_ANNOTATION:
(subst, extra_object_props, extra_annotation_props) = MATERIALIZE_VIA_ANNOTATION[code]
object_props = {u'marccode': code}
object_props.update(extra_object_props)
#props.update(other_properties)
#Separate annotation subfields from object subfields
object_subfields = subfields.copy()
annotation_subfields = {}
for k, v in object_subfields.items():
if code+k in ANNOTATIONS_FIELDS:
annotation_subfields[k] = v
del object_subfields[k]
object_props.update(object_subfields)
objectid = subobjs.add(object_props)
ann_props = {subst: objectid, u'on_work': work_item[u'id'], u'on_instance': instance_item[u'id'],}
ann_props.update(extra_annotation_props)
ann_props.update(annotation_subfields)
annid = anns.add(ann_props)
#Note, even though we have the returned annotation ID we do not use it. No link back from work/instance to annotation
print >> sys.stderr, '.',
if code in INSTANCE_FIELDS:
instance_item.setdefault('annotation', []).append(annid)
elif code in WORK_FIELDS:
work_item.setdefault('annotation', []).append(annid)
#The actual subfields go to the annotations sink
#annotations_props = {u'annotates': instance_item[u'id']}
#annotations_props.update(props)
#subid = subobjs.add(annotations_props, annotations_sink)
#The reference is from the instance ID
#instance_item.setdefault(subst, []).append(subid)
handled = True
#work_item.setdefault(FIELD_RENAMINGS.get(code, code), []).append(subid)
#See if any of the field+subfield codes represents a reference to an object which can be materialized
if not handled:
for k, v in subfields.items():
lookup = code + k
if lookup in MATERIALIZE:
(subst, extra_props) = MATERIALIZE[lookup]
props = {u'marccode': code, k: v}
props.update(extra_props)
#print >> sys.stderr, lookup, k, props,
subid = subobjs.add(props)
if lookup in INSTANCE_FIELDS or code in INSTANCE_FIELDS:
instance_item.setdefault(subst, []).append(subid)
elif lookup in WORK_FIELDS or code in WORK_FIELDS:
work_item.setdefault(subst, []).append(subid)
handled = True
else:
field_name = u'dftag_' + lookup
if lookup in FIELD_RENAMINGS:
field_name = FIELD_RENAMINGS[lookup]
                                #Handle the simple substitution of a label name for a MARC code
if lookup in INSTANCE_FIELDS or code in INSTANCE_FIELDS:
instance_item.setdefault(field_name, []).append(v)
elif lookup in WORK_FIELDS or code in WORK_FIELDS:
work_item.setdefault(field_name, []).append(v)
#print >> sys.stderr, lookup, key
elif not handled:
if code in INSTANCE_FIELDS:
instance_item[key] = val
elif code in WORK_FIELDS:
work_item[key] = val
else:
if code in INSTANCE_FIELDS:
instance_item[key] = val
elif code in WORK_FIELDS:
work_item[key] = val
#link = work_item.get(u'cftag_008')
#Handle ISBNs re: https://foundry.zepheira.com/issues/1976
new_instances = []
if not new_instances:
#Make sure it's created as an instance even if it has no ISBN
new_instances.append(instance_item)
instance_ids.append(base_instance_id)
work_item[u'instance'] = instance_ids
special_properties = {}
for k, v in process_leader(leader):
special_properties.setdefault(k, set()).add(v)
for k, v in process_008(instance_item[u'cftag_008']):
special_properties.setdefault(k, set()).add(v)
#We get some repeated values out of leader & 008 processing, and we want to
#Remove dupes so we did so by working with sets then converting to lists
for k, v in special_properties.items():
special_properties[k] = list(v)
instance_item.update(special_properties)
#reduce lists of just one item
for k, v in work_item.items():
if type(v) is list and len(v) == 1:
work_item[k] = v[0]
work_sink.send(work_item)
def send_instance(instance):
for k, v in instance.items():
if type(v) is list and len(v) == 1:
instance[k] = v[0]
instance_sink.send(instance)
for ninst in new_instances:
send_instance(ninst)
#stub_item = {
# u'id': rawid,
# u'label': rawid,
# u'type': u'MarcRecord',
#}
#stub_sink.send(stub_item)
ix += 1
print >> sys.stderr, '+',
return
target = receive_items()
for stmt in source.match(None, TYPE_REL, WORKCLASS):
workid = stmt[SUBJECT]
target.send(workid)
target.close()
return
|
apache-2.0
| 325,627,981,747,450,900
| 41.163866
| 141
| 0.489686
| false
| 4.479911
| false
| false
| false
|
valmynd/MediaFetcher
|
src/plugins/youtube_dl/youtube_dl/extractor/wat.py
|
1
|
5036
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
unified_strdate,
HEADRequest,
int_or_none,
)
class WatIE(InfoExtractor):
_VALID_URL = r'(?:wat:|https?://(?:www\.)?wat\.tv/video/.*-)(?P<id>[0-9a-z]+)'
IE_NAME = 'wat.tv'
_TESTS = [
{
'url': 'http://www.wat.tv/video/soupe-figues-l-orange-aux-epices-6z1uz_2hvf7_.html',
'info_dict': {
'id': '11713067',
'ext': 'mp4',
'title': 'Soupe de figues à l\'orange et aux épices',
'description': 'Retrouvez l\'émission "Petits plats en équilibre", diffusée le 18 août 2014.',
'upload_date': '20140819',
'duration': 120,
},
'params': {
# m3u8 download
'skip_download': True,
},
'expected_warnings': ['HTTP Error 404'],
},
{
'url': 'http://www.wat.tv/video/gregory-lemarchal-voix-ange-6z1v7_6ygkj_.html',
'md5': 'b16574df2c3cd1a36ca0098f2a791925',
'info_dict': {
'id': '11713075',
'ext': 'mp4',
'title': 'Grégory Lemarchal, une voix d\'ange depuis 10 ans (1/3)',
'upload_date': '20140816',
},
'expected_warnings': ["Ce contenu n'est pas disponible pour l'instant."],
},
]
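	# Fallback HTTP format ladder used when manifest extraction fails, as
	# (video bitrate, width, height) tuples.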
_FORMATS = (
(200, 416, 234),
(400, 480, 270),
(600, 640, 360),
(1200, 640, 360),
(1800, 960, 540),
(2500, 1280, 720),
)
def _real_extract(self, url):
video_id = self._match_id(url)
video_id = video_id if video_id.isdigit() and len(video_id) > 6 else compat_str(int(video_id, 36))
# 'contentv4' is used in the website, but it also returns the related
# videos, we don't need them
video_data = self._download_json(
'http://www.wat.tv/interface/contentv4s/' + video_id, video_id)
video_info = video_data['media']
error_desc = video_info.get('error_desc')
if error_desc:
self.report_warning(
'%s returned error: %s' % (self.IE_NAME, error_desc))
chapters = video_info['chapters']
if chapters:
first_chapter = chapters[0]
def video_id_for_chapter(chapter):
return chapter['tc_start'].split('-')[0]
if video_id_for_chapter(first_chapter) != video_id:
self.to_screen('Multipart video detected')
entries = [self.url_result('wat:%s' % video_id_for_chapter(chapter)) for chapter in chapters]
return self.playlist_result(entries, video_id, video_info['title'])
# Otherwise we can continue and extract just one part, we have to use
# the video id for getting the video url
else:
first_chapter = video_info
title = first_chapter['title']
def extract_url(path_template, url_type):
req_url = 'http://www.wat.tv/get/%s' % (path_template % video_id)
head = self._request_webpage(HEADRequest(req_url), video_id, 'Extracting %s url' % url_type, fatal=False)
if head:
red_url = head.geturl()
if req_url != red_url:
return red_url
return None
def remove_bitrate_limit(manifest_url):
return re.sub(r'(?:max|min)_bitrate=\d+&?', '', manifest_url)
formats = []
try:
alt_urls = lambda manifest_url: [re.sub(r'(?:wdv|ssm)?\.ism/', repl + '.ism/', manifest_url) for repl in
('', 'ssm')]
manifest_urls = self._download_json(
'http://www.wat.tv/get/webhtml/' + video_id, video_id)
m3u8_url = manifest_urls.get('hls')
if m3u8_url:
m3u8_url = remove_bitrate_limit(m3u8_url)
for m3u8_alt_url in alt_urls(m3u8_url):
formats.extend(self._extract_m3u8_formats(
m3u8_alt_url, video_id, 'mp4',
'm3u8_native', m3u8_id='hls', fatal=False))
formats.extend(self._extract_f4m_formats(
m3u8_alt_url.replace('ios', 'web').replace('.m3u8', '.f4m'),
video_id, f4m_id='hds', fatal=False))
mpd_url = manifest_urls.get('mpd')
if mpd_url:
mpd_url = remove_bitrate_limit(mpd_url)
for mpd_alt_url in alt_urls(mpd_url):
formats.extend(self._extract_mpd_formats(
mpd_alt_url, video_id, mpd_id='dash', fatal=False))
self._sort_formats(formats)
except ExtractorError:
abr = 64
for vbr, width, height in self._FORMATS:
tbr = vbr + abr
format_id = 'http-%s' % tbr
fmt_url = 'http://dnl.adv.tf1.fr/2/USP-0x0/%s/%s/%s/ssm/%s-%s-64k.mp4' % (
video_id[-4:-2], video_id[-2:], video_id, video_id, vbr)
if self._is_valid_url(fmt_url, video_id, format_id):
formats.append({
'format_id': format_id,
'url': fmt_url,
'vbr': vbr,
'abr': abr,
'width': width,
'height': height,
})
date_diffusion = first_chapter.get('date_diffusion') or video_data.get('configv4', {}).get('estatS4')
upload_date = unified_strdate(date_diffusion) if date_diffusion else None
duration = None
files = video_info['files']
if files:
duration = int_or_none(files[0].get('duration'))
return {
'id': video_id,
'title': title,
'thumbnail': first_chapter.get('preview'),
'description': first_chapter.get('description'),
'view_count': int_or_none(video_info.get('views')),
'upload_date': upload_date,
'duration': duration,
'formats': formats,
}
|
gpl-3.0
| -3,411,929,487,945,467,400
| 30.628931
| 108
| 0.626566
| false
| 2.60705
| false
| false
| false
|
benvanwerkhoven/kernel_tuner
|
examples/cuda/convolution_correct.py
|
1
|
3195
|
#!/usr/bin/env python
""" convolution with correctness checks
This example is mostly the same as the Convolution example. The only
difference is that a naive kernel is used to compute a reference
output. This reference output is used to check the correctness of
every kernel before it is benchmarked.
This is done using the run_kernel() function of the Kernel Tuner and
the `answer` option of the tune_kernel function.
The run_kernel function simply runs a kernel using much of the same
interface as tune_kernel, however, for each tuning_parameter you pass
a single value instead of a list of options. The run_kernel function
returns a list of arguments that contains the output of the kernel.
When calling tune_kernel you specify the `answer` as a list, which
is similar to the arguments list of the kernel. To separate input
and output arguments you insert a `None` value in the answer list
for all arguments that are actually inputs to the kernel. The
values in the answers list that are not None are used to verify
the correctness of every kernel in the parameter space before it is
benchmarked.
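As a minimal sketch of the same verify-before-tune pattern for a
hypothetical element-wise kernel (the kernel name "vector_add", the
problem size and the argument list below are illustrative assumptions,
not part of this convolution example):

    ref = kernel_tuner.run_kernel("vector_add", kernel_string, size, args,
                                  {"block_size_x": 128})
    answer = [ref[0], None, None, None]  # None marks input-only arguments
    kernel_tuner.tune_kernel("vector_add", kernel_string, size, args,
                             {"block_size_x": [32, 64, 128, 256]}, answer=answer)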
"""
import numpy
import kernel_tuner
from collections import OrderedDict
def tune():
with open('convolution.cu', 'r') as f:
kernel_string = f.read()
filter_size = (17, 17)
problem_size = (4096, 4096)
size = numpy.prod(problem_size)
border_size = (filter_size[0]//2*2, filter_size[1]//2*2)
input_size = ((problem_size[0]+border_size[0]) * (problem_size[1]+border_size[1]))
output = numpy.zeros(size).astype(numpy.float32)
input = numpy.random.randn(input_size).astype(numpy.float32)
filter = numpy.random.randn(filter_size[0]*filter_size[1]).astype(numpy.float32)
cmem_args = {'d_filter': filter}
args = [output, input, filter]
tune_params = OrderedDict()
tune_params["filter_width"] = [filter_size[0]]
tune_params["filter_height"] = [filter_size[1]]
tune_params["block_size_x"] = [16*i for i in range(1,9)]
tune_params["block_size_y"] = [2**i for i in range(1,6)]
tune_params["tile_size_x"] = [2**i for i in range(3)]
tune_params["tile_size_y"] = [2**i for i in range(3)]
tune_params["use_padding"] = [0,1] #toggle the insertion of padding in shared memory
tune_params["read_only"] = [0,1] #toggle using the read-only cache
grid_div_x = ["block_size_x", "tile_size_x"]
grid_div_y = ["block_size_y", "tile_size_y"]
#compute the answer using a naive kernel
params = { "block_size_x": 16, "block_size_y": 16}
tune_params["filter_width"] = [filter_size[0]]
tune_params["filter_height"] = [filter_size[1]]
results = kernel_tuner.run_kernel("convolution_naive", kernel_string,
problem_size, args, params,
grid_div_y=["block_size_y"], grid_div_x=["block_size_x"])
#set non-output fields to None
answer = [results[0], None, None]
#start kernel tuning with correctness verification
return kernel_tuner.tune_kernel("convolution_kernel", kernel_string,
problem_size, args, tune_params,
grid_div_y=grid_div_y, grid_div_x=grid_div_x, verbose=True, cmem_args=cmem_args, answer=answer)
if __name__ == "__main__":
tune()
|
apache-2.0
| -1,021,302,405,249,030,900
| 38.9375
| 103
| 0.692019
| false
| 3.352571
| false
| false
| false
|
lacatus/TFM
|
datasets/pets095.py
|
1
|
3494
|
#!/usr/bin/env python
from datasets import cp
from datasets import variables
from datasets import Camera
def loaddataset():
setglobalvariables()
loadcameras()
return getcameras(), loadconfiguration()
def setglobalvariables():
variables.current_dataset_path = variables.datasets_path + '/pets09'
variables.current_video_path = variables.current_dataset_path + \
'/s0/regularflow/time_14_03'
def loadcameras():
global cam1_g1
global cam2_g1
global cam3_g1
global cam4_g1
cam1_g1 = Camera()
cam2_g1 = Camera()
cam3_g1 = Camera()
cam4_g1 = Camera()
cam1_g1.video.readvideo(variables.current_video_path + '/camera001.avi')
cam2_g1.video.readvideo(variables.current_video_path + '/camera002.avi')
cam3_g1.video.readvideo(variables.current_video_path + '/camera003.avi')
cam4_g1.video.readvideo(variables.current_video_path + '/camera004.avi')
cam1_g1.video.readbg(
variables.current_video_path + '/background/camera001.jpg')
cam2_g1.video.readbg(
variables.current_video_path + '/background/camera002.jpg')
cam3_g1.video.readbg(
variables.current_video_path + '/background/camera003.jpg')
cam4_g1.video.readbg(
variables.current_video_path + '/background/camera004.jpg')
cam1_str = variables.current_dataset_path + '/cameracalib/camera001.cfg'
cam2_str = variables.current_dataset_path + '/cameracalib/camera002.cfg'
cam3_str = variables.current_dataset_path + '/cameracalib/camera003.cfg'
cam4_str = variables.current_dataset_path + '/cameracalib/camera004.cfg'
cam1_g1.readconfigfile(cam1_str)
cam2_g1.readconfigfile(cam2_str)
cam3_g1.readconfigfile(cam3_str)
cam4_g1.readconfigfile(cam4_str)
def loadglobalconfiguration(c):
dst = {
'option': c.getint('global', 'option'),
'alpha': c.getfloat('global', 'alpha'),
'beta': c.getfloat('global', 'beta'),
'frame_count': c.getint('global', 'frame_count'),
'threshold_1': c.getint('global', 'threshold_1'),
'threshold_2': c.getint('global', 'threshold_2'),
'waitkey': c.getint('global', 'waitkey')
}
return dst
def loadcamconfiguration(c, cam_id):
dst = {
'win_height': c.getint(cam_id, 'win_height'),
'win_width': c.getint(cam_id, 'win_width'),
'win_min_pix': c.getint(cam_id, 'win_min_pix')
}
return dst
def loadconfiguration():
config_file = variables.current_video_path + '/configuration/config.cfg'
c = cp.ConfigParser()
c.read(config_file)
configuration = {
'global': loadglobalconfiguration(c),
'Camera001': loadcamconfiguration(c, 'Camera001'),
'Camera002': loadcamconfiguration(c, 'Camera002'),
'Camera003': loadcamconfiguration(c, 'Camera003'),
'Camera004': loadcamconfiguration(c, 'Camera004'),
'dir': config_file
}
return configuration
def getcam1():
return cam1_g1
def getcam2():
return cam2_g1
def getcam3():
return cam3_g1
def getcam4():
return cam4_g1
def getcameras():
cam1 = getcam1()
cam2 = getcam2()
cam3 = getcam3()
cam4 = getcam4()
cam1.printcamerainfo()
cam2.printcamerainfo()
cam3.printcamerainfo()
cam4.printcamerainfo()
return [cam1, cam2, cam3, cam4]
def printcamerainfo():
cam1_g1.printcamerainfo()
cam2_g1.printcamerainfo()
cam3_g1.printcamerainfo()
cam4_g1.printcamerainfo()
|
apache-2.0
| 7,430,277,438,438,640,000
| 23.263889
| 76
| 0.655409
| false
| 3.217311
| true
| false
| false
|
noironetworks/heat
|
heat/common/policy.py
|
1
|
6824
|
#
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Based on glance/api/policy.py
"""Policy Engine For Heat."""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from oslo_utils import excutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat import policies
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
DEFAULT_RULES = policy.Rules.from_dict({'default': '!'})
DEFAULT_RESOURCE_RULES = policy.Rules.from_dict({'default': '@'})
ENFORCER = None
class Enforcer(object):
"""Responsible for loading and enforcing rules."""
def __init__(self, scope='heat', exc=exception.Forbidden,
default_rule=DEFAULT_RULES['default'], policy_file=None):
self.scope = scope
self.exc = exc
self.default_rule = default_rule
self.enforcer = policy.Enforcer(
CONF, default_rule=default_rule, policy_file=policy_file)
self.log_not_registered = True
# register rules
self.enforcer.register_defaults(policies.list_rules())
def set_rules(self, rules, overwrite=True):
"""Create a new Rules object based on the provided dict of rules."""
rules_obj = policy.Rules(rules, self.default_rule)
self.enforcer.set_rules(rules_obj, overwrite)
def load_rules(self, force_reload=False):
"""Set the rules found in the json file on disk."""
self.enforcer.load_rules(force_reload)
def _check(self, context, rule, target, exc,
is_registered_policy=False, *args, **kwargs):
"""Verifies that the action is valid on the target in this context.
:param context: Heat request context
:param rule: String representing the action to be checked
:param target: Dictionary representing the object of the action.
:raises heat.common.exception.Forbidden: When permission is denied
(or self.exc if supplied).
:returns: A non-False value if access is allowed.
"""
do_raise = False if not exc else True
credentials = context.to_policy_values()
if is_registered_policy:
try:
return self.enforcer.authorize(rule, target, credentials,
do_raise=do_raise,
exc=exc, action=rule)
except policy.PolicyNotRegistered:
if self.log_not_registered:
with excutils.save_and_reraise_exception():
LOG.exception(_('Policy not registered.'))
else:
raise
else:
return self.enforcer.enforce(rule, target, credentials,
do_raise, exc=exc, *args, **kwargs)
def enforce(self, context, action, scope=None, target=None,
is_registered_policy=False):
"""Verifies that the action is valid on the target in this context.
:param context: Heat request context
:param action: String representing the action to be checked
:param target: Dictionary representing the object of the action.
:raises heat.common.exception.Forbidden: When permission is denied
(or self.exc if supplied).
:returns: A non-False value if access is allowed.
"""
_action = '%s:%s' % (scope or self.scope, action)
_target = target or {}
return self._check(context, _action, _target, self.exc, action=action,
is_registered_policy=is_registered_policy)
def check_is_admin(self, context):
"""Whether or not is admin according to policy.
By default the rule will check whether or not roles contains
'admin' role and is admin project.
:param context: Heat request context
:returns: A non-False value if the user is admin according to policy
"""
return self._check(context, 'context_is_admin', target={}, exc=None,
is_registered_policy=True)
def get_enforcer():
global ENFORCER
if ENFORCER is None:
ENFORCER = Enforcer()
return ENFORCER
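# A minimal usage sketch, assuming a Heat request context and a registered
# rule for the 'stacks' scope (both are assumptions for illustration only):
#
#     enforcer = get_enforcer()
#     enforcer.enforce(context, 'create', scope='stacks',
#                      target={'project_id': context.project_id},
#                      is_registered_policy=True)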
class ResourceEnforcer(Enforcer):
def __init__(self, default_rule=DEFAULT_RESOURCE_RULES['default'],
**kwargs):
super(ResourceEnforcer, self).__init__(
default_rule=default_rule, **kwargs)
self.log_not_registered = False
def _enforce(self, context, res_type, scope=None, target=None,
is_registered_policy=False):
try:
result = super(ResourceEnforcer, self).enforce(
context, res_type,
scope=scope or 'resource_types',
target=target, is_registered_policy=is_registered_policy)
except policy.PolicyNotRegistered:
result = True
except self.exc as ex:
LOG.info(six.text_type(ex))
raise
if not result:
if self.exc:
raise self.exc(action=res_type)
return result
def enforce(self, context, res_type, scope=None, target=None,
is_registered_policy=False):
# NOTE(pas-ha): try/except just to log the exception
result = self._enforce(context, res_type, scope, target,
is_registered_policy=is_registered_policy)
if result:
# check for wildcard resource types
subparts = res_type.split("::")[:-1]
subparts.append('*')
res_type_wc = "::".join(subparts)
try:
return self._enforce(context, res_type_wc, scope, target,
is_registered_policy=is_registered_policy)
except self.exc:
raise self.exc(action=res_type)
return result
def enforce_stack(self, stack, scope=None, target=None,
is_registered_policy=False):
for res in stack.resources.values():
self.enforce(stack.context, res.type(), scope=scope, target=target,
is_registered_policy=is_registered_policy)
|
apache-2.0
| -8,548,374,885,071,395,000
| 37.994286
| 79
| 0.603605
| false
| 4.305363
| false
| false
| false
|
vlukes/dicom2fem
|
setup.py
|
1
|
3448
|
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='dicom2fem',
description='Generation of finite element meshes from DICOM images',
long_description="Generation of finite element meshes using computed " +
"tomography scans. Segmentation is based on the graph cut algorithm.",
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='1.0.0',
url='https://github.com/vlukes/dicom2fem',
author='Vladimir Lukes',
author_email='vlukes@kme.zcu.cz',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Bio-Informatics',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.2',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='fem dicom',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['dist', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=['numpy', 'pysegbase'],
dependency_links=['https://github.com/mjirik/gco_python'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
|
bsd-3-clause
| -117,617,626,211,072,220
| 42.1
| 98
| 0.657193
| false
| 3.981524
| false
| false
| false
|
Herpinemmanuel/Oceanography
|
intergrid.py
|
1
|
8478
|
""" interpolate data given on an Nd rectangular grid, uniform or non-uniform.
Purpose: extend the fast N-dimensional interpolator
`scipy.ndimage.map_coordinates` to non-uniform grids, using `np.interp`.
Background: please look at
http://en.wikipedia.org/wiki/Bilinear_interpolation
http://stackoverflow.com/questions/6238250/multivariate-spline-interpolation-in-python-scipy
http://docs.scipy.org/doc/scipy-dev/reference/generated/scipy.ndimage.interpolation.map_coordinates.html
Example
-------
Say we have rainfall on a 4 x 5 grid of rectangles, lat 52 .. 55 x lon -10 .. -6,
and want to interpolate (estimate) rainfall at 1000 query points
in between the grid points.
# define the grid --
griddata = np.loadtxt(...) # griddata.shape == (4, 5)
lo = np.array([ 52, -10 ]) # lowest lat, lowest lon
hi = np.array([ 55, -6 ]) # highest lat, highest lon
# set up an interpolator function "interfunc()" with class Intergrid --
interfunc = Intergrid( griddata, lo=lo, hi=hi )
# generate 1000 random query points, lo <= [lat, lon] <= hi --
query_points = lo + np.random.uniform( size=(1000, 2) ) * (hi - lo)
# get rainfall at the 1000 query points --
query_values = interfunc( query_points ) # -> 1000 values
What this does:
for each [lat, lon] in query_points:
1) find the square of griddata it's in,
e.g. [52.5, -8.1] -> [0, 3] [0, 4] [1, 4] [1, 3]
2) do bilinear (multilinear) interpolation in that square,
using `scipy.ndimage.map_coordinates` .
Check:
interfunc( lo ) -> griddata[0, 0],
interfunc( hi ) -> griddata[-1, -1] i.e. griddata[3, 4]
Parameters
----------
griddata: numpy array_like, 2d 3d 4d ...
lo, hi: user coordinates of the corners of griddata, 1d array-like, lo < hi
maps: a list of `dim` descriptors of piecewise-linear or nonlinear maps,
e.g. [[50, 52, 62, 63], None] # uniformize lat, linear lon
copy: make a copy of query_points, default True;
copy=False overwrites query_points, runs in less memory
verbose: default 1: print a 1-line summary for each call, with run time
order=1: see `map_coordinates`
prefilter: 0 or False, the default: smoothing B-spline
1 or True: exact-fit interpolating spline (IIR, not C-R)
1/3: Mitchell-Netravali spline, 1/3 B + 2/3 fit
(prefilter is only for order > 1, since order = 1 interpolates)
Non-uniform rectangular grids
-----------------------------
What if our griddata above is at non-uniformly-spaced latitudes,
say [50, 52, 62, 63] ? `Intergrid` can "uniformize" these
before interpolation, like this:
lo = np.array([ 50, -10 ])
hi = np.array([ 63, -6 ])
maps = [[50, 52, 62, 63], None] # uniformize lat, linear lon
interfunc = Intergrid( griddata, lo=lo, hi=hi, maps=maps )
This will map (transform, stretch, warp) the lats in query_points column 0
to array coordinates in the range 0 .. 3, using `np.interp` to do
piecewise-linear (PWL) mapping:
50 51 52 53 54 55 56 57 58 59 60 61 62 63 # lo[0] .. hi[0]
0 .5 1 1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 2 3
`maps[1] None` says to map the lons in query_points column 1 linearly:
-10 -9 -8 -7 -6 # lo[1] .. hi[1]
0 1 2 3 4
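A quick check of the nonlinear lat map above with plain `np.interp`
(matching the table of values):
    np.interp( [50, 53, 62.5, 63], [50, 52, 62, 63], np.arange(4) )
    # -> [0., 1.1, 2.5, 3.]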
More doc: https://denis-bz.github.com/docs/intergrid.html
"""
# split class Gridmap ?
from __future__ import division
from time import time
# warnings
import numpy as np
from scipy.ndimage import map_coordinates, spline_filter
__version__ = "2014-05-09 leif denby" # 9may: fix default argument bug
__author_email__ = "denis-bz-py@t-online.de" # comments welcome, testcases most welcome
#...............................................................................
class Intergrid:
__doc__ = globals()["__doc__"]
def __init__( self, griddata, lo, hi, maps=None, copy=True, verbose=1,
order=1, prefilter=False ):
griddata = np.asanyarray( griddata )
dim = griddata.ndim # - (griddata.shape[-1] == 1) # ??
assert dim >= 2, griddata.shape
self.dim = dim
if np.isscalar(lo):
lo *= np.ones(dim)
if np.isscalar(hi):
hi *= np.ones(dim)
self.loclip = lo = np.asarray_chkfinite( lo ).copy()
self.hiclip = hi = np.asarray_chkfinite( hi ).copy()
assert lo.shape == (dim,), lo.shape
assert hi.shape == (dim,), hi.shape
self.copy = copy
self.verbose = verbose
self.order = order
if order > 1 and 0 < prefilter < 1: # 1/3: Mitchell-Netravali = 1/3 B + 2/3 fit
exactfit = spline_filter( griddata ) # see Unser
griddata += prefilter * (exactfit - griddata)
prefilter = False
self.griddata = griddata
self.prefilter = (prefilter == True)
if maps is None:
maps = [None,] * len(lo)
self.maps = maps
self.nmap = 0
if len(maps) > 0:
assert len(maps) == dim, "maps must have len %d, not %d" % (
dim, len(maps))
# linear maps (map None): Xcol -= lo *= scale -> [0, n-1]
# nonlinear: np.interp e.g. [50 52 62 63] -> [0 1 2 3]
self._lo = np.zeros(dim)
self._scale = np.ones(dim)
for j, (map, n, l, h) in enumerate( zip( maps, griddata.shape, lo, hi )):
## print "test: j map n l h:", j, map, n, l, h
if map is None or callable(map):
self._lo[j] = l
if h > l:
self._scale[j] = (n - 1) / (h - l) # _map lo -> 0, hi -> n - 1
else:
self._scale[j] = 0 # h <= l: X[:,j] -> 0
continue
self.maps[j] = map = np.asanyarray(map)
self.nmap += 1
assert len(map) == n, "maps[%d] must have len %d, not %d" % (
j, n, len(map) )
mlo, mhi = map.min(), map.max()
if not (l <= mlo <= mhi <= h):
print ("Warning: Intergrid maps[%d] min %.3g max %.3g " \
"are outside lo %.3g hi %.3g" % (
j, mlo, mhi, l, h ))
#...............................................................................
def _map_to_uniform_grid( self, X ):
""" clip, map X linear / nonlinear inplace """
np.clip( X, self.loclip, self.hiclip, out=X )
# X nonlinear maps inplace --
for j, map in enumerate(self.maps):
if map is None:
continue
if callable(map):
X[:,j] = map( X[:,j] ) # clip again ?
else:
# PWL e.g. [50 52 62 63] -> [0 1 2 3] --
X[:,j] = np.interp( X[:,j], map, np.arange(len(map)) )
# linear map the rest, inplace (nonlinear _lo 0, _scale 1: noop)
if self.nmap < self.dim:
X -= self._lo
X *= self._scale # (griddata.shape - 1) / (hi - lo)
## print "test: _map_to_uniform_grid", X.T
#...............................................................................
def __call__( self, X, out=None ):
""" query_values = Intergrid(...) ( query_points npt x dim )
"""
X = np.asanyarray(X)
assert X.shape[-1] == self.dim, ("the query array must have %d columns, "
"but its shape is %s" % (self.dim, X.shape) )
Xdim = X.ndim
if Xdim == 1:
X = np.asarray([X]) # a single point in -> scalar out
if self.copy:
X = X.copy()
assert X.ndim == 2, X.shape
npt = X.shape[0]
if out is None:
out = np.empty( npt, dtype=self.griddata.dtype )
t0 = time()
self._map_to_uniform_grid( X ) # X inplace
#...............................................................................
map_coordinates( self.griddata, X.T,
order=self.order, prefilter=self.prefilter,
mode="nearest", # outside -> edge
# test: mode="constant", cval=np.NaN,
output=out )
if self.verbose:
print ("Intergrid: %.3g msec %d points in a %s grid %d maps order %d" % (
(time() - t0) * 1000, npt, self.griddata.shape, self.nmap, self.order ))
return out if Xdim == 2 else out[0]
at = __call__
# end intergrid.py
|
mit
| 2,374,753,777,518,892,500
| 40.763547
| 104
| 0.525124
| false
| 3.372315
| false
| false
| false
|
openstack/rally
|
rally/common/plugin/info.py
|
1
|
4372
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sys
PARAM_OR_RETURNS_REGEX = re.compile(":(?:param|returns)")
RETURNS_REGEX = re.compile(":returns: (?P<doc>.*)", re.S)
PARAM_REGEX = re.compile(r":param (?P<name>[\*\w]+): (?P<doc>.*?)"
r"(?:(?=:param)|(?=:return)|(?=:raises)|\Z)", re.S)
def trim(docstring):
"""trim function from PEP-257"""
if not docstring:
return ""
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Current code/unittests expect a line return at the end of
# multiline docstrings, so keep that behavior here as a
# workaround
if "\n" in docstring:
trimmed.append("")
# Return a single string:
return "\n".join(trimmed)
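# For example:
#   trim("  Short.\n\n    Body text.\n") -> "Short.\n\nBody text.\n"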
def reindent(string):
return "\n".join(line.strip() for line in string.strip().split("\n"))
def parse_docstring(docstring):
"""Parse the docstring into its components.
:returns: a dictionary of form
{
"short_description": ...,
"long_description": ...,
"params": [{"name": ..., "doc": ...}, ...],
"returns": ...
}
"""
short_description = long_description = returns = ""
params = []
if docstring:
docstring = trim(docstring.lstrip("\n"))
lines = docstring.split("\n", 1)
short_description = lines[0]
if len(lines) > 1:
long_description = lines[1].strip()
params_returns_desc = None
match = PARAM_OR_RETURNS_REGEX.search(long_description)
if match:
long_desc_end = match.start()
params_returns_desc = long_description[long_desc_end:].strip()
long_description = long_description[:long_desc_end].rstrip()
if params_returns_desc:
params = [
{"name": name, "doc": trim(doc)}
for name, doc in PARAM_REGEX.findall(params_returns_desc)
]
match = RETURNS_REGEX.search(params_returns_desc)
if match:
returns = reindent(match.group("doc"))
return {
"short_description": short_description,
"long_description": long_description,
"params": params,
"returns": returns
}
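# For illustration, a short docstring run through parse_docstring() comes back
# roughly as follows (whitespace details aside):
#
#     parse_docstring("Do a thing.\n\n:param x: the input\n:returns: the result")
#     # -> {"short_description": "Do a thing.",
#     #     "long_description": "",
#     #     "params": [{"name": "x", "doc": "the input"}],
#     #     "returns": "the result"}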
class InfoMixin(object):
@classmethod
def _get_doc(cls):
"""Return documentary of class
By default it returns docstring of class, but it can be overridden
for example for cases like merging own docstring with parent
"""
return cls.__doc__
@classmethod
def get_info(cls):
doc = parse_docstring(cls._get_doc())
return {
"name": cls.get_name(),
"platform": cls.get_platform(),
"module": cls.__module__,
"title": doc["short_description"],
"description": doc["long_description"],
"parameters": doc["params"],
"schema": getattr(cls, "CONFIG_SCHEMA", None),
"returns": doc["returns"]
}
|
apache-2.0
| -1,733,923,676,359,351,300
| 30.912409
| 78
| 0.576624
| false
| 4.220077
| false
| false
| false
|
beefoo/hollywood-diversity
|
scripts/imdb_get_images.py
|
1
|
2896
|
# -*- coding: utf-8 -*-
# Description:
# This file takes in a .csv file of people and retrieves their images from IMDB if they exist
# Example usage:
# python imdb_get_images.py ../data/people_box_office_top_50_movies_1995-2014_imdb.csv
# python imdb_get_images.py ../data/people_box_office_top_10_movies_2011-2015_imdb_subset.csv
from bs4 import BeautifulSoup
import csv
import sys
import urllib2
if len(sys.argv) < 2:
print "Usage: %s <inputfile csv>" % sys.argv[0]
sys.exit(1)
PEOPLE_FILE = sys.argv[1]
overwrite_existing = False
update_file = True
save_after = 10
images = {}
people = []
headers = []
headers_to_add = ['img']
with open(PEOPLE_FILE, 'rb') as f:
rows = csv.reader(f, delimiter=',')
headers = next(rows, None) # remove header
if 'imdb_id' not in headers:
print PEOPLE_FILE + " must have column <imdb_id>"
sys.exit(1)
# init people list
for h in headers_to_add:
if h not in headers:
headers.append(h)
# populate people list
for row in rows:
person = {}
for i, h in enumerate(headers):
if (i >= len(row)): # doesn't exist, add as blank
person[h] = ''
else:
person[h] = row[i]
people.append(person)
def save_people():
global PEOPLE_FILE
global headers
global people
# Write data back to file
with open(PEOPLE_FILE, 'wb') as f:
w = csv.writer(f)
w.writerow(headers)
for p in people:
row = []
for h in headers:
row.append(p[h])
w.writerow(row)
print('Successfully updated file: '+PEOPLE_FILE)
unsaved = 0
for i, p in enumerate(people):
save = False
# Image was already found for this person
if p['imdb_id'] in images:
people[i]['img'] = images[p['imdb_id']]
unsaved += 1
# Otherwise, fetch remote page and parse for image
elif overwrite_existing or not p['img']:
try:
html_contents = urllib2.urlopen("http://akas.imdb.com/name/nm"+p['imdb_id']+"/").read()
contents = BeautifulSoup(html_contents, 'html.parser')
image_srcs = contents.findAll('link', rel='image_src')
except:
print("URL Error: " + "http://akas.imdb.com/name/nm"+p['imdb_id']+"/")
image_srcs = [{'href': ''}]
image_src = 'none'
# image found
if len(image_srcs):
image_src = image_srcs[0]['href']
# image is default image
if 'imdb_fb_logo' in image_src:
image_src = 'none'
people[i]['img'] = image_src
images[p['imdb_id']] = image_src
unsaved += 1
print 'Found ' + str(i) + '. ' + people[i]['img'] + ' for '+p['imdb_id']
# Save data
if update_file and unsaved >= save_after:
save_people()
unsaved = 0
save_people()
|
mit
| -4,167,225,041,933,074,400
| 28.85567
| 99
| 0.56837
| false
| 3.399061
| false
| false
| false
|
sburnett/bismark-release-manager
|
main.py
|
1
|
17515
|
#!/usr/bin/env python2.7
import argparse
import logging
import os
import subcommands
import tree
def create_groups_subcommands(subparsers):
parser_list_group = subparsers.add_parser(
'list', help='list nodes in a groups')
parser_list_group.add_argument(
'name', type=str, nargs='?', action='store', help='name of the group to list')
parser_list_group.set_defaults(handler=subcommands.list_group)
parser_list_all_groups = subparsers.add_parser(
'list-all', help='list all groups of nodes')
parser_list_all_groups.set_defaults(handler=subcommands.list_all_groups)
parser_new_group = subparsers.add_parser(
'new', help='create a new group of nodes')
parser_new_group.add_argument(
'name', type=str, action='store', help='name of the new group')
parser_new_group.add_argument(
'node', nargs='*', type=str, action='store', help='nodes to add')
parser_new_group.set_defaults(handler=subcommands.new_group)
parser_copy_group = subparsers.add_parser(
'copy', help='copy a group of nodes')
parser_copy_group.add_argument(
'name', type=str, action='store', help='name of the group to copy')
parser_copy_group.add_argument(
'new_name', type=str, action='store', help='name of the new copy')
parser_copy_group.set_defaults(handler=subcommands.copy_group)
parser_delete_group = subparsers.add_parser(
'delete', help='delete a group of nodes')
parser_delete_group.add_argument(
'name', type=str, action='store', help='name of the group to delete')
parser_delete_group.set_defaults(handler=subcommands.delete_group)
parser_add_to_group = subparsers.add_parser(
'add-nodes', help='add nodes to a group')
parser_add_to_group.add_argument(
'group', type=str, action='store', help='name of the group')
parser_add_to_group.add_argument(
'node', nargs='+', type=str, action='store', help='nodes to add')
parser_add_to_group.set_defaults(handler=subcommands.add_to_group)
parser_remove_from_group = subparsers.add_parser(
'remove-nodes', help='remove nodes from a group')
parser_remove_from_group.add_argument(
'group', type=str, action='store', help='name of the group')
parser_remove_from_group.add_argument(
'node', nargs='+', type=str, action='store', help='nodes to remove')
parser_remove_from_group.set_defaults(
handler=subcommands.remove_from_group)
def create_experiments_subcommands(subparsers):
parser_new_experiment = subparsers.add_parser(
'new', help='create a new experiment')
parser_new_experiment.add_argument(
'name', type=str, action='store', help='name of the new experiment')
parser_new_experiment.set_defaults(handler=subcommands.new_experiment)
parser_add_to_experiment = subparsers.add_parser(
'add-package', help='add a package to an experiment')
parser_add_to_experiment.add_argument(
'experiment', type=str, action='store', help='experiment identifier')
parser_add_to_experiment.add_argument(
'group', type=str, action='store', help='enable experiment on this group of routers')
parser_add_to_experiment.add_argument(
'release', type=str, action='store', help='add package for this release (e.g., quirm)')
parser_add_to_experiment.add_argument(
'architecture', type=str, action='store', help='target architecture (e.g., ar71xx)')
parser_add_to_experiment.add_argument(
'package', type=str, action='store', help='name of the package to install')
parser_add_to_experiment.add_argument(
'version', type=str, action='store', help='version of the package')
parser_add_to_experiment.set_defaults(
handler=subcommands.add_to_experiment)
parser_remove_from_experiment = subparsers.add_parser(
'remove-package', help='remove a package from an experiment')
parser_remove_from_experiment.add_argument(
'experiment', type=str, action='store', help='experiment identifier')
parser_remove_from_experiment.add_argument(
'group', type=str, action='store', help='remove packages from this group of routers')
parser_remove_from_experiment.add_argument(
'release', type=str, action='store', help='remove package from this release (e.g., quirm)')
parser_remove_from_experiment.add_argument(
'architecture', type=str, action='store', help='target architecture (e.g., ar71xx)')
parser_remove_from_experiment.add_argument(
'package', type=str, action='store', help='name of the package')
parser_remove_from_experiment.add_argument(
'version', type=str, action='store', help='version of the package')
parser_remove_from_experiment.set_defaults(
handler=subcommands.remove_from_experiment)
parser_list_experiment = subparsers.add_parser(
'list', help='list experiment details')
parser_list_experiment.add_argument(
'experiment', type=str, nargs='?', action='store', help='list details for this experiment')
parser_list_experiment.set_defaults(handler=subcommands.list_experiment)
parser_list_experiment = subparsers.add_parser(
'list-all', help='list all experiments')
parser_list_experiment.set_defaults(
handler=subcommands.list_all_experiments)
parser_list_experiment_packages = subparsers.add_parser(
'list-packages', help='list packages for an experiment')
parser_list_experiment_packages.add_argument(
'experiment', type=str, action='store', help='list packages for this experiment')
parser_list_experiment_packages.set_defaults(
handler=subcommands.list_experiment_packages)
parser_install_by_default = subparsers.add_parser(
'install-by-default', help='Install an experiment by default')
parser_install_by_default.add_argument(
'experiment', type=str, action='store', help='name of the experiment')
parser_install_by_default.add_argument(
'group', nargs='+', type=str, action='store', help='install by default on these routers')
parser_install_by_default.set_defaults(
handler=subcommands.install_by_default)
parser_uninstall_by_default = subparsers.add_parser(
'uninstall-by-default', help="Don't Install an experiment by default")
parser_uninstall_by_default.add_argument(
'experiment', type=str, action='store', help='name of the experiment')
parser_uninstall_by_default.add_argument(
'group', nargs='+', type=str, action='store', help='install by default on these routers')
parser_uninstall_by_default.set_defaults(
handler=subcommands.uninstall_by_default)
parser_require_experiment = subparsers.add_parser(
'require', help='require a group of routers to install an experiment')
parser_require_experiment.add_argument(
'experiment', type=str, action='store', help='name of the experiment')
parser_require_experiment.add_argument(
'group', nargs='+', type=str, action='store', help='require the experiment on these routers')
parser_require_experiment.set_defaults(
handler=subcommands.require_experiment)
parser_unrequire_experiment = subparsers.add_parser(
'unrequire', help='stop requiring a group of routers to install an experiment')
parser_unrequire_experiment.add_argument(
'experiment', type=str, action='store', help='name of the experiment')
parser_unrequire_experiment.add_argument(
'group', nargs='+', type=str, action='store', help='stop requiring the experiment on these routers')
parser_unrequire_experiment.set_defaults(
handler=subcommands.unrequire_experiment)
parser_revoke_experiment = subparsers.add_parser(
'revoke', help='revoke an experiment on a group of routers')
parser_revoke_experiment.add_argument(
'experiment', type=str, action='store', help='name of the experiment')
parser_revoke_experiment.add_argument(
'group', nargs='+', type=str, action='store', help='revoke the experiment on these routers')
parser_revoke_experiment.set_defaults(
handler=subcommands.revoke_experiment)
parser_unrevoke_experiment = subparsers.add_parser(
'unrevoke', help='stop revoking a group of routers to install an experiment')
parser_unrevoke_experiment.add_argument(
'experiment', type=str, action='store', help='name of the experiment')
parser_unrevoke_experiment.add_argument(
'group', nargs='+', type=str, action='store', help='stop revoking the experiment on these routers')
parser_unrevoke_experiment.set_defaults(
handler=subcommands.unrevoke_experiment)
def create_packages_subcommands(subparsers):
parser_add_packages = subparsers.add_parser(
'import', help='import ipk files for a release')
parser_add_packages.add_argument(
'release', type=str, action='store', help='import packages for this release (e.g., quirm)')
parser_add_packages.add_argument(
'ipk', nargs='+', type=str, action='store', help='ipkg files to import')
parser_add_packages.set_defaults(handler=subcommands.add_packages)
parser_list_packages = subparsers.add_parser(
'list', help='list available packages')
parser_list_packages.add_argument(
'release', type=str, nargs='?', action='store', help='list packages for this release (e.g., quirm)')
parser_list_packages.set_defaults(handler=subcommands.list_packages)
parser_list_builtin_packages = subparsers.add_parser(
'list-builtin', help='list builtin packages for a release')
parser_list_builtin_packages.add_argument(
'release', type=str, nargs='?', action='store', help='name of the release (e.g., quirm)')
parser_list_builtin_packages.add_argument(
'architecture', type=str, nargs='?', action='store', help='target architecture (e.g., ar71xx)')
parser_list_builtin_packages.set_defaults(
handler=subcommands.list_builtin_packages)
parser_list_extra_packages = subparsers.add_parser(
'list-extra', help='list "extra" packages for a release')
parser_list_extra_packages.add_argument(
'release', type=str, nargs='?', action='store', help='name of the release (e.g., quirm)')
parser_list_extra_packages.add_argument(
'architecture', type=str, nargs='?', action='store', help='target architecture (e.g., ar71xx)')
parser_list_extra_packages.set_defaults(
handler=subcommands.list_extra_packages)
parser_list_upgrades = subparsers.add_parser(
'list-upgrades', help='list package upgrades for nodes')
parser_list_upgrades.add_argument(
'release', type=str, nargs='?', action='store', help='show upgrades from this release (e.g., quirm)')
parser_list_upgrades.set_defaults(handler=subcommands.list_upgrades)
parser_remove_extra_package = subparsers.add_parser(
'remove-from-extra', help='remove packages from the "extra" set')
parser_remove_extra_package.add_argument(
'release', type=str, action='store', help='remove package from this release (e.g., quirm)')
parser_remove_extra_package.add_argument(
'architecture', type=str, action='store', help='target architecture (e.g., ar71xx)')
parser_remove_extra_package.add_argument(
'package', type=str, action='store', help='name of the package to remove')
parser_remove_extra_package.add_argument(
'version', type=str, action='store', help='version of the package')
parser_remove_extra_package.set_defaults(
handler=subcommands.remove_extra_package)
parser_add_extra_package = subparsers.add_parser(
'add-to-extra', help='add packages to the "extra" set')
parser_add_extra_package.add_argument(
'release', type=str, action='store', help='add package from this release (e.g., quirm)')
parser_add_extra_package.add_argument(
'architecture', type=str, action='store', help='target architecture (e.g., ar71xx)')
parser_add_extra_package.add_argument(
'package', type=str, action='store', help='name of the package to add')
parser_add_extra_package.add_argument(
'version', type=str, action='store', help='version of the package')
parser_add_extra_package.set_defaults(
handler=subcommands.add_extra_package)
parser_upgrade_package = subparsers.add_parser(
'upgrade', help='upgrade a builtin package on a set of routers')
parser_upgrade_package.add_argument(
'group', type=str, action='store', help='upgrade on this group of routers')
parser_upgrade_package.add_argument(
'release', type=str, action='store', help='upgrade package for this release (e.g., quirm)')
parser_upgrade_package.add_argument(
'architecture', type=str, action='store', help='target architecture (e.g., ar71xx)')
parser_upgrade_package.add_argument(
'package', type=str, action='store', help='name of the builtin package to upgrade')
parser_upgrade_package.add_argument(
'version', type=str, action='store', help='new version of the package')
parser_upgrade_package.set_defaults(handler=subcommands.upgrade_package)
def create_releases_subcommands(subparsers):
parser_list_releases = subparsers.add_parser(
'list', help='list all releases')
parser_list_releases.set_defaults(handler=subcommands.list_releases)
parser_list_architectures = subparsers.add_parser(
'list-architectures', help='list architectures for a release')
parser_list_architectures.add_argument(
'release', type=str, action='store', help='name of the release (e.g., quirm)')
parser_list_architectures.set_defaults(
handler=subcommands.list_architectures)
parser_new_release = subparsers.add_parser(
'new', help='create a new release')
parser_new_release.add_argument(
'name', type=str, action='store', help='name of this release (e.g., quirm)')
parser_new_release.add_argument(
'buildroot', type=str, action='store', help='a compiled OpenWRT buildroot for the release')
parser_new_release.set_defaults(handler=subcommands.new_release)
def main():
parser = argparse.ArgumentParser(
description='Publish releases of BISmark images, packages, and experiments')
parser.add_argument('--root', dest='root', action='store',
default='~/bismark-releases', help='store release configuration in this directory')
log_levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
parser.add_argument('--loglevel', dest='loglevel', action='store',
choices=log_levels, default='WARNING', help='control verbosity of logging')
parser.add_argument('--logfile', dest='logfile', action='store',
default=None, help='append logs to this file')
subparsers = parser.add_subparsers(title='commands')
parser_groups = subparsers.add_parser(
'groups', help='Manage groups of nodes')
groups_subparsers = parser_groups.add_subparsers(title='group subcommands')
create_groups_subcommands(groups_subparsers)
parser_experiments = subparsers.add_parser(
'experiments', help='Manage experiments')
experiments_subparsers = parser_experiments.add_subparsers(
title='experiments subcommands')
create_experiments_subcommands(experiments_subparsers)
parser_packages = subparsers.add_parser('packages', help='Manage packages')
packages_subparsers = parser_packages.add_subparsers(
title='packages subcommands')
create_packages_subcommands(packages_subparsers)
parser_releases = subparsers.add_parser('releases', help='Manage releases')
releases_subparsers = parser_releases.add_subparsers(
title='releases subcommands')
create_releases_subcommands(releases_subparsers)
parser_commit = subparsers.add_parser(
'commit', help='commit current release configuration to git')
parser_commit.set_defaults(handler=subcommands.commit)
parser_diff = subparsers.add_parser(
'diff', help='show changes ready to be committed to git')
parser_diff.set_defaults(handler=subcommands.diff)
parser_deploy = subparsers.add_parser('deploy',
help='deploy all releases',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_deploy.add_argument(
'-d', '--destination', type=str,
default='bismark-downloads.noise.gatech.edu:/var/www/downloads.projectbismark.net',
action='store', help='deploy to this directory')
parser_deploy.add_argument(
'-k', '--signingkey', type=str,
default='~/.bismark_signing_key.pem',
action='store', help='sign Packages.gz with this key')
parser_deploy.set_defaults(handler=subcommands.deploy)
parser_deploy = subparsers.add_parser(
'check', help='check validity of the release configuration')
parser_deploy.set_defaults(handler=subcommands.check)
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
filename=args.logfile,
level=getattr(logging, args.loglevel))
releases_tree = tree.BismarkReleasesTree(os.path.expanduser(args.root))
args.handler(releases_tree, args)
if __name__ == '__main__':
main()
|
mit
| -1,297,748,715,572,889,900
| 49.621387
| 109
| 0.686212
| false
| 3.870718
| false
| false
| false
|
Leonidas-from-XIV/whatsonair
|
parsers/fm4.py
|
1
|
1241
|
#!/usr/bin/env python
# -*- encoding: UTF-8 -*-
import base
class FM4Parser(base.StationBase):
"""The Parser for the austrian sidestream radio station
FM4, which is part of ORF.
Look at its homepage http://fm4.orf.at
Maybe it would be better to use this songlist?
http://fm4.orf.at/trackservicepopup/main
But then we lose the ability to parse OE3 as well"""
__station__ = 'FM4'
def __init__(self, url='http://hop.orf.at/img-trackservice/fm4.html',
stream='mms://stream1.orf.at/fm4_live'):
base.StationBase.__init__(self, url)
def parse(self):
"""Call feed first"""
# get the titles and the artists
soup = base.Soup(self.pagecontent)
titles = [node.string for node in
base.select(soup, 'span.tracktitle')]
artists = [node.string for node in
base.select(soup, 'span.artist')]
# combine these
combined = zip(artists, titles)
# get the last artist and title
self.artist, self.title = combined[-1]
def current_track(self):
return u"%s - %s" % (self.artist, self.title)
Parser = FM4Parser
if __name__ == '__main__':
base.test_parser(Parser, 'fm4.html')
|
gpl-3.0
| -8,550,235,871,334,491,000
| 28.547619
| 73
| 0.593876
| false
| 3.515581
| false
| false
| false
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/barpolar/marker/_colorbar.py
|
1
|
11475
|
import _plotly_utils.basevalidators
class ColorbarValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="colorbar", parent_name="barpolar.marker", **kwargs):
super(ColorbarValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "ColorBar"),
data_docs=kwargs.pop(
"data_docs",
"""
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in "pixels". Use `len` to set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this
number. This only has an effect when
`tickformat` is "SI" or "B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format.
And for dates see:
https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's
date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for
fractional seconds with n digits. For example,
*2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.barpola
r.marker.colorbar.Tickformatstop` instances or
dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.barpolar.marker.colorbar.tickformatstopdefaul
ts), sets the default property values to use
for elements of
barpolar.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of
the axis. The default value for inside tick
labels is *hide past domain*. In other cases
the default is *hide past div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.barpolar.marker.co
lorbar.Title` instance or dict with compatible
properties
titlefont
Deprecated: Please use
barpolar.marker.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
barpolar.marker.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
""",
),
**kwargs
)
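# A minimal usage sketch of the attributes documented above (the figure data
# and attribute values are illustrative assumptions, not defaults):
#
#     import plotly.graph_objects as go
#     fig = go.Figure(go.Barpolar(
#         r=[1, 2, 3], theta=[0, 45, 90],
#         marker=dict(color=[1, 2, 3], colorscale="Viridis",
#                     colorbar=dict(title=dict(text="value"), dtick=1, len=0.75))))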
|
mit
| 1,426,902,938,938,346,500
| 46.8125
| 88
| 0.525229
| false
| 4.897567
| false
| false
| false
|
RicardoJohann/frappe
|
frappe/utils/user.py
|
1
|
10944
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _dict
import frappe.share
from frappe.utils import cint
from frappe.boot import get_allowed_reports
from frappe.permissions import get_roles, get_valid_perms
from frappe.core.doctype.domain_settings.domain_settings import get_active_modules
class UserPermissions:
"""
A user permission object can be accessed as `frappe.get_user()`
"""
def __init__(self, name=''):
self.defaults = None
self.name = name or frappe.session.get('user')
self.roles = []
self.all_read = []
self.can_create = []
self.can_read = []
self.can_write = []
self.can_cancel = []
self.can_delete = []
self.can_search = []
self.can_get_report = []
self.can_import = []
self.can_export = []
self.can_print = []
self.can_email = []
self.can_set_user_permissions = []
self.allow_modules = []
self.in_create = []
self.setup_user()
def setup_user(self):
def get_user_doc():
user = None
try:
user = frappe.get_doc("User", self.name).as_dict()
except frappe.DoesNotExistError:
pass
except Exception as e:
# install boo-boo
if not frappe.db.is_table_missing(e): raise
return user
if not frappe.flags.in_install_db and not frappe.flags.in_test:
user_doc = frappe.cache().hget("user_doc", self.name, get_user_doc)
if user_doc:
self.doc = frappe.get_doc(user_doc)
def get_roles(self):
"""get list of roles"""
if not self.roles:
self.roles = get_roles(self.name)
return self.roles
def build_doctype_map(self):
"""build map of special doctype properties"""
active_domains = frappe.get_active_domains()
self.doctype_map = {}
for r in frappe.db.sql("""select name, in_create, issingle, istable,
read_only, restrict_to_domain, module from tabDocType""", as_dict=1):
if (not r.restrict_to_domain) or (r.restrict_to_domain in active_domains):
self.doctype_map[r['name']] = r
def build_perm_map(self):
"""build map of permissions at level 0"""
self.perm_map = {}
for r in get_valid_perms():
dt = r['parent']
if not dt in self.perm_map:
self.perm_map[dt] = {}
for k in frappe.permissions.rights:
if not self.perm_map[dt].get(k):
self.perm_map[dt][k] = r.get(k)
def build_permissions(self):
"""build lists of what the user can read / write / create
quirks:
read_only => Not in Search
in_create => Not in create
"""
self.build_doctype_map()
self.build_perm_map()
user_shared = frappe.share.get_shared_doctypes()
no_list_view_link = []
active_modules = get_active_modules() or []
for dt in self.doctype_map:
dtp = self.doctype_map[dt]
p = self.perm_map.get(dt, {})
if not p.get("read") and (dt in user_shared):
p["read"] = 1
if not dtp.get('istable'):
if p.get('create') and not dtp.get('issingle'):
if dtp.get('in_create'):
self.in_create.append(dt)
else:
self.can_create.append(dt)
elif p.get('write'):
self.can_write.append(dt)
elif p.get('read'):
if dtp.get('read_only'):
# read_only = "User Cannot Search"
self.all_read.append(dt)
no_list_view_link.append(dt)
else:
self.can_read.append(dt)
if p.get('cancel'):
self.can_cancel.append(dt)
if p.get('delete'):
self.can_delete.append(dt)
if (p.get('read') or p.get('write') or p.get('create')):
if p.get('report'):
self.can_get_report.append(dt)
for key in ("import", "export", "print", "email", "set_user_permissions"):
if p.get(key):
getattr(self, "can_" + key).append(dt)
if not dtp.get('istable'):
if not dtp.get('issingle') and not dtp.get('read_only'):
self.can_search.append(dt)
if dtp.get('module') not in self.allow_modules:
if active_modules and dtp.get('module') not in active_modules:
pass
else:
self.allow_modules.append(dtp.get('module'))
self.can_write += self.can_create
self.can_write += self.in_create
self.can_read += self.can_write
self.shared = frappe.db.sql_list("""select distinct share_doctype from `tabDocShare`
where `user`=%s and `read`=1""", self.name)
self.can_read = list(set(self.can_read + self.shared))
self.all_read += self.can_read
for dt in no_list_view_link:
if dt in self.can_read:
self.can_read.remove(dt)
if "System Manager" in self.get_roles():
self.can_import = filter(lambda d: d in self.can_create,
frappe.db.sql_list("""select name from `tabDocType` where allow_import = 1"""))
def get_defaults(self):
import frappe.defaults
self.defaults = frappe.defaults.get_defaults(self.name)
return self.defaults
# update recent documents
def update_recent(self, dt, dn):
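# keep a most-recently-used list of [doctype, name] pairs in the cache,
# capped at 20 entries with the newest entry first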
rdl = frappe.cache().hget("user_recent", self.name) or []
new_rd = [dt, dn]
# clear if exists
for i in range(len(rdl)):
rd = rdl[i]
if rd==new_rd:
del rdl[i]
break
if len(rdl) > 19:
rdl = rdl[:19]
rdl = [new_rd] + rdl
frappe.cache().hset("user_recent", self.name, rdl)
def _get(self, key):
if not self.can_read:
self.build_permissions()
return getattr(self, key)
def get_can_read(self):
"""return list of doctypes that the user can read"""
if not self.can_read:
self.build_permissions()
return self.can_read
def load_user(self):
d = frappe.db.sql("""select email, first_name, last_name, creation,
email_signature, user_type, language, background_image, background_style,
mute_sounds, send_me_a_copy from tabUser where name = %s""", (self.name,), as_dict=1)[0]
if not self.can_read:
self.build_permissions()
d.name = self.name
d.recent = json.dumps(frappe.cache().hget("user_recent", self.name) or [])
d.roles = self.get_roles()
d.defaults = self.get_defaults()
for key in ("can_create", "can_write", "can_read", "can_cancel", "can_delete",
"can_get_report", "allow_modules", "all_read", "can_search",
"in_create", "can_export", "can_import", "can_print", "can_email",
"can_set_user_permissions"):
d[key] = list(set(getattr(self, key)))
d.all_reports = self.get_all_reports()
return d
def get_all_reports(self):
return get_allowed_reports()
def get_user_fullname(user):
fullname = frappe.db.sql("SELECT CONCAT_WS(' ', first_name, last_name) FROM `tabUser` WHERE name=%s", (user,))
return fullname and fullname[0][0] or ''
def get_fullname_and_avatar(user):
first_name, last_name, avatar, name = frappe.db.get_value("User",
user, ["first_name", "last_name", "user_image", "name"])
return _dict({
"fullname": " ".join(filter(None, [first_name, last_name])),
"avatar": avatar,
"name": name
})
def get_system_managers(only_name=False):
"""returns all system manager's user details"""
import email.utils
from frappe.core.doctype.user.user import STANDARD_USERS
system_managers = frappe.db.sql("""SELECT DISTINCT `name`, `creation`,
CONCAT_WS(' ',
CASE WHEN `first_name`= '' THEN NULL ELSE `first_name` END,
CASE WHEN `last_name`= '' THEN NULL ELSE `last_name` END
) AS fullname
FROM `tabUser` AS p
WHERE `docstatus` < 2
AND `enabled` = 1
AND `name` NOT IN ({})
AND exists
(SELECT *
FROM `tabHas Role` AS ur
WHERE ur.parent = p.name
AND ur.role='System Manager')
ORDER BY `creation` DESC""".format(", ".join(["%s"]*len(STANDARD_USERS))),
STANDARD_USERS, as_dict=True)
if only_name:
return [p.name for p in system_managers]
else:
return [email.utils.formataddr((p.fullname, p.name)) for p in system_managers]
def add_role(user, role):
frappe.get_doc("User", user).add_roles(role)
def add_system_manager(email, first_name=None, last_name=None, send_welcome_email=False):
# add user
user = frappe.new_doc("User")
user.update({
"name": email,
"email": email,
"enabled": 1,
"first_name": first_name or email,
"last_name": last_name,
"user_type": "System User",
"send_welcome_email": 1 if send_welcome_email else 0
})
user.insert()
# add roles
roles = frappe.get_all('Role',
fields=['name'],
filters={
'name': ['not in', ('Administrator', 'Guest', 'All')]
}
)
roles = [role.name for role in roles]
user.add_roles(*roles)
def get_enabled_system_users():
# add more fields if required
return frappe.get_all('User',
fields=['email', 'language', 'name'],
filters={
'user_type': 'System User',
'enabled': 1,
'name': ['not in', ('Administrator', 'Guest')]
}
)
def is_website_user():
return frappe.db.get_value('User', frappe.session.user, 'user_type') == "Website User"
def is_system_user(username):
return frappe.db.get_value("User", {"name": username, "enabled": 1, "user_type": "System User"})
def get_users():
from frappe.core.doctype.user.user import get_system_users
users = []
system_managers = frappe.utils.user.get_system_managers(only_name=True)
for user in get_system_users():
users.append({
"full_name": frappe.utils.user.get_user_fullname(user),
"email": user,
"is_system_manager": 1 if (user in system_managers) else 0
})
return users
def set_last_active_to_now(user):
from frappe.utils import now_datetime
frappe.db.set_value("User", user, "last_active", now_datetime())
def disable_users(limits=None):
if not limits:
return
if limits.get('users'):
system_manager = get_system_managers(only_name=True)[-1]
#exclude system manager from active user list
active_users = frappe.db.sql_list("""select name from tabUser
where name not in ('Administrator', 'Guest', %s) and user_type = 'System User' and enabled=1
order by creation desc""", system_manager)
user_limit = cint(limits.get('users')) - 1
if len(active_users) > user_limit:
# if the allowed user limit is 1, deactivate all additional users;
# otherwise extract the additional users from the active user list and deactivate them
if cint(limits.get('users')) != 1:
active_users = active_users[:-1 * user_limit]
for user in active_users:
frappe.db.set_value("User", user, 'enabled', 0)
from frappe.core.doctype.user.user import get_total_users
if get_total_users() > cint(limits.get('users')):
reset_simultaneous_sessions(cint(limits.get('users')))
frappe.db.commit()
def reset_simultaneous_sessions(user_limit):
for user in frappe.db.sql("""select name, simultaneous_sessions from tabUser
where name not in ('Administrator', 'Guest') and user_type = 'System User' and enabled=1
order by creation desc""", as_dict=1):
if user.simultaneous_sessions < user_limit:
user_limit = user_limit - user.simultaneous_sessions
else:
frappe.db.set_value("User", user.name, "simultaneous_sessions", 1)
user_limit = user_limit - 1
def get_link_to_reset_password(user):
link = ''
if not cint(frappe.db.get_single_value('System Settings', 'setup_complete')):
user = frappe.get_doc("User", user)
link = user.reset_password(send_email=False)
frappe.db.commit()
return {
'link': link
}
|
mit
| -2,402,520,111,732,548,000
| 28.106383
| 111
| 0.660636
| false
| 2.911413
| false
| false
| false
|
rwl/PyCIM
|
CIM15/IEC61970/Informative/InfWork/CUContractorItem.py
|
1
|
3475
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class CUContractorItem(IdentifiedObject):
"""Compatible unit contractor item.Compatible unit contractor item.
"""
def __init__(self, bidAmount=0.0, activityCode='', CompatibleUnits=None, status=None, *args, **kw_args):
"""Initialises a new 'CUContractorItem' instance.
@param bidAmount: The amount that a given contractor will charge for performing this unit of work.
@param activityCode: Activity code identifies a specific and distinguishable unit of work.
@param CompatibleUnits:
@param status:
"""
#: The amount that a given contractor will charge for performing this unit of work.
self.bidAmount = bidAmount
#: Activity code identifies a specific and distinguishable unit of work.
self.activityCode = activityCode
self._CompatibleUnits = []
self.CompatibleUnits = [] if CompatibleUnits is None else CompatibleUnits
self.status = status
super(CUContractorItem, self).__init__(*args, **kw_args)
_attrs = ["bidAmount", "activityCode"]
_attr_types = {"bidAmount": float, "activityCode": str}
_defaults = {"bidAmount": 0.0, "activityCode": ''}
_enums = {}
_refs = ["CompatibleUnits", "status"]
_many_refs = ["CompatibleUnits"]
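# CompatibleUnits is a bidirectional many-to-many reference; the property
# setter and the add/remove helpers below keep the remote side's
# _CUContractorItems list consistent with this item's _CompatibleUnits list.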
def getCompatibleUnits(self):
return self._CompatibleUnits
def setCompatibleUnits(self, value):
for p in self._CompatibleUnits:
filtered = [q for q in p.CUContractorItems if q != self]
p._CUContractorItems = filtered
for r in value:
if self not in r._CUContractorItems:
r._CUContractorItems.append(self)
self._CompatibleUnits = value
CompatibleUnits = property(getCompatibleUnits, setCompatibleUnits)
def addCompatibleUnits(self, *CompatibleUnits):
for obj in CompatibleUnits:
if self not in obj._CUContractorItems:
obj._CUContractorItems.append(self)
self._CompatibleUnits.append(obj)
def removeCompatibleUnits(self, *CompatibleUnits):
for obj in CompatibleUnits:
if self in obj._CUContractorItems:
obj._CUContractorItems.remove(self)
self._CompatibleUnits.remove(obj)
status = None
|
mit
| -7,500,218,546,023,297,000
| 40.86747
| 108
| 0.696691
| false
| 4.248166
| false
| false
| false
|
TusharAgey/seventhsem
|
AI/search_algos/home/dfs.py
|
1
|
1653
|
import json
class MyStack: # a simple stack (LIFO) implementation
def __init__(self):
self.elements = []
def push(self,val):
self.elements.append(val)
def pop(self):
val = None
try:
val = self.elements[len(self.elements) - 1]
if len(self.elements) == 1:
self.elements = []
else:
self.elements.reverse()
self.elements = self.elements[1:]
self.elements.reverse()
except:
pass
return val
def IsEmpty(self):
result = False
if len(self.elements) == 0:
result = True
return result
def getNeighbours(nextElem, arrOfArr, visited):
elems = []
i = ord(nextElem) - ord('A')
x = 0
for j in arrOfArr[i]:
if j > 0:
data = chr(x + ord('A'))
if data not in visited:
elems.append(data)
x += 1
return elems
def dfs(input):
visited = []
start = 'A' #considering A as start node always & element with 0 heuristic as goal node
#{"edges": [[0, 3, 4, -1, -1], [-1, 0, 5, 6, 7], [-1, -1, 0, 1, 2], [-1, -1, -1, 0, 1], [-1, -1, -1, -1, 0]]}
for elem in input['heuristics']:
for data in elem:
if elem[data] == 0:
goal = data
finalPath = []
finalPath.append(start)
stack = MyStack()
stack.push(start)
neighbours = []
while stack.IsEmpty() == False:
nextElem = stack.pop()
if nextElem not in finalPath:
finalPath.append(nextElem)
neighbours = getNeighbours(nextElem, input['edges'], finalPath)
for elem in neighbours:
if elem not in finalPath:
stack.push(elem)
print finalPath
return finalPath
js=open('./data/input.json')
data=json.load(js)
finalPath = {"path" : []}
finalPath['path'] = dfs(data)
with open('./data/DFS.json', 'w') as fp:
json.dump(finalPath, fp)
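# Illustrative shape of ./data/input.json (values are assumptions, not taken
# from the repository):
# {"edges": [[0, 3, 4, -1, -1], [-1, 0, 5, 6, 7], [-1, -1, 0, 1, 2],
#            [-1, -1, -1, 0, 1], [-1, -1, -1, -1, 0]],
#  "heuristics": [{"A": 10}, {"B": 7}, {"C": 4}, {"D": 1}, {"E": 0}]}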
|
gpl-3.0
| 2,728,845,798,203,362,300
| 24.446154
| 110
| 0.628554
| false
| 2.674757
| false
| false
| false
|
sryza/freewaydata
|
python/traveltime.py
|
1
|
2503
|
import numpy as np
import pandas as pd
def datetime64_to_microseconds(dt):
return dt.astype('uint64')
def travel_time(start_time, path, measurements_by_station, station_metadata, time_granularity=60*60):
"""Calculate the travel time along the given path at the given start time
Args:
path - list of station IDs that must be traversed to reach the destination
start_time - start time datetime64
station_data - dataframes grouped by station
time_granularity - granularity of samples in seconds
"""
time_granularity *= 1000000 # convert to microseconds
time = datetime64_to_microseconds(start_time)
total_dist = 0
for i in range(len(path)-1):
# calculate how long it takes to get to the next station based on the
# current time
sid1 = path[i]
sid2 = path[i+1]
measurements = measurements_by_station[sid1]
quantized = np.datetime64(time - time % time_granularity)
filtered = measurements[measurements['timestamp'] == quantized]
speed = filtered.iloc[0]['avgspeed']
if np.isnan(speed):
return (np.nan, np.nan)
station1_metadata = station_metadata.loc[sid1]
station2_metadata = station_metadata.loc[sid2]
dist = abs(station1_metadata['Abs_PM'] - station2_metadata['Abs_PM'])
total_dist += dist
# TODO: what if speed is NAN? interpolate
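# segment travel time in hours is dist / speed; multiply by 1000000 * 60 * 60
# to advance the clock in microseconds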
time += 1000000 * 60 * 60 * dist / speed
return (total_dist, np.datetime64(time) - start_time)
def test_travel_time():
path = [213, 224, 285, 485]
station_metadata = pd.DataFrame({'Abs_PM' : pd.Series([0, 60, 75, 85], index=[213, 224, 285, 485])})
base_time = np.datetime64('2013-01-01')
hour = np.timedelta64(1000000 * 60 * 60)
times = pd.Series([base_time, base_time + hour], index=range(2))
speeds = [[40, np.nan], [np.nan, 60], [np.nan, 120], [np.nan, np.nan]]
samples_by_station = {path[i] : pd.DataFrame({'timestamp' : times, 'avgspeed' : speeds[i]}) for i in range(len(path))}
start_time = base_time + np.timedelta64(5 * 1000000 * 60) # start at 5 minutes past the hour
# Traveling 60 miles at 40 MPH should put us in the next hour (total time = 1:35)
# Then traveling 15 miles at 60 MPH should keep us in the same hour (total time = 1:50)
# Then 10 miles at 120 MPH should get us to our destination (total time = 1:55)
# Travel time is 1:55 minus the 5 minutes past the hour we started at, so 1:50
print travel_time(start_time, path, samples_by_station, station_metadata)
if __name__ == '__main__':
test_travel_time()
|
apache-2.0
| -3,499,258,868,963,724,300
| 40.716667
| 120
| 0.681582
| false
| 3.310847
| false
| false
| false
|
ahri/pycurlbrowser
|
pycurlbrowser/rest_client.py
|
1
|
3465
|
# coding: utf-8
"""
REST functionality based off pycurlbrowser's Browser.
"""
try:
import simplejson as json
except ImportError:
import json
from . import Browser
class StatusInformational(Exception):
"""
Represent 1xx status codes
"""
class StatusRedirection(Exception):
"""
Represent 3xx status codes
"""
class StatusClientError(Exception):
"""
Represent 4xx status codes
"""
class StatusServerError(Exception):
"""
Represent 5xx status codes
"""
def status_factory(status):
"""Post exceptions based on HTTP status codes"""
if 100 <= status < 200:
return StatusInformational()
elif 300 <= status < 400:
return StatusRedirection()
elif 400 <= status < 500:
return StatusClientError()
elif 500 <= status < 600:
return StatusServerError()
raise ValueError("Unsupported error code: %d" % status)
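# e.g. status_factory(404) returns a StatusClientError instance and
# status_factory(503) returns a StatusServerError instance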
class RestClient(Browser):
"""
A simple REST client based upon pycurlbrowser
"""
def __init__(self, base, *args, **kwargs):
super(RestClient, self).__init__(*args, **kwargs)
self.base = base
def go(self, obj, method, uid=None, data=None, headers=None):
url = '%(base)s/%(obj)s' % {'base': self.base,
'obj' : obj}
if uid is not None:
url += '/%s' % uid
super(RestClient, self).go(url=url,
method=method,
data=data,
headers=headers)
if self.http_code != 200:
raise status_factory(self.http_code)
return self.http_code
# CRUD
def post(self, obj, data=None, headers=None):
"""Post"""
self.go(obj, 'POST', data=data, headers=headers)
return self.src
def get(self, obj, uid=None, headers=None):
"""Get"""
self.go(obj, 'GET', uid=uid, headers=headers)
return self.src
def head(self, obj, uid=None, headers=None):
"""Head"""
# TODO: care about headers
# TODO: think about self._curl.setopt(pycurl.NOBODY, 1)
self.go(obj, 'HEAD', uid=uid, headers=headers)
def put(self, obj, uid, data=None, headers=None):
"""Put"""
self.go(obj, 'PUT', uid=uid, data=data, headers=headers)
return self.src
def delete(self, obj, uid, headers=None):
"""Delete"""
# TODO: care about headers
self.go(obj, 'DELETE', uid=uid, headers=headers)
return self.src
class RestClientJson(RestClient):
"""
A REST client that only speaks JSON
"""
def post(self, obj, data=None):
"""Post"""
res = super(RestClientJson, self).post(obj, json.dumps(data), headers={'Content-Type': 'text/json'})
if len(res) > 0:
return json.loads(res)
return None
def get(self, obj, uid=None):
"""Get"""
return json.loads(super(RestClientJson, self).get(obj, uid))
def put(self, obj, uid, data=None):
"""Put"""
res = super(RestClientJson, self).put(obj, uid, json.dumps(data), headers={'Content-Type': 'text/json'})
if len(res) > 0:
return json.loads(res)
return None
def delete(self, obj, uid):
"""Delete"""
res = super(RestClientJson, self).delete(obj, uid)
if len(res) > 0:
return json.loads(res)
return None
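# Illustrative usage sketch (endpoint and field names are assumptions):
# client = RestClientJson('http://localhost:8000/api')
# created = client.post('widgets', data={'name': 'foo'})
# fetched = client.get('widgets', uid=created['id'])
# client.delete('widgets', uid=created['id'])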
|
agpl-3.0
| 3,467,552,450,697,954,300
| 24.858209
| 112
| 0.561328
| false
| 3.897638
| false
| false
| false
|
luoguanyang/google-python-exercises
|
basic/wordcount.py
|
1
|
2854
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
###
def wordcountDict(filename):
dict={}
f=open(filename,'rU')
for line in f:
lowerline=line.lower()
words=lowerline.split()
for word in words:
if not (word in dict):
dict[word]=1
else:
dict[word] +=1
f.close()
return dict
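# e.g. for a file containing "The cat the", wordcountDict would return
# {'the': 2, 'cat': 1} (illustrative values only)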
def print_words(filename):
dict= wordcountDict(filename)
keys=sorted(dict.keys())
for key in keys:
print key+' '+str(dict[key])
def print_top(filename):
dict=wordcountDict(filename)
def sortByCount(key):
return dict[key]
keys=sorted(dict,key=sortByCount)#dict is a list of keys?
keys.reverse()
for key in keys[:20]:
print key+' '+str(dict[key])
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
if len(sys.argv) != 3:
print 'usage: ./wordcount.py {--count | --topcount} file'
sys.exit(1)
option = sys.argv[1]
filename = sys.argv[2]
if option == '--count':
print_words(filename)
elif option == '--topcount':
print_top(filename)
else:
print 'unknown option: ' + option
sys.exit(1)
if __name__ == '__main__':
main()
|
apache-2.0
| 5,757,111,439,567,327,000
| 28.729167
| 79
| 0.6822
| false
| 3.626429
| false
| false
| false
|
nzjoel1234/sprinkler
|
driver/input_thread.py
|
1
|
3639
|
import threading
SIMULATED = False
try:
import Adafruit_CharLCD as Lcd
except ImportError:
import lcd_simulator as Lcd
SIMULATED = True
DEBOUNCE_THRESHOLD = 2
SCREEN_TIMEOUT = 60
class InputThreadWrapper(threading.Thread):
def __init__(self, is_button_pressed, buttons, create_home_screen):
threading.Thread.__init__(self)
self._is_button_pressed = is_button_pressed
self._buttons = buttons
self._create_home_screen = create_home_screen
self._stop_event = threading.Event()
self._timeout_stop_event = threading.Event()
self._debounce = {}
self._button_latched = {}
self._view_model = None
self._timeout_lock = threading.RLock()
self._timeout_counter = 0
self._enabled = False
def _start_screen_timeout(self):
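# Restarting the timeout bumps the counter and signals the previous wait, so
# only the most recently started timeout thread can disable the screen.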
with self._timeout_lock:
self._timeout_counter += 1
self._timeout_stop_event.set()
self._timeout_stop_event = threading.Event()
timeout_counter = self._timeout_counter
target = lambda: self._wait_for_screen_timeout(self._timeout_stop_event, timeout_counter)
threading.Thread(target=target).start()
def _wait_for_screen_timeout(self, _stop_event, timeout_counter):
_stop_event.wait(SCREEN_TIMEOUT)
with self._timeout_lock:
if timeout_counter == self._timeout_counter:
self.set_enabled(False)
def set_enabled(self, enabled):
if not self._view_model is None:
self._view_model.set_enabled(enabled)
if not enabled:
self._timeout_stop_event.set()
self._view_model = None
self._enabled = enabled
def set_view_model(self, new_view_model=None):
if not self._view_model is None:
self._view_model.set_enabled(False)
if new_view_model is None:
new_view_model = self._create_home_screen(self.set_view_model)
self._view_model = new_view_model
new_view_model.set_enabled(self._enabled)
def on_button_press(self, button):
if button == Lcd.SELECT:
self.set_enabled(not self._enabled)
if not self._enabled:
return
if self._view_model is None:
self.set_view_model()
self._start_screen_timeout()
if button == Lcd.LEFT:
self._view_model.on_left_pressed()
elif button == Lcd.RIGHT:
self._view_model.on_right_pressed()
elif button == Lcd.UP:
self._view_model.on_up_pressed()
elif button == Lcd.DOWN:
self._view_model.on_down_pressed()
def run(self):
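# Poll the buttons roughly every 10 ms; a press must be observed for
# DEBOUNCE_THRESHOLD consecutive polls before it is latched and dispatched,
# and the latch is only released once the counter falls back to zero.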
while not self._stop_event.is_set():
for button in self._buttons:
if not button in self._debounce:
self._debounce[button] = 0
self._button_latched[button] = False
if self._is_button_pressed(button) \
and self._debounce[button] < DEBOUNCE_THRESHOLD:
self._debounce[button] += 1
elif self._debounce[button] > 0:
self._debounce[button] -= 1
if self._debounce[button] == 0:
self._button_latched[button] = False
if self._debounce[button] == DEBOUNCE_THRESHOLD \
and not self._button_latched[button]:
self._button_latched[button] = True
self.on_button_press(button)
self._stop_event.wait(0.01)
def stop(self):
self._stop_event.set()
self._timeout_stop_event.set()
|
mit
| -8,851,845,360,741,925,000
| 35.39
| 97
| 0.573509
| false
| 3.887821
| false
| false
| false
|
eladnoor/small-molecule-regulation
|
oldcode/meta_analysis_clustering.py
|
1
|
1640
|
# -*- coding: utf-8 -*-
# Cluster and compare incidence of activation and inhibition across species
import settings as S
import pandas as pd
import os
import numpy as np
import pdb
import scipy.stats as st
import matplotlib.pyplot as plt
import seaborn as sns
plt.ion()
plt.close('all')
# Minimum number of interactions required to keep a row or column in the cross tabulations
minval = 3
ki = S.read_cache('inhibiting')
act = S.read_cache('activating')
tax = S.read_cache('TaxonomicData_temp')
# Drop entries without organism
ki = ki[pd.notnull(ki['Organism'])]
act = act[pd.notnull(act['Organism'])]
# Convert LigandID to string
ki['LigandID'] = ki['LigandID'].astype(str)
act['LigandID'] = act['LigandID'].astype(str)
# Drop null values
ki = ki[pd.notnull(ki['LigandID'])]
act = act[pd.notnull(act['LigandID'])]
# We don't want duplicate measurements of the same EC:LigandID in the same organism
ki.index = [':'.join( [ki.at[row,'EC_number'],ki.at[row,'LigandID'],ki.at[row,'Organism']] ) for row in ki.index]
act.index = [':'.join([act.at[row,'EC_number'], act.at[row,'LigandID'], act.at[row,'Organism'] ]) for row in act.index]
ki = ki[~ki.index.duplicated()]
act = act[~act.index.duplicated()]
# Make tables
print('Cross tabulating...')
kitab = pd.crosstab(ki.EC_number, ki.LigandID)
acttab = pd.crosstab(act.EC_number, act.LigandID)
# Keep only rows and columns whose sums exceed minval
kitab = kitab.loc[(kitab.sum(axis=1) > minval), (kitab.sum(axis=0) >minval)]
acttab = acttab.loc[(acttab.sum(axis=1) > minval), (acttab.sum(axis=0) >minval)]
print('Writing to file...')
kitab.to_csv('../cache/inh_crosstab.csv')
acttab.to_csv('../cache/act_crosstab.csv')
|
mit
| 1,391,822,448,769,291,500
| 28.836364
| 119
| 0.704268
| false
| 2.675367
| false
| false
| false
|
yawd/yawd-elfinder
|
elfinder/widgets.py
|
1
|
4686
|
import json
from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms.widgets import Input
from django.utils.safestring import mark_safe
from django.utils.translation import to_locale, get_language, ugettext as _
from fields import ElfinderFile
from conf import settings as ls
class ElfinderWidget(Input):
"""
A widget that opens the elfinder file manager for selecting a file.
``attrs``
The TextInput attrs
``options``
Optional. Sets the elfinder (client) configuration options
``optionset``
The key of the ELFINDER_CONNECTOR_OPTION_SETS setting to use as connector settings
"""
input_type = 'hidden'
def __init__(self, optionset, start_path, attrs={'size':'42'}, options={}):
self.options, self.optionset, self.start_path = options, optionset, start_path
super(ElfinderWidget, self).__init__(attrs)
#locate current locale
self.current_locale = to_locale(get_language())
def _media(self):
"""
Set the widget's javascript and css
"""
js = [ls.ELFINDER_JS_URLS[x] for x in sorted(ls.ELFINDER_JS_URLS)] + [ls.ELFINDER_WIDGET_JS_URL]
screen_css = [ls.ELFINDER_CSS_URLS[x] for x in sorted(ls.ELFINDER_CSS_URLS)] + [ls.ELFINDER_WIDGET_CSS_URL]
#add language file to javascript media
if not self.current_locale.startswith('en') and self.current_locale in ls.ELFINDER_LANGUAGES:
js.append('%selfinder.%s.js' % (ls.ELFINDER_LANGUAGES_ROOT_URL, self.current_locale))
return forms.Media(css= {'screen': screen_css}, js = js)
media = property(_media)
def render(self, name, value, attrs=None):
"""
Display the widget
"""
#if self.optionset in ls.ELFINDER_CONNECTOR_OPTION_SETS and 'uploadAllow' in ls.ELFINDER_CONNECTOR_OPTION_SETS[self.optionset] and ls.ELFINDER_CONNECTOR_OPTION_SETS[self.optionset]['uploadAllow']:
# html = '<div class="elfinder_filetypes">(' + _('Allowed mime types: ') + str(ls.ELFINDER_CONNECTOR_OPTION_SETS[self.optionset]['uploadAllow']) + ')</div>'
#update the elfinder client options
self.options.update({
'url' : reverse('yawdElfinderConnectorView', args=[
self.optionset,
'default' if self.start_path is None else self.start_path
]),
'rememberLastDir' : True if not self.start_path else False,
})
if not 'rmSoundUrl' in self.options:
self.options['rmSoundUrl'] = '%selfinder/sounds/rm.wav' % settings.STATIC_URL
#update the elfinder client language
if not self.current_locale.startswith('en') and self.current_locale in ls.ELFINDER_LANGUAGES:
self.options.update({ 'lang' : self.current_locale })
if value:
if not isinstance(value, ElfinderFile):
value = ElfinderFile(hash_=value, optionset=self.optionset)
file_ = 'file : %s' % json.dumps(value.info)
else:
file_ = 'file : {}'
elfinder = 'elfinder : %s' % json.dumps(self.options)
html = ('%(super)s\n'
'<script>\n'
' (function($) {\n'
' $(document).ready( function() {\n'
' $("#%(id)s").elfinderwidget({\n'
' %(file)s,\n'
' %(elfinder)s,\n'
' keywords : { size : "%(size)s", path : "%(path)s", link : "%(link)s", modified : "%(modified)s", dimensions : "%(dimensions)s", update : "%(update)s", set : "%(set)s", clear : "%(clear)s" }'
' });\n'
' })\n'
' })(yawdelfinder.jQuery)\n'
'</script>' % {
'super' : super(ElfinderWidget, self).render(name, value, attrs),
'id' : attrs['id'],
'file' : file_,
'elfinder' : elfinder,
#these keywords are optional, since they are initialized in elfinderwidget
#we override them for localization purposes
'size' : _('Size'),
'path' : _('Path'),
'link' : _('Link'),
'modified' : _('Modified'),
'dimensions' : _('Dimensions'),
'update' : _('Update'),
'set' : _('Set'),
'clear' : _('Clear')
})
return mark_safe(html)
|
bsd-3-clause
| 8,121,584,248,681,799,000
| 43.207547
| 223
| 0.541827
| false
| 3.941127
| false
| false
| false
|
A3sal0n/FalconGate
|
lib/objects.py
|
1
|
18368
|
import collections
from lib.logger import *
class HostAlertTemplate:
def __init__(self, homenet, alert):
self.homenet = homenet
self.alert = alert
self.subject = "A " + alert[6] + " alert was reported for host " + alert[7]
self.indicators = alert[8].replace('.', '[.]').split('|')
self.references = alert[11].split('|')
self.body = ''
def create_body(self):
self.body = "Falcongate has reported a " + self.alert[6] + " alert for the device below:\r\n\r\n" \
"IP address: " + self.alert[7] + "\r\n" \
"Hostname: " + str(self.homenet.hosts[self.alert[7]].hostname) + "\r\n" \
"MAC address: " + str(self.homenet.hosts[self.alert[7]].mac) + "\r\n" \
"MAC vendor: " + str(self.homenet.hosts[self.alert[7]].vendor) + "\r\n" \
"Operating system family: " + "\r\n".join(self.homenet.hosts[self.alert[7]].os_family) + "\r\n" \
"Device family: " + str("\r\n".join(self.homenet.hosts[self.alert[7]].device_family)) + "\r\n\r\n" \
"Description: " + self.alert[10] + "\r\n\r\n" \
"The following indicators were detected:\r\n" + str("\r\n".join(self.indicators)) + "\r\n\r\n" \
"References:\r\n" + str("\r\n".join(self.references)) + "\r\n\r\n" \
"This is the first time this incident is reported.\r\n" \
"We recommend to investigate this issue as soon as possible."
class AccountBreachAlertTemplate:
def __init__(self, alert):
self.alert = alert
self.subject = "A " + alert[6] + " alert was reported for account " + alert[7]
self.indicators = alert[8].split('|')
self.references = alert[11].split('|')
self.body = ''
def create_body(self):
self.body = "Falcongate has reported a " + self.alert[6] + " alert:\r\n\r\n" \
"Account at risk: " + self.alert[7] + "\r\n\r\n" \
"Description: " + self.alert[10] + "\r\n\r\n" \
"The following indicators were detected:\r\n" + str("\r\n".join(self.indicators)) + "\r\n\r\n" \
"References:\r\n" + str("\r\n".join(self.references)) + "\r\n\r\n" \
"This is the first time this incident is reported.\r\n" \
"We recommend to change immediately the password for this account to prevent further misuse by" \
" malicious hackers."
class DefaultCredsAlertTemplate:
def __init__(self, homenet, alert):
self.homenet = homenet
self.alert = alert
self.subject = "An account with default vendor credentials was found on host " + alert[7]
self.indicators = alert[8].replace('.', '[.]').split('|')
self.references = alert[11].split('|')
self.body = ''
def create_body(self):
self.body = "Falcongate has reported a " + self.alert[6] + " alert for the device below:\r\n\r\n" \
"IP address: " + self.alert[7] + "\r\n" \
"Hostname: " + str(self.homenet.hosts[self.alert[7]].hostname) + "\r\n" \
"MAC address: " + str(self.homenet.hosts[self.alert[7]].mac) + "\r\n" \
"MAC vendor: " + str(self.homenet.hosts[self.alert[7]].vendor) + "\r\n" \
"Operating system family: " + "\r\n".join(self.homenet.hosts[self.alert[7]].os_family) + "\r\n" \
"Device family: " + str("\r\n".join(self.homenet.hosts[self.alert[7]].device_family)) + "\r\n\r\n" \
"Description: " + self.alert[10] + "\r\n\r\n" \
"The following indicators were detected:\r\n" + str("\r\n".join(self.indicators)) + "\r\n\r\n" \
"References:\r\n" + str("\r\n".join(self.references)) + "\r\n\r\n" \
"We recommend you to fix this issue as soon as possible."
class DNSRequest:
def __init__(self):
self.ts = None
self.lseen = None
self.query = None
self.sld = None
self.tld = None
self.cip = None
self.sip = None
self.qtype = None
self.qresult = None
self.bad = False
self.counter = 0
class HTTPObject:
def __init__(self):
self.ts = None
self.lseen = None
self.src_ip = None
self.dst_ip = None
self.dest_port = None
self.host = None
# {'url': ['method', 'status_code', 'user_agent', 'referrer', 'response_body_len', 'proxied', 'mime_type']}
self.urls = {}
class Conn:
def __init__(self):
self.ts = None
self.lseen = None
self.src_ip = None
self.dst_ip = None
self.dst_port = None
self.proto = None
self.service = None
self.direction = None
self.duration = 0
self.client_bytes = 0
self.server_bytes = 0
self.client_packets = 0
self.server_packets = 0
self.src_country_code = None
self.src_country_name = None
self.dst_country_code = None
self.dst_country_name = None
self.counter = 0
class PortScan:
def __init__(self):
self.ts = None
self.lseen = None
self.src_ip = None
self.dst_ip = None
self.duration = None
class Host:
def __init__(self):
self.ts = None
self.lseen = None
self.mac = None
self.ip = None
self.hostname = None
self.vendor = None
self.os_family = []
self.device_family = []
self.dga_domains = []
self.spammed_domains = []
self.user_agents = []
self.dns = {}
self.conns = {}
self.files = {}
self.scans = {}
self.alerts = []
self.interesting_urls = []
self.tcp_ports = []
self.udp_ports = []
self.vuln_accounts = []
class Network:
def __init__(self):
self.pid = None
self.executable = None
self.args = []
self.hosts = {}
self.mac_history = {}
self.interface = None
self.mac = None
self.ip = None
self.gateway = None
self.netmask = None
self.net_cidr = None
self.bad_ips = {'Tor': [], 'Malware': [], 'Botnet': [], 'Hacking': [], 'Phishing': [], 'Ransomware': [],
'Ads': [], 'User': []}
self.bad_domains = {'Tor': [], 'Malware': [], 'Botnet': [], 'Hacking': [], 'Phishing': [], 'Ransomware': [],
'Ads': [], 'Crypto-miners': [], 'User': []}
self.user_blacklist = []
self.user_whitelist = []
self.user_domain_blacklist = []
self.user_domain_whitelist = []
self.target_mime_types = ["application/x-7z-compressed", "application/x-ace-compressed", "application/x-shockwave-flash",
"application/pdf", "application/vnd.android.package-archive", "application/octet-stream",
"application/x-bzip", "application/x-bzip2", "application/x-debian-package", "application/java-archive",
" application/javascript", "application/x-msdownload", "application/x-ms-application", "application/vnd.ms-excel",
"application/vnd.ms-excel.addin.macroenabled.12", "application/vnd.ms-excel.sheet.binary.macroenabled.12",
"application/vnd.ms-excel.template.macroenabled.12", "application/vnd.ms-excel.sheet.macroenabled.12",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"application/vnd.openxmlformats-officedocument.wordprocessingml.template", "application/vnd.ms-powerpoint.slide.macroenabled.12",
"application/vnd.ms-powerpoint.presentation.macroenabled.12", "application/vnd.ms-powerpoint.slideshow.macroenabled.12",
"application/vnd.ms-powerpoint.template.macroenabled.12", "application/msword", "application/vnd.ms-word.document.macroenabled.12",
"application/vnd.ms-word.template.macroenabled.12", "application/x-rar-compressed", "application/x-tar", "application/zip", "application/x-dosexec",
"application/x-ms-installer", "application/x-elf", "application/x-sh", "text/x-perl", "text/x-python", "image/x-icon", "application/x-executable"]
self.tld_whitelist = ['local', 'test', 'localhost', 'example', 'invalid', 'arpa']
# Malicious TLDs
# https://www.tripwire.com/state-of-security/security-data-protection/cyber-security/most-suspicious-tlds-revealed-by-blue-coat-systems/
# https://www.spamhaus.org/statistics/tlds/
self.tld_blacklist = ['zip', 'review', 'country', 'kim', 'cricket', 'science', 'work', 'party', 'gq', 'link',
'gdn', 'stream', 'download', 'top', 'us', 'study', 'click', 'biz']
self.vt_api_key = None
self.dst_emails = None
self.email_watchlist = []
self.fg_intel_creds = None
self.fg_intel_ip = None
self.fg_intel_domains = None
self.vt_api_domain_url = None
self.vt_api_ip_url = None
self.vt_api_file_url = None
self.hibp_api_url = None
self.mailer_mode = None
self.mailer_address = None
self.mailer_pwd = None
self.allow_tor = None
self.last_alert_id = 0
self.blacklist_sources_ip = {}
self.blacklist_sources_domain = {}
class Report:
def __init__(self, alert):
self.alert = alert
self.alert_name = None
self.description = None
self.src_mac = None
self.src_ip = None
self.vendor = None
self.vt_reports = []
class Indicator:
def __init__(self):
self.DGA = None
self.domain = []
self.dst_ip = []
class File:
def __init__(self):
self.ts = None
self.fuid = None
self.lseen = None
self.tx_hosts = None
self.rx_hosts = None
self.conn_id = None
self.mime_type = None
self.md5 = None
self.sha1 = None
self.size = None
self.vt_flag = False
self.vt_positives = 0
self.vt_report = None
class DefaultCredentials:
def __init__(self):
self.service = ''
self.port = ''
self.user = ''
self.password = ''
class Country:
def __init__(self, code, name):
self.code = code
self.name = name
self.is_risky = self.is_risky(code)
self.hourly_stats = {}
@staticmethod
def is_risky(ccode):
risk_countries = ["CN", "US", "TR", "BR", "RU", "VN", "JP", "IN", "TW", "RO", "HU"]
if ccode in risk_countries:
return True
else:
return False
def get_stats(self, stime, etime):
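# Sum the hourly counters whose keys fall inside the [stime, etime] window.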
sout = {"bytes_sent": 0, "bytes_received": 0, "pqt_sent": 0, "pqt_received": 0, "nconn": 0}
skeys = sorted(self.hourly_stats)
try:
for k in skeys:
if stime <= k <= etime:
sout["bytes_sent"] += self.hourly_stats[k].data_sent
sout["bytes_received"] += self.hourly_stats[k].data_received
sout["pqt_sent"] += self.hourly_stats[k].pqt_sent
sout["pqt_received"] += self.hourly_stats[k].pqt_received
sout["nconn"] += self.hourly_stats[k].nconn
except Exception as e:
log.debug('FG-ERROR: ' + str(e.__doc__) + " - " + str(e))
return sout
class HourStats:
def __init__(self):
self.data_sent = 0
self.data_received = 0
self.pqt_sent = 0
self.pqt_received = 0
self.nconn = 0
# Other useful stuff
CC = {
"AF": "AFGHANISTAN",
"AX": "ALAND ISLANDS",
"AL": "ALBANIA",
"DZ": "ALGERIA",
"AS": "AMERICAN SAMOA",
"AD": "ANDORRA",
"AO": "ANGOLA",
"AI": "ANGUILLA",
"AQ": "ANTARCTICA",
"AG": "ANTIGUA AND BARBUDA",
"AR": "ARGENTINA",
"AM": "ARMENIA",
"AW": "ARUBA",
"AU": "AUSTRALIA",
"AT": "AUSTRIA",
"AZ": "AZERBAIJAN",
"BS": "BAHAMAS",
"BH": "BAHRAIN",
"BD": "BANGLADESH",
"BB": "BARBADOS",
"BY": "BELARUS",
"BE": "BELGIUM",
"BZ": "BELIZE",
"BJ": "BENIN",
"BM": "BERMUDA",
"BT": "BHUTAN",
"BO": "BOLIVIA, PLURINATIONAL STATE OF",
"BQ": "BONAIRE, SINT EUSTATIUS AND SABA",
"BA": "BOSNIA AND HERZEGOVINA",
"BW": "BOTSWANA",
"BV": "BOUVET ISLAND",
"BR": "BRAZIL",
"IO": "BRITISH INDIAN OCEAN TERRITORY",
"BN": "BRUNEI DARUSSALAM",
"BG": "BULGARIA",
"BF": "BURKINA FASO",
"BI": "BURUNDI",
"KH": "CAMBODIA",
"CM": "CAMEROON",
"CA": "CANADA",
"CV": "CAPE VERDE",
"KY": "CAYMAN ISLANDS",
"CF": "CENTRAL AFRICAN REPUBLIC",
"TD": "CHAD",
"CL": "CHILE",
"CN": "CHINA",
"CX": "CHRISTMAS ISLAND",
"CC": "COCOS (KEELING) ISLANDS",
"CO": "COLOMBIA",
"KM": "COMOROS",
"CG": "CONGO",
"CD": "CONGO, THE DEMOCRATIC REPUBLIC OF THE",
"CK": "COOK ISLANDS",
"CR": "COSTA RICA",
"CI": "COTE D'IVOIRE",
"HR": "CROATIA",
"CU": "CUBA",
"CW": "CURACAO",
"CY": "CYPRUS",
"CZ": "CZECH REPUBLIC",
"DK": "DENMARK",
"DJ": "DJIBOUTI",
"DM": "DOMINICA",
"DO": "DOMINICAN REPUBLIC",
"EC": "ECUADOR",
"EG": "EGYPT",
"SV": "EL SALVADOR",
"GQ": "EQUATORIAL GUINEA",
"ER": "ERITREA",
"EE": "ESTONIA",
"EU": "EUROPE",
"ET": "ETHIOPIA",
"FK": "FALKLAND ISLANDS (MALVINAS)",
"FO": "FAROE ISLANDS",
"FJ": "FIJI",
"FI": "FINLAND",
"FR": "FRANCE",
"GF": "FRENCH GUIANA",
"PF": "FRENCH POLYNESIA",
"TF": "FRENCH SOUTHERN TERRITORIES",
"GA": "GABON",
"GM": "GAMBIA",
"GE": "GEORGIA",
"DE": "GERMANY",
"GH": "GHANA",
"GI": "GIBRALTAR",
"GR": "GREECE",
"GL": "GREENLAND",
"GD": "GRENADA",
"GP": "GUADELOUPE",
"GU": "GUAM",
"GT": "GUATEMALA",
"GG": "GUERNSEY",
"GN": "GUINEA",
"GW": "GUINEA-BISSAU",
"GY": "GUYANA",
"HT": "HAITI",
"HM": "HEARD ISLAND AND MCDONALD ISLANDS",
"VA": "HOLY SEE (VATICAN CITY STATE)",
"HN": "HONDURAS",
"HK": "HONG KONG",
"HU": "HUNGARY",
"IS": "ICELAND",
"IN": "INDIA",
"ID": "INDONESIA",
"IR": "IRAN, ISLAMIC REPUBLIC OF",
"IQ": "IRAQ",
"IE": "IRELAND",
"IM": "ISLE OF MAN",
"IL": "ISRAEL",
"IT": "ITALY",
"JM": "JAMAICA",
"JP": "JAPAN",
"JE": "JERSEY",
"JO": "JORDAN",
"KZ": "KAZAKHSTAN",
"KE": "KENYA",
"KI": "KIRIBATI",
"KP": "KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF",
"KR": "KOREA, REPUBLIC OF",
"KW": "KUWAIT",
"KG": "KYRGYZSTAN",
"LA": "LAO PEOPLE'S DEMOCRATIC REPUBLIC",
"LV": "LATVIA",
"LB": "LEBANON",
"LS": "LESOTHO",
"LR": "LIBERIA",
"LY": "LIBYA",
"LI": "LIECHTENSTEIN",
"LT": "LITHUANIA",
"LU": "LUXEMBOURG",
"MO": "MACAO",
"MK": "MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF",
"MG": "MADAGASCAR",
"MW": "MALAWI",
"MY": "MALAYSIA",
"MV": "MALDIVES",
"ML": "MALI",
"MT": "MALTA",
"MH": "MARSHALL ISLANDS",
"MQ": "MARTINIQUE",
"MR": "MAURITANIA",
"MU": "MAURITIUS",
"YT": "MAYOTTE",
"MX": "MEXICO",
"FM": "MICRONESIA, FEDERATED STATES OF",
"MD": "MOLDOVA, REPUBLIC OF",
"MC": "MONACO",
"MN": "MONGOLIA",
"ME": "MONTENEGRO",
"MS": "MONTSERRAT",
"MA": "MOROCCO",
"MZ": "MOZAMBIQUE",
"MM": "MYANMAR",
"NA": "NAMIBIA",
"NR": "NAURU",
"NP": "NEPAL",
"NL": "NETHERLANDS",
"NC": "NEW CALEDONIA",
"NZ": "NEW ZEALAND",
"NI": "NICARAGUA",
"NE": "NIGER",
"NG": "NIGERIA",
"NU": "NIUE",
"NF": "NORFOLK ISLAND",
"MP": "NORTHERN MARIANA ISLANDS",
"NO": "NORWAY",
"OM": "OMAN",
"PK": "PAKISTAN",
"PW": "PALAU",
"PS": "PALESTINE, STATE OF",
"PA": "PANAMA",
"PG": "PAPUA NEW GUINEA",
"PY": "PARAGUAY",
"PE": "PERU",
"PH": "PHILIPPINES",
"PN": "PITCAIRN",
"PL": "POLAND",
"PT": "PORTUGAL",
"PR": "PUERTO RICO",
"QA": "QATAR",
"RE": "REUNION",
"RO": "ROMANIA",
"RU": "RUSSIAN FEDERATION",
"RW": "RWANDA",
"BL": "SAINT BARTHELEMY",
"SH": "SAINT HELENA, ASCENSION AND TRISTAN DA CUNHA",
"KN": "SAINT KITTS AND NEVIS",
"LC": "SAINT LUCIA",
"MF": "SAINT MARTIN (FRENCH PART)",
"PM": "SAINT PIERRE AND MIQUELON",
"VC": "SAINT VINCENT AND THE GRENADINES",
"WS": "SAMOA",
"SM": "SAN MARINO",
"ST": "SAO TOME AND PRINCIPE",
"SA": "SAUDI ARABIA",
"SN": "SENEGAL",
"RS": "SERBIA",
"SC": "SEYCHELLES",
"SL": "SIERRA LEONE",
"SG": "SINGAPORE",
"SX": "SINT MAARTEN (DUTCH PART)",
"SK": "SLOVAKIA",
"SI": "SLOVENIA",
"SB": "SOLOMON ISLANDS",
"SO": "SOMALIA",
"ZA": "SOUTH AFRICA",
"GS": "SOUTH GEORGIA AND THE SOUTH SANDWICH ISLANDS",
"SS": "SOUTH SUDAN",
"ES": "SPAIN",
"LK": "SRI LANKA",
"SD": "SUDAN",
"SR": "SURINAME",
"SJ": "SVALBARD AND JAN MAYEN",
"SZ": "SWAZILAND",
"SE": "SWEDEN",
"CH": "SWITZERLAND",
"SY": "SYRIAN ARAB REPUBLIC",
"TW": "TAIWAN, PROVINCE OF CHINA",
"TJ": "TAJIKISTAN",
"TZ": "TANZANIA, UNITED REPUBLIC OF",
"TH": "THAILAND",
"TL": "TIMOR-LESTE",
"TG": "TOGO",
"TK": "TOKELAU",
"TO": "TONGA",
"TT": "TRINIDAD AND TOBAGO",
"TN": "TUNISIA",
"TR": "TURKEY",
"TM": "TURKMENISTAN",
"TC": "TURKS AND CAICOS ISLANDS",
"TV": "TUVALU",
"UG": "UGANDA",
"UA": "UKRAINE",
"AE": "UNITED ARAB EMIRATES",
"GB": "UNITED KINGDOM",
"US": "UNITED STATES",
"UM": "UNITED STATES MINOR OUTLYING ISLANDS",
"UY": "URUGUAY",
"UZ": "UZBEKISTAN",
"VU": "VANUATU",
"VE": "VENEZUELA, BOLIVARIAN REPUBLIC OF",
"VN": "VIET NAM",
"VG": "VIRGIN ISLANDS, BRITISH",
"VI": "VIRGIN ISLANDS, U.S.",
"WF": "WALLIS AND FUTUNA",
"EH": "WESTERN SAHARA",
"YE": "YEMEN",
"ZM": "ZAMBIA",
"ZW": "ZIMBABWE",
}
|
gpl-3.0
| -3,719,637,293,147,984,000
| 32.642857
| 182
| 0.520579
| false
| 2.929973
| false
| false
| false
|
raymondanthony/youtube-dl
|
youtube_dl/extractor/ustream.py
|
1
|
3720
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_urlparse,
)
class UstreamIE(InfoExtractor):
_VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<videoID>\d+)'
IE_NAME = 'ustream'
_TEST = {
'url': 'http://www.ustream.tv/recorded/20274954',
'md5': '088f151799e8f572f84eb62f17d73e5c',
'info_dict': {
'id': '20274954',
'ext': 'flv',
'uploader': 'Young Americans for Liberty',
'title': 'Young Americans for Liberty February 7, 2012 2:28 AM',
},
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('videoID')
# some sites use this embed format (see: http://github.com/rg3/youtube-dl/issues/2990)
if m.group('type') == 'embed/recorded':
video_id = m.group('videoID')
desktop_url = 'http://www.ustream.tv/recorded/' + video_id
return self.url_result(desktop_url, 'Ustream')
if m.group('type') == 'embed':
video_id = m.group('videoID')
webpage = self._download_webpage(url, video_id)
desktop_video_id = self._html_search_regex(
r'ContentVideoIds=\["([^"]*?)"\]', webpage, 'desktop_video_id')
desktop_url = 'http://www.ustream.tv/recorded/' + desktop_video_id
return self.url_result(desktop_url, 'Ustream')
video_url = 'http://tcdn.ustream.tv/video/%s' % video_id
webpage = self._download_webpage(url, video_id)
self.report_extraction(video_id)
video_title = self._html_search_regex(r'data-title="(?P<title>.+)"',
webpage, 'title')
uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
webpage, 'uploader', fatal=False, flags=re.DOTALL)
thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"',
webpage, 'thumbnail', fatal=False)
return {
'id': video_id,
'url': video_url,
'ext': 'flv',
'title': video_title,
'uploader': uploader,
'thumbnail': thumbnail,
}
class UstreamChannelIE(InfoExtractor):
_VALID_URL = r'https?://www\.ustream\.tv/channel/(?P<slug>.+)'
IE_NAME = 'ustream:channel'
_TEST = {
'url': 'http://www.ustream.tv/channel/channeljapan',
'info_dict': {
'id': '10874166',
},
'playlist_mincount': 17,
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
display_id = m.group('slug')
webpage = self._download_webpage(url, display_id)
channel_id = self._html_search_meta('ustream:channel_id', webpage)
BASE = 'http://www.ustream.tv'
next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
video_ids = []
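# Page through the channel's social stream: each JSON reply lists more video
# ids and carries the URL of the next page in 'nextUrl' until it is empty.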
while next_url:
reply = self._download_json(
compat_urlparse.urljoin(BASE, next_url), display_id,
note='Downloading video information (next: %d)' % (len(video_ids) + 1))
video_ids.extend(re.findall(r'data-content-id="(\d.*)"', reply['data']))
next_url = reply['nextUrl']
entries = [
self.url_result('http://www.ustream.tv/recorded/' + vid, 'Ustream')
for vid in video_ids]
return {
'_type': 'playlist',
'id': channel_id,
'display_id': display_id,
'entries': entries,
}
|
unlicense
| 8,401,983,062,399,923,000
| 35.831683
| 102
| 0.53414
| false
| 3.526066
| false
| false
| false
|
valdt/Wumpus
|
server/serverHandler.py
|
1
|
1417
|
import pickle, socket, time, threading
class ServerHandler:
def __init__(self,host,port):
self.activePlayers = []
self.serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Starting arguments for the socket are general defaults which I took
self.serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # from the official python documentation.
self.serverSocket.bind((host, port)) #Lock'n'Load ... bind*
self.serverSocket.listen(10) #lissening for new connections
def pulse(self,payload,clientsocket): #Testing connection to client.
try:
defaultError = ["error","Replie took to long and TTL expired."]
clientsocket.send(pickle.dumps(payload, -1))
ttl = 0
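# Read until the client replies; after more than ten reads that return
# nothing, give up and return the default error.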
while True:
ttl += 1
data = clientsocket.recv(2048)
if data and data != "":
return pickle.loads(data)
elif ttl > 10:
return defaultError
except:
defaultError = ["error","Function failed"]
return defaultError
def getPlayerNames(self): # Go through all active players, grabbing their names and appending them to a list; used in filters around the program.
playerNames = {}
for player in self.activePlayers:
playerNames[player.name] = player
return playerNames
|
lgpl-3.0
| 3,241,649,935,972,491,000
| 47.862069
| 151
| 0.62103
| false
| 4.455975
| false
| false
| false
|
mcrav/pyxtal
|
xtal/xtal.py
|
1
|
1634
|
import numpy as np
def get_metric_matrix(a, b, c, alpha, beta, gamma):
'''
Create metric matrix as numpy array from unit cell parameters.
Return metric matrix.
'''
return np.array([[a**2, a*b*np.cos(gamma), a*c*np.cos(beta)],
[b*a*np.cos(gamma), b**2, b*c*np.cos(alpha)],
[c*a*np.cos(beta), c*b*np.cos(alpha), c**2]])
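# The returned matrix is the metric tensor G of the cell (angles in radians):
# for fractional coordinate differences x, the squared distance is x^T G x,
# which get_bond_distance below expands term by term.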
def get_bond_distance(atom1_coords, atom2_coords, a, b, c, alpha, beta, gamma):
'''
Get distance between 2 atomic positions. Return distance.
'''
delta1 = a*(atom1_coords[0] - atom2_coords[0])
delta2 = b*(atom1_coords[1] - atom2_coords[1])
delta3 = c*(atom1_coords[2] - atom2_coords[2])
return (np.sqrt(delta1**2 + delta2**2 + delta3**2 +
(2*delta1*delta2*np.cos(gamma)) +
(2*delta1*delta3*np.cos(beta)) +
(2*delta2*delta3*np.cos(alpha))))
def get_bond_angle(atom1_coords, atom2_coords, atom3_coords,
a, b, c, alpha, beta, gamma):
'''
Get angle between 3 atomic positions. Return angle.
'''
r = get_bond_distance(atom1_coords, atom2_coords,
a, b, c, alpha, beta, gamma)
s = get_bond_distance(atom2_coords, atom3_coords,
a, b, c, alpha, beta, gamma)
X1 = np.array(np.array(atom2_coords) - np.array(atom1_coords))
X2 = np.array(np.array(atom2_coords) - np.array(atom3_coords))
metrix_matrix = get_metric_matrix(a, b, c, alpha, beta, gamma)
cosphi = (np.dot(np.dot(np.transpose(X1),metrix_matrix),X2)) / (r*s)
angle = np.arccos(cosphi)
degAngle = np.degrees(angle)
return degAngle
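# Illustrative check (assumed values, not from the source): in a cubic cell
# with a = b = c = 4.0 and all angles pi/2 radians, fractional positions
# (0, 0, 0) and (0.5, 0, 0) are 2.0 apart:
# get_bond_distance([0, 0, 0], [0.5, 0, 0], 4.0, 4.0, 4.0,
#                   np.pi/2, np.pi/2, np.pi/2)  # -> 2.0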
|
mit
| 2,571,340,122,225,649,000
| 39.85
| 79
| 0.586903
| false
| 2.881834
| false
| false
| false
|
nischu7/paramiko
|
paramiko/client.py
|
1
|
21630
|
# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
L{SSHClient}.
"""
from binascii import hexlify
import getpass
import os
import socket
import warnings
from paramiko.agent import Agent
from paramiko.common import *
from paramiko.config import SSH_PORT
from paramiko.dsskey import DSSKey
from paramiko.hostkeys import HostKeys
from paramiko.resource import ResourceManager
from paramiko.rsakey import RSAKey
from paramiko.ssh_exception import SSHException, BadHostKeyException
from paramiko.transport import Transport
from paramiko.util import retry_on_signal
class MissingHostKeyPolicy (object):
"""
Interface for defining the policy that L{SSHClient} should use when the
SSH server's hostname is not in either the system host keys or the
application's keys. Pre-made classes implement policies for automatically
adding the key to the application's L{HostKeys} object (L{AutoAddPolicy}),
and for automatically rejecting the key (L{RejectPolicy}).
This function may be used to ask the user to verify the key, for example.
"""
def missing_host_key(self, client, hostname, key):
"""
Called when an L{SSHClient} receives a server key for a server that
isn't in either the system or local L{HostKeys} object. To accept
the key, simply return. To reject, raise an exception (which will
be passed to the calling application).
"""
pass
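# Illustrative custom policy sketch (not part of paramiko; names are examples):
# class PromptPolicy(MissingHostKeyPolicy):
#     def missing_host_key(self, client, hostname, key):
#         answer = raw_input('Trust host %s? [y/N] ' % hostname)
#         if answer.lower() != 'y':
#             raise SSHException('Host %r rejected by user' % hostname)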
class AutoAddPolicy (MissingHostKeyPolicy):
"""
Policy for automatically adding the hostname and new host key to the
local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
"""
def missing_host_key(self, client, hostname, key):
client._host_keys.add(hostname, key.get_name(), key)
if client._host_keys_filename is not None:
client.save_host_keys(client._host_keys_filename)
client._log(DEBUG, 'Adding %s host key for %s: %s' %
(key.get_name(), hostname, hexlify(key.get_fingerprint())))
class RejectPolicy (MissingHostKeyPolicy):
"""
Policy for automatically rejecting the unknown hostname & key. This is
used by L{SSHClient}.
"""
def missing_host_key(self, client, hostname, key):
client._log(DEBUG, 'Rejecting %s host key for %s: %s' %
(key.get_name(), hostname, hexlify(key.get_fingerprint())))
raise SSHException('Server %r not found in known_hosts' % hostname)
class WarningPolicy (MissingHostKeyPolicy):
"""
Policy for logging a python-style warning for an unknown host key, but
accepting it. This is used by L{SSHClient}.
"""
def missing_host_key(self, client, hostname, key):
warnings.warn('Unknown %s host key for %s: %s' %
(key.get_name(), hostname, hexlify(key.get_fingerprint())))
class SSHClient (object):
"""
A high-level representation of a session with an SSH server. This class
wraps L{Transport}, L{Channel}, and L{SFTPClient} to take care of most
aspects of authenticating and opening channels. A typical use case is::
client = SSHClient()
client.load_system_host_keys()
client.connect('ssh.example.com')
stdin, stdout, stderr = client.exec_command('ls -l')
You may pass in explicit overrides for authentication and server host key
checking. The default mechanism is to try to use local key files or an
SSH agent (if one is running).
@since: 1.6
"""
def __init__(self):
"""
Create a new SSHClient.
"""
self._system_host_keys = HostKeys()
self._host_keys = HostKeys()
self._host_keys_filename = None
self._log_channel = None
self._policy = RejectPolicy()
self._transport = None
self._agent = None
def load_system_host_keys(self, filename=None):
"""
Load host keys from a system (read-only) file. Host keys read with
this method will not be saved back by L{save_host_keys}.
This method can be called multiple times. Each new set of host keys
will be merged with the existing set (new replacing old if there are
conflicts).
If C{filename} is left as C{None}, an attempt will be made to read
keys from the user's local "known hosts" file, as used by OpenSSH,
and no exception will be raised if the file can't be read. This is
probably only useful on posix.
@param filename: the filename to read, or C{None}
@type filename: str
@raise IOError: if a filename was provided and the file could not be
read
"""
if filename is None:
# try the user's .ssh key file, and mask exceptions
filename = os.path.expanduser('~/.ssh/known_hosts')
try:
self._system_host_keys.load(filename)
except IOError:
pass
return
self._system_host_keys.load(filename)
def load_host_keys(self, filename):
"""
Load host keys from a local host-key file. Host keys read with this
method will be checked I{after} keys loaded via L{load_system_host_keys},
but will be saved back by L{save_host_keys} (so they can be modified).
The missing host key policy L{AutoAddPolicy} adds keys to this set and
saves them, when connecting to a previously-unknown server.
This method can be called multiple times. Each new set of host keys
will be merged with the existing set (new replacing old if there are
conflicts). When automatically saving, the last hostname is used.
@param filename: the filename to read
@type filename: str
@raise IOError: if the filename could not be read
"""
self._host_keys_filename = filename
self._host_keys.load(filename)
def save_host_keys(self, filename):
"""
Save the host keys back to a file. Only the host keys loaded with
L{load_host_keys} (plus any added directly) will be saved -- not any
host keys loaded with L{load_system_host_keys}.
@param filename: the filename to save to
@type filename: str
@raise IOError: if the file could not be written
"""
# update local host keys from file (in case other SSH clients
# have written to the known_hosts file in the meantime).
if self.known_hosts is not None:
self.load_host_keys(self.known_hosts)
f = open(filename, 'w')
for hostname, keys in self._host_keys.items():
for keytype, key in keys.items():
f.write('%s %s %s\n' % (hostname, keytype, key.get_base64()))
f.close()
def get_host_keys(self):
"""
Get the local L{HostKeys} object. This can be used to examine the
local host keys or change them.
@return: the local host keys
@rtype: L{HostKeys}
"""
return self._host_keys
def set_log_channel(self, name):
"""
Set the channel for logging. The default is C{"paramiko.transport"}
but it can be set to anything you want.
@param name: new channel name for logging
@type name: str
"""
self._log_channel = name
def set_missing_host_key_policy(self, policy):
"""
Set the policy to use when connecting to a server that doesn't have a
host key in either the system or local L{HostKeys} objects. The
default policy is to reject all unknown servers (using L{RejectPolicy}).
You may substitute L{AutoAddPolicy} or write your own policy class.
@param policy: the policy to use when receiving a host key from a
previously-unknown server
@type policy: L{MissingHostKeyPolicy}
"""
self._policy = policy
def connect(self, hostname, port=SSH_PORT, username=None, password=None, pkey=None,
key_filename=None, timeout=None, allow_agent=True, look_for_keys=True,
compress=False, sock=None):
"""
Connect to an SSH server and authenticate to it. The server's host key
is checked against the system host keys (see L{load_system_host_keys})
and any local host keys (L{load_host_keys}). If the server's hostname
is not found in either set of host keys, the missing host key policy
is used (see L{set_missing_host_key_policy}). The default policy is
to reject the key and raise an L{SSHException}.
Authentication is attempted in the following order of priority:
- The C{pkey} or C{key_filename} passed in (if any)
- Any key we can find through an SSH agent
- Any "id_rsa" or "id_dsa" key discoverable in C{~/.ssh/}
- Plain username/password auth, if a password was given
If a private key requires a password to unlock it, and a password is
passed in, that password will be used to attempt to unlock the key.
@param hostname: the server to connect to
@type hostname: str
@param port: the server port to connect to
@type port: int
@param username: the username to authenticate as (defaults to the
current local username)
@type username: str
@param password: a password to use for authentication or for unlocking
a private key
@type password: str
@param pkey: an optional private key to use for authentication
@type pkey: L{PKey}
@param key_filename: the filename, or list of filenames, of optional
private key(s) to try for authentication
@type key_filename: str or list(str)
@param timeout: an optional timeout (in seconds) for the TCP connect
@type timeout: float
@param allow_agent: set to False to disable connecting to the SSH agent
@type allow_agent: bool
@param look_for_keys: set to False to disable searching for discoverable
private key files in C{~/.ssh/}
@type look_for_keys: bool
@param compress: set to True to turn on compression
@type compress: bool
@param sock: an open socket or socket-like object (such as a
L{Channel}) to use for communication to the target host
@type sock: socket
@raise BadHostKeyException: if the server's host key could not be
verified
@raise AuthenticationException: if authentication failed
@raise SSHException: if there was any other error connecting or
establishing an SSH session
@raise socket.error: if a socket error occurred while connecting
"""
if not sock:
for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
if socktype == socket.SOCK_STREAM:
af = family
addr = sockaddr
break
else:
# some OS like AIX don't indicate SOCK_STREAM support, so just guess. :(
                af, _, _, _, addr = socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
sock = socket.socket(af, socket.SOCK_STREAM)
if timeout is not None:
try:
sock.settimeout(timeout)
except:
pass
retry_on_signal(lambda: sock.connect(addr))
t = self._transport = Transport(sock)
t.use_compression(compress=compress)
if self._log_channel is not None:
t.set_log_channel(self._log_channel)
t.start_client()
ResourceManager.register(self, t)
server_key = t.get_remote_server_key()
keytype = server_key.get_name()
if port == SSH_PORT:
server_hostkey_name = hostname
else:
server_hostkey_name = "[%s]:%d" % (hostname, port)
our_server_key = self._system_host_keys.get(server_hostkey_name, {}).get(keytype, None)
if our_server_key is None:
our_server_key = self._host_keys.get(server_hostkey_name, {}).get(keytype, None)
if our_server_key is None:
# will raise exception if the key is rejected; let that fall out
self._policy.missing_host_key(self, server_hostkey_name, server_key)
# if the callback returns, assume the key is ok
our_server_key = server_key
if server_key != our_server_key:
raise BadHostKeyException(hostname, server_key, our_server_key)
if username is None:
username = getpass.getuser()
if key_filename is None:
key_filenames = []
elif type(key_filename) == str:
key_filenames = [ key_filename ]
else:
key_filenames = key_filename
self._auth(username, password, pkey, key_filenames, allow_agent, look_for_keys)
def close(self):
"""
Close this SSHClient and its underlying L{Transport}.
"""
if self._transport is None:
return
self._transport.close()
self._transport = None
        if self._agent is not None:
self._agent.close()
self._agent = None
def exec_command(self, command, bufsize=-1, timeout=None, get_pty=False):
"""
Execute a command on the SSH server. A new L{Channel} is opened and
the requested command is executed. The command's input and output
streams are returned as python C{file}-like objects representing
stdin, stdout, and stderr.
@param command: the command to execute
@type command: str
@param bufsize: interpreted the same way as by the built-in C{file()} function in python
@type bufsize: int
        @param timeout: set command's channel timeout. See L{Channel.settimeout}
@type timeout: int
@return: the stdin, stdout, and stderr of the executing command
@rtype: tuple(L{ChannelFile}, L{ChannelFile}, L{ChannelFile})
@raise SSHException: if the server fails to execute the command
"""
chan = self._transport.open_session()
        if get_pty:
chan.get_pty()
chan.settimeout(timeout)
chan.exec_command(command)
stdin = chan.makefile('wb', bufsize)
stdout = chan.makefile('rb', bufsize)
stderr = chan.makefile_stderr('rb', bufsize)
return stdin, stdout, stderr
def invoke_shell(self, term=b'vt100', width=80, height=24, width_pixels=0,
height_pixels=0):
"""
Start an interactive shell session on the SSH server. A new L{Channel}
is opened and connected to a pseudo-terminal using the requested
terminal type and size.
@param term: the terminal type to emulate (for example, C{"vt100"})
@type term: str
@param width: the width (in characters) of the terminal window
@type width: int
@param height: the height (in characters) of the terminal window
@type height: int
@param width_pixels: the width (in pixels) of the terminal window
@type width_pixels: int
@param height_pixels: the height (in pixels) of the terminal window
@type height_pixels: int
@return: a new channel connected to the remote shell
@rtype: L{Channel}
@raise SSHException: if the server fails to invoke a shell
"""
chan = self._transport.open_session()
chan.get_pty(term, width, height, width_pixels, height_pixels)
chan.invoke_shell()
return chan
def open_sftp(self):
"""
Open an SFTP session on the SSH server.
@return: a new SFTP session object
@rtype: L{SFTPClient}
"""
return self._transport.open_sftp_client()
def get_transport(self):
"""
Return the underlying L{Transport} object for this SSH connection.
This can be used to perform lower-level tasks, like opening specific
kinds of channels.
@return: the Transport for this connection
@rtype: L{Transport}
"""
return self._transport
def _auth(self, username, password, pkey, key_filenames, allow_agent, look_for_keys):
"""
Try, in order:
- The key passed in, if one was passed in.
- Any key we can find through an SSH agent (if allowed).
- Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (if allowed).
- Plain username/password auth, if a password was given.
(The password might be needed to unlock a private key, or for
two-factor authentication [for which it is required].)
"""
saved_exception = None
two_factor = False
allowed_types = []
if pkey is not None:
try:
self._log(DEBUG, 'Trying SSH key %s' % hexlify(pkey.get_fingerprint()))
allowed_types = self._transport.auth_publickey(username, pkey)
two_factor = (allowed_types == [b'password'])
if not two_factor:
return
except SSHException as e:
saved_exception = e
if not two_factor:
for key_filename in key_filenames:
for pkey_class in (RSAKey, DSSKey):
try:
key = pkey_class.from_private_key_file(key_filename, password)
self._log(DEBUG, 'Trying key %s from %s' % (hexlify(key.get_fingerprint()), key_filename))
self._transport.auth_publickey(username, key)
two_factor = (allowed_types == [b'password'])
if not two_factor:
return
break
except SSHException as e:
saved_exception = e
if not two_factor and allow_agent:
            if self._agent is None:
self._agent = Agent()
for key in self._agent.get_keys():
try:
self._log(DEBUG, 'Trying SSH agent key %s' % hexlify(key.get_fingerprint()))
# for 2-factor auth a successfully auth'd key will result in ['password']
allowed_types = self._transport.auth_publickey(username, key)
two_factor = (allowed_types == [b'password'])
if not two_factor:
return
break
except SSHException as e:
saved_exception = e
if not two_factor:
keyfiles = []
rsa_key = os.path.expanduser('~/.ssh/id_rsa')
dsa_key = os.path.expanduser('~/.ssh/id_dsa')
if os.path.isfile(rsa_key):
keyfiles.append((RSAKey, rsa_key))
if os.path.isfile(dsa_key):
keyfiles.append((DSSKey, dsa_key))
# look in ~/ssh/ for windows users:
rsa_key = os.path.expanduser('~/ssh/id_rsa')
dsa_key = os.path.expanduser('~/ssh/id_dsa')
if os.path.isfile(rsa_key):
keyfiles.append((RSAKey, rsa_key))
if os.path.isfile(dsa_key):
keyfiles.append((DSSKey, dsa_key))
if not look_for_keys:
keyfiles = []
for pkey_class, filename in keyfiles:
try:
key = pkey_class.from_private_key_file(filename, password)
self._log(DEBUG, 'Trying discovered key %s in %s' % (hexlify(key.get_fingerprint()), filename))
# for 2-factor auth a successfully auth'd key will result in ['password']
allowed_types = self._transport.auth_publickey(username, key)
two_factor = (allowed_types == [b'password'])
if not two_factor:
return
break
except SSHException as e:
saved_exception = e
except IOError as e:
saved_exception = e
if password is not None:
try:
self._transport.auth_password(username, password)
return
except SSHException as e:
saved_exception = e
elif two_factor:
raise SSHException('Two-factor authentication requires a password')
# if we got an auth-failed exception earlier, re-raise it
if saved_exception is not None:
raise saved_exception
raise SSHException('No authentication methods available')
def _log(self, level, msg):
self._transport._log(level, msg)
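# --- Editor's note: hedged usage sketch, not part of the original source. ---
# A minimal illustration of the client API defined above: load the system
# known_hosts, auto-add unknown server keys, run one command, and close.
# Hostname and credentials are placeholders; SSHClient and AutoAddPolicy are
# assumed to be defined/imported elsewhere in this module (both are referenced
# in the docstrings above).
if __name__ == '__main__':
    client = SSHClient()
    client.load_system_host_keys()                       # ~/.ssh/known_hosts
    client.set_missing_host_key_policy(AutoAddPolicy())
    client.connect('ssh.example.com', username='demo', password='demo-password')
    stdin, stdout, stderr = client.exec_command('uname -a')
    print(stdout.read())
    client.close()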
|
lgpl-2.1
| 8,268,515,702,681,514,000
| 39.204461
| 139
| 0.605594
| false
| 4.301909
| false
| false
| false
|
MWers/docker-docset
|
bin/add-dash-anchors.py
|
1
|
1248
|
#!/usr/bin/env python
"""
Add Dash docs anchor tags to html source.
"""
import argparse
import os
import re
import sys
import urllib
parser = argparse.ArgumentParser()
parser.add_argument('filename',
help=('The file to add dash doc anchors to.'))
parser.add_argument('-v', '--verbose',
dest='verbose',
action='store_true')
args = parser.parse_args()
if not os.path.isfile(args.filename):
    print 'Error: File %s does not exist' % args.filename
    sys.exit(1)
html = open(args.filename).read()
# Use regex to add dash docs anchors
def dashrepl(match):
(hopen, id, name, hclose) = match.group(1, 2, 3, 4)
dashname = name
dashname = re.sub('<.*?>', '', dashname)
dashname = re.sub('[^a-zA-Z0-9\.\(\)\?\',:; ]', '-', dashname)
dashname = urllib.quote(dashname)
dash = ('<a name="//apple_ref/cpp/Section/%s" class="dashAnchor"></a>' %
(dashname))
header = '<h%s id="%s">%s</h%s>' % (hopen, id, name, hclose)
return "%s\n%s" % (dash, header)
html = re.sub('<h([1-2]) id="(.*?)">(.*?)</h([1-2])>', dashrepl, html)
with open(args.filename, 'w') as f:
f.write(html)
if args.verbose:
print 'Added dash docs anchors to %s' % args.filename
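# --- Editor's note: hedged example, not part of the original script. ---
# Illustration of the transformation performed by dashrepl/re.sub above: a
# header such as
#     <h2 id="getting-started">Getting Started</h2>
# becomes (Dash anchor prepended, name percent-encoded by urllib.quote):
#     <a name="//apple_ref/cpp/Section/Getting%20Started" class="dashAnchor"></a>
#     <h2 id="getting-started">Getting Started</h2>
# Example invocation (the path is a placeholder):
#     python bin/add-dash-anchors.py -v docs/index.html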
|
mit
| 5,308,139,273,396,154,000
| 25
| 76
| 0.584135
| false
| 3.096774
| false
| false
| false
|
sontek/bulby
|
setup.py
|
1
|
1741
|
'''
Setup configuration
'''
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
from pip.req import parse_requirements
from pip.download import PipSession
# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements(
'requirements/install.txt', session=PipSession()
)
reqs = [str(ir.req) for ir in install_reqs]
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
with open(path.join(here, 'CHANGES.rst'), encoding='utf-8') as f:
long_description = '%s\n\n%s' % (long_description, f.read())
setup(
name='bulby',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.1.dev0',
description='Manages the phillips hue lightbulbs',
long_description=long_description,
url='https://github.com/sontek/bulby.git',
author='John Anderson',
author_email='sontek@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
keywords='etl extract transform load',
packages=find_packages(exclude=['docs', 'tests*']),
install_requires=reqs,
setup_requires=['setuptools-git'],
entry_points={
'paste.app_factory': [
'main=liberator:main',
],
},
)
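# --- Editor's note: hedged usage sketch, not part of the original file. ---
# `reqs` above ends up as a plain list of requirement strings parsed from
# requirements/install.txt; the pip.req/pip.download imports only work with an
# old pip release that still exposes those internals. Typical local usage:
#     pip install -e .         # develop install
#     python setup.py sdist    # build a source distribution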
|
mit
| -981,037,826,163,586,700
| 30.089286
| 78
| 0.677771
| false
| 3.642259
| false
| false
| false
|
ray-project/ray
|
python/ray/util/collective/tests/distributed_gpu_tests/test_distributed_allreduce.py
|
1
|
5765
|
"""Test the collective allreduice API on a distributed Ray cluster."""
import pytest
import ray
from ray.util.collective.types import ReduceOp
import cupy as cp
import torch
from ray.util.collective.tests.util import create_collective_workers
@pytest.mark.parametrize("group_name", ["default", "test", "123?34!"])
@pytest.mark.parametrize("world_size", [2, 3, 4])
def test_allreduce_different_name(ray_start_distributed_2_nodes_4_gpus,
group_name, world_size):
actors, _ = create_collective_workers(
num_workers=world_size, group_name=group_name)
results = ray.get([a.do_allreduce.remote(group_name) for a in actors])
assert (results[0] == cp.ones((10, ), dtype=cp.float32) * world_size).all()
assert (results[1] == cp.ones((10, ), dtype=cp.float32) * world_size).all()
@pytest.mark.parametrize("array_size", [2, 2**5, 2**10, 2**15, 2**20])
def test_allreduce_different_array_size(ray_start_distributed_2_nodes_4_gpus,
array_size):
world_size = 4
actors, _ = create_collective_workers(world_size)
ray.wait([
a.set_buffer.remote(cp.ones(array_size, dtype=cp.float32))
for a in actors
])
results = ray.get([a.do_allreduce.remote() for a in actors])
assert (results[0] == cp.ones(
(array_size, ), dtype=cp.float32) * world_size).all()
assert (results[1] == cp.ones(
(array_size, ), dtype=cp.float32) * world_size).all()
def test_allreduce_destroy(ray_start_distributed_2_nodes_4_gpus,
backend="nccl",
group_name="default"):
world_size = 4
actors, _ = create_collective_workers(world_size)
results = ray.get([a.do_allreduce.remote() for a in actors])
assert (results[0] == cp.ones((10, ), dtype=cp.float32) * world_size).all()
assert (results[1] == cp.ones((10, ), dtype=cp.float32) * world_size).all()
# destroy the group and try do work, should fail
ray.get([a.destroy_group.remote() for a in actors])
with pytest.raises(RuntimeError):
results = ray.get([a.do_allreduce.remote() for a in actors])
# reinit the same group and all reduce
ray.get([
actor.init_group.remote(world_size, i, backend, group_name)
for i, actor in enumerate(actors)
])
results = ray.get([a.do_allreduce.remote() for a in actors])
assert (results[0] == cp.ones(
(10, ), dtype=cp.float32) * world_size * world_size).all()
assert (results[1] == cp.ones(
(10, ), dtype=cp.float32) * world_size * world_size).all()
def test_allreduce_multiple_group(ray_start_distributed_2_nodes_4_gpus,
backend="nccl",
num_groups=5):
world_size = 4
actors, _ = create_collective_workers(world_size)
for group_name in range(1, num_groups):
ray.get([
actor.init_group.remote(world_size, i, backend, str(group_name))
for i, actor in enumerate(actors)
])
for i in range(num_groups):
group_name = "default" if i == 0 else str(i)
results = ray.get([a.do_allreduce.remote(group_name) for a in actors])
assert (results[0] == cp.ones(
(10, ), dtype=cp.float32) * (world_size**(i + 1))).all()
def test_allreduce_different_op(ray_start_distributed_2_nodes_4_gpus):
world_size = 4
actors, _ = create_collective_workers(world_size)
# check product
ray.wait([
a.set_buffer.remote(cp.ones(10, dtype=cp.float32) * (i + 2))
for i, a in enumerate(actors)
])
results = ray.get(
[a.do_allreduce.remote(op=ReduceOp.PRODUCT) for a in actors])
assert (results[0] == cp.ones((10, ), dtype=cp.float32) * 120).all()
assert (results[1] == cp.ones((10, ), dtype=cp.float32) * 120).all()
# check min
ray.wait([
a.set_buffer.remote(cp.ones(10, dtype=cp.float32) * (i + 2))
for i, a in enumerate(actors)
])
results = ray.get([a.do_allreduce.remote(op=ReduceOp.MIN) for a in actors])
assert (results[0] == cp.ones((10, ), dtype=cp.float32) * 2).all()
assert (results[1] == cp.ones((10, ), dtype=cp.float32) * 2).all()
# check max
ray.wait([
a.set_buffer.remote(cp.ones(10, dtype=cp.float32) * (i + 2))
for i, a in enumerate(actors)
])
results = ray.get([a.do_allreduce.remote(op=ReduceOp.MAX) for a in actors])
assert (results[0] == cp.ones((10, ), dtype=cp.float32) * 5).all()
assert (results[1] == cp.ones((10, ), dtype=cp.float32) * 5).all()
@pytest.mark.parametrize("dtype",
[cp.uint8, cp.float16, cp.float32, cp.float64])
def test_allreduce_different_dtype(ray_start_distributed_2_nodes_4_gpus,
dtype):
world_size = 4
actors, _ = create_collective_workers(world_size)
ray.wait([a.set_buffer.remote(cp.ones(10, dtype=dtype)) for a in actors])
results = ray.get([a.do_allreduce.remote() for a in actors])
assert (results[0] == cp.ones((10, ), dtype=dtype) * world_size).all()
assert (results[1] == cp.ones((10, ), dtype=dtype) * world_size).all()
def test_allreduce_torch_cupy(ray_start_distributed_2_nodes_4_gpus):
# import torch
world_size = 4
actors, _ = create_collective_workers(world_size)
ray.wait([actors[1].set_buffer.remote(torch.ones(10, ).cuda())])
results = ray.get([a.do_allreduce.remote() for a in actors])
assert (results[0] == cp.ones((10, )) * world_size).all()
ray.wait([actors[0].set_buffer.remote(torch.ones(10, ))])
ray.wait([actors[1].set_buffer.remote(cp.ones(10, ))])
with pytest.raises(RuntimeError):
results = ray.get([a.do_allreduce.remote() for a in actors])
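# --- Editor's note: hedged usage sketch, not part of the original module. ---
# All tests above rely on the `ray_start_distributed_2_nodes_4_gpus` pytest
# fixture supplied elsewhere in the test suite (a 2-node, 4-GPU Ray cluster
# with NCCL available). Assuming that environment, a typical invocation is:
#     pytest -q test_distributed_allreduce.py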
|
apache-2.0
| -6,473,080,763,864,086,000
| 40.47482
| 79
| 0.608846
| false
| 3.121278
| true
| false
| false
|
emmanuelle/scikits.image
|
skimage/io/_io.py
|
2
|
5788
|
__all__ = ['Image', 'imread', 'imread_collection', 'imsave', 'imshow', 'show',
'push', 'pop']
from skimage.io._plugins import call as call_plugin
from skimage.color import rgb2grey
import numpy as np
try:
import cStringIO as StringIO
except ImportError:
import StringIO
# Shared image queue
_image_stack = []
class Image(np.ndarray):
"""Class representing Image data.
These objects have tags for image metadata and IPython display protocol
methods for image display.
"""
tags = {'filename': '',
'EXIF': {},
'info': {}}
def __new__(cls, arr, **kwargs):
"""Set the image data and tags according to given parameters.
Input:
------
arr : ndarray
Image data.
kwargs : Image tags as keywords
Specified in the form ``tag0=value``, ``tag1=value``.
"""
x = np.asarray(arr).view(cls)
for tag, value in Image.tags.items():
setattr(x, tag, kwargs.get(tag, getattr(arr, tag, value)))
return x
def _repr_png_(self):
return self._repr_image_format('png')
def _repr_jpeg_(self):
return self._repr_image_format('jpeg')
def _repr_image_format(self, format_str):
str_buffer = StringIO.StringIO()
imsave(str_buffer, self, format_str=format_str)
return_str = str_buffer.getvalue()
str_buffer.close()
return return_str
def push(img):
"""Push an image onto the shared image stack.
Parameters
----------
img : ndarray
Image to push.
"""
if not isinstance(img, np.ndarray):
raise ValueError("Can only push ndarrays to the image stack.")
_image_stack.append(img)
def pop():
"""Pop an image from the shared image stack.
Returns
-------
img : ndarray
Image popped from the stack.
"""
return _image_stack.pop()
def imread(fname, as_grey=False, plugin=None, flatten=None,
**plugin_args):
"""Load an image from file.
Parameters
----------
fname : string
Image file name, e.g. ``test.jpg``.
as_grey : bool
If True, convert color images to grey-scale (32-bit floats).
Images that are already in grey-scale format are not converted.
plugin : str
Name of plugin to use (Python Imaging Library by default).
Other Parameters
----------------
flatten : bool
Backward compatible keyword, superseded by `as_grey`.
Returns
-------
img_array : ndarray
The different colour bands/channels are stored in the
third dimension, such that a grey-image is MxN, an
RGB-image MxNx3 and an RGBA-image MxNx4.
Other parameters
----------------
plugin_args : keywords
Passed to the given plugin.
"""
# Backward compatibility
if flatten is not None:
as_grey = flatten
img = call_plugin('imread', fname, plugin=plugin, **plugin_args)
if as_grey and getattr(img, 'ndim', 0) >= 3:
img = rgb2grey(img)
return Image(img)
def imread_collection(load_pattern, conserve_memory=True,
plugin=None, **plugin_args):
"""
Load a collection of images.
Parameters
----------
load_pattern : str or list
List of objects to load. These are usually filenames, but may
vary depending on the currently active plugin. See the docstring
for ``ImageCollection`` for the default behaviour of this parameter.
conserve_memory : bool, optional
If True, never keep more than one in memory at a specific
time. Otherwise, images will be cached once they are loaded.
Returns
-------
ic : ImageCollection
Collection of images.
Other parameters
----------------
plugin_args : keywords
Passed to the given plugin.
"""
return call_plugin('imread_collection', load_pattern, conserve_memory,
plugin=plugin, **plugin_args)
def imsave(fname, arr, plugin=None, **plugin_args):
"""Save an image to file.
Parameters
----------
fname : str
Target filename.
arr : ndarray of shape (M,N) or (M,N,3) or (M,N,4)
Image data.
plugin : str
Name of plugin to use. By default, the different plugins are
tried (starting with the Python Imaging Library) until a suitable
candidate is found.
Other parameters
----------------
plugin_args : keywords
Passed to the given plugin.
"""
return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args)
def imshow(arr, plugin=None, **plugin_args):
"""Display an image.
Parameters
----------
arr : ndarray or str
Image data or name of image file.
plugin : str
Name of plugin to use. By default, the different plugins are
tried (starting with the Python Imaging Library) until a suitable
candidate is found.
Other parameters
----------------
plugin_args : keywords
Passed to the given plugin.
"""
if isinstance(arr, basestring):
arr = call_plugin('imread', arr, plugin=plugin)
return call_plugin('imshow', arr, plugin=plugin, **plugin_args)
def show():
'''Display pending images.
Launch the event loop of the current gui plugin, and display all
pending images, queued via `imshow`. This is required when using
`imshow` from non-interactive scripts.
A call to `show` will block execution of code until all windows
have been closed.
Examples
--------
>>> import skimage.io as io
>>> for i in range(4):
... io.imshow(np.random.random((50, 50)))
>>> io.show()
'''
return call_plugin('_app_show')
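# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Minimal illustration of the functions defined above; the filenames are
# placeholders and the exact behaviour depends on the active IO plugin.
if __name__ == '__main__':
    img = imread('example.png')                 # returns an Image (ndarray subclass)
    grey = imread('example.png', as_grey=True)  # grey-scale via rgb2grey
    push(img)                                   # shared image stack
    imsave('example_copy.png', pop())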
|
bsd-3-clause
| 2,946,779,465,856,008,000
| 24.724444
| 78
| 0.596579
| false
| 4.167027
| false
| false
| false
|
whcacademy/imageDownloader
|
googleImageDownload.py
|
1
|
7660
|
import requests
import os
import re
import time
from selenium import webdriver
import multiprocessing
import sys
from socket import error as SocketError
import errno
import argparse
import imghdr
import uuid
import csv
import codecs
import platform
import downloader
# define default chrome download path
global default_download_path
default_download_path = os.path.join(os.getcwd(), 'download_urls')
if not os.path.exists(default_download_path):
os.mkdir(default_download_path)
global isWindows
if re.search('windows', platform.platform(), re.IGNORECASE):
isWindows = True
else:
isWindows = False
# use selenium to get the list of URLs
def openBrowserRecursively(total, idName, browser):
try:
for i in range(total):
iterator = i * 100
url = r"https://www.google.com/search?q={word}&newwindow=1&biw=300&bih=629&tbm=isch&ijn={times}&start={start}"
try:
browser.get(url.format(word= idName, start=iterator,times = i))
except SocketError as e:
if e.errno != errno.ECONNRESET:
					raise  # re-raise anything that is not a connection reset
pass
time.sleep(1.5) # 1.5 seconds is the tuned time for HKU service not to be monitored and closed
except:
if isWindows:
os.system("taskkill /im chrome.exe /F")
else :
os.system("kill " + str(os.getpid()))
openBrowserRecursively(total, idName, browser)
# basic session setup
def setupSession():
session = requests.Session()
session.header = { 'User-Agent': "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0","Accept-Encoding": "gzip, deflate, sdch"}
return session
class GoogleDownloader():
def __init__(self, nameList, root, size, process, browser):
assert browser != None, "drive cannot be None!"
self.process = process
self.browser = browser
self.nameList = nameList
self.size = size
self.root = root
# main crawling start
def run(self):
		for i in self.nameList:
self.oneID(i)
def oneID(self, name):
wordSearch = ''
subcategory = name.split(' ')
name = name.replace(' ', '_')
wordSearch = subcategory[0]
if len(subcategory[1:]) >= 1:
for pt in subcategory[1:]:
wordSearch += "+" + pt
print (wordSearch.encode('utf-8'))
total = int(self.size / 100)
openBrowserRecursively(total, wordSearch, self.browser)
# after trigger getting the file list, then the file will be
# download but name with f.txt
global default_download_path
filepath = default_download_path
try:
for i in range(total):
iterator = i * 100
filename = os.path.join("results", name +".txt")
newName = name + '_' + str(i) +'.txt'
# here is the hardcode part
# one may change to his or her own default downloading folder
if i == 0:
if "f.txt" in os.listdir(filepath):
print ("change name to be " , newName.encode('utf-8'))
os.rename(os.path.join(filepath,'f.txt'), os.path.join(filepath,newName))
else:
fileSpecial = "f (%d).txt" % i
if fileSpecial in os.listdir(filepath):
print ("change name to be " , newName.encode('utf-8'))
os.rename(os.path.join(filepath,fileSpecial), os.path.join(filepath,newName))
else:
print ("fail to find the file")
except:
print("something bad happen, maybe encountering some repeated names")
os.remove(os.path.join(filepath, 'f.txt'))
return
# after rename and locate the url list, then we conduct the final crawling part
indexList = [i for i in range(1, 101)]
try:
folderName = self.makeFolder(name)
for i in range(total):
newName = name + '_' + str(i) +'.txt'
with codecs.open(os.path.join(filepath,newName),'r', encoding="utf-8") as myfile:
file1 = myfile.read()
results = re.findall(r'"ou":"(.+?)"',file1)
self.process.map(_download,
zip(results, [folderName] * len(results), indexList[:len(results)]))
fileList = os.listdir(folderName)
self.dump_imInfo(folderName, sorted(fileList, key=lambda x: int(x.split('.')[0])), results)
except IOError:
print ("can not find the file called:" , str(newName).encode('utf-8') , "and it may be caused by the bad connection or bad file got from server")
def makeFolder(self, fileName):
try:
if not os.path.exists(os.path.join(self.root, fileName)):
os.mkdir(os.path.join(self.root, fileName))
else:
print('duplicated root name')
except OSError as e:
if e.errno != 17:
raise
else:
pass
return os.path.join(self.root, fileName)
def dump_imInfo(self, folderName, fileList, results):
try:
with open(os.path.join(folderName, 'imInfo.csv'), 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(['img_name', 'uuid', 'url'])
for file in fileList:
index = int(file.split('.')[0])
writer.writerow([index,str(uuid.uuid4().hex),str(results[index-1])])
except:
print('error happens when writing imageInfo, maybe caused by duplicated name')
# function to get one image specified with one url
def _download(args):
url, folderName, index = args
session = setupSession()
try:
# time out is another parameter tuned
# fit for the network about 10Mb
image = session.get(url, timeout = 5)
imageName = str(index)
with open(os.path.join(folderName, imageName),'wb') as fout:
fout.write(image.content)
fileExtension = imghdr.what(os.path.join(folderName, imageName))
if fileExtension is None:
os.remove(os.path.join(folderName, imageName))
else:
newName = imageName + '.' + str(fileExtension)
os.rename(os.path.join(folderName, imageName), os.path.join(folderName, newName))
except Exception as e:
print ("failed to download one pages with url of " + str(url))
# basic function to get the id list
def readFile(filename):
_list=[]
with codecs.open (filename, 'r', encoding='utf-8') as fin:
line = fin.readline()
while line:
_list.append(str(line).rstrip())
line = fin.readline()
return _list
def arg_parse():
parser = argparse.ArgumentParser(description='Argument Parser for google image downloader')
parser.add_argument('--root', help='output file root',
default='results', type=str)
	parser.add_argument('--filename', help='the name of the file which contains the ids',
default='testlist.txt', type=str)
	parser.add_argument('--size', help='number of images per id',
default=100, type=int)
	parser.add_argument('--process', help='number of processes in parallel',
default=100, type=int)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = arg_parse()
start = time.time()
assert args.filename != None, "Name list cannot be None!"
# get all id as type of list of str
nameList = list(set(readFile(args.filename)))
# init processPool and browser driver
processPool = multiprocessing.Pool(args.process)
# init chrome driver with customized default download path
chromeOptions = webdriver.ChromeOptions()
preference = {'download.default_directory' : default_download_path,
'download.prompt_for_download': False}
chromeOptions.add_experimental_option("prefs",preference)
if isWindows:
chromedriver = os.path.join(os.getcwd(),'chromedriver.exe')
else:
chromedriver = os.path.join(os.getcwd(),'chromedriver')
browser = webdriver.Chrome(executable_path=chromedriver, chrome_options=chromeOptions)
# check if the output folder exists or not
if not os.path.exists(args.root):
os.mkdir(args.root)
# construct the downloader instance
gdownloader = GoogleDownloader(nameList = nameList, root = args.root, size = args.size,
process = processPool, browser = browser)
gdownloader.run()
# finish running
end = time.time()
browser.close()
print ('task end, time consumed:', end - start, 'seconds')
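# --- Editor's note: hedged usage sketch, not part of the original script. ---
# Example invocation using the argparse options defined above (file names are
# placeholders; a matching chromedriver binary is expected in the working
# directory, as the script itself assumes):
#     python googleImageDownload.py --filename testlist.txt --root results --size 200 --process 50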
|
mit
| -4,718,019,722,629,552,000
| 32.160173
| 154
| 0.692298
| false
| 3.20368
| false
| false
| false
|
yashwardhan7/PiCapture
|
PiCapture.py
|
1
|
8249
|
#!/usr/bin/python
import os
import sys
import time
import glob
import shutil
import argparse
import datetime
import threading
import subprocess
logOnConsole = False
def log(str):
global logOnConsole
if logOnConsole:
print str
def initializeDir(dirname):
if not os.path.isdir(dirname):
os.makedirs(dirname)
log('Created directory: {0}'.format(dirname))
def renameCapturedFiles(dirname, filePrefix, fileExtension):
capturedFiles = glob.glob('{0}/{1}*{2}'.format(dirname, filePrefix, fileExtension))
for file in capturedFiles:
newFilename = datetime.datetime.fromtimestamp(os.path.getctime(file)).strftime(
'{0}/%H%M%S{1}'.format(dirname, os.path.splitext(file)[1]))
os.rename(file, newFilename)
log('renamed {0} -> {1}'.format(file, newFilename))
def cmpImages(img1, img2):
if not os.path.isfile(img1):
return False
if not os.path.isfile(img2):
return False
# first check if the two images are different in size by a threshold
sz1 = os.stat(img1).st_size
sz2 = os.stat(img2).st_size
s1 = max(sz1,sz2)
s2 = max(1, min(sz1,sz2))
	perc = ((float(s1)/s2) - 1) * 100
if perc > 20:
return False
# next check the result of perceptual diff
try:
cmd = 'perceptualdiff -downsample 3 -colorfactor 0 {0} {1}'.format(img1, img2)
subprocess.check_output(cmd.split(), shell=False)
return True
except subprocess.CalledProcessError:
return False
except OSError:
print 'Error running perceptualdiff. Run apt-get install perceptualdiff.'
return False
def freeDiskSpace(dir):
for i in range(10): # retry few times
st = os.statvfs('/')
bavail = st.f_frsize * st.f_bavail # available disk space in bytes
if bavail < (1024*1024*512): # if available disk space is less than a threshold, free some more
canDelete = [os.path.join(dir, o) for o in sorted(os.listdir(dir)) if os.path.isdir(os.path.join(dir, o))]
if len(canDelete) <= 1:
break
log('freeing disk-space by deleting: {0}'.format(canDelete[0]))
shutil.rmtree(canDelete[0])
else:
break
def killProc(proc):
if proc:
proc.terminate()
def encodeTimelapseVideo(dir, fps):
# create symbolic link for *.jpg
# this is to workaround avconv issue with handling input file list
images = sorted(glob.glob('{0}/*.jpg'.format(dir)))
i=0
for img in images:
slnk = '{0}/img{1:0>6}.jpg'.format(dir, i)
log('symlink {0} --> {1}'.format(img, slnk))
try:
os.symlink(os.path.abspath(img), os.path.abspath(slnk))
except OSError:
pass
i+=1
# run avconv
cmd = 'avconv -r {0} -i {1}/img%06d.jpg -vcodec libx264 -crf 26 -g 15 -vf scale=576:352 -y {1}/vid.mp4'.format(fps, dir)
try:
log('Encoding video {0}'.format(dir))
subprocess.check_call(cmd.split(), shell=False)
except subprocess.CalledProcessError:
print 'Encoding failed.'
except OSError:
print 'Error running avconv. Run apt-get install libav-tools.'
# remove symlinks
slnks=glob.glob('{0}/img*.jpg'.format(dir))
for slnk in slnks:
log('remove symlink {0}'.format(slnk))
try:
os.remove(slnk)
except OSError:
pass
runBGThread=False
def bgThread(timeLapse, dir, imgPrefix, imgExt):
global runBGThread
log('Starting bgThread {0}'.format(dir))
while runBGThread:
try:
renameCapturedFiles(dir, imgPrefix, imgExt)
# process (erase similar images) recently captured images (.jpeg)
images = sorted(glob.glob('{0}/*{1}'.format(dir, imgExt)))
cImages = len(images)
if cImages <= 1:
time.sleep(timeLapse*4)
# if no more images were captured even after sleeping, exit this thread
if len(sorted(glob.glob('{0}/*{1}'.format(dir, imgExt)))) == cImages:
break
continue
prevImg = None
for img in images:
if not runBGThread:
renameCapturedFiles(dir, imgPrefix, imgExt)
break
if prevImg:
if cmpImages(prevImg, img):
# img is similar to prevImg, delete prevImg
os.remove(prevImg)
log('deleting dup: {0}'.format(prevImg))
else:
# prevImg is different than img, keep it and
# rename to .jpg so we dont process it again in next outer loop cycle
os.rename(prevImg, '{0}.jpg'.format(os.path.splitext(prevImg)[0]))
prevImg = img
except Exception, ex:
print "Exception in bgThread: {0} - {1}".format(type(ex).__name__, ex)
encodeTimelapseVideo(dir, 7)
log('Ending bgThread {0}'.format(dir))
# end bgThread
noirOptimization = '-ex night -drc high'
flipImage = '-hf -vf'
def captureImages(storageRoot, timeLapse=15):
global runBGThread
threadObj = None
bgThreadDir = None
filePrefix = 'img'
fileExt = '.jpeg'
while True:
try:
freeDiskSpace(storageRoot) # free disk space before starting capture
dt = datetime.datetime.now()
timeLeft = 86400 - (dt.hour*3600 + dt.minute*60 + dt.second)
runDuration = 600 # 10 min
if timeLeft < runDuration:
runDuration = timeLeft
			# capture at least 1 shot in a run
if timeLapse > runDuration:
timeLapse = runDuration
# start a run
currentDirname = '{0}/{1}'.format(storageRoot, dt.date().strftime('%Y%m%d'))
initializeDir(currentDirname)
cmdline = 'raspistill -w 1280 -h 960 --thumb none --exif none -n -q 50 -tl {0} -t {1} -o {2}'.format(
timeLapse*1000, runDuration*1000, '{0}/{1}%05d{2}'.format(currentDirname, filePrefix, fileExt))
proc = subprocess.Popen(cmdline.split() + noirOptimization.split(), shell=False)
log('Capturing images (pid={0}) to {1}'.format(proc.pid, currentDirname))
if (currentDirname != bgThreadDir) or (threadObj is None) or (not threadObj.isAlive()):
# if we are capturing in a different directory than bgThreadDir, start a new thread
# this thread will auto-exit when there are no new images being captured for currentDirname
runBGThread = True
bgThreadDir = currentDirname
threadObj = threading.Thread(target=bgThread, args=[timeLapse, bgThreadDir, filePrefix, fileExt])
threadObj.start()
time.sleep(runDuration)
killProc(proc)
except KeyboardInterrupt:
killProc(proc)
runBGThread = False # signal all bgthreads to exit
print 'waiting for background worker threads to exit'
return
def captureVideo(storageRoot, captureSpeed, videoStabilization):
filePrefix = 'vid'
fileExt = '.h264'
while True:
try:
freeDiskSpace(storageRoot) # free disk space before starting capture
dt = datetime.datetime.now()
runDuration = 86400 - (dt.hour*3600 + dt.minute*60 + dt.second)
# start a run
currentDirname = '{0}/{1}'.format(storageRoot, dt.date().strftime('%Y%m%d'))
initializeDir(currentDirname)
filename = '{0}/{1}00{2}'.format(currentDirname, filePrefix, fileExt)
cmdline = 'raspivid -w 800 -h 600 -qp 25 -fps {0} -t {1} -o {2}'.format(
30/captureSpeed, runDuration*1000, filename)
if videoStabilization:
cmdline += ' -vs'
proc = subprocess.Popen(cmdline.split() + noirOptimization.split(), shell=False)
log('Capturing video (pid={0}) to {1} @ {2}x'.format(proc.pid, filename, captureSpeed))
time.sleep(runDuration)
killProc(proc)
renameCapturedFiles(currentDirname, filePrefix, fileExt)
except KeyboardInterrupt:
killProc(proc)
renameCapturedFiles(currentDirname, filePrefix, fileExt)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='RapberryPi timelapse/video capture helper. Requires perceptualdiff which is used to cleanup duplicate captures in a timelapse.'
)
parser.add_argument('-d', metavar='directory', default='./cam', help='Directory where captured files are stored. Default: ./cam')
parser.add_argument('-l', action='store_true', default=False, help='Log information on console')
parser.add_argument('-t', metavar='seconds', type=int, help='Start timelapse capture with given duration in seconds')
parser.add_argument('-v', action='store_true', help='Start video capture')
parser.add_argument('-vf', metavar='speed_factor', default=2, type=int, help='Changes captured video speed by given factor. Default: 2')
parser.add_argument('-vs', action='store_true', default=False, help='Turn on video stabilization')
args = parser.parse_args()
logOnConsole = args.l
storageRoot = args.d
if args.v:
captureVideo(storageRoot, args.vf, args.vs)
elif args.t:
captureImages(storageRoot, args.t)
else:
parser.print_help()
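# --- Editor's note: hedged usage sketch, not part of the original script. ---
# Example invocations using the argparse options defined above (raspistill,
# raspivid, perceptualdiff and libav-tools are assumed to be installed):
#     python PiCapture.py -t 15 -d ./cam -l    # timelapse, one frame every 15 s
#     python PiCapture.py -v -vf 2 -vs         # 2x-speed video with stabilization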
|
mit
| 1,738,208,288,995,955,200
| 35.662222
| 142
| 0.697903
| false
| 3.089513
| false
| false
| false
|
RuthAngus/kalesalad
|
code/kalesalad.py
|
1
|
6367
|
# Uses acf method to measure rotation periods for downloaded everest light
# curves.
import numpy as np
import matplotlib.pyplot as plt
import pyfits
from Kepler_ACF import corr_run
import os
from simple_acf import simple_acf
import sys
from multiprocessing import Pool
import pandas as pd
import glob
import astropy.stats as sps
import rotation as ro
import datetime
plotpar = {'axes.labelsize': 20,
'text.fontsize': 20,
'legend.fontsize': 20,
'xtick.labelsize': 20,
'ytick.labelsize': 20,
'text.usetex': True}
plt.rcParams.update(plotpar)
def sigma_clip(y, nsigma=3, npoints=100):
"""
Sigma clipping for light curves.
"""
new_y = []
x = np.linspace(0, 100, len(y))
for i in range(int(len(y)/npoints)):
# section = y[i:i + npoints]
section = y[i*npoints:(i + 1)*npoints]
med, std = np.median(section), np.std(section)
mask = (med - nsigma*std < section) * (section < med + nsigma*std)
new_y.append(section[mask])
last_bit = y[(i+1)*npoints:]
med, std = np.median(last_bit), np.std(last_bit)
mask = (med - nsigma*std < last_bit) * (last_bit < med + nsigma*std)
new_y.append(last_bit[mask])
filtered_y = np.array([i for j in new_y for i in j])
return filtered_y
def process_data(file, c):
"""
Read the lightcurve from the fits format and sigma clip.
    file (str): path to the everest light curve fits file.
    c (str): campaign number, e.g. "1".
"""
with pyfits.open(file) as hdulist:
time, flux = hdulist[1].data["TIME"], hdulist[1].data["FLUX"]
# out = hdulist[1].data["OUTLIER"]
m = np.isfinite(time) * np.isfinite(flux) #* (out < 1)
x, med = time[m], np.median(flux[m])
y = flux[m]/med - 1 # median normalise
yerr = np.ones_like(y) * 1e-5
if c == "1":
cut = 100
x, y, yerr = x[cut:], y[cut:], yerr[cut:]
# Sigma clip
filtered_y = sigma_clip(y)
m = np.nonzero(np.in1d(y, filtered_y))[0]
return x[m], y[m], yerr[m]
def run_acf(c, epic, clobber=False, plot=True):
"""
Run the ACF on a light curve in the specified campaign.
FOR PARALLEL RUNS.
c (str): campaign, e.g. "c01".
fn (str): fits file name for a target in campaign c.
"""
#period, acf_smooth, lags, rvar, peaks, dips, leftdips, rightdips, \
#bigpeaks = simple_acf(x, y)
v = "2.0"
filen = "hlsp_everest_k2_llc_{0}-c{1}_kepler_v{2}_lc.fits"\
.format(epic, c.zfill(2), v)
file = "data/c{0}/{1}".format(c.zfill(2), filen)
# Load time and flux
if not os.path.exists(file):
print(file, "file not found")
return None
try:
x, y, yerr = process_data(file, c=c)
except (IOError, ValueError):
print("Bad file", file)
return None
# compute the acf
period, acf_smooth, lags, rvar, peaks = simple_acf(x, y)
# make a plot
if plot:
plt.clf()
plt.subplot(2, 1, 1)
plt.plot(x-x[0], y, "k.")
plt.xlim(0, max(lags))
plt.xlabel("$\mathrm{Time~(days)}$")
plt.ylabel("$\mathrm{Normalised~flux}$")
plt.subplot(2, 1, 2)
plt.plot(lags, acf_smooth, "k")
plt.xlabel("$\mathrm{lags~(days)}$")
plt.ylabel("$\mathrm{ACF}$")
plt.axvline(period, color="m")
plt.xlim(min(lags), max(lags))
plt.subplots_adjust(left=.16, bottom=.12, hspace=.4)
plt.savefig("acfs/{}_acf".format(epic))
# Measure LS period
star = ro.prot(kepid=epic, x=x, y=y, yerr=yerr)
pgram_period = star.pgram_ps(filter_period=10, plot=True, cutoff=30,
clobber=clobber)
return epic, period
def run_kalesalad(c, N, clobber=False):
"""
Measure all rotation periods in a campaign - non parallel (for tests).
"""
todays_date = datetime.date.today()
results_file = "c{0}_periods_{1}.txt".format(c, todays_date)
assert not os.path.exists(results_file), "Old data file found, delete " \
"before proceeding"
with open(results_file, "a") as f:
f.write("{0} {1} {2} {3}\n".format("epic_id", "ACF_period",
"pgram_period",
"pgram_period_err"))
# df = pd.read_csv("c{}_targets.txt".format(c.zfill(2)), dtype=str)
df = pd.read_csv("tgas_epic_dwarfs.csv")
epic_ids = df.epic_number[df.k2_campaign_str=="{}".format(int(c))]
acf_periods, pgram_periods, pgram_period_errs, epics = [np.zeros(N) for i
in range(4)]
for i, epic in enumerate(epic_ids[:N]):
v = "2.0"
filen = "hlsp_everest_k2_llc_{0}-c{1}_kepler_v{2}_lc.fits"\
.format(epic, c.zfill(2), v)
file = "data/c{0}/{1}".format(c.zfill(2), filen)
# Load time and flux
if os.path.exists(file):
try:
x, y, yerr = process_data(file, c=c)
except (IOError, ValueError):
print("Bad file", file)
return None
# Measure ACF period
_, acf_period = run_acf(c, epic, clobber=clobber, plot=True)
# Measure LS period
star = ro.prot(kepid=epic, x=x, y=y, yerr=yerr)
pgram_period = star.pgram_ps(plot=True)
with open(results_file, "a") as f:
f.write("{0} {1} {2} {3}\n".format(epic, acf_period,
pgram_period[0],
pgram_period[1]))
else:
print(file, "file not found")
if __name__ == "__main__":
from functools import partial
c = str(sys.argv[1])
# open("c{0}_periods.txt".format(c), "w")
run_kalesalad(c, 196, clobber=True)
# df = pd.read_csv("c{}_targets.txt".format(c.zfill(2)), dtype=str)
# fns = df["epid"].values
# f = partial(run_acf, c)
# pool = Pool()
# for val in pool.map(f, fns):
# if val is None:
# continue
# epic, acf_period, epic_period = val
# # append data to file
# with open("c{0}_periods.txt".format(c), "a") as f:
# f.write("{0} {1} \n".format(epic, period))
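# --- Editor's note: hedged usage sketch, not part of the original script. ---
# The __main__ block above takes the campaign number as the first command-line
# argument; the input catalogue ("tgas_epic_dwarfs.csv") and the data/cXX light
# curves are assumed to already be in place:
#     python kalesalad.py 1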
|
mit
| 9,121,794,386,829,699,000
| 31.484694
| 77
| 0.54594
| false
| 3.031905
| false
| false
| false
|