text stringlengths 4 1.02M | meta dict |
|---|---|
import os
import shutil
import sys
import tempfile
import django
# Paths: this file's own directory is the test root; the runnable test
# apps live under the 'xtests' subdirectory.
TEST_ROOT = os.path.realpath(os.path.dirname(__file__))
RUNTESTS_DIR = os.path.join(TEST_ROOT, 'xtests')
# Make the project root and the test-app directory importable.
sys.path.insert(0, os.path.join(TEST_ROOT, os.pardir))
sys.path.insert(0, RUNTESTS_DIR)
TEST_TEMPLATE_DIR = 'templates'
# Scratch directory for the test run; exported via the environment so the
# settings module can find it, and removed again in teardown().
TEMP_DIR = tempfile.mkdtemp(prefix='django_')
os.environ['DJANGO_TEST_TEMP_DIR'] = TEMP_DIR
# Apps installed for every run, regardless of which test labels were asked for.
ALWAYS_INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'nadmin',
    'crispy_forms',
]
def get_test_modules():
    """Return the names of test-app directories found under RUNTESTS_DIR.

    Entries starting with '__init__', '.' or 'sql', and anything that is
    not a directory, are excluded.
    """
    def _is_test_module(entry):
        # Skip package markers, hidden entries and SQL fixtures; keep dirs only.
        if entry.startswith(('__init__', '.', 'sql')):
            return False
        return os.path.isdir(os.path.join(RUNTESTS_DIR, entry))

    return [entry for entry in os.listdir(RUNTESTS_DIR) if _is_test_module(entry)]
def setup(verbosity, test_labels):
    """Redirect Django settings for the test run and import the test apps.

    Returns a dict holding the original values of every overridden setting
    so teardown() can restore them afterwards.
    """
    from django.conf import settings
    # Snapshot of every setting we are about to override.
    state = {
        'INSTALLED_APPS': settings.INSTALLED_APPS,
        'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
        'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
        'USE_I18N': settings.USE_I18N,
        'LOGIN_URL': settings.LOGIN_URL,
        'LANGUAGE_CODE': settings.LANGUAGE_CODE,
        'MIDDLEWARE_CLASSES': settings.MIDDLEWARE_CLASSES,
        'STATIC_URL': settings.STATIC_URL,
        'STATIC_ROOT': settings.STATIC_ROOT,
    }
    # Redirect some settings for the duration of these tests.
    settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
    settings.ROOT_URLCONF = 'urls'
    settings.STATIC_URL = '/static/'
    settings.STATIC_ROOT = os.path.join(TEMP_DIR, 'static')
    settings.TEMPLATE_DIRS = (os.path.join(RUNTESTS_DIR, TEST_TEMPLATE_DIR),)
    settings.USE_I18N = True
    settings.LANGUAGE_CODE = 'en'
    settings.MIDDLEWARE_CLASSES = (
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.middleware.common.CommonMiddleware',
    )
    settings.SITE_ID = 1
    # For testing comment-utils, we require the MANAGERS attribute
    # to be set, so that a test email is sent out which we catch
    # in our tests.
    settings.MANAGERS = ("admin@nadmin.io",)
    # Load all the ALWAYS_INSTALLED_APPS.
    # (This import statement is intentionally delayed until after we
    # access settings because of the USE_I18N dependency.)
    django.setup()
    # NOTE(review): django.db.models.loading was removed in modern Django;
    # this import pins the file to older Django versions — confirm.
    from django.db.models.loading import get_apps, load_app
    get_apps()
    # Load all the test model apps.
    test_labels_set = set([label.split('.')[0] for label in test_labels])
    test_modules = get_test_modules()
    for module_name in test_modules:
        module_label = module_name
        # if the module was named on the command line, or
        # no modules were named (i.e., run all), import
        # this module and add it to the list to test.
        if not test_labels or module_name in test_labels_set:
            if verbosity >= 2:
                print "Importing application %s" % module_name
            mod = load_app(module_label)
            if mod:
                if module_label not in settings.INSTALLED_APPS:
                    settings.INSTALLED_APPS.append(module_label)
    return state
def teardown(state):
    """Remove the temporary directory and restore the settings saved by setup()."""
    from django.conf import settings
    # Removing the temporary TEMP_DIR. Ensure we pass in unicode
    # so that it will successfully remove temp trees containing
    # non-ASCII filenames on Windows. (We're assuming the temp dir
    # name itself does not contain non-ASCII characters.)
    # NOTE(review): unicode() is Python-2-only, consistent with the py2
    # print statements elsewhere in this file.
    shutil.rmtree(unicode(TEMP_DIR))
    # Restore the old settings.
    for key, value in state.items():
        setattr(settings, key, value)
def django_tests(verbosity, interactive, failfast, test_labels):
    """Run the requested test labels (or all discovered modules) and
    return the failure count reported by the Django test runner.
    """
    from django.conf import settings
    state = setup(verbosity, test_labels)
    extra_tests = []
    # Run the test suite, including the extra validation tests.
    from django.test.utils import get_runner
    # Fall back to the stock runner when the settings module names none.
    if not hasattr(settings, 'TEST_RUNNER'):
        settings.TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
    TestRunner = get_runner(settings)
    test_runner = TestRunner(verbosity=verbosity, interactive=interactive,
        failfast=failfast)
    failures = test_runner.run_tests(test_labels or get_test_modules(), extra_tests=extra_tests)
    # Always restore settings and clean up the temp dir, even after failures.
    teardown(state)
    return failures
if __name__ == "__main__":
    from optparse import OptionParser
    usage = "%prog [options] [module module module ...]"
    parser = OptionParser(usage=usage)
    parser.add_option(
        '-v','--verbosity', action='store', dest='verbosity', default='1',
        type='choice', choices=['0', '1', '2', '3'],
        help='Verbosity level; 0=minimal output, 1=normal output, 2=all '
            'output')
    parser.add_option(
        '--noinput', action='store_false', dest='interactive', default=True,
        help='Tells Django to NOT prompt the user for input of any kind.')
    parser.add_option(
        '--failfast', action='store_true', dest='failfast', default=False,
        help='Tells Django to stop running the test suite after first failed '
            'test.')
    parser.add_option(
        '--settings',
        help='Python path to settings module, e.g. "myproject.settings". If '
            'this isn\'t provided, the DJANGO_SETTINGS_MODULE environment '
            'variable will be used.')
    # NOTE(review): the trailing comma after the closing parenthesis below
    # turns this statement into a one-element tuple; harmless but probably
    # unintended.
    parser.add_option(
        '--liveserver', action='store', dest='liveserver', default=None,
        help='Overrides the default address where the live server (used with '
            'LiveServerTestCase) is expected to run from. The default value '
            'is localhost:8081.'),
    options, args = parser.parse_args()
    # Resolve the settings module: CLI flag wins, then a local 'settings'
    # fallback, otherwise whatever DJANGO_SETTINGS_MODULE already holds.
    if options.settings:
        os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
    elif "DJANGO_SETTINGS_MODULE" not in os.environ:
        os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
    else:
        options.settings = os.environ['DJANGO_SETTINGS_MODULE']
    if options.liveserver is not None:
        os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver
    failures = django_tests(int(options.verbosity), options.interactive,
                            options.failfast, args)
    # Non-zero exit status when any test failed, so CI can detect it.
    if failures:
        sys.exit(bool(failures))
| {
"content_hash": "00453c70a930e120cdda5dc2e561138c",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 96,
"avg_line_length": 37.741176470588236,
"alnum_prop": 0.6550810473815462,
"repo_name": "A425/django-nadmin",
"id": "a27ffd1209932d5a7eb44490ccbf8ee2f7ddba12",
"size": "6438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/runtests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23733"
},
{
"name": "HTML",
"bytes": "95746"
},
{
"name": "JavaScript",
"bytes": "66338"
},
{
"name": "Python",
"bytes": "413023"
}
],
"symlink_target": ""
} |
from historia.country.models.country import Country
from historia.country.models.government import Government
from historia.country.models.province import Province
| {
"content_hash": "b107e66550244ea0e7141c055ea7c937",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 57,
"avg_line_length": 54.666666666666664,
"alnum_prop": 0.8719512195121951,
"repo_name": "eranimo/historia",
"id": "a4fcf5bc9ec23eee4b33cf805ba7df58c27080e9",
"size": "164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "historia/country/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133779"
},
{
"name": "Shell",
"bytes": "490"
}
],
"symlink_target": ""
} |
import dash
import pytest
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from dash import html
from dash.dash_table import DataTable
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import pandas as pd
# Remote CSV fixture (~26k rows) used to populate the test tables.
url = "https://github.com/plotly/datasets/raw/master/" "26k-consumer-complaints.csv"
# NOTE(review): network fetch happens at import time — confirm that is
# acceptable for this test module's runner.
rawDf = pd.read_csv(url)
df = rawDf.to_dict("records")
def get_app():
    """Build the Dash app under test: four DataTables with different
    copy/paste-relevant configurations plus a callback that marks edited
    rows on table "table".
    """
    app = dash.Dash(__name__)
    app.layout = html.Div(
        [
            # Main table: 250 rows, 'Complaint ID' column is hideable.
            DataTable(
                id="table",
                data=df[0:250],
                columns=[
                    {"name": i, "id": i, "hideable": i == "Complaint ID"}
                    for i in rawDf.columns
                ],
                editable=True,
                sort_action="native",
                include_headers_on_copy_paste=True,
            ),
            # Paste target used by the cross-table tests.
            DataTable(
                id="table2",
                data=df[0:10],
                columns=[
                    {"name": i, "id": i, "deletable": True} for i in rawDf.columns
                ],
                editable=True,
                sort_action="native",
                include_headers_on_copy_paste=True,
            ),
            # Table with cell selection disabled (native text selection only).
            DataTable(
                id="table3",
                data=df[0:10],
                columns=[
                    {"name": i, "id": i, "deletable": True} for i in rawDf.columns
                ],
                cell_selectable=False,
                sort_action="native",
            ),
            # Small table exercising embedded double quotes and newlines.
            DataTable(
                id="table4",
                data=[
                    {"string": 'a""b', "int": 10},
                    {"string": 'hello\n""hi', "int": 11},
                ],
                columns=[
                    {"name": "string", "id": "string"},
                    {"name": "int", "id": "int"},
                ],
                editable=True,
                sort_action="native",
                include_headers_on_copy_paste=True,
            ),
        ]
    )
    @app.callback(
        Output("table", "data"),
        [Input("table", "data_timestamp")],
        [State("table", "data"), State("table", "data_previous")],
    )
    # pylint: disable=unused-argument
    def update_data(timestamp, current, previous):
        # pylint: enable=unused-argument
        # Sets 'Complaint ID' to "MODIFIED" on every row whose 'Unnamed: 0'
        # value changed since the previous data snapshot.
        if timestamp is None or current is None or previous is None:
            raise PreventUpdate
        modified = False
        if len(current) == len(previous):
            for (i, datum) in enumerate(current):
                previous_datum = previous[i]
                if datum["Unnamed: 0"] != previous_datum["Unnamed: 0"]:
                    datum["Complaint ID"] = "MODIFIED"
                    modified = True
        if modified:
            return current
        else:
            raise PreventUpdate
    return app
@pytest.mark.skip(
    reason="Prop `data_previous` is not correctly updated with copy+paste"
)
def test_tbcp001_copy_paste_callback(test):
    """Pasting a single cell should trigger the update callback, which
    marks the edited row's 'Complaint ID' as MODIFIED."""
    test.start_server(get_app())
    target = test.table("table")
    target.cell(0, 0).click()
    test.copy()
    target.cell(1, 0).click()
    test.paste()
    assert target.cell(1, 0).get_text() == "0"
    assert target.cell(1, 1).get_text() == "MODIFIED"
    assert test.get_log_errors() == []
@pytest.mark.skip(
    reason="Prop `data_previous` is not correctly updated with copy+paste"
)
def test_tbcp002_sorted_copy_paste_callback(test):
    """Same as tbcp001, but after sorting the table by its third column;
    copy/paste must act on the displayed (sorted) rows."""
    test.start_server(get_app())
    target = test.table("table")
    target.column(rawDf.columns[2]).sort()
    assert target.cell(0, 0).get_text() == "11"
    target.cell(0, 0).click()
    test.copy()
    target.cell(1, 0).click()
    test.paste()
    assert target.cell(1, 0).get_text() == "11"
    assert target.cell(1, 1).get_text() == "MODIFIED"
    # Second paste into the next row must also flow through the callback.
    target.cell(1, 1).click()
    test.copy()
    target.cell(2, 1).click()
    test.paste()
    assert target.cell(1, 0).get_text() == "11"
    assert target.cell(2, 1).get_text() == "MODIFIED"
    assert test.get_log_errors() == []
@pytest.mark.parametrize("mouse_navigation", [True, False])
@pytest.mark.skip(
    reason="Prop `data_previous` is not correctly updated with copy+paste"
)
def test_tbcp003_copy_multiple_rows(test, mouse_navigation):
    """Copy three rows (selected either by shift-click or shift+arrow keys)
    and paste them below; each pasted row must be marked MODIFIED."""
    test.start_server(get_app())
    target = test.table("table")
    if mouse_navigation:
        with test.hold(Keys.SHIFT):
            target.cell(0, 0).click()
            target.cell(2, 0).click()
    else:
        target.cell(0, 0).click()
        with test.hold(Keys.SHIFT):
            test.send_keys(Keys.ARROW_DOWN + Keys.ARROW_DOWN)
    test.copy()
    target.cell(3, 0).click()
    test.paste()
    for i in range(3):
        assert target.cell(i + 3, 0).get_text() == target.cell(i, 0).get_text()
        assert target.cell(i + 3, 1).get_text() == "MODIFIED"
    assert test.get_log_errors() == []
def test_tbcp004_copy_9_and_10(test):
    """Copy rows 9-10 of table (selected with SHIFT+DOWN) into table2."""
    test.start_server(get_app())
    src = test.table("table")
    dst = test.table("table2")
    # Select rows 9 and 10 in column 0: click row 9, then SHIFT+DOWN.
    src.cell(9, 0).click()
    with test.hold(Keys.SHIFT):
        ActionChains(test.driver).send_keys(Keys.DOWN).perform()
    test.copy()
    dst.cell(0, 0).click()
    test.paste()
    # Only column 0 was selected; both pasted rows must match the source.
    for offset in (0, 1):
        assert dst.cell(offset, 0).get_text() == src.cell(offset + 9, 0).get_text()
    assert test.get_log_errors() == []
@pytest.mark.skip(
    reason="Prop `data_previous` is not correctly updated with copy+paste"
)
def test_tbcp005_copy_multiple_rows_and_columns(test):
    """Copy a 3x2 block (rows 0-2, cols 1-2) and paste it three rows down."""
    test.start_server(get_app())
    target = test.table("table")
    target.cell(0, 1).click()
    with test.hold(Keys.SHIFT):
        target.cell(2, 2).click()
    test.copy()
    target.cell(3, 1).click()
    test.paste()
    for row in range(3):
        for col in range(1, 3):
            assert (
                target.cell(row + 3, col).get_text() == target.cell(row, col).get_text()
            )
    assert test.get_log_errors() == []
@pytest.mark.skip(
    reason="Prop `data_previous` is not correctly updated with copy+paste"
)
def test_tbcp006_copy_paste_between_tables(test):
    """Copy a 4x4 block from table and paste it into table2."""
    test.start_server(get_app())
    source = test.table("table")
    target = test.table("table2")
    source.cell(10, 0).click()
    with test.hold(Keys.SHIFT):
        source.cell(13, 3).click()
    test.copy()
    target.cell(0, 0).click()
    test.paste()
    for row in range(4):
        for col in range(4):
            assert (
                source.cell(row + 10, col).get_text()
                == target.cell(row, col).get_text()
            )
    assert test.get_log_errors() == []
@pytest.mark.skip(
    reason="Prop `data_previous` is not correctly updated with copy+paste"
)
def test_tbcp007_copy_paste_with_hidden_column(test):
    """With 'Complaint ID' hidden, copied cells must map onto the visible
    columns of the paste location (hence the col + 1 offset)."""
    test.start_server(get_app())
    target = test.table("table")
    target.column("Complaint ID").hide()
    target.cell(0, 0).click()
    with test.hold(Keys.SHIFT):
        target.cell(2, 2).click()
    test.copy()
    target.cell(3, 1).click()
    test.paste()
    for row in range(3):
        for col in range(3):
            assert (
                target.cell(row, col).get_text()
                == target.cell(row + 3, col + 1).get_text()
            )
    assert test.get_log_errors() == []
@pytest.mark.skip(
    reason="Prop `data_previous` is not correctly updated with copy+paste"
)
def test_tbcp008_copy_paste_between_tables_with_hidden_columns(test):
    """Copy a 4x3 block within table while a column is hidden; the paste
    must reproduce the copied visible cells."""
    test.start_server(get_app())
    target = test.table("table")
    target.column("Complaint ID").hide()
    target.cell(10, 0).click()
    with test.hold(Keys.SHIFT):
        target.cell(13, 2).click()
    test.copy()
    target.cell(0, 0).click()
    test.paste()
    for row in range(4):
        for col in range(3):
            assert (
                target.cell(row + 10, col).get_text()
                == target.cell(row, col).get_text()
            )
    assert test.get_log_errors() == []
def test_tbcp009_copy_9_and_10_click(test):
    """Copy rows 9-10 of table (selected with SHIFT+click) into table2."""
    test.start_server(get_app())
    src = test.table("table")
    dst = test.table("table2")
    # Select rows 9 and 10 in column 0 by shift-clicking the second row.
    src.cell(9, 0).click()
    with test.hold(Keys.SHIFT):
        src.cell(10, 0).click()
    test.copy()
    dst.cell(0, 0).click()
    test.paste()
    # Only column 0 was selected; both pasted rows must match the source.
    for offset in (0, 1):
        assert dst.cell(offset, 0).get_text() == src.cell(offset + 9, 0).get_text()
    assert test.get_log_errors() == []
def test_tbcp010_copy_from_unselectable_cells_table(test):
    """Text copied via native browser selection from a table with
    cell_selectable=False can still be pasted into another table."""
    test.start_server(get_app())
    source = test.table("table3")  # this table has cell_selectable=False
    target = test.table("table2")
    # double click cell to (natively) mark the contained text
    source.cell(2, 2).double_click()
    assert source.cell(2, 2).get_text() == test.get_selected_text()
    # copy the source text to clipboard using CTRL+C or COMMAND+C
    test.copy()
    # assert the target cell value is different before paste
    target.cell(1, 1).click()
    assert target.cell(1, 1).get_text() != source.cell(2, 2).get_text()
    # assert the target cell value has changed to the pasted value
    test.paste()
    assert target.cell(1, 1).get_text() == source.cell(2, 2).get_text()
    assert test.get_log_errors() == []
def test_tbcp011_copy_double_quotes(test):
    """Cell values containing embedded double quotes survive copy+paste
    between tables (row 0 of table4 holds 'a""b')."""
    test.start_server(get_app())
    source = test.table("table4")
    target = test.table("table2")
    source.cell(0, 0).click()
    with test.hold(Keys.SHIFT):
        source.cell(0, 1).click()
    test.copy()
    target.cell(0, 0).click()
    test.paste()
    for row in range(1):
        for col in range(2):
            assert target.cell(row, col).get_text() == source.cell(row, col).get_text()
    assert test.get_log_errors() == []
def test_tbcp011_copy_multiline(test):
    """Cell values containing newlines survive copy+paste between tables
    (row 1 of table4 holds a value with an embedded '\\n').

    NOTE(review): this test reuses the tbcp011 id of the previous test;
    probably meant to be tbcp012.
    """
    test.start_server(get_app())
    source = test.table("table4")
    target = test.table("table2")
    source.cell(1, 0).click()
    with test.hold(Keys.SHIFT):
        source.cell(1, 1).click()
    test.copy()
    target.cell(1, 0).click()
    test.paste()
    for row in range(1, 2):
        for col in range(2):
            assert target.cell(row, col).get_text() == source.cell(row, col).get_text()
    assert test.get_log_errors() == []
| {
"content_hash": "fd2c74fb721ace55c53d7b038b860ce2",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 88,
"avg_line_length": 26.535714285714285,
"alnum_prop": 0.5579696212266871,
"repo_name": "plotly/dash",
"id": "10076cc97ea9faf6c81e9e867abf7ee40ac2ac09",
"size": "10402",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "components/dash-table/tests/selenium/test_basic_copy_paste.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17191"
},
{
"name": "HTML",
"bytes": "1729"
},
{
"name": "JavaScript",
"bytes": "638735"
},
{
"name": "Less",
"bytes": "22320"
},
{
"name": "Python",
"bytes": "1304969"
},
{
"name": "Shell",
"bytes": "224"
},
{
"name": "TypeScript",
"bytes": "840257"
}
],
"symlink_target": ""
} |
import warnings
import functools
def deprecated(func):
    """Decorator that marks *func* as deprecated.

    Each call to the wrapped function emits a DeprecationWarning that
    points at the decorated function's definition site, then delegates
    to the original function unchanged.
    """
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        # func.__code__ replaces the Python-2-only func.func_code alias;
        # __code__ exists on Python 2.6+ and all Python 3 versions.
        code = func.__code__
        warnings.warn_explicit(
            "Call to deprecated function %(funcname)s." % {
                'funcname': func.__name__,
            },
            category=DeprecationWarning,
            filename=code.co_filename,
            # Point at the 'def' line, past the decorator line.
            lineno=code.co_firstlineno + 1
        )
        return func(*args, **kwargs)
    return new_func
## Usage examples ##
@deprecated
def my_func():
    pass
@deprecated
def my_func2():
    pass
# Python 2 print statements: each call first emits a DeprecationWarning,
# then prints the return value (None).
print my_func()
print my_func2()
"content_hash": "2801f8645f4ee330bcd4dcbc591ef232",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 62,
"avg_line_length": 22.58823529411765,
"alnum_prop": 0.6106770833333334,
"repo_name": "t10471/python",
"id": "fe7c30e219ccc2239e09233a3110bbf9c9a7574a",
"size": "793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "practice/src/decorator/def/deprecation_warnings2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "592"
},
{
"name": "Python",
"bytes": "243645"
},
{
"name": "Shell",
"bytes": "350"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
# import frappe
import unittest
class TestWorkspace(unittest.TestCase):
    """Placeholder test case for the Workspace doctype; no tests yet."""
    pass
| {
"content_hash": "8563e6b859958fa3b7e52defbd580c22",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 17.142857142857142,
"alnum_prop": 0.7916666666666666,
"repo_name": "saurabh6790/frappe",
"id": "7a3f122ee2042015d63f76eb9a633c83dfa38a97",
"size": "221",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/desk/doctype/workspace/test_workspace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63276"
},
{
"name": "HTML",
"bytes": "218921"
},
{
"name": "JavaScript",
"bytes": "2152738"
},
{
"name": "Less",
"bytes": "36947"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3261616"
},
{
"name": "SCSS",
"bytes": "223084"
},
{
"name": "Shell",
"bytes": "3358"
},
{
"name": "Vue",
"bytes": "49860"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class SummaryOrganizationConfig(AppConfig):
    """Django app configuration for the organization summary sub-app."""
    # Short label distinguishing this app from its parent 'organization' app.
    label = 'summary_organization'
    name = 'cobra.apps.organization.summary'
    verbose_name = _('Summary Organization')
| {
"content_hash": "10615f899514cfd0b0c2434ed1a8e368",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 55,
"avg_line_length": 32.625,
"alnum_prop": 0.7662835249042146,
"repo_name": "lyoniionly/django-cobra",
"id": "4d956ee5c6c1c4d4d770dc2bf951f0b997386000",
"size": "261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cobra/apps/organization/summary/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "745958"
},
{
"name": "HTML",
"bytes": "254436"
},
{
"name": "JavaScript",
"bytes": "2679541"
},
{
"name": "Python",
"bytes": "1440198"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "24882"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from django_sqs.views import parse_attributes
# Canned SQS GetQueueAttributes responses (one dict per fake queue), as
# boto would return them — all values are strings.
ATTRIBUTES_DICT = [
    { u'ApproximateNumberOfMessagesNotVisible': u'0',
      u'MessageRetentionPeriod': u'345600',
      u'LastModifiedTimestamp': u'1303884119',
      u'MaximumMessageSize': u'8192',
      u'CreatedTimestamp': u'1303884119',
      u'ApproximateNumberOfMessages': u'0',
      u'VisibilityTimeout': u'30',
      u'QueueArn': u'arn:aws:sqs:ap-southeast-1:432461735093:cn-deal-announce', },
    {u'ApproximateNumberOfMessagesNotVisible': u'0',
     u'MessageRetentionPeriod': u'345600',
     u'LastModifiedTimestamp': u'1303883653',
     u'MaximumMessageSize': u'8192',
     u'CreatedTimestamp': u'1303883653',
     u'ApproximateNumberOfMessages': u'1',
     u'VisibilityTimeout': u'30',
     u'QueueArn': u'arn:aws:sqs:ap-southeast-1:432461735093:cn-deal-announcedev', }
]
class TestQueue(object):
    """Minimal fake queue holding only a private name.

    NOTE(review): presumably mimics the attribute parse_attributes reads
    on a real boto queue object — verify against django_sqs.views.
    """
    def __init__(self, n):
        self._name = n
class SQSTest(TestCase):
    """Tests for django_sqs.views.parse_attributes."""

    def setUp(self):
        """Expose the canned attribute fixtures to each test."""
        self.attributes_dict = ATTRIBUTES_DICT

    def test_parse_attributes(self):
        """Each queue should be annotated from its paired attribute dict."""
        queues = [TestQueue('q1'), TestQueue('q2')]
        # Pair each fake queue with one canned attribute dict.
        qas = dict(zip(queues, self.attributes_dict))
        parse_attributes(qas)
        for queue in queues:
            self.assertNotEqual(None, queue.created)
            self.assertTrue(queue.name in ['q1', 'q2'])
| {
"content_hash": "9f983656c661f9e2c1b1dade84035da3",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 98,
"avg_line_length": 34.660377358490564,
"alnum_prop": 0.497550353837779,
"repo_name": "mjallday/Django-SQS",
"id": "ecbba23940c68801b3cc437d01c67c33c68f3a34",
"size": "1837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_sqs/tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "6424"
}
],
"symlink_target": ""
} |
from .collections import AsyncCollections, Collections
| {
"content_hash": "83767afbd5ac476c1b36013ad917f0b8",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 54,
"avg_line_length": 55,
"alnum_prop": 0.8727272727272727,
"repo_name": "GetStream/stream-python",
"id": "8264c83b67f6e38ad3623e06f4fc888912635d6f",
"size": "55",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "stream/collections/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1111"
},
{
"name": "Makefile",
"bytes": "883"
},
{
"name": "Python",
"bytes": "192922"
},
{
"name": "Shell",
"bytes": "2783"
}
],
"symlink_target": ""
} |
import sys
import random
class PartitionEnum:
    """Enumerator of the k-partitions of {1, ..., n} (Python 2 code).

    getNext() yields partitions one at a time using the recurrence
    S(n, k) = S(n-1, k-1) + k * S(n-1, k): first all partitions where n
    is a singleton part, then those where n joins each part of a
    (n-1, k)-partition in turn.
    """
    def __init__(self, n, k):
        self.n = n
        self.k = k
        self.reset();
    def reset(self):
        """Reset the enumeration state back to the first partition."""
        # No k-partition of [n] exists when k > n.
        self.hasNext = (self.k <= self.n)
        # Sub-enumerator over [n-1], per the recurrence above.
        self.sub = None
        self.index = 0 # Number of partitions that have been generated so far
        # Phase flag: True while emitting partitions with n as a singleton.
        self.nAlone = True
        # Index of the part that n is currently appended to (second phase).
        self.counter = 0
    def getRandom(self):
        """Return one random k-partition of [n] with every part non-empty.

        NOTE(review): not uniform over all partitions — the first k numbers
        of a shuffled order seed the parts, the rest are placed uniformly.
        """
        order = range(1, self.n + 1)
        # Randomly order the numbers
        random.shuffle(order)
        partition = [[] for i in xrange(self.k)]
        for i in xrange(self.n):
            part = None
            if i < self.k:
                # Place first k in their own parts
                part = i
            else:
                # Randomly assign other vertices
                part = random.randint(0, self.k - 1)
            partition[part].append(order[i])
        return partition
    def numPartitions(self):
        """Compute number of k-partitions of [n] (i.e., Stirling number of 2nd kind) via dynamic programming. Takes O(n) time and O(n) space."""
        table = [0];
        for n in xrange(1, self.n + 1):
            table.append(1)
            # n == len(table) - 1
            for k in reversed(xrange(2, n)):
                table[k] = table[k - 1] + k * table[k]
        return table[self.k]
    def getNext(self):
        """Return the next partition as a list of k lists, or None when
        the enumeration is exhausted."""
        if not self.hasNext:
            return None
        self.index += 1
        # Base cases
        if self.k == 1:
            self.hasNext = False
            return [[i + 1 for i in xrange(self.n)]]
        if self.k == self.n:
            self.hasNext = False
            return [[i + 1] for i in xrange(self.n)]
        # First, generate the partitions in which n is by itself
        if self.nAlone:
            # Very first time
            if self.sub == None:
                self.sub = PartitionEnum(self.n - 1, self.k - 1)
            if self.sub.hasNext:
                partition = self.sub.getNext()
                partition.append([self.n])
                return partition
            else:
                # Switch to phase two: n joins an existing part.
                self.nAlone = False
                self.sub = PartitionEnum(self.n - 1, self.k)
        # Done with partitions in which n is by itself
        partition = self.sub.getNext()
        # partition should not be None
        assert partition != None
        partition[self.counter].append(self.n)
        if not self.sub.hasNext:
            # Sub-enumeration exhausted: advance n to the next part, or stop.
            if self.counter < self.k - 1:
                self.counter += 1
                self.sub = PartitionEnum(self.n - 1, self.k)
            else:
                self.hasNext = False
        return partition
def main():
    """Print every k-partition of [n]; n and k come from the command line."""
    enum = PartitionEnum(int(sys.argv[1]), int(sys.argv[2]))
    while enum.hasNext:
        print enum.getNext()
    # Sanity check: generated count must match the closed-form count.
    assert enum.index == enum.numPartitions()
if __name__ == "__main__":
    main()
| {
"content_hash": "e83e9d0dd9b7921cfe6a487e38958a65",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 145,
"avg_line_length": 29.04040404040404,
"alnum_prop": 0.504,
"repo_name": "smpcole/partition-enum",
"id": "f3c9996283bf5575304d3640c8c854c29dd97529",
"size": "2875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PartitionEnum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "M",
"bytes": "1844"
},
{
"name": "Python",
"bytes": "1866"
}
],
"symlink_target": ""
} |
"""The Netatmo integration."""
import asyncio
import logging
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_entry_oauth2_flow, config_validation as cv
from . import api, config_flow
from .const import AUTH, DATA_PERSONS, DOMAIN, OAUTH2_AUTHORIZE, OAUTH2_TOKEN
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
PLATFORMS = ["binary_sensor", "camera", "climate", "sensor"]
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Netatmo component."""
    hass.data[DOMAIN] = {}
    hass.data[DOMAIN][DATA_PERSONS] = {}
    # No YAML section: nothing more to do, config entries are handled in
    # async_setup_entry.
    if DOMAIN not in config:
        return True
    # Register the OAuth2 implementation built from the YAML-provided
    # client id/secret so the config flow can use it.
    config_flow.NetatmoFlowHandler.async_register_implementation(
        hass,
        config_entry_oauth2_flow.LocalOAuth2Implementation(
            hass,
            DOMAIN,
            config[DOMAIN][CONF_CLIENT_ID],
            config[DOMAIN][CONF_CLIENT_SECRET],
            OAUTH2_AUTHORIZE,
            OAUTH2_TOKEN,
        ),
    )
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up Netatmo from a config entry."""
    implementation = await config_entry_oauth2_flow.async_get_config_entry_implementation(
        hass, entry
    )
    # Store the authenticated API session keyed by entry id for the platforms.
    hass.data[DOMAIN][entry.entry_id] = {
        AUTH: api.ConfigEntryNetatmoAuth(hass, entry, implementation)
    }
    # Forward the entry to every supported platform without awaiting them here.
    for component in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry."""
    # Unload every platform concurrently; success only if all succeed.
    results = await asyncio.gather(
        *(
            hass.config_entries.async_forward_entry_unload(entry, platform)
            for platform in PLATFORMS
        )
    )
    unload_ok = all(results)
    if unload_ok:
        # Drop the per-entry data (auth session) stored at setup time.
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
| {
"content_hash": "67cd97d94f8b7b884843402eb8e0a11c",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 90,
"avg_line_length": 26.930232558139537,
"alnum_prop": 0.6373056994818653,
"repo_name": "Teagan42/home-assistant",
"id": "ace12d3838cadac7c20081708772e97c5bb6c753",
"size": "2316",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/netatmo/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from openslides.utils.plugins import get_all_plugin_urlpatterns
# Plugin-contributed URL patterns come first, then the core and users apps.
urlpatterns = get_all_plugin_urlpatterns()
urlpatterns += [
    url(r"^core/", include("openslides.core.urls")),
    url(r"^users/", include("openslides.users.urls")),
]
| {
"content_hash": "f4ca33689365c852e851057fa5043cb8",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 63,
"avg_line_length": 25.454545454545453,
"alnum_prop": 0.7214285714285714,
"repo_name": "emanuelschuetze/OpenSlides",
"id": "94860e0da22179388f50dbb326db402b38f490d1",
"size": "280",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openslides/urls_apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67595"
},
{
"name": "Dockerfile",
"bytes": "853"
},
{
"name": "HTML",
"bytes": "312746"
},
{
"name": "JavaScript",
"bytes": "116454"
},
{
"name": "Python",
"bytes": "966018"
},
{
"name": "Smarty",
"bytes": "3882"
},
{
"name": "TypeScript",
"bytes": "1747309"
}
],
"symlink_target": ""
} |
'''
Numerical example for the Continuous No-Regret Algorithm with
Quadratic loss functions
@author: Maximilian Balandat, Walid Krichene
@date: Dec 20, 2014
'''
from ContNoRegret.Domains import S
from ContNoRegret.Distributions import Uniform
from ContNoRegret.LossFunctions import QuadraticLossFunction
from ContNoRegret.HedgeAlgorithm import QuadraticNoRegretProblem
# NOTE(review): plot_results is imported here but the call below goes
# through ContNoRegret.utils.plot_results instead — the direct import
# appears unused.
from ContNoRegret.utils import create_random_Q, compute_etaopt, plot_results
import ContNoRegret.utils
# set up some basic parameters
T = 10000
M = 10.0
Lbnd = 5.0 # Uniform bound on the Lipschitz constant
N = 2500
# domain is an 'S'
dom = S()
# create random means, uniformly over the domain
mus = Uniform(dom).sample(T)
# create random Q matrices, based on the Lipschitz bound and the uniform bound M
Qs = [create_random_Q(dom, mu, Lbnd, M) for mu in mus]
# create list of loss functions
lossfuncs = [QuadraticLossFunction(dom, mu, Q, 0.0) for mu,Q in zip(mus,Qs)]
# Create quadratic problem
quadprob = QuadraticNoRegretProblem(dom, lossfuncs, Lbnd, M)
# run the problem for diffeerent constant rates etas
etaopts = {}
Ts = [1500, 10000]
for T in Ts:
    etaopts[T] = compute_etaopt(dom, M, T)
# run the problem for different alphas and thetas
alphas = [0.15, 0.3]
thetas = [0.25, 0.25]
# also fix some other value of eta
etas = [0.2]
# Run the simulation with N samples for each learning-rate scheme.
results = quadprob.run_simulation(N, etas=etas,
                                  etaopts=etaopts,
                                  alphas=alphas, thetas=thetas,
                                  Ngrid=500000)
# Plot results (offset skips the initial transient) and estimate the
# log-log regret slopes.
ContNoRegret.utils.plot_results(results, offset=1000, filename='figures/Quad_both_S')
slopes, slopes_bnd = results.estimate_loglog_slopes()
| {
"content_hash": "6f24e34ae7c5d00c70aec70419fe92ff",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 85,
"avg_line_length": 32.705882352941174,
"alnum_prop": 0.7128297362110312,
"repo_name": "Balandat/cont_no_regret",
"id": "9fcd72044c0ca8419669c0ac5f340c420545f786",
"size": "1668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old_code/QuadLoss_ex_both.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3557"
},
{
"name": "Python",
"bytes": "379837"
}
],
"symlink_target": ""
} |
# Calls factorial from a local module named 'test'.
# NOTE(review): the name 'test' shadows the stdlib test package — the
# local module (presumably a compiled Fortran extension, given the
# *_f95 filename) must be on sys.path first; confirm.
import test
print(test.factorial(10))
| {
"content_hash": "7fb6742f68f2bcc149de61149bf2b9cf",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 25,
"avg_line_length": 19.5,
"alnum_prop": 0.7692307692307693,
"repo_name": "GongYiLiao/Python_Daily",
"id": "d5992148b545e7e84a74d5e151243992db66c4e9",
"size": "58",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2013/May/1/test_f95.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2594"
},
{
"name": "C++",
"bytes": "745"
},
{
"name": "FORTRAN",
"bytes": "239"
},
{
"name": "HTML",
"bytes": "1213"
},
{
"name": "Jupyter Notebook",
"bytes": "4379"
},
{
"name": "Makefile",
"bytes": "912"
},
{
"name": "Python",
"bytes": "197258"
},
{
"name": "TeX",
"bytes": "37325"
}
],
"symlink_target": ""
} |
from .behaviors.player import Player
from .shaders import ShaderProgram
from .collisions import Collidables, CollidableCategories
from nytram.entity import Entity
from nytram.renderers import EntityRenderer
from nytram.ext.box2d import PlayerBody, Fixture, BodyTypes, Box, Filter
from nytram.ext.box2d.collisions import Collider, CollisionRegistration
from nytram.ext.box2d.movement import Axis, Movement, InstantVelocity
class Paddle:
    """ Helper to build Paddle Entities """
    # Shared EntityRenderer instance, populated by loadRenderer().
    renderer = None
    @classmethod
    def loadRenderer(cls):
        """ Load the renderer """
        # Two triangles (elements) over four vertices; coordinates suggest a
        # 1x5 rectangle centered at the origin — TODO confirm units.
        cls.renderer = EntityRenderer(ShaderProgram, elements=[0,2,1,3,0,1], vertexData={0:[.5, 2.5, 0, -.5, -2.5, 0, .5, -2.5, 0, -.5, 2.5, 0]})
    @classmethod
    def build(cls, scene, position, upKey, downKey):
        """ Build the Entity """
        entity = Entity(scene, renderer=cls.renderer)
        # Dynamic fixture collides with everything except the ball; the
        # kinematic fixture collides only with the ball.
        dynamicFixture = Fixture(Box(1, 5), density=1, restitution=0, friction=0, isSensor=False, filter=Filter(categoryBits=CollidableCategories.DynamicPaddle, maskBits=~CollidableCategories.Ball))
        kinematicFixture = Fixture(Box(1, 5), density=1, restitution=0, friction=0, isSensor=False, filter=Filter(categoryBits=CollidableCategories.KinematicPaddle, maskBits=CollidableCategories.Ball))
        entity.body = PlayerBody([dynamicFixture], [kinematicFixture], fixedRotation=True)
        entity.transform.position = position
        # When the ball hits this paddle's kinematic fixture, treat the
        # paddle as a wall for collision purposes.
        entity.collider = Collider([kinematicFixture], {CollisionRegistration(Collidables.Ball, Collidables.Wall, actsAs=Collidables.Wall)})
        # Vertical-only movement at constant speed 5, driven by Up/Down.
        entity.movement = Movement({"Up":[0,1], "Down":[0,-1]}, InstantVelocity(5, axis=Axis.Vertical))
        entity.player = Player(upKey, downKey)
        return entity
"content_hash": "23381bbdc168fbc74b6cdc98aa82b151",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 201,
"avg_line_length": 57.193548387096776,
"alnum_prop": 0.7117879300620418,
"repo_name": "cloew/NytramPong",
"id": "6858ce207d49685945a244032c4c48e6975b3261",
"size": "1773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pong/paddle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "257"
},
{
"name": "Python",
"bytes": "11910"
}
],
"symlink_target": ""
} |
import time
def speaker(speaker):
    """Print a newline-prefixed dialogue prompt ("<speaker>: ") with no
    trailing newline, then pause briefly for dramatic pacing."""
    prompt = '\n' + speaker + ': '
    print(prompt, end='')
    time.sleep(.2)
| {
"content_hash": "eefcbbece64dde2ad21e2ff11aea9f77",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 36,
"avg_line_length": 18.2,
"alnum_prop": 0.5824175824175825,
"repo_name": "Indmind/Jomblo-Story",
"id": "43011e93183e1ed5d84e7f675108266cfec6484b",
"size": "91",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "module/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5623"
}
],
"symlink_target": ""
} |
__author__ = 'croxis'
import sandbox
from direct.directnotify.DirectNotify import DirectNotify
log = DirectNotify().newCategory("SpaceDrive-ClientNet")
#PROPOSAL! {server entity id: client entity id} and reverse lookup dict too
class ClientNetworkSystem(sandbox.UDPNetworkSystem):
    """Client-side UDP network system.

    Translates sandbox messages into outgoing request packets and dispatches
    incoming server packets back onto the sandbox message bus.

    NOTE(review): ``protocol`` is referenced throughout but never imported in
    this module -- confirm the intended import at file level.
    """

    def init2(self):
        """Second-stage initialisation: register sandbox message handlers."""
        self.packetCount = 0
        self.accept('login', self.sendLogin)
        self.accept('requestStations', self.requestStations)
        self.accept('requestThrottle', self.requestThrottle)
        self.accept('requestCreateShip', self.requestCreateShip)
        self.accept('requestTarget', self.requestTarget)

    def process_packet(self, msgID, remotePacketCount, ack, acks, hashID, serialized, address):
        """Decode an incoming packet and rebroadcast it as sandbox messages."""
        # If not in our protocol range then we just reject
        if msgID < 0 or msgID > 200:
            return
        data = protocol.readProto(msgID, serialized)
        if msgID == protocol.CONFIRM_STATIONS:
            sandbox.send('shipUpdate', [data, True])
            sandbox.send('setShipID', [data])
            sandbox.send('makeStationUI', [data])
        elif msgID == protocol.PLAYER_SHIPS:
            sandbox.send('shipUpdates', [data])
            sandbox.send('shipSelectScreen', [data])
        elif msgID == protocol.POS_PHYS_UPDATE:
            sandbox.send('shipUpdates', [data])
        elif msgID == protocol.SHIP_CLASSES:
            sandbox.send('shipClassList', [data])

    def sendLogin(self, serverAddress):
        """Remember the server address and send a LOGIN packet."""
        self.serverAddress = serverAddress
        datagram = self.generateGenericPacket(protocol.LOGIN)
        # Bug fix: this module defines a module-level ``log`` (DirectNotify
        # category); ``universals`` was never imported and raised NameError.
        log.debug("sending login")
        self.send(datagram)

    def requestCreateShip(self, shipName, className):
        """Ask the server to create a ship of ``className`` named ``shipName``."""
        datagram = protocol.requestCreateShip(shipName, className)
        self.send(datagram)

    def requestStations(self, shipid, stations):
        """Request station assignments on the given ship."""
        datagram = protocol.requestStations(shipid, stations)
        self.send(datagram)

    def requestThrottle(self, throttle, heading):
        """Send a throttle/heading change request."""
        datagram = protocol.requestThrottle(throttle, heading)
        self.send(datagram)

    def requestTarget(self, targetID):
        """Request that turrets target the given entity id."""
        datagram = protocol.requestTurretTarget(targetID)
        self.send(datagram)

    def send(self, datagram):
        """Send ``datagram`` to the previously stored server address."""
        self.send_data(datagram, self.serverAddress)
class ServerComponent:
    """Theoretical component for server generated and sent entities"""
    # Entity id as assigned by the server (0 = not yet assigned).
    serverEntityID = 0
    # Presumably the time/tick of the last update received from the
    # server -- confirm units against the server-side sender.
    lastServerUpdate = 0
| {
"content_hash": "149e92d5a4f073a289b16b398432f17a",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 95,
"avg_line_length": 37.25373134328358,
"alnum_prop": 0.6574519230769231,
"repo_name": "croxis/SpaceDrive",
"id": "345b0d67675d3b37fb83e1c39acaddf1dc7fd03f",
"size": "2496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spacedrive/networking/client_networking.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1288"
},
{
"name": "C",
"bytes": "21897"
},
{
"name": "C++",
"bytes": "165025"
},
{
"name": "GLSL",
"bytes": "741524"
},
{
"name": "Groff",
"bytes": "119"
},
{
"name": "Python",
"bytes": "1523574"
}
],
"symlink_target": ""
} |
# Packaging script for the TM1py distribution.
from setuptools import setup
# NOTE(review): the SCHEDULE_ prefix looks like a leftover from another
# project's setup.py -- these are TM1py's version/download URL.
SCHEDULE_VERSION = '1.7.0'
SCHEDULE_DOWNLOAD_URL = (
    'https://github.com/Cubewise-code/TM1py/tarball/' + SCHEDULE_VERSION
)
# The README doubles as the PyPI long description.
with open('README.md', 'r') as f:
    long_description = f.read()
setup(
    name='TM1py',
    packages=['TM1py', 'TM1py/Exceptions', 'TM1py/Objects', 'TM1py/Services', 'TM1py/Utils'],
    version=SCHEDULE_VERSION,
    description='A python module for TM1.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    license='MIT',
    author='Marius Wirtz',
    author_email='MWirtz@cubewise.com',
    url='https://github.com/cubewise-code/tm1py',
    download_url=SCHEDULE_DOWNLOAD_URL,
    keywords=[
        'TM1', 'IBM Cognos TM1', 'Planning Analytics', 'PA', 'Cognos'
    ],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Natural Language :: English',
    ],
    install_requires=[
        'requests',
        'pytz',
        # SSPI authentication is only available/needed on Windows.
        'requests_negotiate_sspi;platform_system=="Windows"',
        'mdxpy'],
    extras_require={
        "pandas": ["pandas"]
    },
    python_requires='>=3.6',
)
| {
"content_hash": "daf5a56d10e7087ae2ddf9e0485447fa",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 93,
"avg_line_length": 31.177777777777777,
"alnum_prop": 0.6086956521739131,
"repo_name": "OLAPLINE/TM1py",
"id": "ad8db0607a861af5f2cb68c43a744c2569e9db4e",
"size": "1403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94131"
}
],
"symlink_target": ""
} |
def border(length):
    """Print a three-line horizontal border (dashes, tildes, dashes)
    of the given character length."""
    print(length*"-")
    print(length*"~")
    print(length*"-")


def print_with_border(text, length):
    """Print *text* framed above and below by borders of ``length`` chars.

    Bug fix: removed the stray
    ``print("intentionally wrong for testing purposes")`` debug line that
    corrupted the output between the text and the bottom border.
    """
    border(length)
    print(text)
    border(length)


def print_auto_border(text):
    """Print *text* framed by borders automatically sized to fit it."""
    border(len(text))
    print(text)
    border(len(text))
# Test Program
print_with_border("This is my program!", 25)
print_with_border("This is the end of my program.", 10)
| {
"content_hash": "62bad254a615e84eb6738b12f8a28160",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 55,
"avg_line_length": 24.4,
"alnum_prop": 0.6721311475409836,
"repo_name": "golfit/MEETY1UnitTesting",
"id": "3babd52f12a84771e1825e2001032041782c8e05",
"size": "536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day4.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5424"
},
{
"name": "Shell",
"bytes": "1213"
}
],
"symlink_target": ""
} |
"""
WSGI config for ganzige project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ganzige.settings")
application = get_wsgi_application()
| {
"content_hash": "ec354a7502ad384cd9b47ea04c7930a2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.4375,
"alnum_prop": 0.7698209718670077,
"repo_name": "kekehurry/ganzige.site",
"id": "47a3633dce9d57a235008fa04c750a2655a8229f",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ganzige/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "344199"
},
{
"name": "HTML",
"bytes": "28268"
},
{
"name": "JavaScript",
"bytes": "4486707"
},
{
"name": "Python",
"bytes": "15800"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
} |
"""
Terrain Following Mesh
~~~~~~~~~~~~~~~~~~~~~~
Use a topographic surface to create a 3D terrain-following mesh.
Terrain following meshes are common in the environmental sciences, for instance
in hydrological modelling (see
`Maxwell 2013 <https://www.sciencedirect.com/science/article/abs/pii/S0309170812002564>`_
and
`ParFlow <https://parflow.org>`_).
In this example, we demonstrate a simple way to make a 3D grid/mesh that
follows a given topographic surface. In this example, it is important to note
that the given digital elevation model (DEM) is structured (gridded and not
triangulated): this is common for DEMs.
"""
# sphinx_gallery_thumbnail_number = 3
import pyvista as pv
import numpy as np
from pyvista import examples
###############################################################################
# Download a gridded topography surface (DEM)
dem = examples.download_crater_topo()
dem
###############################################################################
# Now let's subsample and extract an area of interest to make this example
# simple (also the DEM we just load is pretty big).
# Since the DEM we loaded is a :class:`pyvista.UniformGrid` mesh, we can use
# the :func:`pyvista.UniformGridFilters.extract_subset` filter:
subset = dem.extract_subset((500, 900, 400, 800, 0, 0), (5,5,1))
subset.plot(cpos="xy")
###############################################################################
# Now that we have a region of interest for our terrain following mesh, lets
# make a 3D surface of that DEM:
terrain = subset.warp_by_scalar()
terrain
###############################################################################
terrain.plot()
###############################################################################
# And now we have a 3D structured surface of the terrain! We can now extend
# that structured surface into a 3D mesh to form a terrain following grid.
# To do this, we first our cell spacings in the z-direction (these start
# from the terrain surface). Then we repeat the XYZ structured coordinates
# of the terrain mesh and decrease each Z level by our Z cell spacing.
# Once we have those structured coordinates, we can create a
# :class:`pyvista.StructuredGrid`.
z_cells = np.array([25]*5 + [35]*3 + [50]*2 + [75, 100])
xx = np.repeat(terrain.x, len(z_cells), axis=-1)
yy = np.repeat(terrain.y, len(z_cells), axis=-1)
zz = np.repeat(terrain.z, len(z_cells), axis=-1) - np.cumsum(z_cells).reshape((1, 1, -1))
mesh = pv.StructuredGrid(xx, yy, zz)
mesh["Elevation"] = zz.ravel(order="F")
mesh
###############################################################################
cpos = [(1826736.796308761, 5655837.275274233, 4676.8405505181745),
(1821066.1790519988, 5649248.765538796, 943.0995128226014),
(-0.2797856225380979, -0.27966946337594883, 0.9184252809434081)]
mesh.plot(show_edges=True, lighting=False, cpos=cpos)
| {
"content_hash": "90ecc43ad084d846e724973ba4642c6b",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 89,
"avg_line_length": 40.02777777777778,
"alnum_prop": 0.6155447605829285,
"repo_name": "akaszynski/vtkInterface",
"id": "c0f7ee60908c9a18064ec624ebda8a40f1aa5171",
"size": "2882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/00-load/terrain-mesh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "184915"
}
],
"symlink_target": ""
} |
"""OmniNet models utilities."""
import jax.numpy as jnp
def grid_restack(all_vecs):
    """Interleave several sequences position-by-position.

    Given multiple sequences (lists) of batch x len x dim, reshape this such
    that all entries for a given position sit side by side, e.g.
    inputs:  [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]
    outputs: [1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12]

    Args:
      all_vecs: list of sequences of batch x len x dim

    Returns:
      Array of batch x (length x num_items) x dim.
    """
    seq_len = all_vecs[0].shape[1]
    interleaved = [
        vecs[:, None, pos, :] for pos in range(seq_len) for vecs in all_vecs
    ]
    return jnp.concatenate(interleaved, 1)
| {
"content_hash": "13beaed93fe7b63cd2945c21aeafd6a4",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 73,
"avg_line_length": 26.214285714285715,
"alnum_prop": 0.6335149863760218,
"repo_name": "google-research/scenic",
"id": "7f6776ffbf4d96a687b2d97687b77618f19c9c05",
"size": "734",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scenic/projects/omninet/model_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1717873"
},
{
"name": "Python",
"bytes": "3692184"
}
],
"symlink_target": ""
} |
from .commutative import Commutative
class Product(Commutative):
    # Commutative product of factors (integers and symbolic expressions),
    # e.g. 3 * x * y. Relies on the Commutative base class for storage
    # (``self._exprs``) and helpers (``_val_of_exp``, ``_calc_degree``).

    def __init__(self, *args):
        # Fold integer coefficients and drop 1s before storing the factors.
        super(Product, self).__init__(*self.simplified(*args))

    def simplified(self, *args):
        """
        Returns a sequence containing expressions that make a simplified Product.
        Used when ``Product`` is initialized to simplify.
        Uses ``self.exprs`` when no arguments are provided.
        :type: args: int or Expression
        :rtype: seq
        """
        coefficient = 1
        args = args or self._exprs
        for arg in args:
            if isinstance(arg, int):
                # If any part is 0 the whole thing is 0
                if arg == 0:
                    # NOTE(review): yielding ``None`` as the zero marker looks
                    # suspicious -- __call__ would then multiply by None.
                    # Confirm the intended representation of a zero product.
                    yield None
                # 1 can be eliminated because 1 * x = x
                if arg == 1:
                    continue
                coefficient *= arg
            else:
                yield arg
        # A zero factor forces coefficient == 0, so no coefficient is emitted.
        if coefficient != 0:
            yield coefficient

    def __call__(self, val):
        # Evaluate the product at ``val`` by multiplying every factor's value.
        prod = 1
        for expr in self._exprs:
            prod *= self._val_of_exp(expr, val)
        return prod

    def degree(self):
        """
        Returns total degree (ex degree x is 1, degree 3x^3 is 3) of product.
        :rtype: int
        """
        deg = 0
        for expr in self._exprs:
            deg += self._calc_degree(expr)
        return deg

    def order(self, ascending=True):
        """
        Converts ''frozenset'' exprs into ''list'' ordered by degree.
        :rtype: list
        """
        # NOTE(review): the ``ascending`` parameter is ignored -- the call
        # below hard-codes ascending=True; it should probably be passed through.
        return super(Product, self).order(ascending=True)

    def same_base(self, other):
        # Two products share a base when their non-integer factors agree.
        return isinstance(other, self.__class__) and \
            self.rem_int() == other.rem_int()

    def rem_int(self):
        # The factor set with integer coefficients removed.
        return frozenset([expr for expr in self._exprs if not isinstance(expr, int)])

    def __str__(self):
        # NOTE(review): [:-2] strips "* " but leaves a trailing space
        # ("x * y * " -> "x * y "); [:-3] would remove the separator fully.
        return ''.join("{} * ".format(expr) for expr in self.order())[:-2]  # Removes leftover *

    def __mul__(self, other):
        # NOTE(review): the non-Product branch returns a Product, but the
        # Product branch returns a plain set -- confirm this asymmetry.
        if not isinstance(other, self.__class__):
            return Product(*self._exprs, other)
        no_overlap = self._exprs.union(other.exprs) - self._exprs.intersection(other.exprs)
        overlap = set([expr**2 for expr in self._exprs.intersection(other.exprs)])
        return no_overlap.union(overlap)

    def __pow__(self, power, modulo=None):
        # Distribute the exponent over every factor.
        return Product(*[expr**power for expr in self._exprs])
| {
"content_hash": "a90dabace6b8c59a48e38e6f718324fb",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 95,
"avg_line_length": 30.2,
"alnum_prop": 0.5401490066225165,
"repo_name": "LordDarkula/polypy",
"id": "5a558f026954b5e3109bf54fd429538fb36ee50a",
"size": "2416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polypy/product.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10939"
}
],
"symlink_target": ""
} |
import sys
import os
import getopt
import pgdb
from pprint import pprint
# Path to the SQL schema file; filled in from argv below.
schema_file = None
# The PLC configuration file is itself Python; exec it into ``config``
# so its settings (DB user/name, ...) become dictionary entries.
config_file = "/etc/planetlab/plc_config"
config = {}
exec(compile(open(config_file).read(), config_file, 'exec'), config)
def usage():
    """Print a usage message and abort with exit status 1."""
    message = "Usage: %s SCHEMA_FILE " % sys.argv[0]
    print(message)
    sys.exit(1)
# The single required argument is the schema file path.
try:
    schema_file = sys.argv[1]
except IndexError:
    print("Error: too few arguments")
    usage()
# all foreign keys exist as primary keys in another table
# will represent all foreign keys as
# { 'table.foreign_key': 'table.primary_key'}
foreign_keys = {}
# Keys in the order they were discovered in the schema.
foreign_keys_ordered = []
# Unused accumulator (kept for the commented-out bookkeeping below).
zombie_keys = {}
# parse the schema for foreign keys
try:
    file = open(schema_file, 'r')
    index = 0
    lines = file.readlines()
    while index < len(lines):
        line = lines[index].strip()
        # find all created objects
        if line.startswith("CREATE"):
            line_parts = line.split(" ")
            # Skip an optional "OR REPLACE" so the type/name offsets hold.
            if line_parts[1:3] == ['OR', 'REPLACE']:
                line_parts = line_parts[2:]
            item_type = line_parts[1].strip()
            item_name = line_parts[2].strip()
            # Only table bodies can contain REFERENCES clauses.
            if item_type.upper() in ['TABLE']:
                # Scan the column definitions until the closing ';'.
                while index < len(lines):
                    index = index + 1
                    nextline = lines[index].strip()
                    # Strip trailing SQL comments (and any comma before them).
                    if nextline.find("--") > -1:
                        nextline = nextline[0:nextline.index("--")].replace(',', '')
                    if nextline.upper().find("REFERENCES") > -1:
                        nextline_parts = nextline.split(" ")
                        # Column name is the first token; referenced table
                        # follows the REFERENCES keyword.
                        foreign_key_name = nextline_parts[0].strip()
                        foreign_key_table = nextline_parts[nextline_parts.index("REFERENCES")+1].strip()
                        foreign_key = item_name + "." + foreign_key_name
                        # Assumes the referenced column shares the FK's name.
                        primary_key = foreign_key_table + "." + foreign_key_name
                        foreign_keys[foreign_key] = primary_key
                        foreign_keys_ordered.append(foreign_key)
                    elif nextline.find(";") >= 0:
                        break
        index = index + 1
# NOTE(review): bare except that immediately re-raises is a no-op wrapper.
except:
    raise
# Connect with the credentials exec'd from the PLC config file.
db = pgdb.connect(user=config['PLC_DB_USER'],
                  database=config['PLC_DB_NAME'])
cursor = db.cursor()
try:
    for foreign_key in foreign_keys_ordered:
        primary_key = foreign_keys[foreign_key]
        sql = "SELECT distinct %s from %s"
        # get all foreign keys in this table
        foreign_key_parts = foreign_key.split(".")
        # Do not delete from primary tables.
        # Bug fix: a missing comma after 'peers' made Python concatenate
        # 'peers' 'persons' into 'peerspersons', so neither 'peers' nor
        # 'persons' was actually protected from deletion.
        if foreign_key_parts[0] in ['addresses', 'boot_states', 'conf_files',
                                    'keys', 'messages', 'nodegroups', 'interfaces',
                                    'nodes', 'pcus', 'peers',
                                    'persons', 'roles', 'sessions', 'sites', 'slices']:
            #print "skipping table %s" % foreign_key_parts[0]
            continue
        cursor.execute(sql % (foreign_key_parts[1], foreign_key_parts[0]))
        foreign_rows = cursor.fetchall()
        # get all the primary keys from this foreign key's primary table
        primary_key_parts = primary_key.split(".")
        # foreign key name may not match primary key name. must rename these
        if primary_key_parts[1] == 'creator_person_id':
            primary_key_parts[1] = 'person_id'
        elif primary_key_parts[1] == 'min_role_id':
            primary_key_parts[1] = 'role_id'
        sql = sql % (primary_key_parts[1], primary_key_parts[0])
        # Determine which primary records are soft-deleted: inspect the
        # table description for a "deleted" column.
        desc = os.popen('psql planetlab4 postgres -c "\d %s;"' % primary_key_parts[0])
        result = desc.readlines()
        if primary_key_parts[0] in ['slices']:
            sql = sql + " where name not like '%_deleted'"
        elif [line for line in result if line.find("deleted") > -1]:
            sql = sql + " where deleted = false"
        cursor.execute(sql)
        primary_key_rows = cursor.fetchall()
        # if foreign key isnt present in primay_key query, it either doesnt exist or marked as deleted
        # also, ignore null foreign keys, not considered zombied
        zombie_keys_func = lambda key: key not in primary_key_rows and not key == [None]
        zombie_keys_list = [zombie_key[0] for zombie_key in filter(zombie_keys_func, foreign_rows)]
        print(zombie_keys_list)
        # delete these zombie records
        if zombie_keys_list:
            print(" -> Deleting %d zombie record(s) from %s after checking %s" %
                  (len(zombie_keys_list), foreign_key_parts[0], primary_key_parts[0]))
            # NOTE(review): a single-element tuple renders as "('x',)" which
            # is not valid SQL for IN -- confirm/handle the one-zombie case.
            sql_delete = 'DELETE FROM %s WHERE %s IN %s' % \
                (foreign_key_parts[0], foreign_key_parts[1], tuple(zombie_keys_list))
            cursor.execute(sql_delete)
            db.commit()
        #zombie_keys[foreign_key] = zombie_keys_list
    print("done")
except pgdb.DatabaseError:
    raise
| {
"content_hash": "68c70d327cbeb9e639fb40d6fa1c929f",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 128,
"avg_line_length": 48.91596638655462,
"alnum_prop": 0.47294279333447864,
"repo_name": "dreibh/planetlab-lxc-plcapi",
"id": "acd3b1c412e5edffa78fd8a4e886fa0ea574e1ea",
"size": "5903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/dzombie.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "724"
},
{
"name": "Makefile",
"bytes": "2995"
},
{
"name": "PHP",
"bytes": "574445"
},
{
"name": "PLpgSQL",
"bytes": "2764"
},
{
"name": "Perl",
"bytes": "1350"
},
{
"name": "Python",
"bytes": "871238"
},
{
"name": "Shell",
"bytes": "31392"
}
],
"symlink_target": ""
} |
"""Test pywwa/__init__.py"""
# stdlin
import os
import shutil
# Local
import pywwa
def test_load_settings():
    """Test that we can load settings.

    Temporarily moves any real settings.json aside so load_config()
    exercises its fallback path, then restores it afterwards.
    """
    backup_made = os.path.isfile("settings.json")
    if backup_made:
        shutil.move("settings.json", "settings.json.save")
    settings = pywwa.load_config()
    assert isinstance(settings, dict)
    if os.path.isfile("settings.json.save"):
        shutil.move("settings.json.save", "settings.json")
| {
"content_hash": "1d274f35d9c56dace3586e1328136dd6",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 58,
"avg_line_length": 24.705882352941178,
"alnum_prop": 0.6523809523809524,
"repo_name": "akrherz/pyWWA",
"id": "4ba3e18001cdd9baee82fa3c3fa53315f5c36dd8",
"size": "420",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "207659"
},
{
"name": "Shell",
"bytes": "4472"
}
],
"symlink_target": ""
} |
# Minimal Arduino handshake over a serial port (pure I/O).
import serial
connected = False
# NOTE(review): device path and baud rate are hard-coded for one machine.
ser = serial.Serial("/dev/tty.usbmodem411", 9600)
## loop
# Wait for the first byte from the board, then acknowledge with "1".
while not connected:
    serin = ser.read()
    connected = True
# NOTE(review): passing a str to write() is Python-2 pyserial usage;
# Python 3 would require bytes (b"1") -- confirm target interpreter.
ser.write("1")
## done
# Drain the stream while the board keeps echoing '1'.
while ser.read() == '1':
    ser.read()
ser.close() | {
"content_hash": "63c5b85ee5e372ef957c9fc732caf2bd",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 49,
"avg_line_length": 13.578947368421053,
"alnum_prop": 0.5775193798449613,
"repo_name": "baird/pycom",
"id": "e6c457424b6271891a34f1c939bfbecfc72d0be2",
"size": "258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "356"
},
{
"name": "Python",
"bytes": "258"
}
],
"symlink_target": ""
} |
'''
Created on 21/01/2014
@author: Dani
'''
from ConfigParser import ConfigParser
from es.weso.util.file_writer import FileWriter
import os
class FaostatIndicatorCatcher(object):
    '''
    classdocs
    '''
    # Extracts the distinct indicator names (4th CSV field) from the first
    # FAOSTAT data file and writes them to the configured result file.
    # Python 2 code (print statement below).

    def __init__(self):
        self.config = ConfigParser()
        # Paths in configuration.ini are resolved relative to this module.
        self.config.read("../../../../config/configuration.ini")
        '''
        Constructor
        '''
        pass

    def run(self):
        # Collect the indicator names and persist the formatted report.
        wlist = self.read_csv_file_fields()
        indicators_info = self.prepare_indicators_info(wlist)
        FileWriter.write_text_to_file(indicators_info, self.config.get("INDICATOR_CATCHER", "result_file_path"))

    def prepare_indicators_info(self, wlist):
        # Format the collected names as a one-per-line indented report.
        result = "List of available indicators:\n"
        for word in wlist:
            result += "\n\t" + word
        return result

    def read_csv_file_fields(self):
        # Parse the first (and assumed only) CSV in the data directory.
        wlist = []
        csv_file = os.listdir(self.config.get("FAOSTAT", "data_file_path"))[0]
        if csv_file[-4:] != '.csv':
            raise RuntimeError("Unexpected content while looking for indicators. CSV file expected but {0} was found".format(csv_file))
        content = open(self.config.get("FAOSTAT", "data_file_path") + "/" + csv_file).readlines()
        # Skip the header row; split on ',"' -- assumes quoted CSV fields
        # with the indicator name in column 4 (index 3). TODO confirm format.
        for i in range(1,len(content)):
            line = content[i].split(",\"")
            interesting_word = line[3];
            self.treat_interesting_word(wlist, interesting_word)
        return wlist

    def treat_interesting_word(self, wlist, word):
        # Deduplicate; echo each newly seen indicator to stdout.
        if not word in wlist:
            wlist.append(word)
            print word
| {
"content_hash": "fecbdef5b33e0a31d1bebd894f7c65d4",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 135,
"avg_line_length": 29.69090909090909,
"alnum_prop": 0.5799142682180036,
"repo_name": "landportal/landbook-importers",
"id": "af449332d569d44b0447cab43ec03af4fd8ca33b",
"size": "1633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FAOSTAT-Norm_Importer/es/weso/faostat/indicator_catcher/faostat_indicator_catcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "518503"
},
{
"name": "Shell",
"bytes": "15185"
}
],
"symlink_target": ""
} |
import django_filters
from dal import autocomplete
from django.utils.translation import gettext_lazy as _
from teryt_tree.dal_ext.filters import VoivodeshipFilter, CountyFilter, CommunityFilter
from .models import Institution
class InstitutionFilter(django_filters.FilterSet):
    """FilterSet for institutions with cascading TERYT region filters
    (voivodeship -> county -> community) backed by Select2 autocompletes."""
    voivodeship = VoivodeshipFilter(
        widget=autocomplete.ModelSelect2(url="teryt:voivodeship-autocomplete")
    )
    # County choices are narrowed by the selected voivodeship (forward=...).
    county = CountyFilter(
        widget=autocomplete.ModelSelect2(
            url="teryt:county-autocomplete", forward=["voivodeship"]
        )
    )
    # Community choices are narrowed by the selected county.
    community = CommunityFilter(
        widget=autocomplete.ModelSelect2(
            url="teryt:community-autocomplete", forward=["county"]
        )
    )

    def __init__(self, *args, **kwargs):
        """Tweak auto-generated filters: substring name search and a
        Select2 multi-select widget for tags."""
        super().__init__(*args, **kwargs)
        self.filters["name"].lookup_expr = "icontains"
        self.filters["name"].label = _("Name")
        widget = autocomplete.Select2Multiple(url="institutions:tag_autocomplete")
        # TODO: Verify below on django-filter 2.2.0
        self.filters["tags"].field.widget = widget

    class Meta:
        model = Institution
        fields = ["name", "tags", "regon"]
| {
"content_hash": "64df11cf4226530f59f690d06ee71e5a",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 87,
"avg_line_length": 34.529411764705884,
"alnum_prop": 0.666098807495741,
"repo_name": "watchdogpolska/feder",
"id": "28c85d4d6748e68b39c332b4c032c2e72b1d27be",
"size": "1174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feder/institutions/filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "624"
},
{
"name": "HTML",
"bytes": "183421"
},
{
"name": "JavaScript",
"bytes": "6245"
},
{
"name": "Makefile",
"bytes": "2086"
},
{
"name": "Python",
"bytes": "574027"
},
{
"name": "SCSS",
"bytes": "40546"
},
{
"name": "Shell",
"bytes": "214"
}
],
"symlink_target": ""
} |
from pyrope.server import application, service
from pyrope.examples.helloworld import HelloWorldApplication
from pyrope.examples.addressbook import AddressBookApplication
from pyrope.examples.widgets import WidgetsApplication
from pyrope.examples.ticker import TickerApplication
from pyrope.examples.chat import ChatApplication
# Register the bundled example applications with the Pyrope service
# (add your own apps here).
service.registerApplication(HelloWorldApplication())
service.registerApplication(AddressBookApplication())
service.registerApplication(WidgetsApplication())
service.registerApplication(TickerApplication())
service.registerApplication(ChatApplication())
# Start the server's main loop (blocks).
service.startup()
| {
"content_hash": "9a96006379c7ba6d8307227f51867d5b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 62,
"avg_line_length": 39.8125,
"alnum_prop": 0.869701726844584,
"repo_name": "rgravina/Pyrope",
"id": "0993750f9c7c4ba7c4f72ca4296c0a21dcc5b94f",
"size": "697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrope/examples/demo.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import pysplash
import rq
import redis
import uuid
import time
import random
log = pysplash.log.logger()
# Fresh queue name per run so repeated runs don't share backlogs.
queue_name = str(uuid.uuid4())

def do_work():
    """Deliberately heavy rq job used to overload the worker pool;
    returns the wall-clock seconds the computation took."""
    # Let the pool spin up many workers
    time.sleep(15)
    start_time = time.time()
    # Intentionally enormous: 2**(2**30) is a ~billion-bit integer,
    # so this burns significant CPU time.
    a = 2**2**30
    end_time = time.time()
    return end_time - start_time
if __name__ == "__main__":
pysplash.log.set_debug(True)
con = redis.StrictRedis()
with rq.Connection(con):
q = rq.Queue(queue_name)
for i in range(1000):
job = q.enqueue_call(do_work)
p = pysplash.Pool(
[queue_name], scale_frequency=2., zombie_timeout=30, max_per_scale=2)
log.info("Starting pool")
p.start()
| {
"content_hash": "fb65e7410719e1297a7caa6da23bfda2",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 81,
"avg_line_length": 17.071428571428573,
"alnum_prop": 0.592747559274756,
"repo_name": "aelaguiz/pysplash",
"id": "5d99be72fb1b04b1776eb6f80351565acc94bf45",
"size": "742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/overload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22278"
}
],
"symlink_target": ""
} |
from django.contrib.gis.db import models as gis_models
from django.db import models
from poznaj.points.models import Point
class Story(models.Model):
    """A guided tour: an ordered/grouped collection of map points with a
    designated starting point and an expected duration."""
    title = models.CharField(max_length=100)
    description = models.TextField()
    duration = models.DurationField()
    # NOTE(review): no on_delete (pre-Django-2.0 style), and
    # related_name='first_point' names the *reverse* accessor on Point
    # after the field itself -- confirm that is intended.
    first_point = models.ForeignKey(Point, related_name='first_point', null=True)
    points = models.ManyToManyField(Point)
    # GeoManager enables spatial queryset lookups on this model.
    objects = gis_models.GeoManager()

    def __str__(self):
        return 'Story: {}'.format(self.title)

    def get_all_points(self):
        # All associated points (unordered M2M queryset).
        return self.points.all()
| {
"content_hash": "6b18d05d2cf55fba9af956249ffdd2dc",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 81,
"avg_line_length": 28.55,
"alnum_prop": 0.7005253940455342,
"repo_name": "KlubJagiellonski/poznaj-app-backend",
"id": "cb3386aeb84061c92e92496ac127f1c30b9200ee",
"size": "571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poznaj/stories/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1597"
},
{
"name": "Dockerfile",
"bytes": "533"
},
{
"name": "HTML",
"bytes": "1064"
},
{
"name": "Makefile",
"bytes": "1020"
},
{
"name": "Python",
"bytes": "41829"
},
{
"name": "Shell",
"bytes": "684"
}
],
"symlink_target": ""
} |
# objects hidden in the dictionary
class Property(object):
    """Descriptor that hides its value inside the owning instance's dict
    storage: the owner must be a ``dict`` subclass, and values are stored
    under ``key`` via the raw ``dict`` item protocol (bypassing any
    overridden ``__getitem__``/``__setitem__`` on the subclass)."""

    def __init__(self, key=None):
        # Dictionary key under which the attribute value is stored.
        self._key = key

    def __get__(self, obj, kls=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        return dict.__getitem__(obj, self._key)

    def __set__(self, obj, val):
        dict.__setitem__(obj, self._key, val)

    def __delete__(self, obj):
        dict.__delitem__(obj, self._key)
| {
"content_hash": "bbf119cd8a303df0f885f6c8a25ff65a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 31.615384615384617,
"alnum_prop": 0.5450121654501217,
"repo_name": "aerialhedgehog/VyPy",
"id": "c688c5f98cc79bc69c424e0b1ca5f18ddc0fd86d",
"size": "411",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "trunk/VyPy/data/Property.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "737107"
}
],
"symlink_target": ""
} |
from google.cloud import aiplatform_v1beta1
async def sample_get_annotation_spec():
    """Generated sample: fetch a single AnnotationSpec by resource name
    using the async Dataset service client and print the response."""
    # Create a client
    client = aiplatform_v1beta1.DatasetServiceAsyncClient()

    # Initialize request argument(s)
    # (placeholder resource name -- replace "name_value" with a real
    # projects/.../annotationSpecs/... path before running)
    request = aiplatform_v1beta1.GetAnnotationSpecRequest(
        name="name_value",
    )

    # Make the request
    response = await client.get_annotation_spec(request=request)

    # Handle the response
    print(response)

# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_GetAnnotationSpec_async]
| {
"content_hash": "6f4a57c8b986e402912c4affc0e88f38",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 86,
"avg_line_length": 27.473684210526315,
"alnum_prop": 0.7394636015325671,
"repo_name": "googleapis/python-aiplatform",
"id": "66bc7074f92416a9a9487aa6ae5e14adfe450016",
"size": "1553",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
"""Methods for resolving JSON types during serialization."""
import datetime
import functools
from typing import Dict, List, NamedTuple, Optional, Tuple, TYPE_CHECKING
from cirq.protocols.json_serialization import ObjectFactory
if TYPE_CHECKING:
import cirq
import cirq.ops.pauli_gates
import cirq.devices.unconstrained_device
# Needed for backwards compatible named tuples of CrossEntropyResult
# (kept as functional NamedTuple definitions so previously serialized
# objects keep resolving to these exact class names).
# One XEB data point: fidelity measured at a given cycle depth.
CrossEntropyPair = NamedTuple('CrossEntropyPair', [('num_cycle', int), ('xeb_fidelity', float)])
# One speckle-purity data point: purity measured at a given cycle depth.
SpecklePurityPair = NamedTuple('SpecklePurityPair', [('num_cycle', int), ('purity', float)])
# Full XEB result: fidelity curve, repetition count, optional purity curve.
CrossEntropyResult = NamedTuple(
    'CrossEntropyResult',
    [
        ('data', List[CrossEntropyPair]),
        ('repetitions', int),
        ('purity_data', Optional[List[SpecklePurityPair]]),
    ],
)
# Results keyed by the qubit tuple they were measured on.
CrossEntropyResultDict = NamedTuple(
    'CrossEntropyResultDict', [('results', Dict[Tuple['cirq.Qid', ...], CrossEntropyResult])]
)
@functools.lru_cache()
def _class_resolver_dictionary() -> Dict[str, ObjectFactory]:
    """Return the mapping from serialized ``cirq_type`` names to the factory
    used to reconstruct each object during JSON deserialization.

    Cached with ``lru_cache`` so the import-heavy dictionary is built only
    once per process.  The entries under "Old types" exist purely for
    backwards-compatible reading of JSON written by earlier Cirq versions.
    """
    import cirq
    from cirq.ops import raw_types
    import pandas as pd
    import numpy as np
    from cirq.devices.noise_model import _NoNoiseModel
    from cirq.experiments import GridInteractionLayer
    from cirq.experiments.grid_parallel_two_qubit_xeb import GridParallelXEBMetadata
    # --- Factories for legacy serialized types whose classes no longer exist ---
    # Rebuilds a removed BooleanHamiltonian operation as a
    # BooleanHamiltonianGate applied to the mapped qubits.
    def _boolean_hamiltonian_gate_op(qubit_map, boolean_strs, theta):
        return cirq.BooleanHamiltonianGate(
            parameter_names=list(qubit_map.keys()), boolean_strs=boolean_strs, theta=theta
        ).on(*qubit_map.values())
    # Old IdentityOperation payloads carried extra kwargs; only qubits matter.
    def _identity_operation_from_dict(qubits, **kwargs):
        return cirq.identity_each(*qubits)
    # SingleQubitMatrixGate was folded into MatrixGate; coerce to complex ndarray.
    def single_qubit_matrix_gate(matrix):
        if not isinstance(matrix, np.ndarray):
            matrix = np.array(matrix, dtype=np.complex128)
        return cirq.MatrixGate(matrix, qid_shape=(matrix.shape[0],))
    # TwoQubitMatrixGate was folded into MatrixGate with fixed (2, 2) shape.
    def two_qubit_matrix_gate(matrix):
        if not isinstance(matrix, np.ndarray):
            matrix = np.array(matrix, dtype=np.complex128)
        return cirq.MatrixGate(matrix, qid_shape=(2, 2))
    # Rebuild the backwards-compat CrossEntropyResult named tuple (see module top).
    def _cross_entropy_result(data, repetitions, **kwargs) -> CrossEntropyResult:
        purity_data = kwargs.get('purity_data', None)
        if purity_data is not None:
            purity_data = [SpecklePurityPair(d, f) for d, f in purity_data]
        return CrossEntropyResult(
            data=[CrossEntropyPair(d, f) for d, f in data],
            repetitions=repetitions,
            purity_data=purity_data,
        )
    # Keys were serialized as lists of qubits; restore them as tuples.
    def _cross_entropy_result_dict(
        results: List[Tuple[List['cirq.Qid'], CrossEntropyResult]], **kwargs
    ) -> CrossEntropyResultDict:
        return CrossEntropyResultDict(results={tuple(qubits): result for qubits, result in results})
    # ParallelGateOperation was replaced by the parallel_gate_op helper.
    def _parallel_gate_op(gate, qubits):
        return cirq.parallel_gate_op(gate, *qubits)
    def _datetime(timestamp: float) -> datetime.datetime:
        # We serialize datetimes (both with ("aware") and without ("naive") timezone information)
        # as unix timestamps. The deserialized datetime will always refer to the
        # same point in time, but will be re-constructed as a timezone-aware object.
        #
        # If `o` is a naive datetime, o != read_json(to_json(o)) because Python doesn't
        # let you compare aware and naive datetimes.
        return datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc)
    # SymmetricalQidPair was removed; a plain frozenset is equivalent.
    def _symmetricalqidpair(qids):
        return frozenset(qids)
    import sympy
    return {
        'AmplitudeDampingChannel': cirq.AmplitudeDampingChannel,
        'AnyIntegerPowerGateFamily': cirq.AnyIntegerPowerGateFamily,
        'AnyUnitaryGateFamily': cirq.AnyUnitaryGateFamily,
        'AsymmetricDepolarizingChannel': cirq.AsymmetricDepolarizingChannel,
        'BitFlipChannel': cirq.BitFlipChannel,
        'BitstringAccumulator': cirq.work.BitstringAccumulator,
        'BooleanHamiltonianGate': cirq.BooleanHamiltonianGate,
        'CCNotPowGate': cirq.CCNotPowGate,
        'CCXPowGate': cirq.CCXPowGate,
        'CCZPowGate': cirq.CCZPowGate,
        'Circuit': cirq.Circuit,
        'CircuitOperation': cirq.CircuitOperation,
        'ClassicallyControlledOperation': cirq.ClassicallyControlledOperation,
        'ClassicalDataDictionaryStore': cirq.ClassicalDataDictionaryStore,
        'CliffordGate': cirq.CliffordGate,
        'CliffordState': cirq.CliffordState,
        'CliffordTableau': cirq.CliffordTableau,
        'CNotPowGate': cirq.CNotPowGate,
        'ConstantQubitNoiseModel': cirq.ConstantQubitNoiseModel,
        'ControlledGate': cirq.ControlledGate,
        'ControlledOperation': cirq.ControlledOperation,
        'CSwapGate': cirq.CSwapGate,
        'CXPowGate': cirq.CXPowGate,
        'CZPowGate': cirq.CZPowGate,
        'CZTargetGateset': cirq.CZTargetGateset,
        'DiagonalGate': cirq.DiagonalGate,
        'DensePauliString': cirq.DensePauliString,
        'DepolarizingChannel': cirq.DepolarizingChannel,
        'DeviceMetadata': cirq.DeviceMetadata,
        'Duration': cirq.Duration,
        'FrozenCircuit': cirq.FrozenCircuit,
        'FSimGate': cirq.FSimGate,
        'GateFamily': cirq.GateFamily,
        'GateOperation': cirq.GateOperation,
        'Gateset': cirq.Gateset,
        'GeneralizedAmplitudeDampingChannel': cirq.GeneralizedAmplitudeDampingChannel,
        'GlobalPhaseGate': cirq.GlobalPhaseGate,
        'GridDeviceMetadata': cirq.GridDeviceMetadata,
        'GridInteractionLayer': GridInteractionLayer,
        'GridParallelXEBMetadata': GridParallelXEBMetadata,
        'GridQid': cirq.GridQid,
        'GridQubit': cirq.GridQubit,
        'HPowGate': cirq.HPowGate,
        'ISwapPowGate': cirq.ISwapPowGate,
        'IdentityGate': cirq.IdentityGate,
        'InitObsSetting': cirq.work.InitObsSetting,
        'KeyCondition': cirq.KeyCondition,
        'KrausChannel': cirq.KrausChannel,
        'LinearDict': cirq.LinearDict,
        'LineQubit': cirq.LineQubit,
        'LineQid': cirq.LineQid,
        'LineTopology': cirq.LineTopology,
        'Linspace': cirq.Linspace,
        'ListSweep': cirq.ListSweep,
        'MatrixGate': cirq.MatrixGate,
        'MixedUnitaryChannel': cirq.MixedUnitaryChannel,
        'MeasurementKey': cirq.MeasurementKey,
        'MeasurementGate': cirq.MeasurementGate,
        'MeasurementType': cirq.MeasurementType,
        '_MeasurementSpec': cirq.work._MeasurementSpec,
        'Moment': cirq.Moment,
        'MutableDensePauliString': cirq.MutableDensePauliString,
        'MutablePauliString': cirq.MutablePauliString,
        '_NoNoiseModel': _NoNoiseModel,
        'NamedQubit': cirq.NamedQubit,
        'NamedQid': cirq.NamedQid,
        'NoIdentifierQubit': cirq.testing.NoIdentifierQubit,
        'ObservableMeasuredResult': cirq.work.ObservableMeasuredResult,
        'OpIdentifier': cirq.OpIdentifier,
        'ParamResolver': cirq.ParamResolver,
        'ParallelGate': cirq.ParallelGate,
        'ParallelGateFamily': cirq.ParallelGateFamily,
        'PauliInteractionGate': cirq.PauliInteractionGate,
        'PauliMeasurementGate': cirq.PauliMeasurementGate,
        'PauliString': cirq.PauliString,
        'PauliStringPhasor': cirq.PauliStringPhasor,
        'PauliStringPhasorGate': cirq.PauliStringPhasorGate,
        'PauliSum': cirq.PauliSum,
        '_PauliX': cirq.ops.pauli_gates._PauliX,
        '_PauliY': cirq.ops.pauli_gates._PauliY,
        '_PauliZ': cirq.ops.pauli_gates._PauliZ,
        'PhaseDampingChannel': cirq.PhaseDampingChannel,
        'PhaseFlipChannel': cirq.PhaseFlipChannel,
        'PhaseGradientGate': cirq.PhaseGradientGate,
        'PhasedFSimGate': cirq.PhasedFSimGate,
        'PhasedISwapPowGate': cirq.PhasedISwapPowGate,
        'PhasedXPowGate': cirq.PhasedXPowGate,
        'PhasedXZGate': cirq.PhasedXZGate,
        'Points': cirq.Points,
        'Product': cirq.Product,
        'ProductState': cirq.ProductState,
        'ProductOfSums': cirq.ProductOfSums,
        'ProjectorString': cirq.ProjectorString,
        'ProjectorSum': cirq.ProjectorSum,
        'QasmUGate': cirq.circuits.qasm_output.QasmUGate,
        '_QubitAsQid': raw_types._QubitAsQid,
        'QuantumFourierTransformGate': cirq.QuantumFourierTransformGate,
        'QubitPermutationGate': cirq.QubitPermutationGate,
        'RandomGateChannel': cirq.RandomGateChannel,
        'RepetitionsStoppingCriteria': cirq.work.RepetitionsStoppingCriteria,
        'ResetChannel': cirq.ResetChannel,
        'Result': cirq.ResultDict,  # Keep support for Cirq < 0.14.
        'ResultDict': cirq.ResultDict,
        'RoutingSwapTag': cirq.RoutingSwapTag,
        'Rx': cirq.Rx,
        'Ry': cirq.Ry,
        'Rz': cirq.Rz,
        'SingleQubitCliffordGate': cirq.SingleQubitCliffordGate,
        'SingleQubitPauliStringGateOperation': cirq.SingleQubitPauliStringGateOperation,
        'SingleQubitReadoutCalibrationResult': cirq.experiments.SingleQubitReadoutCalibrationResult,
        'SqrtIswapTargetGateset': cirq.SqrtIswapTargetGateset,
        'StabilizerStateChForm': cirq.StabilizerStateChForm,
        'StatePreparationChannel': cirq.StatePreparationChannel,
        'SumOfProducts': cirq.SumOfProducts,
        'SwapPowGate': cirq.SwapPowGate,
        'SympyCondition': cirq.SympyCondition,
        'TaggedOperation': cirq.TaggedOperation,
        'TensoredConfusionMatrices': cirq.TensoredConfusionMatrices,
        'TiltedSquareLattice': cirq.TiltedSquareLattice,
        'ThreeQubitDiagonalGate': cirq.ThreeQubitDiagonalGate,
        'TrialResult': cirq.ResultDict,  # keep support for Cirq < 0.11.
        'TwoQubitDiagonalGate': cirq.TwoQubitDiagonalGate,
        'TwoQubitGateTabulation': cirq.TwoQubitGateTabulation,
        '_UnconstrainedDevice': cirq.devices.unconstrained_device._UnconstrainedDevice,
        '_Unit': cirq.study.sweeps._Unit,
        'VarianceStoppingCriteria': cirq.work.VarianceStoppingCriteria,
        'VirtualTag': cirq.VirtualTag,
        'WaitGate': cirq.WaitGate,
        # The formatter keeps putting this back
        # pylint: disable=line-too-long
        'XEBPhasedFSimCharacterizationOptions': cirq.experiments.XEBPhasedFSimCharacterizationOptions,
        # pylint: enable=line-too-long
        '_XEigenState': cirq.value.product_state._XEigenState,
        'XPowGate': cirq.XPowGate,
        'XXPowGate': cirq.XXPowGate,
        '_YEigenState': cirq.value.product_state._YEigenState,
        'YPowGate': cirq.YPowGate,
        'YYPowGate': cirq.YYPowGate,
        '_ZEigenState': cirq.value.product_state._ZEigenState,
        'Zip': cirq.Zip,
        'ZPowGate': cirq.ZPowGate,
        'ZZPowGate': cirq.ZZPowGate,
        # Old types, only supported for backwards-compatibility
        'BooleanHamiltonian': _boolean_hamiltonian_gate_op,  # Removed in v0.15
        'CrossEntropyResult': _cross_entropy_result,  # Removed in v0.16
        'CrossEntropyResultDict': _cross_entropy_result_dict,  # Removed in v0.16
        'IdentityOperation': _identity_operation_from_dict,
        'ParallelGateOperation': _parallel_gate_op,  # Removed in v0.14
        'SingleQubitMatrixGate': single_qubit_matrix_gate,
        'SymmetricalQidPair': _symmetricalqidpair,  # Removed in v0.15
        'TwoQubitMatrixGate': two_qubit_matrix_gate,
        'GlobalPhaseOperation': cirq.global_phase_operation,  # Removed in v0.16
        # not a cirq class, but treated as one:
        'pandas.DataFrame': pd.DataFrame,
        'pandas.Index': pd.Index,
        'pandas.MultiIndex': pd.MultiIndex.from_tuples,
        'sympy.Symbol': sympy.Symbol,
        'sympy.Add': lambda args: sympy.Add(*args),
        'sympy.Mul': lambda args: sympy.Mul(*args),
        'sympy.Pow': lambda args: sympy.Pow(*args),
        'sympy.GreaterThan': lambda args: sympy.GreaterThan(*args),
        'sympy.StrictGreaterThan': lambda args: sympy.StrictGreaterThan(*args),
        'sympy.LessThan': lambda args: sympy.LessThan(*args),
        'sympy.StrictLessThan': lambda args: sympy.StrictLessThan(*args),
        'sympy.Equality': lambda args: sympy.Equality(*args),
        'sympy.Unequality': lambda args: sympy.Unequality(*args),
        'sympy.Float': lambda approx: sympy.Float(approx),
        'sympy.Integer': sympy.Integer,
        'sympy.Rational': sympy.Rational,
        'sympy.pi': lambda: sympy.pi,
        'sympy.E': lambda: sympy.E,
        'sympy.EulerGamma': lambda: sympy.EulerGamma,
        'complex': complex,
        'datetime.datetime': _datetime,
    }
| {
"content_hash": "7b28d5ad2c793be574830357573ab080",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 102,
"avg_line_length": 46.95849056603774,
"alnum_prop": 0.6820154291224687,
"repo_name": "quantumlib/Cirq",
"id": "6a2d4f69c7cd502e0ffdee7e6c918592b3932b8a",
"size": "13028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cirq-core/cirq/json_resolver_cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4616"
},
{
"name": "HTML",
"bytes": "262"
},
{
"name": "JavaScript",
"bytes": "660"
},
{
"name": "Jupyter Notebook",
"bytes": "672675"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "8643017"
},
{
"name": "Scilab",
"bytes": "735"
},
{
"name": "Shell",
"bytes": "64230"
},
{
"name": "TypeScript",
"bytes": "91766"
}
],
"symlink_target": ""
} |
import threading
import unittest
import mock
import numpy
import six
import chainer
from chainer import cuda
import chainer.functions as F
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
class TestFunction(unittest.TestCase):
    """Tests for the old-style ``chainer.Function`` dispatch machinery.

    The device-specific hooks (``forward_cpu``/``forward_gpu``/...) are
    replaced by MagicMocks so the tests can assert exactly which hook was
    invoked and with what arguments.
    """
    def _get_method(self, prefix, gpu):
        # Pick the cpu or gpu flavor of a mocked hook, e.g.
        # ('forward', True) -> self.f.forward_gpu.
        suffix = 'gpu' if gpu else 'cpu'
        return getattr(self.f, prefix + '_' + suffix)
    def setUp(self):
        # Fixture arrays: y* are forward outputs, gx* backward outputs,
        # gy* upstream gradients.  gx2 is None to exercise a missing grad.
        y1 = numpy.arange(4).astype(numpy.float32)
        y2 = numpy.arange(4).astype(numpy.float32) + 1
        gx1 = numpy.arange(3).astype(numpy.float32)
        gx2 = None
        gy1 = numpy.arange(4).astype(numpy.float32)
        gy2 = numpy.arange(4).astype(numpy.float32)
        f = chainer.Function()
        f.check_type_forward = mock.MagicMock()
        f.forward_cpu = mock.MagicMock(return_value=(y1, y2))
        f.forward_gpu = mock.MagicMock()
        f.backward_cpu = mock.MagicMock(return_value=(gx1, gx2))
        f.backward_gpu = mock.MagicMock()
        self.f = f
        self.x1 = numpy.arange(3).astype(numpy.float32)
        self.x2 = numpy.arange(3).astype(numpy.int32)
        self.y1 = y1
        self.y2 = y2
        self.gx1 = gx1
        self.gx2 = gx2
        self.gy1 = gy1
        self.gy2 = gy2
    def tearDown(self):
        # Set None to delete cuda array
        self.f = None
        self.y1 = None
        self.y2 = None
        self.gx1 = None
    def setup_gpu(self, device=0):
        # Move all fixture arrays to the given GPU and re-mock the gpu hooks
        # so they return device arrays.
        self.x1 = cuda.to_gpu(self.x1, device)
        self.x2 = cuda.to_gpu(self.x2, device)
        self.y1 = cuda.to_gpu(self.y1, device)
        self.y2 = cuda.to_gpu(self.y2, device)
        self.gx1 = cuda.to_gpu(self.gx1, device)
        self.gx2 = None
        self.gy1 = cuda.to_gpu(self.gy1, device)
        self.gy2 = cuda.to_gpu(self.gy2, device)
        self.f.forward_gpu = mock.MagicMock(return_value=(self.y1, self.y2))
        self.f.backward_gpu = mock.MagicMock(return_value=(self.gx1, self.gx2))
    def check_forward(self, gpu):
        # Calling forward() directly must dispatch to exactly one device hook
        # and must NOT run type checking (that happens in __call__).
        y1, y2 = self.f.forward((self.x1, self.x2))
        self.assertEqual(self.f.check_type_forward.call_count, 0)
        self.assertEqual(self._get_method('forward', not gpu).call_count, 0)
        self._get_method('forward', gpu).assert_called_once_with(
            (self.x1, self.x2))
        self.assertTrue((cuda.to_cpu(y1) == cuda.to_cpu(self.y1)).all())
        self.assertTrue((cuda.to_cpu(y2) == cuda.to_cpu(self.y2)).all())
    def test_forward_cpu(self):
        self.check_forward(False)
    @attr.gpu
    def test_forward_gpu(self):
        self.setup_gpu()
        self.check_forward(True)
    def check_backward(self, gpu):
        # backward() must dispatch to exactly one device hook as well.
        gx1, gx2 = self.f.backward((self.x1, self.x2), (self.gy1, self.gy2))
        self.assertEqual(self._get_method('backward', not gpu).call_count, 0)
        self._get_method('backward', gpu).assert_called_once_with(
            (self.x1, self.x2), (self.gy1, self.gy2))
        self.assertTrue((cuda.to_cpu(gx1) == cuda.to_cpu(self.gx1)).all())
        self.assertIsNone(gx2)
    def test_backward_cpu(self):
        self.check_backward(False)
    @attr.gpu
    def test_backward_gpu(self):
        self.setup_gpu()
        self.check_backward(True)
    def check_check_type_forward(self):
        # __call__ must run type checking once, passing light type info
        # describing both inputs.
        self.assertEqual(self.f.check_type_forward.call_count, 1)
        ts = self.f.check_type_forward.call_args[0][0]
        self.assertIsInstance(ts, type_check.LightTypeInfoTuple)
        self.assertEqual(len(ts), 2)
        t1 = ts[0]
        self.assertEqual(t1.shape, (3,))
        self.assertEqual(t1.dtype, numpy.float32)
        t2 = ts[1]
        self.assertEqual(t2.shape, (3,))
        self.assertEqual(t2.dtype, numpy.int32)
    def check_call(self, check_backward=False):
        # Full __call__ path with Variable inputs of ranks 1 and 3.
        x1 = chainer.Variable(self.x1)
        x2 = chainer.Variable(self.x2)
        x1._node._rank = 1
        x2._node._rank = 3
        ys = self.f(x1, x2)
        self.assertEqual(len(ys), 2)
        self.check_check_type_forward()
        for y in ys:
            self.assertIsInstance(y, chainer.Variable)
            # rank is (maximum rank in xs) + 1
            self.assertEqual(y.rank, 4)
            self.assertIs(y.creator, self.f)
            self.assertTrue(y.requires_grad)
        self.assertIsInstance(y.creator.outputs, tuple)
        if check_backward:
            ys[0].creator_node.backward((0, 1), (self.gy1, self.gy2))
    def test_call_cpu(self):
        self.check_call()
    @attr.gpu
    def test_call_gpu(self):
        self.setup_gpu()
        self.check_call()
    @attr.multi_gpu(2)
    def test_call_another_gpu(self):
        # forward/backward must run with the input's device (GPU 1) current.
        device = 1
        self.setup_gpu(device)
        test_case = self
        def check_current_device(ret):
            # Wrap a hook so it asserts the active CUDA device before
            # returning the canned result.
            def meth(self, *args, **kwargs):
                current_device = cuda.cupy.cuda.Device().id
                test_case.assertEqual(current_device, device)
                return ret
            return meth
        self.f.forward = check_current_device((self.y1, self.y2))
        self.f.backward = check_current_device((self.gx1, self.gx2))
        self.check_call(check_backward=True)
    def check_call_all_ndarray(self):
        # Raw ndarray inputs (no Variables) produce non-differentiable outputs.
        x1 = self.x1
        x2 = self.x2
        ys = self.f(x1, x2)
        self.assertEqual(len(ys), 2)
        self.check_check_type_forward()
        for y in ys:
            self.assertIsInstance(y, chainer.Variable)
            self.assertIsInstance(y.data, type(x1))
            self.assertFalse(y.requires_grad)
    def test_call_all_ndarray_cpu(self):
        self.check_call_all_ndarray()
    @attr.gpu
    def test_call_all_ndarray_gpu(self):
        self.setup_gpu()
        self.check_call_all_ndarray()
    def check_call_ndarray(self):
        # Mixed Variable/ndarray inputs: rank comes from the Variable input.
        x1 = chainer.Variable(self.x1)
        x2 = self.x2
        x1._node._rank = 1
        ys = self.f(x1, x2)
        self.assertEqual(len(ys), 2)
        self.check_check_type_forward()
        for y in ys:
            self.assertIsInstance(y, chainer.Variable)
            # rank is (maximum rank in xs) + 1
            self.assertEqual(y.rank, 2)
            self.assertIs(y.creator, self.f)
            self.assertTrue(y.requires_grad)
        self.assertIsInstance(y.creator.outputs, tuple)
    def test_call_ndarray_cpu(self):
        self.check_call_ndarray()
    @attr.gpu
    def test_call_ndarray_gpu(self):
        self.setup_gpu()
        self.check_call_ndarray()
    def check_call_single_return_value(self):
        # When forward returns a 1-tuple, __call__ unwraps it to a Variable.
        x1 = chainer.Variable(self.x1)
        x2 = chainer.Variable(self.x2)
        ret = self.f(x1, x2)
        self.assertIsInstance(ret, chainer.Variable)
    def test_call_single_return_value_cpu(self):
        self.f.forward_cpu.return_value = (cuda.to_cpu(self.y1),)
        self.check_call_single_return_value()
    @attr.gpu
    def test_call_single_return_value_gpu(self):
        self.setup_gpu()
        self.f.forward_gpu.return_value = (cuda.to_gpu(self.y1),)
        self.check_call_single_return_value()
    def _get_f(self):
        x1 = chainer.Variable(self.x1)
        x2 = chainer.Variable(self.x2)
        y1, y2 = self.f(x1, x2)
        f = y1.creator
        # To test weak reference, return only x1 and y1.
        # x2 and y2 are deleted by the garbage collector
        return f, x1, y1
    def test_unchain(self):
        f, _x1, _y1 = self._get_f()
        y1, y2 = f.outputs
        f.unchain()
        # As _y1 is alive, this weak ref is also alive
        y1_ref = y1()
        self.assertIsNotNone(y1_ref)
        self.assertIsNone(y1_ref.creator)
        # This weak ref is dead by unchain
        y2_ref = y2()
        self.assertIsNone(y2_ref)
        self.assertIsNone(f.inputs)
    def test_label(self):
        self.assertEqual(self.f.label, 'Function')
class TestFunctionBackwardIntegration(unittest.TestCase):
    """Gradients must accumulate across a two-branch identity graph."""
    def test_backward(self):
        source = chainer.Variable(numpy.array([1]), name='x')
        branch_a = F.identity(source)
        branch_a.name = 'y1'
        branch_b = F.identity(source)
        branch_b.name = 'y2'
        total = branch_a + branch_b
        total.name = 'z'
        total.grad = numpy.array([1])
        total.backward(retain_grad=True)
        # Each branch receives the upstream gradient unchanged; the shared
        # source accumulates both contributions.
        for branch in (branch_a, branch_b):
            self.assertEqual(branch.grad[0], 1)
        self.assertEqual(source.grad[0], 2)
class TestFunctionInvalidType(unittest.TestCase):
    """Type-check failures in forward must raise InvalidType with a
    readable expectation/actual message."""
    def test_forward_invalid1(self):
        class Function(chainer.Function):
            # Declares the contract: single float32 input with ndim >= 2.
            def check_type_forward(self, in_types):
                x_type, = in_types
                type_check.expect(
                    x_type.dtype == numpy.float32,
                    x_type.ndim >= 2,
                )
            def forward(self, inputs):
                return inputs
        f = Function()
        # OK
        v = chainer.Variable(numpy.random.randn(1, 5).astype(numpy.float32))
        result = f(v)
        assert isinstance(result, chainer.Variable)
        # Incorrect dtype
        # in py3, numpy dtypes are represented as class
        msg = """\
Invalid operation is performed in: Function \\(Forward\\)
Expect: in_types\\[0\\]\\.dtype == <(type|class) 'numpy\\.float32'>
Actual: float64 \\!= <(type|class) 'numpy\\.float32'>"""
        v = chainer.Variable(numpy.random.randn(1, 5))
        with six.assertRaisesRegex(self, chainer.utils.type_check.InvalidType,
                                   msg):
            f(v)
        # Incorrect dim
        msg = """\
Invalid operation is performed in: Function \\(Forward\\)
Expect: in_types\\[0\\]\\.ndim >= 2
Actual: 1 < 2"""
        v = chainer.Variable(numpy.random.randn(5).astype(numpy.float32))
        with six.assertRaisesRegex(self, chainer.utils.type_check.InvalidType,
                                   msg):
            f(v)
@testing.parameterize(
    {'return_value': (numpy.array([float('nan')], numpy.float32),),
     'valid': False},
    {'return_value': (numpy.array([1], numpy.int32),), 'valid': True},
)
class TestFunctionForwardDebug(unittest.TestCase):
    """Debug mode must flag NaN values produced by forward."""
    def setUp(self):
        # Remember the global debug flag so tearDown can restore it.
        self.original_debug = chainer.is_debug()
        chainer.set_debug(True)
        self.one = numpy.array([1], numpy.float32)
        self.f = chainer.Function()
    def tearDown(self):
        chainer.set_debug(self.original_debug)
    def check_debug_forward(self, x_data):
        variable = chainer.Variable(x_data)
        if not self.valid:
            # A NaN output must trigger RuntimeError under debug mode.
            with self.assertRaises(RuntimeError):
                self.f(variable)
        else:
            # A clean output must pass through silently.
            self.f(variable)
    def test_debug_forward_cpu(self):
        self.f.forward_cpu = mock.MagicMock(return_value=self.return_value)
        self.check_debug_forward(self.one)
    @attr.gpu
    def test_debug_forward_gpu(self):
        moved = tuple(None if arr is None else cuda.to_gpu(arr)
                      for arr in self.return_value)
        self.f.forward_gpu = mock.MagicMock(return_value=moved)
        self.check_debug_forward(cuda.to_gpu(self.one))
@testing.parameterize(
    {'return_value': (numpy.array([float('nan')], numpy.float32),),
     'valid': False},
    {'return_value': (None,), 'valid': True},
)
class TestFunctionBackwardDebug(unittest.TestCase):
    """Debug mode must flag NaN values produced by backward."""
    def setUp(self):
        # Remember the global debug flag so tearDown can restore it.
        self.original_debug = chainer.is_debug()
        chainer.set_debug(True)
        self.one = numpy.array([1], numpy.float32)
        self.f = chainer.Function()
    def tearDown(self):
        chainer.set_debug(self.original_debug)
    def check_debug_backward(self, *xs_data):
        variables = [chainer.Variable(data) for data in xs_data]
        out = self.f(*variables)
        if not self.valid:
            # A NaN gradient must trigger RuntimeError under debug mode.
            with self.assertRaises(RuntimeError):
                out.backward()
        else:
            # backward must complete without raising.
            out.backward()
    def test_debug_backward_cpu(self):
        self.f.forward_cpu = mock.MagicMock(return_value=(self.one,))
        self.f.backward_cpu = mock.MagicMock(return_value=self.return_value)
        inputs = (self.one,) * len(self.return_value)
        self.check_debug_backward(*inputs)
    @attr.gpu
    def test_debug_backward_gpu(self):
        self.f.forward_gpu = mock.MagicMock(
            return_value=(cuda.to_gpu(self.one),))
        moved = tuple(None if arr is None else cuda.to_gpu(arr)
                      for arr in self.return_value)
        inputs = (cuda.to_gpu(self.one),) * len(self.return_value)
        self.f.backward_gpu = mock.MagicMock(return_value=moved)
        self.check_debug_backward(*inputs)
class TestNoBackpropMode(unittest.TestCase):
    """Graph construction must be suppressed inside no_backprop_mode and
    re-enabled by force_backprop_mode.

    Uses ``assertIsNone``/``assertIsNotNone`` instead of
    ``assertTrue(x is None)`` so a failure reports the offending value.
    """
    def setUp(self):
        self.x = chainer.Variable(numpy.array([1.], 'f'))
    def test_no_backprop_mode(self):
        # Outside the context a creator node is recorded.
        y = self.x + 1
        self.assertIsNotNone(y.creator_node)
        # Inside, graph construction is skipped.
        with chainer.no_backprop_mode():
            y = self.x + 1
        self.assertIsNone(y.creator_node)
        # Leaving the context restores normal behavior.
        y = self.x + 1
        self.assertIsNotNone(y.creator_node)
    def test_force_backprop_mode(self):
        # force_backprop_mode overrides an enclosing no_backprop_mode.
        with chainer.no_backprop_mode():
            with chainer.force_backprop_mode():
                y = self.x + 1
        self.assertIsNotNone(y.creator_node)
        y = self.x + 1
        self.assertIsNotNone(y.creator_node)
        # force_backprop_mode is a no-op when backprop is already enabled.
        with chainer.force_backprop_mode():
            y = self.x + 1
        self.assertIsNotNone(y.creator_node)
class MyThread(threading.Thread):
    """Worker thread that records whether no_backprop_mode suppressed
    graph construction in its own thread."""
    def run(self):
        v = chainer.Variable(numpy.array([1], dtype='f'))
        with chainer.no_backprop_mode():
            out = v + 1
        self.creator_is_none = out.creator is None
class TestBackpropModeMultiThread(unittest.TestCase):
    """no_backprop_mode must be effective inside a separate thread."""
    def test_multi_thread(self):
        worker = MyThread()
        worker.start()
        worker.join()
        self.assertTrue(worker.creator_is_none)
class FunctionWithRetaining(chainer.Function):
    """Test double: retains only index-1 input/output and records what
    backward receives in ``backward_inputs``."""
    def forward(self, inputs):
        self.retain_inputs([1])
        self.retain_outputs([1])
        return inputs
    def backward(self, inputs, grad_outputs):
        # Save the tuple the framework reconstructed from retained data so
        # the test can inspect it after backward has run.
        self.backward_inputs = inputs
        return grad_outputs
class TestFunctionRetaining(unittest.TestCase):
    """retain_inputs/retain_outputs must keep exactly the requested arrays
    alive once outside references are dropped."""
    def setUp(self):
        inputs = [chainer.Variable(numpy.array([1], dtype=numpy.float32)),
                  chainer.Variable(numpy.array([1], dtype=numpy.float32))]
        self.input_data = [x.data for x in inputs]
        self.input_nodes = [x.node for x in inputs]
        self.f1 = FunctionWithRetaining()
        outputs = self.f1(*inputs)
        outputs[0].grad = numpy.array([1], dtype=numpy.float32)
        outputs[0].backward()
        self.f1_output_data = [y.data for y in outputs]
        self.f1_output_nodes = [y.node for y in outputs]
        # Drop the local reference so the garbage collector can reclaim the
        # non-retained input's data before the assertions run.
        inputs = None  # release non-retained inputs
    def test_retain_inputs(self):
        # Only index 1 was retained; index 0's data must have been released.
        self.assertEqual([x.data for x in self.input_nodes],
                         [None, self.input_data[1]])
        self.assertEqual(tuple(x.data for x in self.input_nodes),
                         self.f1.backward_inputs)
    def test_retain_outputs_f1(self):
        # Likewise only output index 1 survives.
        self.assertEqual([y.data for y in self.f1_output_nodes],
                         [None, self.f1_output_data[1]])
        self.assertEqual(tuple(y.data for y in self.f1_output_nodes),
                         self.f1.output_data)
# Delegate to Chainer's test harness when this module is executed directly.
testing.run_module(__name__, __file__)
| {
"content_hash": "3200fba0ad4c64a790a3643a2b4ed0d9",
"timestamp": "",
"source": "github",
"line_count": 491,
"max_line_length": 79,
"avg_line_length": 31.105906313645622,
"alnum_prop": 0.5881621161526878,
"repo_name": "kashif/chainer",
"id": "650c587a5e4682cbc547398da5e23b181c49d57f",
"size": "15273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/test_function.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2730306"
}
],
"symlink_target": ""
} |
from os.path import exists
from setuptools import setup
import chest
def _read_long_description():
    """Return the README contents for PyPI, or '' when README.rst is absent."""
    if not exists('README.rst'):
        return ''
    # Use a context manager so the file handle is closed deterministically;
    # the original bare open().read() leaked it until interpreter exit.
    with open('README.rst') as readme:
        return readme.read()
setup(name='chest',
      version=chest.__version__,
      description='Simple on-disk dictionary',
      url='http://github.com/mrocklin/chest/',
      author='https://raw.github.com/mrocklin/chest/master/AUTHORS.md',
      maintainer='Matthew Rocklin',
      maintainer_email='mrocklin@gmail.com',
      license='BSD',
      keywords='dictionary out-of-core',
      packages=['chest'],
      long_description=_read_long_description(),
      zip_safe=False)
| {
"content_hash": "233319205e071e21b7f650b40415b3b8",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 73,
"avg_line_length": 34.588235294117645,
"alnum_prop": 0.6360544217687075,
"repo_name": "cpcloud/chest",
"id": "2397eb3749954de6e4903cb7bc855f15c538c221",
"size": "611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11688"
}
],
"symlink_target": ""
} |
""" Core functions for
- Uniform spanning trees
* :func:`ust_sampler_wilson`
* :func:`ust_sampler_aldous_broder`:
- Descent processes :class:`Descent`:
* :func:`uniform_permutation`
- :class:`PoissonizedPlancherel` measure
* :func:`uniform_permutation`
    * :func:`RSK`: Robinson-Schensted-Knuth correspondence
* :func:`xy_young_ru` young diagram -> russian convention coordinates
* :func:`limit_shape`
.. seealso::
`Documentation on ReadTheDocs <https://dppy.readthedocs.io/en/latest/exotic_dpps/index.html>`_
"""
import functools # used for decorators to pass docstring
import numpy as np
from itertools import chain # create graph edges from path
# For class PoissonizedPlancherel
from bisect import bisect_right # for RSK
from dppy.utils import check_random_state
def ust_sampler_wilson(list_of_neighbors, root=None,
                       random_state=None):
    """Sample a uniform spanning tree via Wilson's loop-erased random walk.

    :param list_of_neighbors:
        adjacency list; entry ``i`` lists the neighbors of node ``i``
    :param root:
        node the tree is grown from; a uniformly random node when falsy
    :param random_state:
        seed or generator forwarded to :func:`check_random_state`
    :return:
        :class:`networkx.Graph` whose edges form the sampled spanning tree
    :raises ValueError:
        when networkx is not installed
    """
    try:
        import networkx as nx
    except ImportError:
        raise ValueError('The networkx package is required to sample spanning trees (see setup.py).')
    rng = check_random_state(random_state)
    # Initialize the tree
    wilson_tree_graph = nx.Graph()
    nb_nodes = len(list_of_neighbors)
    # Initialize the root, if root not specified start from any node
    # NOTE(review): ``root=0`` is falsy and is replaced by a random node —
    # confirm that callers never pass node 0 explicitly.
    n0 = root if root else rng.choice(nb_nodes)  # size=1)[0]
    # -1 = not visited / 0 = in path / 1 = in tree
    state = -np.ones(nb_nodes, dtype=int)
    state[n0] = 1
    nb_nodes_in_tree = 1
    path, branches = [], []  # branches of tree, temporary path
    while nb_nodes_in_tree < nb_nodes:  # |Tree| = |V| - 1
        # visit a neighbor of n0 uniformly at random
        n1 = rng.choice(list_of_neighbors[n0])  # size=1)[0]
        if state[n1] == -1:  # not visited => continue the walk
            path.append(n1)  # add it to the path
            state[n1] = 0  # mark it as in the path
            n0 = n1  # continue the walk
        elif state[n1] == 0:  # loop on the path => erase the loop
            # Fix: this used to be a plain ``if`` that also ran right after
            # the branch above, performing a provably no-op loop erasure
            # (the freshly appended node is the last element of ``path``);
            # ``elif`` skips that redundant O(|path|) ``index`` scan.
            knot = path.index(n1)  # find 1st appearance of n1 in the path
            nodes_loop = path[knot + 1:]  # identify nodes forming the loop
            del path[knot + 1:]  # erase the loop
            state[nodes_loop] = -1  # mark loopy nodes as not visited
            n0 = n1  # continue the walk
        elif state[n1] == 1:  # hits the tree => new branch
            if nb_nodes_in_tree == 1:
                branches.append([n1] + path)  # initial branch of the tree
            else:
                branches.append(path + [n1])  # path as a new branch
            state[path] = 1  # mark nodes in path as in the tree
            nb_nodes_in_tree += len(path)
            # Restart the walk from a random node among those not visited
            nodes_not_visited = np.where(state == -1)[0]
            if nodes_not_visited.size:
                n0 = rng.choice(nodes_not_visited)  # size=1)[0]
                path = [n0]
    # Consecutive nodes of each branch become the tree's edges.
    tree_edges = list(chain.from_iterable(map(lambda x: zip(x[:-1], x[1:]),
                                              branches)))
    wilson_tree_graph.add_edges_from(tree_edges)
    return wilson_tree_graph
def ust_sampler_aldous_broder(list_of_neighbors, root=None,
                              random_state=None):
    """Sample a uniform spanning tree via the Aldous-Broder random walk:
    each first-entrance edge of a simple random walk joins the tree.

    :param list_of_neighbors:
        adjacency list; entry ``i`` lists the neighbors of node ``i``
    :param root:
        node the walk starts from; a uniformly random node when falsy
    :param random_state:
        seed or generator forwarded to :func:`check_random_state`
    :return:
        :class:`networkx.Graph` whose edges form the sampled spanning tree
    :raises ValueError:
        when networkx is not installed
    """
    try:
        import networkx as nx
    except ImportError:
        raise ValueError('The networkx package is required to sample spanning trees (see setup.py).')
    rng = check_random_state(random_state)
    # Initialize the tree
    aldous_tree_graph = nx.Graph()
    nb_nodes = len(list_of_neighbors)
    # Initialize the root, if root not specified start from any node
    # NOTE(review): ``root=0`` is falsy and is replaced by a random node —
    # confirm that callers never pass node 0 explicitly.
    n0 = root if root else rng.choice(nb_nodes)  # size=1)[0]
    visited = np.zeros(nb_nodes, dtype=bool)
    visited[n0] = True
    nb_nodes_in_tree = 1
    # Fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24
    # (AttributeError at runtime); the builtin ``int`` yields the same
    # platform-default integer dtype.
    tree_edges = np.zeros((nb_nodes - 1, 2), dtype=int)
    while nb_nodes_in_tree < nb_nodes:
        # visit a neighbor of n0 uniformly at random
        n1 = rng.choice(list_of_neighbors[n0])  # size=1)[0]
        if not visited[n1]:
            # first entrance => record edge (n0, n1) in the tree
            tree_edges[nb_nodes_in_tree - 1] = [n0, n1]
            visited[n1] = True  # mark it as in the tree
            nb_nodes_in_tree += 1
        n0 = n1  # continue the walk in either case
    aldous_tree_graph.add_edges_from(tree_edges)
    return aldous_tree_graph
def uniform_permutation(N, random_state=None):
    """Draw a permutation :math:`\\sigma \\in \\mathfrak{S}_N` uniformly at random using Fisher-Yates' algorithm
    .. seealso::
        - `Fisher-Yates shuffle <https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle>`_
    """
    rng = check_random_state(random_state)
    perm = np.arange(N)
    # Walk from the right end; swap each position with a uniformly chosen
    # index at or before it.
    for right in range(N - 1, 0, -1):
        pick = rng.randint(0, right + 1)
        # A self-swap would be a no-op; skip it.
        if pick != right:
            perm[pick], perm[right] = perm[right], perm[pick]
    return perm
def RSK(sequence):
    """Apply the Robinson-Schensted-Knuth correspondence to a sequence of
    reals (e.g. a permutation).

    :param sequence:
        Sequence of real numbers
    :type sequence:
        array_like
    :return:
        :math:`P, Q` insertion and recording tableaux
    :rtype:
        list
    .. seealso::
        `RSK Wikipedia <https://en.wikipedia.org/wiki/Robinson%E2%80%93Schensted%E2%80%93Knuth_correspondence>`_
    """
    insertion, recording = [], []  # P and Q tableaux
    for step, value in enumerate(sequence, start=1):
        placed = False
        # Bump ``value`` down the rows until it fits at the end of one.
        for p_row, q_row in zip(insertion, recording):
            if value >= p_row[-1]:
                # Fits at the end of this row: insert and record the step.
                p_row.append(value)
                q_row.append(step)
                placed = True
                break
            # Otherwise displace the smallest entry larger than ``value``
            # and carry it to the next row.
            slot = bisect_right(p_row, value)
            value, p_row[slot] = p_row[slot], value
        if not placed:
            # Fell off the bottom: start a new row in both tableaux.
            insertion.append([value])
            recording.append([step])
    return insertion, recording
def xy_young_ru(young_diag):
    """ Compute the xy coordinates of the boxes defining the young diagram, using the russian convention.
    :param young_diag:
        points
    :type young_diag:
        array_like
    :return:
        :math:`\\omega(x)`
    :rtype:
        array_like
    """
    # NOTE(review): assumes ``young_diag`` is a 1-D integer array of row
    # lengths (the slice ``[::-1]``/``np.unique`` usage suggests it is
    # sorted) — confirm against callers.
    def intertwine(arr_1, arr_2):
        # Interleave two equal-length arrays: a1[0], a2[0], a1[1], a2[1], ...
        inter = np.empty((arr_1.size + arr_2.size,), dtype=arr_1.dtype)
        inter[0::2], inter[1::2] = arr_1, arr_2
        return inter
    # horizontal lines
    x_hor = intertwine(np.zeros_like(young_diag), young_diag)
    y_hor = np.repeat(np.arange(1, young_diag.size + 1), repeats=2)
    # vertical lines
    uniq, ind = np.unique(young_diag[::-1], return_index=True)
    gaps = np.ediff1d(uniq, to_begin=young_diag[-1])
    x_vert = np.repeat(np.arange(1, 1 + gaps.sum()), repeats=2)
    y_vert = np.repeat(young_diag.size - ind, repeats=gaps)
    y_vert = intertwine(np.zeros_like(y_vert), y_vert)
    # Stack the French-convention (x, y) pairs, then rotate by 45 degrees
    # (and scale by sqrt(2)) to obtain the Russian convention.
    xy_young_fr = np.column_stack(
        [np.hstack([x_hor, x_vert]), np.hstack([y_hor, y_vert])])
    rot_45_and_scale = np.array([[1.0, -1.0],
                                 [1.0, 1.0]])
    return xy_young_fr.dot(rot_45_and_scale.T)
def limit_shape(x):
    """ Evaluate :math:`\\omega(x)` the limit-shape function :cite:`Ker96`
    .. math::
        \\omega(x) =
        \\begin{cases}
            |x|, &\\text{if } |x|\\geq 2\\
            \\frac{2}{\\pi} \\left(x \\arcsin\\left(\\frac{x}{2}\\right) + \\sqrt{4-x^2} \\right) &\\text{otherwise } \\end{cases}
    :param x:
        points
    :type x:
        array_like
    :return:
        :math:`\\omega(x)`
    :rtype:
        array_like
    .. seealso::
        - :func:`plot_diagram <plot_diagram>`
        - :cite:`Ker96`
    """
    omega = np.zeros_like(x)
    outside = np.abs(x) >= 2.0
    inside = ~outside
    # |x| >= 2: the limit shape coincides with |x|.
    omega[outside] = np.abs(x[outside])
    # |x| < 2: the curved arcsin part, evaluated only on the inside mask so
    # arcsin/sqrt never see out-of-domain arguments.
    xi = x[inside]
    omega[inside] = (xi * np.arcsin(0.5 * xi) + np.sqrt(4.0 - xi * xi)) * (2.0 / np.pi)
    return omega
| {
"content_hash": "691db9499acd7a2f74f69ef1c46513e6",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 159,
"avg_line_length": 30.43859649122807,
"alnum_prop": 0.5791354466858789,
"repo_name": "guilgautier/DPPy",
"id": "ee0f80f6724ef8639ae6dda8ef46e91df83701a5",
"size": "8692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dppy/exotic_dpps_core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "408222"
},
{
"name": "Python",
"bytes": "441355"
},
{
"name": "Shell",
"bytes": "1694"
}
],
"symlink_target": ""
} |
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.tree import DecisionTree
from pyspark import SparkConf, SparkContext
from numpy import array

# Boilerplate Spark stuff:
# Run Spark locally (single process, no cluster) under the app name
# "SparkDecisionTree"; `sc` is the entry point for building RDDs below.
conf = SparkConf().setMaster("local").setAppName("SparkDecisionTree")
sc = SparkContext(conf = conf)

# Some functions that convert our CSV input data into numerical
# features for each job candidate
def binary(YN):
    """Encode a 'Y'/'N' flag as 1/0 (anything other than 'Y' maps to 0)."""
    return 1 if YN == 'Y' else 0
def mapEducation(degree):
    """Encode a degree name as an ordinal level; unknown degrees map to 0."""
    degree_levels = {'BS': 1, 'MS': 2, 'PhD': 3}
    return degree_levels.get(degree, 0)
# Convert a list of raw fields from our CSV file to a
# LabeledPoint that MLLib can use. All data must be numerical...
def createLabeledPoints(fields):
    """Turn one raw CSV row (a list of strings) into a LabeledPoint.

    The label is the 'hired' flag (last column); the features are the
    remaining candidate attributes, all coerced to numbers.
    """
    features = array([
        int(fields[0]),           # years of experience
        binary(fields[1]),        # currently employed?
        int(fields[2]),           # number of previous employers
        mapEducation(fields[3]),  # education level (ordinal)
        binary(fields[4]),        # top-tier school?
        binary(fields[5]),        # did an internship?
    ])
    return LabeledPoint(binary(fields[6]), features)
#Load up our CSV file, and filter out the header line with the column names
# NOTE(review): hard-coded absolute Windows path — adjust for your machine.
rawData = sc.textFile("e:/sundog-consult/udemy/datascience/PastHires.csv")
header = rawData.first()
rawData = rawData.filter(lambda x:x != header)

# Split each line into a list based on the comma delimiters
csvData = rawData.map(lambda x: x.split(","))

# Convert these lists to LabeledPoints
trainingData = csvData.map(createLabeledPoints)

# Create a test candidate, with 10 years of experience, currently employed,
# 3 previous employers, a BS degree, but from a non-top-tier school where
# he or she did not do an internship. You could of course load up a whole
# huge RDD of test candidates from disk, too.
testCandidates = [ array([10, 1, 3, 1, 0, 0])]
testData = sc.parallelize(testCandidates)

# Train our DecisionTree classifier using our data set
# categoricalFeaturesInfo maps feature index -> category count:
# employed(2), education(4), top-tier(2), interned(2); others are numeric.
model = DecisionTree.trainClassifier(trainingData, numClasses=2,
                    categoricalFeaturesInfo={1:2, 3:4, 4:2, 5:2},
                    impurity='gini', maxDepth=5, maxBins=32)

# Now get predictions for our unknown candidates. (Note, you could separate
# the source data into a training set and a test set while tuning
# parameters and measure accuracy as you go!)
predictions = model.predict(testData)
print('Hire prediction:')
# collect() pulls the distributed results back to the driver for printing.
results = predictions.collect()
for result in results:
    print(result)

# We can also print out the decision tree itself:
print('Learned classification tree model:')
print(model.toDebugString())
| {
"content_hash": "430253ec52225a898b2cd8e85da813fd",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 82,
"avg_line_length": 35.48684210526316,
"alnum_prop": 0.7081942899517983,
"repo_name": "vadim-ivlev/STUDY",
"id": "61def5fc5352b86d515f8736df82982dff8be824",
"size": "2697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handson-data-science-python/DataScience-Python3/SparkDecisionTree.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "256"
},
{
"name": "CSS",
"bytes": "153975"
},
{
"name": "CoffeeScript",
"bytes": "167"
},
{
"name": "HTML",
"bytes": "2681792"
},
{
"name": "JavaScript",
"bytes": "2696471"
},
{
"name": "Jupyter Notebook",
"bytes": "2453649"
},
{
"name": "Kotlin",
"bytes": "63"
},
{
"name": "Mathematica",
"bytes": "349964"
},
{
"name": "PHP",
"bytes": "1874"
},
{
"name": "Perl",
"bytes": "716"
},
{
"name": "Prolog",
"bytes": "2424"
},
{
"name": "Python",
"bytes": "53057"
},
{
"name": "Ruby",
"bytes": "2213"
},
{
"name": "Shell",
"bytes": "1308"
},
{
"name": "Vue",
"bytes": "1776"
},
{
"name": "XSLT",
"bytes": "45170"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated Django migration: alters the default of Node.mtp.

    dependencies = [
        ('monitor', '0007_auto_20170718_2235'),
    ]

    operations = [
        migrations.AlterField(
            model_name='node',
            name='mtp',
            # NOTE(review): the default is the timestamp frozen at
            # makemigrations time, not "now" when the migration runs.
            field=models.DateTimeField(default=datetime.datetime(2017, 7, 19, 5, 37, 1, 602506, tzinfo=utc)),
        ),
    ]
| {
"content_hash": "5d007faa7cb132cd8ce6f7566c9deba8",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 109,
"avg_line_length": 25.3,
"alnum_prop": 0.6047430830039525,
"repo_name": "achow101/forkmon",
"id": "0880ee72ee5013533f8277e116e9dc126b97a4cb",
"size": "581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monitor/migrations/0008_auto_20170718_2237.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8630"
},
{
"name": "Python",
"bytes": "41015"
}
],
"symlink_target": ""
} |
import ujson as json
import time
import base64
import crypto
# Keys used in the JSON body of a capability token.
RULE_KEY = 'rule'
EXPIRE_KEY = 'expire'
SIG_KEY = 'sig'
CAP_KEY = 'capability'
# Capability lifetime, in seconds.
ONE_HOUR = 3600
def issue(rule):
    """Issue a signed, base64-encoded capability for ``rule``.

    The capability body carries the rule plus an expiry timestamp one
    hour from now; it is serialized to JSON, signed via ``crypto``, and
    base64-encoded for transport.
    """
    expire_time = int(time.time()) + ONE_HOUR
    payload = {
        RULE_KEY: rule,
        EXPIRE_KEY: expire_time,
    }
    signed = crypto.sign_cap(json.dumps(payload))
    return base64.b64encode(signed)
def delegate():
    """Delegate a capability to another party — not implemented (no-op stub)."""
| {
"content_hash": "0848b2cf6eefdcad89229bd8863f2708",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 53,
"avg_line_length": 19.37037037037037,
"alnum_prop": 0.6443594646271511,
"repo_name": "dongting/sdnac",
"id": "b55af8018fef04193e8fdeaa708759458550cb66",
"size": "523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdnac/services/capability.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11365"
}
],
"symlink_target": ""
} |
import cherrypy
class HelloWorld:
    """Minimal CherryPy page handler serving a greeting at the site root."""

    def index(self):
        greeting = "Hello World!"
        return greeting
    # CherryPy only publishes methods explicitly marked as exposed.
    index.exposed = True
# Legacy CherryPy 2.x startup: mount the handler at the URL root and start
# the built-in HTTP server (this call blocks, serving requests).
# NOTE(review): modern CherryPy uses cherrypy.quickstart(HelloWorld()).
cherrypy.root = HelloWorld()
cherrypy.server.start()
| {
"content_hash": "790fb924af9e011739128bbcaa275651",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 29,
"avg_line_length": 18.333333333333332,
"alnum_prop": 0.6909090909090909,
"repo_name": "teamtachyon/Quillpad-Server",
"id": "f9e070c428c98f0814ba86e50b399454e874b618",
"size": "273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hellocherry.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "31654"
},
{
"name": "Python",
"bytes": "262170"
}
],
"symlink_target": ""
} |
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNicTelStatusAvailable(object):
    """Parser tests for a whois.nic.tel response of an *available* domain.

    Built from a recorded fixture; every parsed field is expected to be
    empty/None/False except ``available`` itself.
    """

    def setUp(self):
        fixture_path = "spec/fixtures/responses/whois.nic.tel/status_available.txt"
        host = "whois.nic.tel"
        # Use a context manager so the fixture file handle is closed
        # deterministically (the original leaked the open() handle).
        with open(fixture_path, "r") as fixture:
            part = yawhois.record.Part(fixture.read(), host)
        self.record = yawhois.record.Record(None, [part])

    def test_status(self):
        eq_(self.record.status, None)

    def test_available(self):
        eq_(self.record.available, True)

    def test_domain(self):
        eq_(self.record.domain, "u34jedzcq.tel")

    def test_nameservers(self):
        eq_(self.record.nameservers.__class__.__name__, 'list')
        eq_(self.record.nameservers, [])

    def test_admin_contacts(self):
        eq_(self.record.admin_contacts.__class__.__name__, 'list')
        eq_(self.record.admin_contacts, [])

    def test_registered(self):
        eq_(self.record.registered, False)

    def test_created_on(self):
        eq_(self.record.created_on, None)

    def test_registrar(self):
        eq_(self.record.registrar, None)

    def test_registrant_contacts(self):
        eq_(self.record.registrant_contacts.__class__.__name__, 'list')
        eq_(self.record.registrant_contacts, [])

    def test_technical_contacts(self):
        eq_(self.record.technical_contacts.__class__.__name__, 'list')
        eq_(self.record.technical_contacts, [])

    def test_updated_on(self):
        eq_(self.record.updated_on, None)

    def test_domain_id(self):
        eq_(self.record.domain_id, None)

    def test_expires_on(self):
        eq_(self.record.expires_on, None)
| {
"content_hash": "d94182685ac2a08f29d60887b28554e3",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 83,
"avg_line_length": 31.14814814814815,
"alnum_prop": 0.6266349583828775,
"repo_name": "huyphan/pyyawhois",
"id": "63640b68a3a0588e91fb10f318e8e6866815839f",
"size": "1943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/record/parser/test_response_whois_nic_tel_status_available.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1859653"
}
],
"symlink_target": ""
} |
import unittest
from feedly.storage.redis.structures.hash import RedisHashCache,\
ShardedHashCache, FallbackHashCache
from feedly.storage.redis.structures.list import RedisListCache,\
FallbackRedisListCache
from feedly.storage.redis.connection import get_redis_connection
from functools import partial
from feedly.storage.redis.structures.sorted_set import RedisSortedSetCache
class BaseRedisStructureTestCase(unittest.TestCase):
    """Shared base for the Redis structure tests.

    Subclasses override :meth:`get_structure` to supply the structure
    under test; the base implementation provides none.
    """

    def get_structure(self):
        return None
class RedisSortedSetTest(BaseRedisStructureTestCase):
    """Exercises RedisSortedSetCache against a live Redis.

    All tests share the key 'test'; get_structure() deletes it so every
    test starts from an empty sorted set.
    """

    # (score, member) pairs used throughout the tests below.
    test_data = [(1.0, 'a'), (2.0, 'b'), (3.0, 'c')]

    def get_structure(self):
        structure_class = RedisSortedSetCache
        structure = structure_class('test')
        structure.delete()
        return structure

    def test_add_many(self):
        cache = self.get_structure()
        test_data = self.test_data
        for key, value in test_data:
            cache.add(key, value)
        # this shouldnt insert data, its a sorted set after all
        cache.add_many(test_data)
        count = cache.count()
        self.assertEqual(int(count), 3)

    def test_ordering(self):
        cache = self.get_structure()
        data = self.test_data
        test_data = data
        cache.add_many(test_data)
        results = cache[:]
        # Reverse each pair: the cache yields (member, score) tuples.
        expected_results = [p[::-1] for p in test_data]
        self.assertEqual(results, expected_results[::-1])
        # Flipping sort_asc should flip the iteration order.
        cache.sort_asc = True
        results = cache[:10]
        self.assertEqual(results, expected_results)

    def test_trim(self):
        cache = self.get_structure()
        test_data = self.test_data
        for score, value in test_data:
            cache.add(score, value)
        cache.trim(1)
        count = cache.count()
        self.assertEqual(count, 1)

    def test_simple_trim(self):
        cache = self.get_structure()
        test_data = self.test_data
        for key, value in test_data:
            cache.add(key, value)
        # trim() with no argument should honour max_length.
        cache.max_length = 1
        cache.trim()
        count = int(cache.count())
        self.assertEqual(count, 1)

    def test_remove(self):
        cache = self.get_structure()
        test_data = self.test_data
        cache.add_many(test_data)
        cache.remove_many(['a'])
        count = cache.count()
        self.assertEqual(count, 2)

    def test_remove_by_score(self):
        cache = self.get_structure()
        test_data = self.test_data
        cache.add_many(test_data)
        cache.remove_by_scores([1.0, 2.0])
        count = cache.count()
        self.assertEqual(count, 1)

    def test_zremrangebyrank(self):
        # Uses the raw redis client to document ZREMRANGEBYRANK semantics.
        # NOTE(review): zadd(key, score, member) is the legacy redis-py
        # (<3.0) signature — verify against the pinned client version.
        redis = get_redis_connection()
        key = 'test'
        # start out fresh
        redis.delete(key)
        redis.zadd(key, 1, 'a')
        redis.zadd(key, 2, 'b')
        redis.zadd(key, 3, 'c')
        redis.zadd(key, 4, 'd')
        redis.zadd(key, 5, 'e')
        expected_results = [('a', 1.0), ('b', 2.0), ('c', 3.0), (
            'd', 4.0), ('e', 5.0)]
        results = redis.zrange(key, 0, -1, withscores=True)
        self.assertEqual(results, expected_results)
        # NOTE(review): this zrange result is never asserted — dead statement?
        results = redis.zrange(key, 0, -4, withscores=True)
        # now the idea is to only keep 3,4,5
        max_length = 3
        end = (max_length * -1) - 1
        redis.zremrangebyrank(key, 0, end)
        expected_results = [('c', 3.0), ('d', 4.0), ('e', 5.0)]
        results = redis.zrange(key, 0, -1, withscores=True)
        self.assertEqual(results, expected_results)
class ListCacheTestCase(BaseRedisStructureTestCase):
    """Tests for RedisListCache, capped at max_items=10 via a subclass
    created on the fly in get_structure()."""

    def get_structure(self):
        # Build a throwaway subclass so max_items can be fixed for tests.
        structure_class = type(
            'MyCache', (RedisListCache, ), dict(max_items=10))
        structure = structure_class('test')
        structure.delete()
        return structure

    def test_append(self):
        cache = self.get_structure()
        cache.append_many(['a', 'b'])
        self.assertEqual(cache[:5], ['a', 'b'])
        self.assertEqual(cache.count(), 2)

    def test_simple_append(self):
        cache = self.get_structure()
        for value in ['a', 'b']:
            cache.append(value)
        self.assertEqual(cache[:5], ['a', 'b'])
        self.assertEqual(cache.count(), 2)

    def test_trim(self):
        cache = self.get_structure()
        cache.append_many(range(100))
        self.assertEqual(cache.count(), 100)
        # trim() should cut the list down to max_items (10).
        cache.trim()
        self.assertEqual(cache.count(), 10)

    def test_remove(self):
        cache = self.get_structure()
        data = ['a', 'b']
        cache.append_many(data)
        self.assertEqual(cache[:5], data)
        self.assertEqual(cache.count(), 2)
        for value in data:
            cache.remove(value)
        self.assertEqual(cache[:5], [])
        self.assertEqual(cache.count(), 0)
class FakeFallBack(FallbackRedisListCache):
    """List cache whose fallback results come from an in-memory list
    (passed via the ``fallback`` keyword) instead of a real backend."""

    max_items = 10

    def __init__(self, *args, **kwargs):
        # Grab the canned fallback data before delegating to the parent.
        self.fallback_data = kwargs.pop('fallback')
        super(FakeFallBack, self).__init__(*args, **kwargs)

    def get_fallback_results(self, start, stop):
        return self.fallback_data[start:stop]
class FallbackRedisListCacheTest(ListCacheTestCase):
    """Re-runs the list cache tests against FakeFallBack, which serves
    ['a', 'b'] from its fallback when Redis has no data."""

    def get_structure(self):
        structure = FakeFallBack('test', fallback=['a', 'b'])
        structure.delete()
        return structure

    def test_remove(self):
        cache = self.get_structure()
        data = ['a', 'b']
        cache.append_many(data)
        self.assertEqual(cache[:5], data)
        self.assertEqual(cache.count(), 2)
        for value in data:
            cache.remove(value)
        # Redis is now empty...
        self.assertEqual(cache.count(), 0)
        # fallback should still work
        self.assertEqual(cache[:5], data)
class SecondFallbackRedisListCacheTest(BaseRedisStructureTestCase):
    """Fallback behaviour with a three-element fallback list: reads mix
    the fallback data with whatever has been appended to Redis."""

    def get_structure(self):
        structure = FakeFallBack('test', fallback=['a', 'b', 'c'])
        structure.delete()
        return structure

    def test_append(self):
        cache = self.get_structure()
        # test while we have no redis data
        self.assertEqual(cache[:5], ['a', 'b', 'c'])
        # now test with redis data
        cache.append_many(['d', 'e', 'f', 'g'])
        self.assertEqual(cache.count(), 7)
        self.assertEqual(cache[:3], ['a', 'b', 'c'])

    def test_slice(self):
        cache = self.get_structure()
        # test while we have no redis data
        self.assertEqual(cache[:], ['a', 'b', 'c'])
class HashCacheTestCase(BaseRedisStructureTestCase):
    """Tests for RedisHashCache using the shared key 'test'."""

    def get_structure(self):
        structure = RedisHashCache('test')
        # always start fresh
        structure.delete()
        return structure

    def test_set_many(self):
        cache = self.get_structure()
        key_value_pairs = [('key', 'value'), ('key2', 'value2')]
        cache.set_many(key_value_pairs)
        keys = cache.keys()
        self.assertEqual(keys, ['key', 'key2'])

    def test_set(self):
        cache = self.get_structure()
        key_value_pairs = [('key', 'value'), ('key2', 'value2')]
        for key, value in key_value_pairs:
            cache.set(key, value)
        keys = cache.keys()
        self.assertEqual(keys, ['key', 'key2'])

    def test_delete_many(self):
        cache = self.get_structure()
        key_value_pairs = [('key', 'value'), ('key2', 'value2')]
        cache.set_many(key_value_pairs)
        keys = cache.keys()
        cache.delete_many(keys)
        keys = cache.keys()
        self.assertEqual(keys, [])

    def test_get_and_set(self):
        cache = self.get_structure()
        key_value_pairs = [('key', 'value'), ('key2', 'value2')]
        cache.set_many(key_value_pairs)
        results = cache.get_many(['key', 'key2'])
        self.assertEqual(results, {'key2': 'value2', 'key': 'value'})
        result = cache.get('key')
        self.assertEqual(result, 'value')
        # A miss returns None for the plain (non-fallback) hash cache.
        result = cache.get('key_missing')
        self.assertEqual(result, None)

    def test_contains(self):
        cache = self.get_structure()
        key_value_pairs = [('key', 'value'), ('key2', 'value2')]
        cache.set_many(key_value_pairs)
        result = cache.contains('key')
        self.assertEqual(result, True)
        result = cache.contains('key2')
        self.assertEqual(result, True)
        result = cache.contains('key_missing')
        self.assertEqual(result, False)

    def test_count(self):
        cache = self.get_structure()
        key_value_pairs = [('key', 'value'), ('key2', 'value2')]
        cache.set_many(key_value_pairs)
        count = cache.count()
        self.assertEqual(count, 2)
class MyFallbackHashCache(FallbackHashCache):
    """Fallback hash cache whose backend lookup is faked: each requested
    field maps to its position in the request (0, 1, 2, ...)."""

    def get_many_from_fallback(self, fields):
        # zip against range(100) mirrors the original truncation at 100.
        return {field: number for field, number in zip(fields, range(100))}
class FallbackHashCacheTestCase(HashCacheTestCase):
    """Re-runs the hash cache tests with MyFallbackHashCache, whose
    fallback answers misses with integers instead of None."""

    def get_structure(self):
        structure = MyFallbackHashCache('test')
        # always start fresh
        structure.delete()
        return structure

    def test_get_and_set(self):
        cache = self.get_structure()
        key_value_pairs = [('key', 'value'), ('key2', 'value2')]
        cache.set_many(key_value_pairs)
        results = cache.get_many(['key', 'key2'])
        self.assertEqual(results, {'key2': 'value2', 'key': 'value'})
        result = cache.get('key')
        self.assertEqual(result, 'value')
        # A miss is answered by the fallback (index 0), not None.
        result = cache.get('key_missing')
        self.assertEqual(result, 0)
class ShardedHashCacheTestCase(HashCacheTestCase):
    """Hash cache tests adapted for ShardedHashCache: keys() is not used
    here, and contains() is expected to raise NotImplementedError."""

    def get_structure(self):
        structure = ShardedHashCache('test')
        # always start fresh
        structure.delete()
        return structure

    def test_set_many(self):
        # No assertions — only exercises set_many for errors.
        cache = self.get_structure()
        key_value_pairs = [('key', 'value'), ('key2', 'value2')]
        cache.set_many(key_value_pairs)

    def test_get_and_set(self):
        cache = self.get_structure()
        key_value_pairs = [('key', 'value'), ('key2', 'value2')]
        cache.set_many(key_value_pairs)
        results = cache.get_many(['key', 'key2'])
        self.assertEqual(results, {'key2': 'value2', 'key': 'value'})
        result = cache.get('key')
        self.assertEqual(result, 'value')
        result = cache.get('key_missing')
        self.assertEqual(result, None)

    def test_count(self):
        cache = self.get_structure()
        key_value_pairs = [('key', 'value'), ('key2', 'value2')]
        cache.set_many(key_value_pairs)
        count = cache.count()
        self.assertEqual(count, 2)

    def test_contains(self):
        cache = self.get_structure()
        key_value_pairs = [('key', 'value'), ('key2', 'value2')]
        cache.set_many(key_value_pairs)
        # contains() is expected to raise NotImplementedError here.
        contains = partial(cache.contains, 'key')
        self.assertRaises(NotImplementedError, contains)
| {
"content_hash": "ec58931862d227314f8ae31578f373a2",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 74,
"avg_line_length": 31.84431137724551,
"alnum_prop": 0.5866867243324558,
"repo_name": "Architizer/Feedly",
"id": "6c86959c565bad500ae2b7e9b4b4c8b99dce0965",
"size": "10636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feedly/tests/storage/redis/structures.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Puppet",
"bytes": "77661"
},
{
"name": "Python",
"bytes": "206190"
},
{
"name": "Ruby",
"bytes": "259164"
},
{
"name": "Shell",
"bytes": "11876"
}
],
"symlink_target": ""
} |
"""
Site-Customize
Template file to put into a virtual-env to implicitely engage the editline
completer instead of readline (whether or not it is present)
"""
import sys
import os
def enable_line_completer():
"""Enable default line-editor configuration on interactive prompts, by
registering a sys.__interactivehook__.
Try to register support from either editline or readline.
If the readline module can be imported, the hook will set the Tab key
as completion key and register ~/.python_history as history file.
"""
def register_readline():
"""Attempt to configure the readline completion support"""
import atexit
try:
import readline
import rlcompleter
except ImportError:
return
# Reading the initialization (config) file may not be enough to set a
# completion key, so we set one first and then read the file.
readline_doc = getattr(readline, '__doc__', '')
if readline_doc is not None and 'libedit' in readline_doc:
readline.parse_and_bind('bind ^I rl_complete')
else:
readline.parse_and_bind('tab: complete')
try:
readline.read_init_file()
except OSError:
# An OSError here could have many causes, but the most likely one
# is that there's no .inputrc file (or .editrc file in the case of
# Mac OS X + libedit) in the expected location. In that case, we
# want to ignore the exception.
pass
if readline.get_current_history_length() == 0:
# If no history was loaded, default to .python_history.
# The guard is necessary to avoid doubling history size at
# each interpreter exit when readline was already configured
# through a PYTHONSTARTUP hook, see:
# http://bugs.python.org/issue5845#msg198636
history = os.path.join(os.path.expanduser('~'),
'.python_history')
try:
readline.read_history_file(history)
except IOError:
pass
atexit.register(readline.write_history_file, history)
def register_editline():
"""Attempt to configure the editline completion support"""
import atexit
try:
from editline import _editline
from editline.editline import EditLine
from editline import lineeditor
editline_system = _editline.get_global_instance()
if editline_system is None:
editline_system = EditLine("PythonSystem",
sys.stdin, sys.stdout, sys.stderr)
lineeditor.global_line_editor(
lineeditor.EditlineCompleter(editline_system))
_editline.set_global_instance(editline_system)
except ImportError:
return
# the binding of ^I (tab) to the completer is done in _editline
# by default. The user can override it, but the default is correct.
# pull in the libedit defaults
try:
editrc = os.path.join(os.path.expanduser('~'), '.editrc')
editline_system.read_init_file(editrc)
except OSError:
# An OSError here could have many causes, but the most likely one
# is that there's no .inputrc file (or .editrc file in the case of
# Mac OS X + libedit) in the expected location. In that case, we
# want to ignore the exception.
pass
if editline_system.get_current_history_length() == 0:
# If no history was loaded, default to .python_history.
# The guard is necessary to avoid doubling history size at
# each interpreter exit when readline was already configured
# through a PYTHONSTARTUP hook, see:
# http://bugs.python.org/issue5845#msg198636
history = os.path.join(os.path.expanduser('~'), '.python_history')
try:
editline_system.read_history_file(history)
except IOError:
pass
atexit.register(editline_system.write_history_file, history)
# snoop to see which is available don't import the modules, just check
# for a valid loader so we don't pollute the namespace accidentally.
import pkgutil
le_loader = pkgutil.get_loader('editline.lineeditor')
el_loader = pkgutil.get_loader('editline.editline')
_el_loader = pkgutil.get_loader('editline._editline')
rl_loader = pkgutil.get_loader('readline')
# prefer editline
if le_loader and el_loader and _el_loader:
if sys.version_info[0] >= 3 and sys.version_info[1] > 3:
sys.__interactivehook__ = register_editline
else:
register_editline()
elif rl_loader:
if sys.version_info[0] >= 3 and sys.version_info[1] > 3:
sys.__interactivehook__ = register_readline
else:
register_readline()
else:
if sys.version_info[0] >= 3 and sys.version_info[1] > 3:
sys.__interactivehook__ = None
def main():
    """Mimick the format of the true site.py

    Runs automatically when this module is imported (see the guard at the
    bottom of the file), unless the interpreter was started with -S.
    """
    enable_line_completer()
# Prevent extending of sys.path when python was started with -S and
# site is imported later.
# (sys.flags.no_site is truthy under -S; the user opted out of site
# customization, so do nothing in that case.)
if not sys.flags.no_site:
    main()
| {
"content_hash": "db20af8b2884777fb1af83bb5d9c39bb",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 78,
"avg_line_length": 39.67142857142857,
"alnum_prop": 0.6121714079942384,
"repo_name": "mark-nicholson/python-editline",
"id": "b78b40872b7c65f403f06fb5c821df2cb15d0c56",
"size": "5554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sitecustomize.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "598138"
},
{
"name": "Makefile",
"bytes": "3194"
},
{
"name": "Objective-C",
"bytes": "11490"
},
{
"name": "Python",
"bytes": "121195"
},
{
"name": "Shell",
"bytes": "14904"
}
],
"symlink_target": ""
} |
"""
Tests for checking the configuration for app has
the valid checks (gstorage.apps, gstorage.checks)
"""
from mock import patch
from os import environ
from unittest import TestCase
from django.conf import settings
from gstorage.checks import check_gstorage_params
# Name of the environment variable / Django setting the check looks for.
key = 'GOOGLE_APPLICATION_CREDENTIALS'
@patch('gstorage.checks.REQUIRED_SETTINGS', [key])
class TestStorageConfig(TestCase):
    """Exercises the gstorage system check (error id ``gstorage.001``)."""

    def setUp(self):
        # Guarantee the credential variable is absent before each test.
        environ.pop(key, None)

    def test_key_error(self):
        """Test that app check throws an error when required setting is missing"""
        errors = check_gstorage_params()
        assert len(errors) >= 1
        first_error = errors[0]
        assert first_error.id == 'gstorage.001'
        assert first_error.msg.startswith(key)

    def test_key_settings(self):
        """Test that app check has no errors when settings is specified"""
        setattr(settings, key, '/tmp')
        assert check_gstorage_params() == []

    def test_key_env(self):
        """Test that app check has no errors when settings is specified as environment variable"""
        environ[key] = '/tmp'
        assert check_gstorage_params() == []
| {
"content_hash": "bc64562781066f39512e0a5a85a597a9",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 98,
"avg_line_length": 29.375,
"alnum_prop": 0.6570212765957447,
"repo_name": "fyndiq/django-gstorage",
"id": "98ad09151b6605ebcf2b66d8f9f4ef6f7e644cc5",
"size": "1199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_checks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "200"
},
{
"name": "Python",
"bytes": "21007"
}
],
"symlink_target": ""
} |
class Solution:
    # @param {char[]} string: An array of Char
    # @param {int} length: The true length of the string
    # @return {int} The true length of new string
    def replaceBlank(self, string, length):
        """Replace every space in `string` (a char list, mutated in place)
        with '%20' and return the new true length.

        Fixes vs. the original:
        - empty/None input now returns 0 (an int, per the declared
          contract) instead of echoing the input back;
        - the text is assembled with ''.join + str.replace instead of
          quadratic `+=` concatenation, and the list is rewritten in
          place via slice assignment instead of a pop()/append() loop.
        """
        if not string:
            return 0
        replaced = ''.join(string).replace(' ', '%20')
        string[:] = replaced  # mutate the caller's list in place
        return len(replaced)
| {
"content_hash": "204846d3c45ebbd57bd6c0d4106b036c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 56,
"avg_line_length": 32.42857142857143,
"alnum_prop": 0.539647577092511,
"repo_name": "1ta/study_python",
"id": "3cc6a1633db25d0f975001d88f40fdabaa1162a1",
"size": "454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "files/sun/practice/replace_space.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3777"
},
{
"name": "JavaScript",
"bytes": "2974"
},
{
"name": "Python",
"bytes": "27948"
},
{
"name": "Shell",
"bytes": "37"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
import time
import numpy as np
from scipy.sparse.linalg import LinearOperator
from .._differentiable_functions import VectorFunction
from .._constraints import (
NonlinearConstraint, LinearConstraint, PreparedConstraint, strict_bounds)
from .._hessian_update_strategy import BFGS
from ..optimize import OptimizeResult
from .._differentiable_functions import ScalarFunction
from .equality_constrained_sqp import equality_constrained_sqp
from .canonical_constraint import (CanonicalConstraint,
initial_constraints_as_canonical)
from .tr_interior_point import tr_interior_point
from .report import BasicReport, SQPReport, IPReport
# Human-readable reason reported for each solver termination status code.
TERMINATION_MESSAGES = {
    0: "The maximum number of function evaluations is exceeded.",
    1: "`gtol` termination condition is satisfied.",
    2: "`xtol` termination condition is satisfied.",
    3: "`callback` function requested termination"
}
class HessianLinearOperator(object):
    """Build LinearOperator from hessp

    Wraps a user-supplied Hessian-vector-product callable so that, for a
    fixed evaluation point ``x``, it acts as an ``n x n`` linear operator.
    """

    def __init__(self, hessp, n):
        self.hessp = hessp  # callable: hessp(x, p, *args) -> H(x) @ p
        self.n = n          # dimension of the (square) operator

    def __call__(self, x, *args):
        def hess_matvec(p):
            # Every matrix-vector product delegates to the hessp callable.
            return self.hessp(x, p, *args)
        return LinearOperator((self.n, self.n), matvec=hess_matvec)
class LagrangianHessian(object):
    """The Hessian of the Lagrangian as LinearOperator.

    The Lagrangian is computed as the objective function plus all the
    constraints multiplied with some numbers (Lagrange multipliers).
    """

    def __init__(self, n, objective_hess, constraints_hess):
        self.n = n
        self.objective_hess = objective_hess
        self.constraints_hess = constraints_hess

    def __call__(self, x, v_eq=np.empty(0), v_ineq=np.empty(0)):
        H_obj = self.objective_hess(x)
        H_con = self.constraints_hess(x, v_eq, v_ineq)

        def lagrangian_matvec(p):
            # (H_objective + H_constraints) @ p, without forming the sum.
            return H_obj.dot(p) + H_con.dot(p)

        return LinearOperator((self.n, self.n), lagrangian_matvec)
def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints,
                     start_time, tr_radius, constr_penalty, cg_info):
    """Refresh the solver `state` after one SQP iteration.

    Evaluation counters and timing are always updated; the
    solution-dependent fields (x, fun, grad, multipliers, constraint
    values, Lagrangian gradient, violation) are refreshed only when the
    iteration succeeded.
    """
    state.nit += 1
    state.nfev = objective.nfev
    state.njev = objective.ngev
    state.nhev = objective.nhev
    # Per-constraint counters: only VectorFunction-backed constraints
    # track evaluations; any other kind reports 0.
    state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]
    state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]
    state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]

    if not last_iteration_failed:
        state.x = x
        state.fun = objective.f
        state.grad = objective.g
        state.v = [c.fun.v for c in prepared_constraints]
        state.constr = [c.fun.f for c in prepared_constraints]
        state.jac = [c.fun.J for c in prepared_constraints]
        # Compute Lagrangian Gradient
        # grad(f) + sum_i J_i^T v_i; its infinity norm is the reported
        # first-order optimality measure.
        state.lagrangian_grad = np.copy(state.grad)
        for c in prepared_constraints:
            state.lagrangian_grad += c.fun.J.T.dot(c.fun.v)
        state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf)
        # Compute maximum constraint violation
        # (how far any component falls below lb or rises above ub).
        state.constr_violation = 0
        for i in range(len(prepared_constraints)):
            lb, ub = prepared_constraints[i].bounds
            c = state.constr[i]
            state.constr_violation = np.max([state.constr_violation,
                                             np.max(lb - c),
                                             np.max(c - ub)])

    state.execution_time = time.time() - start_time
    state.tr_radius = tr_radius
    state.constr_penalty = constr_penalty
    state.cg_niter += cg_info["niter"]
    state.cg_stop_cond = cg_info["stop_cond"]
    return state
def update_state_ip(state, x, last_iteration_failed, objective,
                    prepared_constraints, start_time,
                    tr_radius, constr_penalty, cg_info,
                    barrier_parameter, barrier_tolerance):
    """Refresh `state` after an interior-point iteration.

    Delegates the shared bookkeeping to `update_state_sqp`, then records
    the two barrier quantities specific to the interior-point method.
    """
    updated = update_state_sqp(state, x, last_iteration_failed, objective,
                               prepared_constraints, start_time, tr_radius,
                               constr_penalty, cg_info)
    updated.barrier_parameter = barrier_parameter
    updated.barrier_tolerance = barrier_tolerance
    return updated
def _minimize_trustregion_constr(fun, x0, args, grad,
hess, hessp, bounds, constraints,
xtol=1e-8, gtol=1e-8,
barrier_tol=1e-8,
sparse_jacobian=None,
callback=None, maxiter=1000,
verbose=0, finite_diff_rel_step=None,
initial_constr_penalty=1.0, initial_tr_radius=1.0,
initial_barrier_parameter=0.1,
initial_barrier_tolerance=0.1,
factorization_method=None,
disp=False):
"""Minimize a scalar function subject to constraints.
Parameters
----------
gtol : float, optional
Tolerance for termination by the norm of the Lagrangian gradient.
The algorithm will terminate when both the infinity norm (i.e. max
abs value) of the Lagrangian gradient and the constraint violation
are smaller than ``gtol``. Default is 1e-8.
xtol : float, optional
Tolerance for termination by the change of the independent variable.
The algorithm will terminate when ``tr_radius < xtol``, where
``tr_radius`` is the radius of the trust region used in the algorithm.
Default is 1e-8.
barrier_tol : float, optional
Threshold on the barrier parameter for the algorithm termination.
When inequality constraints are present the algorithm will terminate
only when the barrier parameter is less than `barrier_tol`.
Default is 1e-8.
sparse_jacobian : {bool, None}, optional
Determines how to represent Jacobians of the constraints. If bool,
then Jacobians of all the constraints will be converted to the
corresponding format. If None (default), then Jacobians won't be
converted, but the algorithm can proceed only if they all have the
same format.
initial_tr_radius: float, optional
Initial trust radius. The trust radius gives the maximum distance
between solution points in consecutive iterations. It reflects the
trust the algorithm puts in the local approximation of the optimization
problem. For an accurate local approximation the trust-region should be
large and for an approximation valid only close to the current point it
should be a small one. The trust radius is automatically updated throughout
the optimization process, with ``initial_tr_radius`` being its initial value.
Default is 1 (recommended in [1]_, p. 19).
initial_constr_penalty : float, optional
Initial constraints penalty parameter. The penalty parameter is used for
balancing the requirements of decreasing the objective function
and satisfying the constraints. It is used for defining the merit function:
``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``,
where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all
the constraints. The merit function is used for accepting or rejecting
trial points and ``constr_penalty`` weights the two conflicting goals
of reducing objective function and constraints. The penalty is automatically
updated throughout the optimization process, with
``initial_constr_penalty`` being its initial value. Default is 1
(recommended in [1]_, p 19).
initial_barrier_parameter, initial_barrier_tolerance: float, optional
Initial barrier parameter and initial tolerance for the barrier subproblem.
Both are used only when inequality constraints are present. For dealing with
optimization problems ``min_x f(x)`` subject to inequality constraints
``c(x) <= 0`` the algorithm introduces slack variables, solving the problem
``min_(x,s) f(x) + barrier_parameter*sum(ln(s))`` subject to the equality
constraints ``c(x) + s = 0`` instead of the original problem. This subproblem
is solved for increasing values of ``barrier_parameter`` and with decreasing
tolerances for the termination, starting with ``initial_barrier_parameter``
for the barrier parameter and ``initial_barrier_tolerance`` for the
barrier subproblem barrier. Default is 0.1 for both values (recommended in [1]_ p. 19).
factorization_method : string or None, optional
Method to factorize the Jacobian of the constraints. Use None (default)
for the auto selection or one of:
- 'NormalEquation' (requires scikit-sparse)
- 'AugmentedSystem'
- 'QRFactorization'
- 'SVDFactorization'
The methods 'NormalEquation' and 'AugmentedSystem' can be used only
with sparse constraints. The projections required by the algorithm
will be computed using, respectively, the the normal equation and the
augmented system approaches explained in [1]_. 'NormalEquation'
computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem'
performs the LU factorization of an augmented system. They usually
provide similar results. 'AugmentedSystem' is used by default for
sparse matrices.
The methods 'QRFactorization' and 'SVDFactorization' can be used
only with dense constraints. They compute the required projections
using, respectively, QR and SVD factorizations. The 'SVDFactorization'
method can cope with Jacobian matrices with deficient row rank and will
be used whenever other factorization methods fail (which may imply the
conversion of sparse matrices to a dense format when required).
By default 'QRFactorization' is used for dense matrices.
finite_diff_rel_step : None or array_like, optional
Relative step size for the finite difference approximation.
maxiter : int, optional
Maximum number of algorithm iterations. Default is 1000.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations.
* 3 : display progress during iterations (more complete report).
disp : bool, optional
If True (default) then `verbose` will be set to 1 if it was 0.
Returns
-------
`OptimizeResult` with the fields documented below. Note the following:
1. All values corresponding to the constraints are ordered as they
were passed to the solver. And values corresponding to `bounds`
constraints are put *after* other constraints.
2. All numbers of function, Jacobian or Hessian evaluations correspond
to numbers of actual Python function calls. It means, for example,
that if a Jacobian is estimated by finite differences then the
number of Jacobian evaluations will be zero and the number of
function evaluations will be incremented by all calls during the
finite difference estimation.
x : ndarray, shape (n,)
Solution found.
optimality : float
Infinity norm of the Lagrangian gradient at the solution.
constr_violation : float
Maximum constraint violation at the solution.
fun : float
Objective function at the solution.
grad : ndarray, shape (n,)
Gradient of the objective function at the solution.
lagrangian_grad : ndarray, shape (n,)
Gradient of the Lagrangian function at the solution.
nit : int
Total number of iterations.
nfev : integer
Number of the objective function evaluations.
ngev : integer
Number of the objective function gradient evaluations.
nhev : integer
Number of the objective function Hessian evaluations.
cg_niter : int
Total number of the conjugate gradient method iterations.
method : {'equality_constrained_sqp', 'tr_interior_point'}
Optimization method used.
constr : list of ndarray
List of constraint values at the solution.
jac : list of {ndarray, sparse matrix}
List of the Jacobian matrices of the constraints at the solution.
v : list of ndarray
List of the Lagrange multipliers for the constraints at the solution.
For an inequality constraint a positive multiplier means that the upper
bound is active, a negative multiplier means that the lower bound is
active and if a multiplier is zero it means the constraint is not
active.
constr_nfev : list of int
Number of constraint evaluations for each of the constraints.
constr_njev : list of int
Number of Jacobian matrix evaluations for each of the constraints.
constr_nhev : list of int
Number of Hessian evaluations for each of the constraints.
tr_radius : float
Radius of the trust region at the last iteration.
constr_penalty : float
Penalty parameter at the last iteration, see `initial_constr_penalty`.
barrier_tolerance : float
Tolerance for the barrier subproblem at the last iteration.
Only for problems with inequality constraints.
barrier_parameter : float
Barrier parameter at the last iteration. Only for problems
with inequality constraints.
execution_time : float
Total execution time.
message : str
Termination message.
status : {0, 1, 2, 3}
Termination status:
* 0 : The maximum number of function evaluations is exceeded.
* 1 : `gtol` termination condition is satisfied.
* 2 : `xtol` termination condition is satisfied.
* 3 : `callback` function requested termination.
cg_stop_cond : int
Reason for CG subproblem termination at the last iteration:
* 0 : CG subproblem not evaluated.
* 1 : Iteration limit was reached.
* 2 : Reached the trust-region boundary.
* 3 : Negative curvature detected.
* 4 : Tolerance was satisfied.
"""
x0 = np.atleast_1d(x0).astype(float)
n_vars = np.size(x0)
if hess is None:
if callable(hessp):
hess = HessianLinearOperator(hessp, n_vars)
else:
hess = BFGS()
if disp and verbose == 0:
verbose = 1
if bounds is not None:
finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub,
bounds.keep_feasible, n_vars)
else:
finite_diff_bounds = (-np.inf, np.inf)
# Define Objective Function
objective = ScalarFunction(fun, x0, args, grad, hess,
finite_diff_rel_step, finite_diff_bounds)
# Put constraints in list format when needed
if isinstance(constraints, (NonlinearConstraint, LinearConstraint)):
constraints = [constraints]
# Prepare constraints.
prepared_constraints = [
PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds)
for c in constraints]
# Check that all constraints are either sparse or dense.
n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints)
if 0 < n_sparse < len(prepared_constraints):
raise ValueError("All constraints must have the same kind of the "
"Jacobian --- either all sparse or all dense. "
"You can set the sparsity globally by setting "
"`sparse_jacobian` to either True of False.")
if prepared_constraints:
sparse_jacobian = n_sparse > 0
if bounds is not None:
if sparse_jacobian is None:
sparse_jacobian = True
prepared_constraints.append(PreparedConstraint(bounds, x0,
sparse_jacobian))
# Concatenate initial constraints to the canonical form.
c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical(
n_vars, prepared_constraints, sparse_jacobian)
# Prepare all canonical constraints and concatenate it into one.
canonical_all = [CanonicalConstraint.from_PreparedConstraint(c)
for c in prepared_constraints]
if len(canonical_all) == 0:
canonical = CanonicalConstraint.empty(n_vars)
elif len(canonical_all) == 1:
canonical = canonical_all[0]
else:
canonical = CanonicalConstraint.concatenate(canonical_all,
sparse_jacobian)
# Generate the Hessian of the Lagrangian.
lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess)
# Choose appropriate method
if canonical.n_ineq == 0:
method = 'equality_constrained_sqp'
else:
method = 'tr_interior_point'
# Construct OptimizeResult
state = OptimizeResult(
nit=0, nfev=0, njev=0, nhev=0,
cg_niter=0, cg_stop_cond=0,
fun=objective.f, grad=objective.g,
lagrangian_grad=np.copy(objective.g),
constr=[c.fun.f for c in prepared_constraints],
jac=[c.fun.J for c in prepared_constraints],
constr_nfev=[0 for c in prepared_constraints],
constr_njev=[0 for c in prepared_constraints],
constr_nhev=[0 for c in prepared_constraints],
v=[c.fun.v for c in prepared_constraints],
method=method)
# Start counting
start_time = time.time()
# Define stop criteria
if method == 'equality_constrained_sqp':
def stop_criteria(state, x, last_iteration_failed,
optimality, constr_violation,
tr_radius, constr_penalty, cg_info):
state = update_state_sqp(state, x, last_iteration_failed,
objective, prepared_constraints,
start_time, tr_radius, constr_penalty,
cg_info)
if verbose == 2:
BasicReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation)
elif verbose > 2:
SQPReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation,
state.constr_penalty,
state.cg_stop_cond)
state.status = None
if callback is not None and callback(np.copy(state.x), state):
state.status = 3
elif state.optimality < gtol and state.constr_violation < gtol:
state.status = 1
elif state.tr_radius < xtol:
state.status = 2
elif state.nit > maxiter:
state.status = 0
return state.status in (0, 1, 2, 3)
elif method == 'tr_interior_point':
def stop_criteria(state, x, last_iteration_failed, tr_radius,
constr_penalty, cg_info, barrier_parameter,
barrier_tolerance):
state = update_state_ip(state, x, last_iteration_failed,
objective, prepared_constraints,
start_time, tr_radius, constr_penalty,
cg_info, barrier_parameter, barrier_tolerance)
if verbose == 2:
BasicReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation)
elif verbose > 2:
IPReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation,
state.constr_penalty,
state.barrier_parameter,
state.cg_stop_cond)
state.status = None
if callback is not None and callback(np.copy(state.x), state):
state.status = 3
elif state.optimality < gtol and state.constr_violation < gtol:
state.status = 1
elif (state.tr_radius < xtol
and state.barrier_parameter < barrier_tol):
state.status = 2
elif state.nit > maxiter:
state.status = 0
return state.status in (0, 1, 2, 3)
if verbose == 2:
BasicReport.print_header()
elif verbose > 2:
if method == 'equality_constrained_sqp':
SQPReport.print_header()
elif method == 'tr_interior_point':
IPReport.print_header()
# Call inferior function to do the optimization
if method == 'equality_constrained_sqp':
def fun_and_constr(x):
f = objective.fun(x)
c_eq, _ = canonical.fun(x)
return f, c_eq
def grad_and_jac(x):
g = objective.grad(x)
J_eq, _ = canonical.jac(x)
return g, J_eq
_, result = equality_constrained_sqp(
fun_and_constr, grad_and_jac, lagrangian_hess,
x0, objective.f, objective.g,
c_eq0, J_eq0,
stop_criteria, state,
initial_constr_penalty, initial_tr_radius,
factorization_method)
elif method == 'tr_interior_point':
_, result = tr_interior_point(
objective.fun, objective.grad, lagrangian_hess,
n_vars, canonical.n_ineq, canonical.n_eq,
canonical.fun, canonical.jac,
x0, objective.f, objective.g,
c_ineq0, J_ineq0, c_eq0, J_eq0,
stop_criteria,
canonical.keep_feasible,
xtol, state, initial_barrier_parameter,
initial_barrier_tolerance,
initial_constr_penalty, initial_tr_radius,
factorization_method)
# Status 3 occurs when the callback function requests termination,
# this is assumed to not be a success.
result.success = True if result.status in (1, 2) else False
result.message = TERMINATION_MESSAGES[result.status]
if verbose == 2:
BasicReport.print_footer()
elif verbose > 2:
if method == 'equality_constrained_sqp':
SQPReport.print_footer()
elif method == 'tr_interior_point':
IPReport.print_footer()
if verbose >= 1:
print(result.message)
print("Number of iterations: {}, function evaluations: {}, "
"CG iterations: {}, optimality: {:.2e}, "
"constraint violation: {:.2e}, execution time: {:4.2} s."
.format(result.nit, result.nfev, result.cg_niter,
result.optimality, result.constr_violation,
result.execution_time))
return result
| {
"content_hash": "d805373cb667ebd36d64d36eb9490751",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 96,
"avg_line_length": 45.837078651685395,
"alnum_prop": 0.5963149078726968,
"repo_name": "gfyoung/scipy",
"id": "2a7238722a08e9e3b5bc4321dbb50446024593ea",
"size": "24477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4142653"
},
{
"name": "C++",
"bytes": "498142"
},
{
"name": "Fortran",
"bytes": "5572451"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "11540629"
},
{
"name": "Shell",
"bytes": "2226"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
"""
Additional tests for PandasArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import PandasDtype
import pandas as pd
import pandas._testing as tm
from pandas.arrays import PandasArray
@pytest.fixture(
    params=[
        np.array(values, dtype=dt)
        for values, dt in [
            (["a", "b"], object),
            ([0, 1], float),
            ([0, 1], int),
            ([0, 1 + 2j], complex),
            ([True, False], bool),
            ([0, 1], "datetime64[ns]"),
            ([0, 1], "timedelta64[ns]"),
        ]
    ]
)
def any_numpy_array(request):
    """Parametrized fixture yielding NumPy arrays of assorted dtypes.

    String and bytes dtypes are deliberately excluded.
    """
    return request.param
# ----------------------------------------------------------------------------
# PandasDtype
@pytest.mark.parametrize(
    "dtype, expected",
    [
        ("bool", True),
        ("int", True),
        ("uint", True),
        ("float", True),
        ("complex", True),
        ("str", False),
        ("bytes", False),
        ("datetime64[ns]", False),
        ("object", False),
        ("void", False),
    ],
)
def test_is_numeric(dtype, expected):
    """PandasDtype._is_numeric is True exactly for the numeric dtypes."""
    assert PandasDtype(dtype)._is_numeric is expected
@pytest.mark.parametrize(
    "dtype, expected",
    [
        ("bool", True),
        ("int", False),
        ("uint", False),
        ("float", False),
        ("complex", False),
        ("str", False),
        ("bytes", False),
        ("datetime64[ns]", False),
        ("object", False),
        ("void", False),
    ],
)
def test_is_boolean(dtype, expected):
    """PandasDtype._is_boolean is True only for the bool dtype."""
    assert PandasDtype(dtype)._is_boolean is expected
def test_repr():
    """The repr shows the class name and the wrapped numpy dtype name."""
    assert repr(PandasDtype(np.dtype("int64"))) == "PandasDtype('int64')"
def test_constructor_from_string():
    """construct_from_string on a dtype name equals construction from the dtype."""
    expected = PandasDtype(np.dtype("int64"))
    assert PandasDtype.construct_from_string("int64") == expected
def test_dtype_univalent(any_numpy_dtype):
    """Wrapping a PandasDtype in PandasDtype again is a no-op."""
    wrapped = PandasDtype(any_numpy_dtype)
    assert PandasDtype(wrapped) == wrapped
# ----------------------------------------------------------------------------
# Construction
def test_constructor_no_coercion():
    """PandasArray refuses anything that is not already an ndarray."""
    with pytest.raises(ValueError, match="NumPy array"):
        PandasArray([1, 2, 3])
def test_series_constructor_with_copy():
    """copy=True must detach the Series from the original ndarray."""
    data = np.array([1, 2, 3])
    series = pd.Series(PandasArray(data), copy=True)
    assert series.values is not data
def test_series_constructor_with_astype():
    """A dtype given to the Series constructor casts the wrapped array."""
    data = PandasArray(np.array([1, 2, 3]))
    expected = pd.Series([1.0, 2.0, 3.0], dtype="float64")
    tm.assert_series_equal(pd.Series(data, dtype="float64"), expected)
def test_from_sequence_dtype():
    """_from_sequence honours an explicit target dtype."""
    source = np.array([1, 2, 3], dtype="int64")
    expected = PandasArray(np.array([1, 2, 3], dtype="uint64"))
    built = PandasArray._from_sequence(source, dtype="uint64")
    tm.assert_extension_array_equal(built, expected)
def test_constructor_copy():
    """copy=True must allocate fresh storage for the wrapped data."""
    source = np.array([0, 1])
    copied = PandasArray(source, copy=True)
    assert not np.shares_memory(copied._ndarray, source)
def test_constructor_with_data(any_numpy_array):
    """The wrapped numpy dtype is exposed unchanged via dtype.numpy_dtype."""
    wrapped = PandasArray(any_numpy_array)
    assert wrapped.dtype.numpy_dtype == any_numpy_array.dtype
# ----------------------------------------------------------------------------
# Conversion
def test_to_numpy():
    """to_numpy returns the backing array, a copy, or a cast as requested."""
    arr = PandasArray(np.array([1, 2, 3]))
    # Default call hands back the backing ndarray itself (no copy).
    assert arr.to_numpy() is arr._ndarray
    # copy=True yields a distinct object.
    assert arr.to_numpy(copy=True) is not arr._ndarray
    # An explicit dtype casts the data.
    casted = arr.to_numpy(dtype="f8")
    tm.assert_numpy_array_equal(casted, np.array([1, 2, 3], dtype="f8"))
# ----------------------------------------------------------------------------
# Setitem
def test_setitem_series():
    """Writing through Series.array mutates the Series itself."""
    ser = pd.Series([1, 2, 3])
    ser.array[0] = 10
    tm.assert_series_equal(ser, pd.Series([10, 2, 3]))
def test_setitem(any_numpy_array):
    """Element assignment mirrors plain ndarray assignment semantics."""
    reference = any_numpy_array
    wrapped = PandasArray(reference, copy=True)
    # Apply the same write to both and compare the results.
    wrapped[0] = wrapped[1]
    reference[0] = reference[1]
    tm.assert_numpy_array_equal(wrapped.to_numpy(), reference)
# ----------------------------------------------------------------------------
# Reductions
def test_bad_reduce_raises():
    """_reduce with an unknown reduction name raises a clear TypeError."""
    arr = PandasArray(np.array([1, 2, 3], dtype="int64"))
    msg = "cannot perform not_a_method with type int"
    with pytest.raises(TypeError, match=msg):
        # Pass the bogus reduction *name*.  Previously the expected error
        # message itself was passed as the name, which only matched by
        # accident because pytest's `match` does a regex *search* (substring)
        # against the doubled-up message.
        arr._reduce("not_a_method")
def test_validate_reduction_keyword_args():
    """Unsupported reduction keyword arguments are rejected."""
    arr = PandasArray(np.array([1, 2, 3]))
    msg = "the 'keepdims' parameter is not supported .*all"
    with pytest.raises(ValueError, match=msg):
        arr.all(keepdims=True)
# ----------------------------------------------------------------------------
# Ops
@pytest.mark.parametrize("ufunc", [np.abs, np.negative, np.positive])
def test_ufunc_unary(ufunc):
arr = PandasArray(np.array([-1.0, 0.0, 1.0]))
result = ufunc(arr)
expected = PandasArray(ufunc(arr._ndarray))
tm.assert_extension_array_equal(result, expected)
def test_ufunc():
    """Multi-output ufuncs (divmod) rewrap each output as PandasArray."""
    arr = PandasArray(np.array([-1.0, 0.0, 1.0]))
    got_div, got_mod = np.divmod(arr, np.add(arr, 2))
    exp_div, exp_mod = np.divmod(arr._ndarray, np.add(arr._ndarray, 2))
    tm.assert_extension_array_equal(got_div, PandasArray(exp_div))
    tm.assert_extension_array_equal(got_mod, PandasArray(exp_mod))
def test_basic_binop():
    """Smoke test for arithmetic; the EA interface tests are exhaustive."""
    arr = PandasArray(np.array([1, 2, 3]))
    expected = PandasArray(np.array([2, 4, 6]))
    tm.assert_extension_array_equal(arr + arr, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_setitem_object_typecode(dtype):
arr = PandasArray(np.array(["a", "b", "c"], dtype=dtype))
arr[0] = "t"
expected = PandasArray(np.array(["t", "b", "c"], dtype=dtype))
tm.assert_extension_array_equal(arr, expected)
def test_setitem_no_coercion():
    """Incompatible values raise; compatible values coerce to the array dtype."""
    # https://github.com/pandas-dev/pandas/issues/28150
    arr = PandasArray(np.array([1, 2, 3]))
    with pytest.raises(ValueError, match="int"):
        arr[0] = "a"
    # A float that fits an int array is coerced to the array's dtype rather
    # than upcasting the underlying array.
    arr[0] = 2.5
    assert isinstance(arr[0], (int, np.integer)), type(arr[0])
def test_setitem_preserves_views():
    # GH#28150, see also extension test of the same name
    arr = PandasArray(np.array([1, 2, 3]))
    # Three flavours of views over the same buffer: an ExtensionArray view,
    # a slice, and a plain ndarray view.
    view1 = arr.view()
    view2 = arr[:]
    view3 = np.asarray(arr)
    # A straightforward in-place write must be visible through every view.
    arr[0] = 9
    assert view1[0] == 9
    assert view2[0] == 9
    assert view3[0] == 9
    # Setting a float into the int array coerces the value in place instead of
    # replacing the backing buffer, so the views must still be live: a write
    # through view1 is visible in `arr` afterwards.
    arr[-1] = 2.5
    view1[-1] = 5
    assert arr[-1] == 5
| {
"content_hash": "fa872709ecadeb05929ce34c240452d0",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 78,
"avg_line_length": 25.96197718631179,
"alnum_prop": 0.5774751025190392,
"repo_name": "jorisvandenbossche/pandas",
"id": "e8e9ee86e77dd677036a0e7ce9e709a68d90bd64",
"size": "6828",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pandas/tests/arrays/test_numpy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "127"
},
{
"name": "C",
"bytes": "360342"
},
{
"name": "CSS",
"bytes": "1438"
},
{
"name": "Cython",
"bytes": "1083849"
},
{
"name": "Dockerfile",
"bytes": "1690"
},
{
"name": "HTML",
"bytes": "456275"
},
{
"name": "Makefile",
"bytes": "507"
},
{
"name": "Python",
"bytes": "17541583"
},
{
"name": "Shell",
"bytes": "10719"
},
{
"name": "Smarty",
"bytes": "7820"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
from google.cloud import vmmigration_v1
def sample_list_target_projects():
    """Lists TargetProjects by paging through the VM Migration API."""
    client = vmmigration_v1.VmMigrationClient()
    request = vmmigration_v1.ListTargetProjectsRequest(
        parent="parent_value",
        page_token="page_token_value",
    )
    # The returned pager fetches pages lazily; iterating it yields each
    # TargetProject in turn.
    for target_project in client.list_target_projects(request=request):
        print(target_project)
# [END vmmigration_v1_generated_VmMigration_ListTargetProjects_sync]
| {
"content_hash": "8d9b84608f79d233bac6a8550fa618a8",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 68,
"avg_line_length": 26.571428571428573,
"alnum_prop": 0.7060931899641577,
"repo_name": "googleapis/python-vm-migration",
"id": "9b31ed21a2ed53df0dda50c2ca54ac3af148dec2",
"size": "1960",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/vmmigration_v1_generated_vm_migration_list_target_projects_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1306401"
},
{
"name": "Shell",
"bytes": "30678"
}
],
"symlink_target": ""
} |
""" TwitterCmd - Interactive, console prompt to twitter
written on top of the python-twitter API.
A lot of credit to DeWitt Clinton and his team of
developers for the excellent 'python-twitter' API on
top of which, this program is written on.
Author: Anand B Pillai ('pythonhacker')
License: BSD License
"""
from twitter import Api
from cmd import Cmd
import sys, os
import re
import urllib2
import optparse
import time
import glob
import cPickle
import subprocess
# Package metadata and change history.
__version__ = 0.3
__author__ = 'Anand B Pillai'
__lastmodified__ = "Thu Oct 8 14:39:24 IST 2009"
__modifications__ = """This version gets session save feature
and ability to enter Python prompt from TwitterCmd and back
while keeping state same across both. Basically you get the
simplicity of TwitterCmd plus the power of python-twitter
in one go!
Oct 8 - Bug in api attribute fixed, loading it fresh always.
Reduced stuff printed in inspecting timeline & user.
"""
LOADER="""
from twittercmd import *
from twitter import Api
import cPickle, urllib2
obj=cPickle.load(open('%s', 'rb'))
if obj.username:
obj.api=Api(obj.username)
else:
obj.api=Api()
g=globals()
g.update(obj.__dict__)
"""
INTRO="""Welcome to TwitterCmd, a command line interface to twitter,
written in Python. TwitterCmd provides a simple, interactive
interface to Twitter with short, easy to remember commands.
For a full set of commands, type "help" on the prompt.
For help for a specific command try "help <cmd>".
Please send comments/bug reports to <abpillai at gmail dot com>
or tweet them to the ID 'pythonhacker'.
"""
HELP="""
The following commands are available.
User commands
1. l <user> <passwd> - Set login credentials.
2. u <user> - Get details of a user.
3. f - Get friend's details.
4. fo - Get followers' details.
Timeline commands
1. pt - Get public timeline.
2. ut [<user>] - Get current user timeline
(given no arguments), or
timeline of given user.
3. ft - Get all friends' timeline.
Message commands
1. m <msg> - Post a status message.
2. im <user> <msg> - Post a direct message to user.
3. r - Get replies to messages.
4. dm - Get all direct messages.
You can also use the 'inspect' or 'i' command to
inspect live objects. The following object types are
supported as argument.
1. s/status - Inspect current msg status.
2. f/p/friends/peers - Inspect current friends status.
3. t/tl/tline - Inspect current timeline.
4. u/user - Inspect current user.
Version 0.2 also adds the power of entering Python
prompt while keeping state intact. To enter Python
prompt use any of 'shell','py' or 'python' commands.
Examples:
# Post a message
Twitter> l user pass
Twitter> m Hey, I love twitter!
# Get direct messages
Twitter> dm
["'No problem. :-)' => dloss"]
# Get names of all friends
Twitter> f
['dloss', 'gvanrossum', 'MallikaLA', 'BarackObama', 'ShashiTharoor']
# Use Python prompt from TwitterCmd
Twitter> py
Entering Python shell...
>>>
# Inspect objects in shell and use python-twitter
# directly by using the 'api' object.
>>> api
<twitter.Api object at 0x7fb69e6509d0>
# All state of TwitterCmd is available as globals
# in the interpreter...
>>> tline
[<twitter.Status object at 0xe643ad0>, <twitter.Status object at 0xe643d50>]
>>> status
<twitter.Status object at 0x7fb69e643850>
>>> status.text
'Completed session saving and entering Python shell and back from TwitterCmd\
, code will be up in a while, Hooray!'
# Use api just as you would before...!
>>> [s.text for s in api.GetPublicTimeline()]
['tweeting from gravity',...]
# Exit Python prompt
>>> ^D
Exiting Python shell...
Twitter>
To exit a session, type q/quit/exit or Ctrl-D.
"""
class TwitterCmdException(Exception):
    """Raised for errors specific to the TwitterCmd interpreter."""
class TwitterCmd(Cmd):
    """ Python command interpreter to Twitter API. Allows a simple
    interactive interface to Twitter to those who prefer twittering
    from their *nix console, while providing the flexibility to
    switch back and forth from Python interactive prompt keeping
    state intact """
    # Maps each short console command to the python-twitter Api method name
    # that implements it; resolved via getattr in `action`.
    commands = {'m' : 'PostUpdate', 'im': 'PostDirectMessage',
                'ut': 'GetUserTimeline', 'pt': 'GetPublicTimeline',
                'ft': 'GetFriendsTimeline', 'fo': 'GetFollowers',
                'f': 'GetFriends', 'r': 'GetReplies',
                'dm': 'GetDirectMessages', 'u':'GetUser',
                'l': 'SetCredentials'}
    prompt = 'Twitter> '
    intro = INTRO
    # Cmd hook overrides: all real work happens in `precmd`, which returns
    # None to request quitting; `onecmd` then returns True (stop the loop)
    # only in that case.  Empty lines and unknown input are no-ops.
    onecmd = lambda self, line: (line==None) and True
    emptyline = lambda self: None
    default = lambda self, line: None
    def __init__(self, user='', passwd=''):
        # python-twitter Api instance; recreated on unpickle since it is
        # excluded from the pickled state (see __getstate__).
        self.api = Api()
        if user and passwd:
            self.api.SetCredentials(user, passwd)
        # Current username
        self.username = user
        # Current command
        self.cmd = ''
        # Current message status
        self.status = None
        # Current timeline
        self.tline = None
        # Current user object
        self.user = None
        # Current friends/followers
        self.peers = None
        # System stuff
        # Python executable
        self.python = sys.executable
        Cmd.__init__(self)
        # Load previous session
        self.load_session()
    def __getstate__(self):
        # Drop unpicklable members (terminal streams and the Api object)
        # before the session is pickled by save_session().
        odict = self.__dict__.copy()
        del odict['stdin']
        del odict['stdout']
        del odict['api']
        return odict
    def __setstate__(self, dict):
        # Don't update api object...
        try:
            del dict['api']
        except KeyError:
            pass
        self.__dict__.update(dict)
    def precmd(self, line):
        # Central dispatcher: parses the line, handles quit/help/shell
        # specials, and forwards recognised commands to `action`.  Returning
        # None (quit) makes the overridden `onecmd` stop the loop.
        line = line.strip()
        if len(line)==0:
            return line
        if line.lower() in ('q','quit','exit','eof'):
            print
            self.save_session()
            return None
        if line.lower().startswith('help'):
            self.print_help(line)
            return line
        if line.lower() in ('shell','py','python'):
            # Start Python interpreter with current state
            self.run_wild()
            return line
        l = line.split(' ')
        cmd, rest = l[0], l[1:]
        # Inspect objects ?
        if cmd.lower() in ('i','inspect'):
            self.inspect(' '.join(l[1:]).lower())
            return line
        elif cmd not in self.commands:
            print "Command '%s' not understood" % cmd
            return line
        self.cmd = cmd.strip()
        try:
            self.action(*rest)
        except IndexError, e:
            print 'Command "%s" requires arguments!' % self.cmd
        except urllib2.HTTPError, e:
            print 'Twitter says:',e
        # Any other exception
        except Exception, e:
            print 'Twitter API says:',e
        return line
    def inspect(self, obj_type):
        """ Inspect our objects """
        # Prints the cached result of the most recent matching command;
        # several short aliases map to each cached object.
        if obj_type in ('status', 's', 'st'):
            if type(self.status) is list:
                if self.status: print [str(i) for i in self.status]
            else:
                print str(self.status)
        elif obj_type in ('tline','t','tl'):
            if self.tline: print [i.text for i in self.tline]
        elif obj_type in ('user','u'):
            print self.user
        elif obj_type in ('peers','friends','p','f'):
            if self.peers: print [(i.name, i.screen_name) for i in self.peers]
        else:
            print 'Unknown object type',obj_type
    def action(self, *args):
        """ Perform a twitter action """
        # Resolve the Api method for the current short command, call it with
        # arguments appropriate to that command, cache the result on self and
        # print a summary.
        f = getattr(self.api, self.commands.get(self.cmd, None))
        if self.cmd == 'l':
            if len(args)>=2:
                f(args[0], args[1])
            else:
                f(args[0], '')
            print 'Set login credentials'
        elif self.cmd == 'm':
            # Force IndexError
            x = args[0]
            self.status = f(' '.join(args))
            print repr(self.status)
        elif self.cmd == 'im':
            # Force IndexError
            x = args[0]
            self.status = f(args[0], ' '.join(args[1:]))
            print repr(self.status)
        elif self.cmd == 'ut':
            self.tline = [f(args[0]) if len(args) else f()][0]
            print [s.text for s in self.tline]
        elif self.cmd == 'pt':
            self.tline = f()
            print [s.text for s in self.tline]
        elif self.cmd == 'ft':
            self.tline = f()
            print [s.text for s in self.tline]
        elif self.cmd == 'r':
            self.status = f()
            print [s.text for s in self.status]
        elif self.cmd == 'dm':
            self.status = f()
            print [' => '.join(("'" + s.text + "'", s._sender_screen_name)) \
                   for s in self.status]
        elif self.cmd in ('f','fo'):
            self.peers = f()
            print [s.screen_name for s in self.peers]
        elif self.cmd=='u':
            self.user = f(args[0])
            print self.user.name
    def load_session(self):
        """ Load most recent session from disk, if present """
        # Sessions are pickles named ~/.twitter_session_<epoch>; only one is
        # kept on disk (see save_session), so the first glob hit is used.
        fnames = glob.glob(os.path.join(os.path.expanduser('~'),'.twitter_session_*'))
        if len(fnames):
            print 'Loading saved session...'
            try:
                obj = cPickle.load(open(fnames[0], 'rb'))
                self.__setstate__(obj.__dict__)
            except cPickle.UnpicklingError, e:
                print 'Error loading saved session'
    def save_session(self):
        """ Save current session to disk """
        # Writes a fresh timestamped pickle, then removes any earlier session
        # files so load_session always finds exactly one.
        fname = os.path.join(os.path.expanduser('~'), '.twitter_session_%d' % int(time.time()))
        try:
            # Remove older sessions
            olds = glob.glob(os.path.join(os.path.expanduser('~'),'.twitter_session_*'))
            cPickle.dump(self, open(fname, 'wb'))
            # Remove older sessions
            for f in olds:
                try:
                    os.remove(f)
                except OSError, e:
                    pass
        except cPickle.PicklingError, e:
            print 'Error saving current session to disk:',e
        return fname
    def run_wild(self):
        # Persist the session, write a loader module that restores it, then
        # hand control to an interactive child interpreter (`python -i`).
        session = self.save_session()
        # Create a module that will load the saved session
        loader = LOADER % session
        module = '.twitter_loader.py'
        open(module,'w').write(loader)
        print 'Entering Python shell...'
        subprocess.call([self.python,"-i", module])
        print 'Exiting Python shell...'
    def print_help(self, line):
        # Bare "help" prints the full HELP text; "help <cmd>" prints a
        # one-line description of that command.
        if line.lower()=='help':
            print HELP
        else:
            l = line.split(' ')
            cmd, rest = l[0], ' '.join(l[1:])
            if cmd.lower() != 'help':
                return
            if rest=='m':
                print 'm <msg>: Post status message <msg>'
            elif rest=='im':
                print 'm <user> <msg>: Post direct message <msg> to user <user>'
            elif rest=='ut':
                print 'ut [<user>]: Get current user timeline without arguments\n\
and given user timeline with arguments'
            elif rest=='pt':
                print 'pt: Get twitter public timeline'
            elif rest=='ft':
                print "ft: Get all friends' timelines"
            elif rest=='fo':
                print 'fo: Get all follower details'
            elif rest=='f':
                print "f: Get all friends' details"
            elif rest=='dm':
                print 'dm: Get all direct messages'
            elif rest=='r':
                print 'r: Get all replies to direct messages'
            elif rest=='u':
                print 'u <user>: Get a given user details'
            elif rest=='l':
                print 'l <user> <passwd>: Set current user credentials'
            elif rest in ('i','inspect'):
                print 'i <obj_type>: Inspect object of given type'
            else:
                print 'No such command "%s"' % rest
if __name__ == "__main__":
p = optparse.OptionParser()
p.add_option('-u','--user',dest='user',default='',help='Optional Twitter username')
p.add_option('-p','--passwd',dest='passw',default='',help='Optional Twitter password')
options, args = p.parse_args()
user, passwd = options.user, options.passw
TwitterCmd(user, passwd).cmdloop()
| {
"content_hash": "ddbab9f742e99bcf7c874df6a9c84e43",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 95,
"avg_line_length": 31.99492385786802,
"alnum_prop": 0.5579882595589402,
"repo_name": "ActiveState/code",
"id": "7c083d5a06f6cb1d1848fc5e73bcd992e8df2b7f",
"size": "12629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/576923_TwitterCmd__Interactive_console/recipe-576923.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
"""Library for creating periodic distribution shift tasks on Stack Overflow."""
# TODO(b/193904908): add unit tests.
from typing import Optional
import tensorflow as tf
import tensorflow_federated as tff
from periodic_distribution_shift.datasets import stackoverflow_nwp_preprocessing as word_prediction_preprocessing
from periodic_distribution_shift.models import keras_utils_dual_branch_kmeans_lm
from periodic_distribution_shift.tasks import dist_shift_task
from periodic_distribution_shift.tasks import dist_shift_task_data
constants = tff.simulation.baselines.stackoverflow.constants
def filter_qa_fn(q_or_a):
  """Builds a tf.data predicate keeping examples whose 'type' equals `q_or_a`."""

  def predicate(example):
    return tf.math.equal(example['type'], tf.constant(q_or_a))

  return predicate
class TransposableEmbedding(tf.keras.layers.Layer):
  """A Keras layer implementing a transposed projection output layer."""

  def __init__(self, embedding_layer: tf.keras.layers.Embedding):
    super().__init__()
    # Reuse the embedding layer's weight matrix so the output projection is
    # tied to the input embedding (weight sharing).
    self.embeddings = embedding_layer.embeddings

  # Placing `tf.matmul` under the `call` method is important for backpropagating
  # the gradients of `self.embeddings` in graph mode.
  def call(self, inputs):
    return tf.matmul(inputs, self.embeddings, transpose_b=True)
def create_recurrent_model(vocab_size: int,
                           embedding_size: int = 96,
                           num_lstm_layers: int = 1,
                           lstm_size: int = 670,
                           shared_embedding: bool = False) -> tf.keras.Model:
  """Builds an LSTM language model with an initial embedding layer.

  Token ids in `[0, vocab_size)` are embedded into `embedding_size`
  dimensions, passed through `num_lstm_layers` LSTM layers of width
  `lstm_size` (each followed by a dense projection back to
  `embedding_size`), and finally projected to `vocab_size` logits.  No
  softmax is applied; compute it in the loss for backpropagation.  The
  model also takes a (pass-through) `group` input.

  Args:
    vocab_size: Vocabulary size to use in the initial embedding layer.
    embedding_size: The size of the embedding layer.
    num_lstm_layers: The number of LSTM layers in the model.
    lstm_size: The size of each LSTM layer.
    shared_embedding: If set to `True`, the final layer of the model is a dense
      layer given by the transposition of the embedding layer. If `False`, the
      final dense layer is instead learned separately.

  Returns:
    An uncompiled `tf.keras.Model`.
  """
  # Validate all sizing arguments up front (same order and messages as the
  # individual checks they replace).
  for arg_name, arg_value in (('vocab_size', vocab_size),
                              ('embedding_size', embedding_size),
                              ('num_lstm_layers', num_lstm_layers),
                              ('lstm_size', lstm_size)):
    if arg_value < 1:
      raise ValueError('%s must be a positive integer.' % arg_name)
  tokens = tf.keras.layers.Input(shape=(None,))
  group = tf.keras.layers.Input(shape=(None,), name='group')
  embedding_layer = tf.keras.layers.Embedding(
      input_dim=vocab_size, output_dim=embedding_size, mask_zero=True)
  hidden = embedding_layer(tokens)
  # Stack LSTMs, each followed by a projection back to the embedding width.
  for _ in range(num_lstm_layers):
    lstm_out = tf.keras.layers.LSTM(lstm_size, return_sequences=True)(hidden)
    hidden = tf.keras.layers.Dense(embedding_size)(lstm_out)
  if shared_embedding:
    # Tie the output projection to the input embedding weights.
    logits = TransposableEmbedding(embedding_layer)(hidden)
  else:
    logits = tf.keras.layers.Dense(vocab_size, activation=None)(hidden)
  return tf.keras.Model(inputs=[tokens, group], outputs=logits)
def create_recurrent_merge_branch_model(
        vocab_size: int,
        embedding_size: int = 96,
        num_lstm_layers: int = 1,
        lstm_size: int = 670,
        shared_embedding: bool = False) -> tf.keras.Model:
    """Builds a recurrent token model whose logits average two dense heads.

    The network embeds integer token sequences (values in
    `[0, vocab_size - 1]`) into an `embedding_size`-dimensional space, then
    stacks `num_lstm_layers` LSTM layers of width `lstm_size`. The last LSTM
    feeds two parallel dense heads of `embedding_size` units; both heads pass
    through one shared final projection to `vocab_size` logits, and the model
    output is the mean of the two logit tensors (matching the capacity of the
    dual-branch model). No softmax is applied to the logits; that should be
    done in the loss function for the purposes of backpropagation.

    Args:
        vocab_size: Vocabulary size used by the initial embedding layer.
        embedding_size: Dimensionality of the embedding layer.
        num_lstm_layers: Number of stacked LSTM layers.
        lstm_size: Number of units in each LSTM layer.
        shared_embedding: If `True`, the final projection is the transpose of
            the input embedding; if `False`, it is a separately learned dense
            layer.

    Returns:
        An uncompiled `tf.keras.Model`.
    """
    for arg_name, arg_value in (('vocab_size', vocab_size),
                                ('embedding_size', embedding_size),
                                ('num_lstm_layers', num_lstm_layers),
                                ('lstm_size', lstm_size)):
        if arg_value < 1:
            raise ValueError(f'{arg_name} must be a positive integer.')
    token_input = tf.keras.layers.Input(shape=(None,))
    group_input = tf.keras.layers.Input(shape=(None,), name='group')
    embedding_layer = tf.keras.layers.Embedding(
        input_dim=vocab_size, output_dim=embedding_size, mask_zero=True)
    hidden = embedding_layer(token_input)
    branch_heads = [
        tf.keras.layers.Dense(embedding_size, name=f'dist_head_{i}')
        for i in range(2)
    ]
    branch_outputs = []
    for layer_index in range(num_lstm_layers):
        lstm_out = tf.keras.layers.LSTM(
            lstm_size, return_sequences=True)(hidden)
        if layer_index == num_lstm_layers - 1:
            # The final LSTM fans out into the two distribution heads.
            branch_outputs = [head(lstm_out) for head in branch_heads]
        else:
            hidden = tf.keras.layers.Dense(embedding_size)(lstm_out)
    if shared_embedding:
        final_projection = TransposableEmbedding(embedding_layer)
    else:
        final_projection = tf.keras.layers.Dense(vocab_size, activation=None)
    logits = (final_projection(branch_outputs[0]) +
              final_projection(branch_outputs[1])) * 0.5
    return tf.keras.Model(inputs=[token_input, group_input], outputs=logits)
def create_recurrent_dual_branch_model(
        vocab_size: int,
        embedding_size: int = 96,
        num_lstm_layers: int = 1,
        lstm_size: int = 670,
        shared_embedding: bool = False) -> tf.keras.Model:
    """Builds a recurrent token model with two output branches.

    The network embeds integer token sequences (values in
    `[0, vocab_size - 1]`) into an `embedding_size`-dimensional space and
    stacks `num_lstm_layers` LSTM layers of width `lstm_size`. The last LSTM
    feeds two parallel dense heads of `embedding_size` units; each head is
    projected to `vocab_size` logits by one shared final projection. The model
    outputs one logits tensor per branch plus a concatenated, mask-averaged
    feature vector of both heads intended for k-means clustering. No softmax
    is applied to the logits; that should be done in the loss function for the
    purposes of backpropagation.

    Args:
        vocab_size: Vocabulary size used by the initial embedding layer.
        embedding_size: Dimensionality of the embedding layer.
        num_lstm_layers: Number of stacked LSTM layers.
        lstm_size: Number of units in each LSTM layer.
        shared_embedding: If `True`, the final projection is the transpose of
            the input embedding; if `False`, it is a separately learned dense
            layer.

    Returns:
        An uncompiled `tf.keras.Model`.
    """
    for arg_name, arg_value in (('vocab_size', vocab_size),
                                ('embedding_size', embedding_size),
                                ('num_lstm_layers', num_lstm_layers),
                                ('lstm_size', lstm_size)):
        if arg_value < 1:
            raise ValueError(f'{arg_name} must be a positive integer.')
    tokens = tf.keras.layers.Input(shape=(None,), name='tokens')
    group = tf.keras.layers.Input(shape=(None,), name='group')
    embedding_layer = tf.keras.layers.Embedding(
        input_dim=vocab_size, output_dim=embedding_size, mask_zero=True)
    hidden = embedding_layer(tokens)
    branch_heads = [
        tf.keras.layers.Dense(embedding_size, name=f'dist_head_{i}')
        for i in range(2)
    ]
    branch_outputs = []
    for layer_index in range(num_lstm_layers):
        lstm_out = tf.keras.layers.LSTM(
            lstm_size, return_sequences=True)(hidden)
        if layer_index == num_lstm_layers - 1:
            # The final LSTM fans out into the two distribution heads.
            branch_outputs = [head(lstm_out) for head in branch_heads]
        else:
            hidden = tf.keras.layers.Dense(embedding_size)(lstm_out)
    if shared_embedding:
        final_projection = TransposableEmbedding(embedding_layer)
    else:
        final_projection = tf.keras.layers.Dense(vocab_size, activation=None)
    outputs = [final_projection(branch) for branch in branch_outputs]
    # Mask-averaged per-branch features for k-means clustering; token 0 is
    # treated as padding (it is also the `mask_zero` token above).
    nopad_mask = tf.expand_dims(
        tf.cast(tf.math.not_equal(tokens, 0), tf.float32), axis=-1)
    n_valid = tf.reduce_sum(nopad_mask, axis=1)
    kmeans_features = [
        tf.reduce_sum(nopad_mask * branch, axis=1) / (n_valid + 1e-7)
        for branch in branch_outputs
    ]
    outputs.append(tf.concat(kmeans_features, axis=1))
    return tf.keras.Model(inputs=[tokens, group], outputs=outputs)
def create_word_prediction_task_from_datasets(
        train_client_spec: tff.simulation.baselines.ClientSpec,
        eval_client_spec: Optional[tff.simulation.baselines.ClientSpec],
        sequence_length: int,
        vocab_size: int,
        num_out_of_vocab_buckets: int,
        train_data: tff.simulation.datasets.ClientData,
        test_data: tff.simulation.datasets.ClientData,
        validation_data: tff.simulation.datasets.ClientData,
        model_type: str = 'single_branch',
        shared_embedding: bool = False,
        num_validation_examples: int = 2000,
        aggregated_kmeans: bool = False,
        label_smooth_w: float = 0.,
        label_smooth_eps: float = 0.0,
        batch_majority_voting: bool = False,
        use_mixed: bool = False,
) -> dist_shift_task.DistShiftTask:
    """Creates a baseline task for next-word prediction on Stack Overflow.

    The goal of the task is to take `sequence_length` words from a post and
    predict the next word. Here, all posts are drawn from the Stack Overflow
    forum, and a client corresponds to a user.

    Args:
      train_client_spec: A `tff.simulation.baselines.ClientSpec` specifying how
        to preprocess train client data.
      eval_client_spec: An optional `tff.simulation.baselines.ClientSpec`
        specifying how to preprocess evaluation client data. If set to `None`,
        the evaluation datasets will use a batch size of 512 with no extra
        preprocessing.
      sequence_length: A positive integer dictating the length of each word
        sequence in a client's dataset.
      vocab_size: Integer dictating the number of most frequent words in the
        entire corpus to use for the task's vocabulary.
      num_out_of_vocab_buckets: The number of out-of-vocabulary buckets to use.
      train_data: A `tff.simulation.datasets.ClientData` used for training.
      test_data: A `tff.simulation.datasets.ClientData` used for testing.
      validation_data: A `tff.simulation.datasets.ClientData` used for
        validation.
      model_type: Model type, only for the baseline. `single_branch` to train
        a single branch model. Otherwise, we take the average of two branches.
      shared_embedding: Whether to share the word embedding with the prediction
        layer.
      num_validation_examples: Max number of validation samples.
      aggregated_kmeans: Whether to use aggregated k-means. If set to `True`,
        we will create a dual branch model, and use k-means based on the
        features to select branches in the forward pass.
      label_smooth_w: Weight of label smoothing regularization on the
        unselected branch. Only effective when `aggregated_kmeans = True`.
      label_smooth_eps: Epsilon of the label smoothing for the unselected
        branch. The value should be within 0 to 1, where 1 enforces the
        prediction to be uniform on all labels, and 0 falls back to cross
        entropy loss on one-hot label. The label smoothing regularization is
        defined as `L_{CE}(g(x), (1 - epsilon) * y + epsilon * 1/n)`, where
        L_{CE} is the cross entropy loss, g(x) is the prediction, epsilon
        represents the smoothness. Only effective when
        `aggregated_kmeans = True`.
      batch_majority_voting: Whether to use batch-wise majority voting to
        select the branch during test time. If set to True, we select the
        branch according to the majority within the minibatch during
        inference. Otherwise, we select the branch for each sample. Only
        effective when `aggregated_kmeans = True`.
      use_mixed: Whether to use the weighted prediction of the two branches.
        Weights are defined as the distance to the k-means cluster centers.

    Returns:
      A `dist_shift_task.DistShiftTask`.

    Raises:
      ValueError: If `sequence_length`, `vocab_size` or
        `num_out_of_vocab_buckets` is not a positive integer.
    """
    if sequence_length < 1:
        raise ValueError('sequence_length must be a positive integer')
    if vocab_size < 1:
        raise ValueError('vocab_size must be a positive integer')
    if num_out_of_vocab_buckets < 1:
        raise ValueError('num_out_of_vocab_buckets must be a positive integer')
    vocab = list(
        tff.simulation.datasets.stackoverflow.load_word_counts(
            vocab_size=vocab_size).keys())
    if eval_client_spec is None:
        eval_client_spec = tff.simulation.baselines.ClientSpec(
            num_epochs=1, batch_size=512, shuffle_buffer_size=1)
    train_preprocess_fn = word_prediction_preprocessing.create_preprocess_fn(
        train_client_spec,
        vocab,
        sequence_length=sequence_length,
        num_out_of_vocab_buckets=num_out_of_vocab_buckets)
    eval_preprocess_fn = word_prediction_preprocessing.create_preprocess_fn(
        eval_client_spec,
        vocab,
        sequence_length=sequence_length,
        num_out_of_vocab_buckets=num_out_of_vocab_buckets)
    # Build the validation splits: the (possibly truncated) full set, plus
    # question-only and answer-only subsets.
    full_validation_set = validation_data.create_tf_dataset_from_all_clients()
    if num_validation_examples is not None:
        full_validation_set = full_validation_set.take(num_validation_examples)
    question_val_set = full_validation_set.filter(filter_qa_fn('question'))
    answer_val_set = full_validation_set.filter(filter_qa_fn('answer'))
    dataset_dict = {}
    dataset_dict['full'] = eval_preprocess_fn(full_validation_set)
    dataset_dict['question'] = eval_preprocess_fn(question_val_set)
    dataset_dict['answer'] = eval_preprocess_fn(answer_val_set)
    task_datasets = dist_shift_task_data.DistShiftDatasets(
        train_data=train_data,
        test_data=test_data,
        validation_data_dict=dataset_dict,
        train_preprocess_fn=train_preprocess_fn,
        eval_preprocess_fn=eval_preprocess_fn)
    special_tokens = word_prediction_preprocessing.get_special_tokens(
        vocab_size, num_out_of_vocab_buckets=num_out_of_vocab_buckets)
    pad_token = special_tokens.padding
    oov_tokens = special_tokens.out_of_vocab
    eos_token = special_tokens.end_of_sentence

    def metrics_builder():
        # All accuracy metrics mask padding; the "without_out_of_vocab"
        # variants additionally mask OOV buckets (and end-of-sentence for the
        # last one).
        return [
            tff.simulation.baselines.keras_metrics.NumTokensCounter(
                masked_tokens=[pad_token]),
            tff.simulation.baselines.keras_metrics.MaskedCategoricalAccuracy(
                name='accuracy', masked_tokens=[pad_token]),
            tff.simulation.baselines.keras_metrics.MaskedCategoricalAccuracy(
                name='accuracy_without_out_of_vocab',
                masked_tokens=[pad_token] + oov_tokens),
            # Notice that the beginning of sentence token never appears in the
            # ground truth label.
            tff.simulation.baselines.keras_metrics.MaskedCategoricalAccuracy(
                name='accuracy_without_out_of_vocab_or_end_of_sentence',
                masked_tokens=[pad_token, eos_token] + oov_tokens),
        ]

    # The total vocabulary size is the number of words in the vocabulary, plus
    # the number of out-of-vocabulary tokens, plus three tokens used for
    # padding, beginning of sentence and end of sentence.
    extended_vocab_size = (
        vocab_size + special_tokens.get_number_of_special_tokens())

    def model_fn() -> tff.learning.Model:
        # Builds a fresh model per invocation, as TFF requires of `model_fn`s.
        if aggregated_kmeans:
            return keras_utils_dual_branch_kmeans_lm.from_keras_model(
                keras_model=create_recurrent_dual_branch_model(
                    vocab_size=extended_vocab_size,
                    shared_embedding=shared_embedding),
                loss=tf.keras.losses.SparseCategoricalCrossentropy(
                    from_logits=True, reduction='none'),
                input_spec=task_datasets.element_type_structure,
                metrics=metrics_builder(),
                from_logits=True,
                uniform_reg=label_smooth_w,
                label_smoothing=label_smooth_eps,
                batch_majority_voting=batch_majority_voting,
                use_mixed=use_mixed)
        # FIX: this local used to be named `model_fn`, shadowing the enclosing
        # nested function's own name; renamed for clarity.
        if model_type == 'single_branch':
            keras_model_builder = create_recurrent_model
        else:
            keras_model_builder = create_recurrent_merge_branch_model
        return tff.learning.keras_utils.from_keras_model(
            keras_model=keras_model_builder(vocab_size=extended_vocab_size),
            loss=tf.keras.losses.SparseCategoricalCrossentropy(
                from_logits=True),
            input_spec=task_datasets.element_type_structure,
            metrics=metrics_builder())

    return dist_shift_task.DistShiftTask(task_datasets, model_fn)
def cid_qa_filter_fn(cid):
    """Returns a predicate selecting 'question' or 'answer' samples by client.

    Clients whose numeric id is below the fixed threshold keep only samples
    whose `type` field equals 'question'; all other clients keep only
    'answer' samples.
    """
    mid_cid = tf.constant(1816184)

    def filter_(sample):
        numeric_cid = tf.strings.to_number(cid, mid_cid.dtype)
        if tf.math.less(numeric_cid, mid_cid):
            wanted_type = tf.constant('question')
        else:
            wanted_type = tf.constant('answer')
        return tf.math.equal(sample['type'], wanted_type)

    return filter_
def create_word_prediction_task(
        train_client_spec: tff.simulation.baselines.ClientSpec,
        eval_client_spec: Optional[tff.simulation.baselines.ClientSpec] = None,
        sequence_length: int = constants.DEFAULT_SEQUENCE_LENGTH,
        vocab_size: int = constants.DEFAULT_WORD_VOCAB_SIZE,
        num_out_of_vocab_buckets: int = 1,
        cache_dir: Optional[str] = None,
        use_synthetic_data: bool = False,
        model_type='share_second',
        shared_embedding: bool = False,
        num_val_samples: int = 2000,
        aggregated_kmeans: bool = False,
        label_smooth_w: float = 0.,
        label_smooth_eps: float = 1.0,
        batch_majority_voting: bool = False,
        use_mixed: bool = False,
) -> dist_shift_task.DistShiftTask:
    """Creates a baseline task for next-word prediction on Stack Overflow.

    Loads the Stack Overflow data (or a synthetic stand-in) and delegates to
    `create_word_prediction_task_from_datasets`. The goal of the task is to
    take `sequence_length` words from a post and predict the next word; a
    client corresponds to a user.

    Args:
      train_client_spec: A `tff.simulation.baselines.ClientSpec` specifying how
        to preprocess train client data.
      eval_client_spec: An optional `tff.simulation.baselines.ClientSpec`
        specifying how to preprocess evaluation client data. If set to `None`,
        the evaluation datasets use a default batching with no extra
        preprocessing.
      sequence_length: A positive integer dictating the length of each word
        sequence in a client's dataset.
      vocab_size: Integer dictating the number of most frequent words in the
        entire corpus to use for the task's vocabulary.
      num_out_of_vocab_buckets: The number of out-of-vocabulary buckets to use.
      cache_dir: An optional directory to cache the downloaded datasets. If
        `None`, they will be cached to `~/.tff/`.
      use_synthetic_data: A boolean indicating whether to use synthetic Stack
        Overflow data. This option should only be used for testing purposes,
        in order to avoid downloading the entire Stack Overflow dataset.
      model_type: Model type, only for the baseline. `single_branch` to train
        a single branch model. Otherwise, we take the average of two branches.
      shared_embedding: Whether to share the word embedding with the
        prediction layer.
      num_val_samples: Max number of validation samples.
      aggregated_kmeans: Whether to use aggregated k-means. If set to `True`,
        we will create a dual branch model, and use k-means based on the
        features to select branches in the forward pass.
      label_smooth_w: Weight of label smoothing regularization on the
        unselected branch. Only effective when `aggregated_kmeans = True`.
      label_smooth_eps: Epsilon of the label smoothing for the unselected
        branch, within 0 to 1: 1 enforces a uniform prediction over all
        labels, 0 falls back to cross entropy on the one-hot label. Defined as
        `L_{CE}(g(x), (1 - epsilon) * y + epsilon * 1/n)`. Only effective when
        `aggregated_kmeans = True`.
      batch_majority_voting: Whether to use batch-wise majority voting to
        select the branch during test time; otherwise the branch is selected
        per sample. Only effective when `aggregated_kmeans = True`.
      use_mixed: Whether to use the weighted prediction of the two branches,
        weighted by distance to the k-means cluster centers.

    Returns:
      A `dist_shift_task.DistShiftTask`.
    """
    if use_synthetic_data:
        # One small synthetic dataset stands in for all three splits.
        synthetic = tff.simulation.datasets.stackoverflow.get_synthetic()
        train_set, validation_set, test_set = synthetic, synthetic, synthetic
    else:
        train_set, validation_set, test_set = (
            tff.simulation.datasets.stackoverflow.load_data(
                cache_dir=cache_dir))
    return create_word_prediction_task_from_datasets(
        train_client_spec,
        eval_client_spec,
        sequence_length,
        vocab_size,
        num_out_of_vocab_buckets,
        train_set,
        test_set,
        validation_set,
        model_type=model_type,
        num_validation_examples=num_val_samples,
        shared_embedding=shared_embedding,
        aggregated_kmeans=aggregated_kmeans,
        label_smooth_w=label_smooth_w,
        label_smooth_eps=label_smooth_eps,
        batch_majority_voting=batch_majority_voting,
        use_mixed=use_mixed,
    )
| {
"content_hash": "6f81f79aa97e1f4123a1e5b35492cf2a",
"timestamp": "",
"source": "github",
"line_count": 533,
"max_line_length": 113,
"avg_line_length": 43.72607879924953,
"alnum_prop": 0.7113618810606711,
"repo_name": "google-research/federated",
"id": "d58c5ec7d6cc316f7c37beb0713f2f8b16aa2649",
"size": "23883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "periodic_distribution_shift/tasks/stackoverflow_nwp_tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "76424"
},
{
"name": "Python",
"bytes": "4122952"
},
{
"name": "Shell",
"bytes": "7089"
},
{
"name": "Starlark",
"bytes": "97189"
}
],
"symlink_target": ""
} |
import SimpleHTTPServer
import SocketServer
import logging
import cgi
PORT = 9200
class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Request handler that logs headers (and POST form fields) via
    logging.error before serving files like the stock handler."""
    def do_GET(self):
        # Log the raw request headers, then fall through to file serving.
        logging.error(self.headers)
        SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
    def do_POST(self):
        # Log headers, parse the body as form data, and log each field.
        logging.error(self.headers)
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={'REQUEST_METHOD':'POST',
                     'CONTENT_TYPE':self.headers['Content-Type'],
                     })
        for item in form.list:
            logging.error(item)
        # NOTE(review): the POST is answered with a GET-style file response —
        # presumably intentional so the client still gets a 200; confirm.
        SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
# Serve on all interfaces using the logging handler above; blocks forever.
Handler = ServerHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "serving at port", PORT
httpd.serve_forever()
| {
"content_hash": "9c6a8655839243b471f6979375ef8b1f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 65,
"avg_line_length": 26.35483870967742,
"alnum_prop": 0.6499388004895961,
"repo_name": "LabAdvComp/osdcquery",
"id": "87e784fd6eb4e60979890f0cf9a60813423648fc",
"size": "817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "osdcquery/test/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19357"
}
],
"symlink_target": ""
} |
"""
TODO: module docs
"""
import collections
import sys
import os
import stat
try:
import pgdb
from gppylib.commands.unix import UserId
except ImportError, e:
sys.exit('Error: unable to import module: ' + str(e))
from gppylib import gplog
logger = gplog.get_default_logger()
# Raised by connect() when every connection attempt has failed.
class ConnectionError(StandardError): pass
class Pgpass():
""" Class for handling .pgpass file.
"""
entries = []
valid_pgpass = True
def __init__(self):
HOME = os.getenv('HOME')
PGPASSFILE = os.getenv('PGPASSFILE', '%s/.pgpass' % HOME)
if not os.path.exists(PGPASSFILE):
return
st_info = os.stat(PGPASSFILE)
mode = str(oct(st_info[stat.ST_MODE] & 0777))
if mode != "0600":
print 'WARNING: password file "%s" has group or world access; permissions should be u=rw (0600) or less' % PGPASSFILE
self.valid_pgpass = False
return
try:
fp = open(PGPASSFILE, 'r')
try:
lineno = 1
for line in fp:
line = line.strip()
if line.startswith('#'):
continue
try:
(hostname, port, database, username, password) = line.strip().split(':')
entry = {'hostname': hostname,
'port': port,
'database': database,
'username': username,
'password': password }
self.entries.append(entry)
except:
print 'Invalid line in .pgpass file. Line number %d' % lineno
lineno += 1
except IOError:
pass
finally:
if fp: fp.close()
except OSError:
pass
def get_password(self, username, hostname, port, database):
for entry in self.entries:
if ((entry['hostname'] == hostname or entry['hostname'] == '*') and
(entry['port'] == str(port) or entry['port'] == '*') and
(entry['database'] == database or entry['database'] == '*') and
(entry['username'] == username or entry['username'] == '*')):
return entry['password']
return None
def pgpass_valid(self):
return self.valid_pgpass
class DbURL:
    """DbURL is used to store all of the data required to get at a PG
    or GP database.

    Omitted constructor arguments fall back to the matching libpq environment
    variables (PGHOST, PGPORT, PGDATABASE, PGUSER, PGPASSWORD) and, for the
    password, the user's .pgpass file.
    """
    # Class-level defaults; each is overwritten per instance in __init__.
    pghost = 'foo'
    pgport = 5432
    pgdb = 'template1'
    pguser = 'username'
    pgpass = 'pass'
    timeout = None
    retries = None

    def __init__(self, hostname=None, port=0, dbname=None, username=None,
                 password=None, timeout=None, retries=None):
        if hostname is None:
            self.pghost = os.environ.get('PGHOST', 'localhost')
        else:
            self.pghost = hostname

        # BUG FIX: this was `if port is 0:` — an identity comparison with an
        # int literal, which only happens to work because CPython caches small
        # ints (and warns on modern interpreters). Equality is what is meant.
        if port == 0:
            self.pgport = int(os.environ.get('PGPORT', '5432'))
        else:
            self.pgport = int(port)

        if dbname is None:
            self.pgdb = os.environ.get('PGDATABASE', 'template1')
        else:
            self.pgdb = dbname

        if username is None:
            self.pguser = os.environ.get('PGUSER', os.environ.get('USER', None))
            if self.pguser is None:
                # fall back to /usr/bin/id
                self.pguser = UserId.local('Get uid')
            if self.pguser is None or self.pguser == '':
                raise Exception('Both $PGUSER and $USER env variables are not set!')
        else:
            self.pguser = username

        if password is None:
            # Try the user's .pgpass file first, then $PGPASSWORD.
            pgpass = Pgpass()
            if pgpass.pgpass_valid():
                password = pgpass.get_password(self.pguser, self.pghost,
                                               self.pgport, self.pgdb)
            if password:
                self.pgpass = password
            else:
                self.pgpass = os.environ.get('PGPASSWORD', None)
        else:
            self.pgpass = password

        if timeout is not None:
            self.timeout = int(timeout)

        if retries is None:
            self.retries = 1
        else:
            self.retries = int(retries)

    def __str__(self):
        # MPP-13617: bracket-quote a host that itself contains ':' so the
        # colon-separated representation stays parseable.
        def canonicalize(s):
            if ':' not in s: return s
            return '[' + s + ']'
        return "%s:%d:%s:%s:%s" % \
            (canonicalize(self.pghost), self.pgport, self.pgdb,
             self.pguser, self.pgpass)
# This wrapper of pgdb provides two useful additions:
# 1. pg notice is accessible to a user of connection returned by dbconn.connect(),
# lifted from the underlying _pg connection
# 2. multiple calls to dbconn.close() should not return an error
class Connection(pgdb.Connection):
    """Wrapper of pgdb.Connection adding:
    1. access to pg notices (lifted from the underlying _pg connection) via
       notices(),
    2. idempotent close() — multiple calls do not raise.
    """
    def __init__(self, connection):
        # Keep only the most recent 100 notices.
        self._notices = collections.deque(maxlen=100)

        # we must do an attribute by attribute copy of the notices here
        # due to limitations in pg implementation. Wrap with a
        # namedtuple for ease of use.
        def handle_notice(notice):
            received = {}
            for attr in dir(notice):
                if attr.startswith('__'):
                    continue
                received[attr] = getattr(notice, attr)
            Notice = collections.namedtuple('Notice', sorted(received))
            # BUG FIX: the raw notice object used to be appended here as
            # well, so every notice appeared twice in notices() and the
            # deque's effective capacity was halved.
            self._notices.append(Notice(**received))

        self._impl = connection
        self._impl._cnx.set_notice_receiver(handle_notice)

    def __enter__(self):
        return self._impl.__enter__()

    # __exit__() does not close the connection. This is in line with the
    # python DB API v2 specification (pep-0249), where close() is done on
    # __del__(), not __exit__().
    def __exit__(self, *args):
        return self._impl.__exit__(*args)

    def __getattr__(self, name):
        # Delegate everything else to the wrapped pgdb connection.
        return getattr(self._impl, name)

    def notices(self):
        """Return and clear the accumulated notices."""
        notice_list = list(self._notices)
        self._notices.clear()
        return notice_list

    # don't return operational error if connection is already closed
    def close(self):
        if not self._impl.closed:
            self._impl.close()
def connect(dburl, utility=False, verbose=False,
encoding=None, allowSystemTableMods=False, logConn=True, unsetSearchPath=True):
conninfo = {
'user': dburl.pguser,
'password': dburl.pgpass,
'host': dburl.pghost,
'port': dburl.pgport,
'database': dburl.pgdb,
}
# building options
options = []
if utility:
options.append("-c gp_role=utility")
# unset search path due to CVE-2018-1058
if unsetSearchPath:
options.append("-c search_path=")
if allowSystemTableMods:
options.append("-c allow_system_table_mods=true")
# set client encoding if needed
if encoding:
options.append("-c CLIENT_ENCODING=%s" % encoding)
#by default, libpq will print WARNINGS to stdout
if not verbose:
options.append("-c CLIENT_MIN_MESSAGES=ERROR")
if options:
conninfo['options'] = " ".join(options)
# use specified connection timeout
retries = 1
if dburl.timeout is not None:
conninfo['connect_timeout'] = dburl.timeout
retries = dburl.retries
# This flag helps to avoid logging the connection string in some special
# situations as requested
if logConn:
logFunc = logger.info if dburl.timeout is not None else logger.debug
logFunc("Connecting to db {} on host {}".format(dburl.pgdb, dburl.pghost))
connection = None
for i in range(retries):
try:
connection = pgdb.connect(**conninfo)
break
except pgdb.OperationalError, e:
if 'timeout expired' in str(e):
logger.warning('Timeout expired connecting to %s, attempt %d/%d' % (dburl.pgdb, i+1, retries))
continue
raise
if connection is None:
raise ConnectionError('Failed to connect to %s' % dburl.pgdb)
return Connection(connection)
def execSQL(conn, sql, autocommit=True):
    """
    Execute a sql command that is NOT expected to return any rows and expects
    to commit immediately.

    No cursor is returned. Sets connection.autocommit = autocommit, which is
    necessary for statements like "create tablespace" that cannot be run
    inside a transaction.

    For SQL that captures some expected output, use "query()".

    Using the `dbconn.connect() as conn` syntax will override autocommit and
    complete queries in a transaction followed by a commit on context close.
    """
    conn.autocommit = autocommit
    with conn.cursor() as cursor:
        cursor.execute(sql)
def query(conn, sql):
    """
    Run SQL that is expected to return some rows of output.

    Returns a cursor, which can then be used to iterate through all rows
    or to fetch them into an array.
    """
    cur = conn.cursor()
    cur.execute(sql)
    return cur
def queryRow(conn, sql):
    """
    Run SQL that returns exactly one row, and return that one row.

    Raises UnexpectedRowsError when the statement yields any other row count.
    """
    with conn.cursor() as cursor:
        cursor.execute(sql)
        if cursor.rowcount != 1:
            raise UnexpectedRowsError(1, cursor.rowcount, sql)
        return cursor.fetchone()
class UnexpectedRowsError(Exception):
    """Raised when a statement returns a different row count than expected."""
    def __init__(self, expected, actual, sql):
        self.expected = expected
        self.actual = actual
        self.sql = sql
        message = "SQL retrieved %d rows but %d was expected:\n%s" % (
            self.actual, self.expected, self.sql)
        Exception.__init__(self, message)
def querySingleton(conn, sql):
    """
    Run SQL that returns exactly one row and one column, and return that cell.

    Raises UnexpectedRowsError (via queryRow) on a wrong row count, and a
    plain Exception when the single row has more than one column.
    """
    record = queryRow(conn, sql)
    if len(record) > 1:
        raise Exception("SQL retrieved %d columns but 1 was expected:\n%s" % \
                        (len(record), sql))
    return record[0]
def executeUpdateOrInsert(conn, sql, expectedRowUpdatesOrInserts):
    """
    Execute DML and verify that exactly `expectedRowUpdatesOrInserts` rows
    were affected; returns the cursor, raises Exception otherwise.
    """
    cursor = conn.cursor()
    cursor.execute(sql)
    if cursor.rowcount != expectedRowUpdatesOrInserts:
        raise Exception("SQL affected %s rows but %s were expected:\n%s" % \
                        (cursor.rowcount, expectedRowUpdatesOrInserts, sql))
    return cursor
| {
"content_hash": "6472d00132f26f510782eea40f265dcc",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 129,
"avg_line_length": 32.05074626865672,
"alnum_prop": 0.5820061469684269,
"repo_name": "jmcatamney/gpdb",
"id": "40caf5c81bea0e2a34a6708755c22f95fd751e75",
"size": "10820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpMgmt/bin/gppylib/db/dbconn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3724"
},
{
"name": "Awk",
"bytes": "836"
},
{
"name": "Batchfile",
"bytes": "12854"
},
{
"name": "C",
"bytes": "42498841"
},
{
"name": "C++",
"bytes": "14366259"
},
{
"name": "CMake",
"bytes": "38452"
},
{
"name": "Csound Score",
"bytes": "223"
},
{
"name": "DTrace",
"bytes": "3873"
},
{
"name": "Dockerfile",
"bytes": "11932"
},
{
"name": "Emacs Lisp",
"bytes": "3488"
},
{
"name": "Fortran",
"bytes": "14863"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "335208"
},
{
"name": "HTML",
"bytes": "53484"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "229556"
},
{
"name": "M4",
"bytes": "111147"
},
{
"name": "Makefile",
"bytes": "496239"
},
{
"name": "Objective-C",
"bytes": "38376"
},
{
"name": "PLpgSQL",
"bytes": "8009512"
},
{
"name": "Perl",
"bytes": "798767"
},
{
"name": "PowerShell",
"bytes": "422"
},
{
"name": "Python",
"bytes": "3000118"
},
{
"name": "Raku",
"bytes": "698"
},
{
"name": "Roff",
"bytes": "32437"
},
{
"name": "Ruby",
"bytes": "77585"
},
{
"name": "SCSS",
"bytes": "339"
},
{
"name": "Shell",
"bytes": "451713"
},
{
"name": "XS",
"bytes": "6983"
},
{
"name": "Yacc",
"bytes": "674092"
},
{
"name": "sed",
"bytes": "1231"
}
],
"symlink_target": ""
} |
import logging
import os
import smtpd
import smtplib
import asyncore
from ConfigParser import ConfigParser
log = logging.getLogger(__name__)
def main():
    """Read ~/.open_relay.ini, build the upstream SMTP client, and relay
    everything received on port 8826 through it (blocks forever)."""
    parser = ConfigParser()
    log.info('Read config from: %s',
             parser.read([os.path.join(os.environ['HOME'], '.open_relay.ini')]))
    smtp_client = MailClient(parser.get('open_relay', 'host'),
                             parser.getint('open_relay', 'port'),
                             parser.getboolean('open_relay', 'ssl'),
                             parser.getboolean('open_relay', 'tls'),
                             parser.get('open_relay', 'username'),
                             parser.get('open_relay', 'password'))
    MailServer(('0.0.0.0', 8826), None,
               smtp_client=smtp_client)
    asyncore.loop()
class MailClient(object):
    """Outbound SMTP client that reconnects and retries once on send failure."""
    def __init__(self, host, port, ssl, tls, username, password):
        self.host, self.port, self.ssl, self.tls, self.username, self.password = \
            host, port, ssl, tls, username, password
        self._client = None
        self._connect()

    def sendmail(self, mailfrom, rcpttos, data):
        """Send `data` from `mailfrom` to `rcpttos`; reconnect and retry once
        if the cached connection fails."""
        # The upstream server hands us 'None' when no envelope sender was
        # given; fall back to the first recipient.
        if str(mailfrom) == 'None':
            mailfrom = rcpttos[0]
        log.info('Sending mail to %s' % rcpttos)
        log.info('Sending mail from %s' % mailfrom)
        try:
            self._client.sendmail(mailfrom, rcpttos, data)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Reconnect once and retry; a
            # second failure propagates.
            self._connect()
            self._client.sendmail(mailfrom, rcpttos, data)

    def _connect(self):
        # (Re)establish the SMTP session with the configured transport.
        if self.ssl:
            self._client = smtplib.SMTP_SSL(self.host, int(self.port))
        else:
            self._client = smtplib.SMTP(self.host, int(self.port))
        if self.tls:
            self._client.starttls()
        if self.username:
            self._client.login(self.username, self.password)
class MailServer(smtpd.SMTPServer):
    """smtpd server that forwards every received message via an injected
    smtp_client (keyword-only, popped from kwargs)."""
    def __init__(self, *args, **kwargs):
        client = kwargs.pop('smtp_client')
        self._client = client
        smtpd.SMTPServer.__init__(self, *args, **kwargs)

    def process_message(self, peer, mailfrom, rcpttos, data):
        # Relay upstream; the peer address is ignored.
        self._client.sendmail(mailfrom, rcpttos, data)
if __name__ == '__main__':
main()
| {
"content_hash": "f72dd68c33ab9999631c2045860cdce5",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 82,
"avg_line_length": 31.214285714285715,
"alnum_prop": 0.5739130434782609,
"repo_name": "apache/incubator-allura",
"id": "ba21862ac9556dda8410aa10f4f4f0ca3d69c648",
"size": "3078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/open_relay.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "155606"
},
{
"name": "JavaScript",
"bytes": "697175"
},
{
"name": "Puppet",
"bytes": "6882"
},
{
"name": "Python",
"bytes": "3667166"
},
{
"name": "Ruby",
"bytes": "5739"
},
{
"name": "Shell",
"bytes": "31675"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2012-2014, Austin Benson and David Gleich
All rights reserved.
This file is part of MRTSQR and is under the BSD 2-Clause License,
which can be found in the LICENSE file in the root directory, or at
http://opensource.org/licenses/BSD-2-Clause
"""
"""
ARInv.py
===========
Compute A * R^{-1}, where A is tall-and-skinny, and R is small
Usage:
dumbo start ARInv.py -hadoop $HADOOP_INSTALL \
-mat [name of matrix file] \
-matpath [local path to small R] \
-matpath2 [optional: extra small matrix] \
-blocksize [optional: block size for compression]
When matpath2 is provided, the computation is
(A * R^{-1}) * R_2^{-1}
This is used in, for example, the pseudo-IR code.
Example usage:
dumbo start ARInv.py -hadoop $HADOOP_INSTALL \
-mat A_800M_10.bseq -matpath R.txt
"""
import mrmc
import dumbo
import util
import os
# create the global options structure
gopts = util.GlobalOptions()
def runner(job):
    """Configure the single map-only iteration that computes A * R^{-1}."""
    blocksize = gopts.getintkey('blocksize')
    matpath = gopts.getstrkey('matpath')
    # An empty string means "no second small matrix"; normalize it to None.
    extra = gopts.getstrkey('matpath2', '')
    matpath2 = None if extra == '' else extra
    arinv_mapper = mrmc.ARInv(blocksize=blocksize, matpath=matpath,
                              matpath2=matpath2)
    # Identity reducer with zero reduce tasks: this is a map-only job.
    job.additer(mapper=arinv_mapper, reducer=mrmc.ID_REDUCER,
                opts=[('numreducetasks', str(0))])
def starter(prog):
    """Validate the dumbo command line and record the job parameters.

    Returns an error string when a required option is missing, else None.
    """
    gopts.prog = prog
    mat = mrmc.starter_helper(prog)
    if not mat:
        return "'mat' not specified"
    here = os.path.dirname(__file__)
    # Required: the small R matrix, shipped to the workers as a job file.
    small_r = prog.delopt('matpath')
    if not small_r:
        return "'matpath' not specified"
    prog.addopt('file', os.path.join(here, small_r))
    gopts.getstrkey('matpath', small_r)
    # Optional second small matrix: computes (A * R^{-1}) * R_2^{-1}.
    small_r2 = prog.delopt('matpath2')
    if small_r2:
        prog.addopt('file', os.path.join(here, small_r2))
    gopts.getstrkey('matpath2', small_r2 if small_r2 else '')
    # Default output name derives from the input matrix name.
    if not prog.getopt('output'):
        base, ext = os.path.splitext(mat)
        prog.addopt('output', '%s-arinv%s' % (base, ext))
    gopts.getintkey('blocksize', 3)
    gopts.getstrkey('reduce_schedule', '1')
    gopts.save_params()
if __name__ == '__main__':
dumbo.main(runner, starter)
| {
"content_hash": "a26997adf10f2b03c913c1fa12dd173b",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 83,
"avg_line_length": 27.78048780487805,
"alnum_prop": 0.6470588235294118,
"repo_name": "arbenson/mrtsqr",
"id": "e25df1e3d4130d69e5eb15025b0ea0476db43e4e",
"size": "2278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dumbo/ARInv.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "2012"
},
{
"name": "C++",
"bytes": "76142"
},
{
"name": "Java",
"bytes": "21697"
},
{
"name": "Makefile",
"bytes": "1309"
},
{
"name": "Python",
"bytes": "292758"
},
{
"name": "Shell",
"bytes": "13614"
}
],
"symlink_target": ""
} |
from .importer import activate
__all__ = ('activate',) | {
"content_hash": "c1e16ee4afcbd7fee72758bf0af191a1",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 30,
"avg_line_length": 18.333333333333332,
"alnum_prop": 0.6727272727272727,
"repo_name": "avanov/solo",
"id": "83d4d16e00eb59a3537a3641e71c08c342c96cab",
"size": "55",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "solo/import_hooks/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "273"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "162984"
},
{
"name": "RAML",
"bytes": "903"
}
],
"symlink_target": ""
} |
from collections import defaultdict
class BoardCleaner:
    """Applies all forced, automatic moves to a solitaire board in place.

    Repeatedly sends eligible cards to the foundation, discards the flower
    card (color 6) and collects completed dragon sets (colors 3-5) until no
    automatic move remains.  ``clearBoard`` mutates ``board`` and returns a
    "plan": slain dragon colors (ints) interleaved with single-element lists
    counting runs of consecutive foundation moves.
    """
    def clearBoard(self, board):
        """Run automatic moves until the board is stable; return the plan."""
        self.__clear_flower__(board)
        plan = []
        dragonSlayed = []
        clearedFound = True
        while dragonSlayed or clearedFound:
            clearedFound = self.__clear_foundation__(board)
            self.__clear_flower__(board)
            if not clearedFound:
                dragonSlayed = self.__clear_dragon__(board)
                plan += dragonSlayed
                self.__clear_flower__(board)
            else:
                # Count consecutive foundation moves in a trailing [n] cell;
                # dragon entries in the plan are plain ints.
                if not plan or type(plan[-1]) is int:
                    plan.append([0])
                plan[-1][0] += 1
        return plan
    def __clear_flower__(self, board):
        """Discard the flower card (color 6) wherever it tops a column."""
        for col in board.tableau:
            if col and col[-1].color == 6:
                col.pop()
    def __clear_dragon__(self, board):
        """Collect one completed dragon set (4 exposed dragons of a color).

        Returns ``[color]`` when a set was collected, else ``[]``.
        """
        # First stock cell that could receive each dragon color (-1 = none).
        indexInStock = {color: -1 for color in range(3, 6)}
        stockDragonIndices = defaultdict(list)
        tableauDragonIndices = defaultdict(list)
        for i in range(3):
            card = board.stock[i]
            if card is None:
                for color in indexInStock:
                    if indexInStock[color] == -1:
                        indexInStock[color] = i
            elif card and card.number is None:  # Flower cleared, must be Dragon
                stockDragonIndices[card.color].append(i)
                if indexInStock[card.color] == -1:
                    indexInStock[card.color] = i
        for i, col in enumerate(board.tableau):
            if col and col[-1].number is None:
                tableauDragonIndices[col[-1].color].append(i)
        for color in range(3, 6):
            if indexInStock[color] >= 0 and len(stockDragonIndices[color]) + \
                    len(tableauDragonIndices[color]) == 4:
                for i in stockDragonIndices[color]:
                    board.stock[i] = None
                for i in tableauDragonIndices[color]:
                    board.tableau[i].pop()
                # An empty list marks the cell as holding a collected set.
                board.stock[indexInStock[color]] = []
                return [color]
        return []
    def __clear_foundation__(self, board):
        """Move one eligible exposed card to the foundation.

        Returns True when a card was moved, else False.
        """
        minima = min(board.foundation.values())
        # BUG FIX: the original used filter(), which on Python 3 returns a
        # one-shot iterator; it was exhausted after the first membership test
        # below, so only the first tableau column was ever checked correctly.
        # Materialize the eligible colors into a list instead (identical
        # behavior on Python 2, where filter() already returned a list).
        colors = [c for c in range(3) if board.foundation[c] <= minima]
        for col in board.tableau:
            if col and col[-1].color in colors and \
                    col[-1].number == minima + 1:
                board.foundation.addToFoundation(col[-1].color)
                col.pop()
                return True
        # A 2 may always follow a 1 of the same suit, even if other suits lag.
        for color in range(3):
            if board.foundation[color] == 1:
                for col in board.tableau:
                    if col and col[-1].color == color and col[-1].number == 2:
                        board.foundation.addToFoundation(col[-1].color)
                        col.pop()
                        return True
        return False
| {
"content_hash": "b786d7b3ff82c35eadd2ff75c2eaed65",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 79,
"avg_line_length": 39.2027027027027,
"alnum_prop": 0.5105136159944846,
"repo_name": "davidxk/SolitaireBot",
"id": "9de79d58dc407c0fe083be982ff5dd3ed077a738",
"size": "2901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BoardCleaner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50120"
}
],
"symlink_target": ""
} |
from urllib2 import urlopen
from bs4 import BeautifulSoup
import requests
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
# The Qt application object must exist before any QWebView is created in main().
app = QApplication(sys.argv)
# Login endpoint of the Anna University result portal.
URL= 'http://sdpdr.nic.in/annauniv/Authentication'
#get userdata
# Credentials are prompted interactively at import time (Python 2 raw_input).
RegisterNumber = raw_input("Enter the registration number : ")
DateofBirth = raw_input("Enter the date of birth [DD-MM-YYYY] : ")
def main():
    """Log in to the result portal, load the result page in a QWebView and
    print it to result.pdf once the page has finished loading.

    NOTE(review): the requests session and QWebView use separate network
    stacks, so the web view below does not share the session's login
    cookies -- presumably the result page renders without them; verify.
    """
    # Start a session so we can have persistant cookies
    # Session() >> http://docs.python-requests.org/en/latest/api/#request-sessions
    session = requests.Session()
    # This is the form data that the page sends when logging in
    login_data = {
        'username': RegisterNumber,
        'password': DateofBirth,
        'submit': 'id',
    }
    print login_data
    # Authenticate
    r = session.post(URL, data = login_data)
    # Try accessing a page that requires you to be logged in
    r = session.get('http://sdpdr.nic.in/annauniv/result')
    web = QWebView()
    web.load(QUrl("http://sdpdr.nic.in/annauniv/result"))
    #web.show()
    # Configure an A4 PDF "printer" that writes to result.pdf.
    printer = QPrinter()
    printer.setPageSize(QPrinter.A4)
    printer.setOutputFormat(QPrinter.PdfFormat)
    printer.setOutputFileName("result.pdf")
    # convertion of page to pdf format
    def convertIt(main):
        # The bool argument of loadFinished(bool) lands in the (misnamed)
        # "main" parameter; it is ignored here.
        web.print_(printer)
        print "Pdf generated"
        QApplication.exit()
    QObject.connect(web, SIGNAL("loadFinished(bool)"), convertIt)
    sys.exit(app.exec_())
if __name__ == '__main__':# main method
main()
| {
"content_hash": "f1505aa01c1a14d292bdd89e4e8d9a4d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 88,
"avg_line_length": 25.2,
"alnum_prop": 0.6779100529100529,
"repo_name": "ActiveState/code",
"id": "a69803cc535b4e1e64b7e8337fc859cd446abc39",
"size": "1512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/579081_Pythscript_logwebsite_convert_required_html/recipe-579081.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
"""SCons.Scanner.LaTeX
This module implements the dependency scanner for LaTeX code.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/LaTeX.py 3603 2008/10/10 05:46:45 scons"
import os.path
import string
import re
import SCons.Scanner
def LaTeXScanner():
    """Return a prototype Scanner instance for scanning LaTeX source files
    when built with latex.
    """
    # latex resolves extension-less \includegraphics names against .eps/.ps;
    # see the search-order discussion in the LaTeX class docstring.
    return LaTeX(name="LaTeXScanner",
                 suffixes='$LATEXSUFFIXES',
                 graphics_extensions=['.eps', '.ps'],
                 recursive=0)
def PDFLaTeXScanner():
    """Return a prototype Scanner instance for scanning LaTeX source files
    when built with pdflatex.
    """
    # pdflatex accepts a wider set of graphics formats than plain latex;
    # see the search-order discussion in the LaTeX class docstring.
    return LaTeX(name="PDFLaTeXScanner",
                 suffixes='$LATEXSUFFIXES',
                 graphics_extensions=['.pdf', '.png', '.jpg', '.gif', '.tif'],
                 recursive=0)
class LaTeX(SCons.Scanner.Base):
    """Class for scanning LaTeX files for included files.
    Unlike most scanners, which use regular expressions that just
    return the included file name, this returns a tuple consisting
    of the keyword for the inclusion ("include", "includegraphics",
    "input", or "bibliography"), and then the file name itself.
    Based on a quick look at LaTeX documentation, it seems that we
    should append .tex suffix for the "include" keywords, append .tex if
    there is no extension for the "input" keyword, and need to add .bib
    for the "bibliography" keyword that does not accept extensions by itself.
    Finally, if there is no extension for an "includegraphics" keyword
    latex will append .ps or .eps to find the file, while pdftex may use .pdf,
    .jpg, .tif, .mps, or .png.
    The actual subset and search order may be altered by
    DeclareGraphicsExtensions command. This complication is ignored.
    The default order corresponds to experimentation with teTeX
        $ latex --version
        pdfeTeX 3.141592-1.21a-2.2 (Web2C 7.5.4)
        kpathsea version 3.5.4
    The order is:
        ['.eps', '.ps'] for latex
        ['.png', '.pdf', '.jpg', '.tif'].
    Another difference is that the search path is determined by the type
    of the file being searched:
    env['TEXINPUTS'] for "input" and "include" keywords
    env['TEXINPUTS'] for "includegraphics" keyword
    env['BIBINPUTS'] for "bibliography" keyword
    env['BSTINPUTS'] for "bibliographystyle" keyword
    FIXME: also look for the class or style in document[class|style]{}
    FIXME: also look for the argument of bibliographystyle{}
    """
    # Maps each supported LaTeX inclusion keyword to the construction
    # variable whose value is the search path for that kind of file.
    keyword_paths = {'include': 'TEXINPUTS',
                     'input': 'TEXINPUTS',
                     'includegraphics': 'TEXINPUTS',
                     'bibliography': 'BIBINPUTS',
                     'bibliographystyle': 'BSTINPUTS',
                     'usepackage': 'TEXINPUTS'}
    # De-duplicated list of the path variables above.
    env_variables = SCons.Util.unique(keyword_paths.values())
    def __init__(self, name, suffixes, graphics_extensions, *args, **kw):
        # We have to include \n with the % we exclude from the first part
        # part of the regex because the expression is compiled with re.M.
        # Without the \n, the ^ could match the beginning of a *previous*
        # line followed by one or more newline characters (i.e. blank
        # lines), interfering with a match on the next line.
        regex = r'^[^%\n]*\\(include|includegraphics(?:\[[^\]]+\])?|input|bibliography|usepackage){([^}]*)}'
        self.cre = re.compile(regex, re.M)
        self.graphics_extensions = graphics_extensions
        # Scan function handed to the base Scanner; binds self via a
        # default argument (pre-closure Python idiom).
        def _scan(node, env, path=(), self=self):
            node = node.rfile()
            if not node.exists():
                return []
            return self.scan(node, path)
        class FindMultiPathDirs:
            """The stock FindPathDirs function has the wrong granularity:
            it is called once per target, while we need the path that depends
            on what kind of included files is being searched. This wrapper
            hides multiple instances of FindPathDirs, one per the LaTeX path
            variable in the environment. When invoked, the function calculates
            and returns all the required paths as a dictionary (converted into
            a tuple to become hashable). Then the scan function converts it
            back and uses a dictionary of tuples rather than a single tuple
            of paths.
            """
            def __init__(self, dictionary):
                self.dictionary = {}
                for k,n in dictionary.items():
                    self.dictionary[k] = SCons.Scanner.FindPathDirs(n)
            def __call__(self, env, dir=None, target=None, source=None,
                         argument=None):
                di = {}
                for k,c in self.dictionary.items():
                    di[k] = c(env, dir=None, target=None, source=None,
                              argument=None)
                # To prevent "dict is not hashable error"
                return tuple(di.items())
        class LaTeXScanCheck:
            """Skip all but LaTeX source files, i.e., do not scan *.eps,
            *.pdf, *.jpg, etc.
            """
            def __init__(self, suffixes):
                self.suffixes = suffixes
            def __call__(self, node, env):
                current = not node.has_builder() or node.is_up_to_date()
                scannable = node.get_suffix() in env.subst_list(self.suffixes)[0]
                # Returning false means that the file is not scanned.
                return scannable and current
        kw['function'] = _scan
        kw['path_function'] = FindMultiPathDirs(LaTeX.keyword_paths)
        kw['recursive'] = 1
        kw['skeys'] = suffixes
        kw['scan_check'] = LaTeXScanCheck(suffixes)
        kw['name'] = name
        # Python 2 idiom: equivalent to
        # SCons.Scanner.Base.__init__(self, *args, **kw).
        apply(SCons.Scanner.Base.__init__, (self,) + args, kw)
    def _latex_names(self, include):
        """Return the candidate filenames for one (keyword, name) inclusion,
        applying the per-keyword implicit-extension rules from the class
        docstring."""
        filename = include[1]
        if include[0] == 'input':
            base, ext = os.path.splitext( filename )
            if ext == "":
                return [filename + '.tex']
        if (include[0] == 'include'):
            return [filename + '.tex']
        if include[0] == 'bibliography':
            base, ext = os.path.splitext( filename )
            if ext == "":
                return [filename + '.bib']
        if include[0] == 'usepackage':
            base, ext = os.path.splitext( filename )
            if ext == "":
                return [filename + '.sty']
        if include[0] == 'includegraphics':
            base, ext = os.path.splitext( filename )
            if ext == "":
                #TODO(1.5) return [filename + e for e in self.graphics_extensions]
                return map(lambda e, f=filename: f+e, self.graphics_extensions)
        return [filename]
    def sort_key(self, include):
        # Normalized-case name used as the DSU sort key in scan().
        return SCons.Node.FS._my_normcase(str(include))
    def find_include(self, include, source_dir, path):
        # Pick the search path registered for this inclusion keyword;
        # fall back to an empty path when none was provided.
        try:
            sub_path = path[include[0]]
        except (IndexError, KeyError):
            sub_path = ()
        try_names = self._latex_names(include)
        for n in try_names:
            i = SCons.Node.FS.find_file(n, (source_dir,) + sub_path)
            if i:
                return i, include
        # NOTE(review): falls through with i bound by the last loop
        # iteration (None on no match); _latex_names always returns at
        # least one name, so i is always bound here.
        return i, include
    def scan(self, node, path=()):
        # Modify the default scan function to allow for the regular
        # expression to return a comma separated list of file names
        # as can be the case with the bibliography keyword.
        # Cache the includes list in node so we only scan it once:
        path_dict = dict(list(path))
        # Strips a trailing "[...]" options group from a keyword match.
        noopt_cre = re.compile('\[.*$')
        if node.includes != None:
            includes = node.includes
        else:
            includes = self.cre.findall(node.get_contents())
            # 1. Split comma-separated lines, e.g.
            #    ('bibliography', 'phys,comp')
            #    should become two entries
            #    ('bibliography', 'phys')
            #    ('bibliography', 'comp')
            # 2. Remove the options, e.g., such as
            #    ('includegraphics[clip,width=0.7\\linewidth]', 'picture.eps')
            #    should become
            #    ('includegraphics', 'picture.eps')
            split_includes = []
            for include in includes:
                inc_type = noopt_cre.sub('', include[0])
                inc_list = string.split(include[1],',')
                for j in range(len(inc_list)):
                    split_includes.append( (inc_type, inc_list[j]) )
            #
            includes = split_includes
            node.includes = includes
        # This is a hand-coded DSU (decorate-sort-undecorate, or
        # Schwartzian transform) pattern.  The sort key is the raw name
        # of the file as specifed on the \include, \input, etc. line.
        # TODO: what about the comment in the original Classic scanner:
        # """which lets
        # us keep the sort order constant regardless of whether the file
        # is actually found in a Repository or locally."""
        nodes = []
        source_dir = node.get_dir()
        for include in includes:
            #
            # Handle multiple filenames in include[1]
            #
            n, i = self.find_include(include, source_dir, path_dict)
            if n is None:
                # Do not bother with 'usepackage' warnings, as they most
                # likely refer to system-level files
                if include[0] != 'usepackage':
                    SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
                                        "No dependency generated for file: %s (included from: %s) -- file not found" % (i, node))
            else:
                sortkey = self.sort_key(n)
                nodes.append((sortkey, n))
        #
        nodes.sort()
        nodes = map(lambda pair: pair[1], nodes)
        return nodes
| {
"content_hash": "2bad87f20d38d3122d89a647b4331967",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 129,
"avg_line_length": 43.00763358778626,
"alnum_prop": 0.589368122115726,
"repo_name": "frew/simpleproto",
"id": "fefeb14a83cb0c6439d5ef5804916e31144b59db",
"size": "11268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scons-local-1.1.0/SCons/Scanner/LaTeX.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "30217"
},
{
"name": "Protocol Buffer",
"bytes": "1960"
},
{
"name": "Python",
"bytes": "1704215"
}
],
"symlink_target": ""
} |
import sys
from oslo_config import cfg
from oslo_service import service
from neutron.agent.common import config
from neutron.agent.l3 import ha
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import pd
from neutron.agent.linux import ra
from neutron.agent.metadata import config as metadata_config
from neutron.common import config as common_config
from neutron.common import topics
from neutron.conf.agent.l3 import config as l3_config
from neutron import service as neutron_service
def register_opts(conf):
    """Register every configuration option group the L3 agent consumes.

    Registration order is preserved from the original implementation.
    """
    l3_config.register_l3_agent_config_opts(l3_config.OPTS, conf)
    for opt_group in (metadata_config.DRIVER_OPTS,
                      metadata_config.SHARED_OPTS,
                      ha.OPTS):
        conf.register_opts(opt_group)
    config.register_interface_driver_opts_helper(conf)
    config.register_agent_state_opts_helper(conf)
    for opt_group in (interface.OPTS,
                      external_process.OPTS,
                      pd.OPTS,
                      ra.OPTS):
        conf.register_opts(opt_group)
    config.register_availability_zone_opts_helper(conf)
def main(manager='neutron.agent.l3.agent.L3NATAgentWithStateReport'):
    """Entry point for the neutron-l3-agent service process."""
    register_opts(cfg.CONF)
    common_config.init(sys.argv[1:])
    config.setup_logging()
    # Build the RPC service around the (pluggable) L3 agent manager class
    # and block until the service finishes.
    l3_service = neutron_service.Service.create(
        binary='neutron-l3-agent',
        topic=topics.L3_AGENT,
        report_interval=cfg.CONF.AGENT.report_interval,
        manager=manager,
    )
    service.launch(cfg.CONF, l3_service).wait()
| {
"content_hash": "1240a2231c5a9f5337b39d19a3018e76",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 69,
"avg_line_length": 35.54761904761905,
"alnum_prop": 0.7642330877427997,
"repo_name": "igor-toga/local-snat",
"id": "72f6c8417983ed20289e87aab1c077f69b6837b5",
"size": "2136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/agent/l3_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9636936"
},
{
"name": "Shell",
"bytes": "14072"
}
],
"symlink_target": ""
} |
"""
The ssh_autodetect module is used to auto-detect the netmiko device_type to use to further initiate
a new SSH connection with a remote host. This auto-detection is based on a unique class called
**SSHDetect**.
Notes
-----
The **SSHDetect** class is instantiated using the same parameters as a standard Netmiko
connection (see the *netmiko.ssh_dispatcher.ConnectHandler* function). The only acceptable value
for the 'device_type' argument is 'autodetect'.
The auto-detection is solely based on the *SSH_MAPPER_BASE* dictionary. The keys are the name of
the 'device_type' supported for auto-detection and the value is another dictionary describing how
to handle the auto-detection.
* "cmd" : The command to send to the remote device. **The command output must not require paging.**
* "search_patterns" : A list of regex to compare with the output of the command
* "priority" : An integer (0-99) which specifies the confidence of the match above
* "dispatch" : The function to call to try the autodetection (by default SSHDetect._autodetect_std)
Examples
--------
# Auto-detection section
>>> from netmiko.ssh_autodetect import SSHDetect
>>> from netmiko.ssh_dispatcher import ConnectHandler
>>> remote_device = {'device_type': 'autodetect',
'host': 'remote.host',
'username': 'test',
'password': 'foo'}
>>> guesser = SSHDetect(**remote_device)
>>> best_match = guesser.autodetect()
>>> print(best_match) # Name of the best device_type to use further
>>> print(guesser.potential_matches) # Dictionary of the whole matching result
# Netmiko connection creation section
>>> remote_device['device_type'] = best_match
>>> connection = ConnectHandler(**remote_device)
"""
from __future__ import unicode_literals
import re
import time
from netmiko.ssh_dispatcher import ConnectHandler
from netmiko.base_connection import BaseConnection
# Each entry describes how to probe for one device_type:
#   'cmd'             -- command sent to the remote host (output must not page)
#   'search_patterns' -- regexes matched against the command output
#   'priority'        -- confidence (0-99) reported on a match
#   'dispatch'        -- name of the SSHDetect method that runs the probe; it
#                        is popped off the dict and the remaining keys are
#                        passed to that method as keyword arguments.
SSH_MAPPER_BASE = {
    'alcatel_aos': {
        "cmd": "show system",
        "search_patterns": ["Alcatel-Lucent"],
        "priority": 99,
        "dispatch": "_autodetect_std",
    },
    'alcatel_sros': {
        "cmd": "show version | match ALCATEL",
        "search_patterns": ["TiMOS"],
        "priority": 99,
        "dispatch": "_autodetect_std",
    },
    'arista_eos': {
        "cmd": "show version | inc rist",
        "search_patterns": ["Arista"],
        "priority": 99,
        "dispatch": "_autodetect_std",
    },
    'cisco_ios': {
        "cmd": "show version | inc Cisco",
        "search_patterns": [
            "Cisco IOS Software",
            "Cisco Internetwork Operating System Software"
        ],
        "priority": 99,
        "dispatch": "_autodetect_std",
    },
    'cisco_asa': {
        "cmd": "show version | inc Cisco",
        "search_patterns": ["Cisco Adaptive Security Appliance", "Cisco ASA"],
        "priority": 99,
        "dispatch": "_autodetect_std",
    },
    'cisco_nxos': {
        "cmd": "show version | inc Cisco",
        "search_patterns": ["Cisco Nexus Operating System", "NX-OS"],
        "priority": 99,
        "dispatch": "_autodetect_std",
    },
    'cisco_xr': {
        "cmd": "show version | inc Cisco",
        "search_patterns": ["Cisco IOS XR"],
        "priority": 99,
        "dispatch": "_autodetect_std",
    },
    'huawei': {
        "cmd": "display version | inc Huawei",
        "search_patterns": ["Huawei Technologies", "Huawei Versatile Routing Platform Software"],
        "priority": 99,
        "dispatch": "_autodetect_std",
    },
    'juniper_junos': {
        "cmd": "show version | match JUNOS",
        "search_patterns": ["JUNOS Software Release", "JUNOS .+ Software"],
        "priority": 99,
        "dispatch": "_autodetect_std",
    },
}
class SSHDetect(object):
    """Automatically guess the Netmiko device_type of a remote SSH host.

    The constructor accepts the same arguments as
    netmiko.ssh_dispatcher.ConnectHandler; the 'device_type' keyword must be
    the literal string 'autodetect'.

    Attributes
    ----------
    connection : netmiko.terminal_server.TerminalServer
        A basic connection to the remote SSH end.
    potential_matches : dict
        Maps each candidate device_type to the accuracy score its probe
        earned; populated by autodetect().

    Methods
    -------
    autodetect()
        Try to determine the device type.
    """

    def __init__(self, *args, **kwargs):
        """Open the probing connection; device_type must be 'autodetect'."""
        if kwargs["device_type"] != "autodetect":
            raise ValueError("The connection device_type must be 'autodetect'")
        self.connection = ConnectHandler(*args, **kwargs)
        # Drain whatever the remote end prints on login so later probes see
        # only their own command output.
        self.initial_buffer = BaseConnection._test_channel_read(self.connection)
        self.potential_matches = {}
        self._results_cache = {}

    def autodetect(self):
        """Probe the remote host and return the most likely device_type.

        Returns None when no probe in SSH_MAPPER_BASE matched at all.
        """
        for device_type, probe in SSH_MAPPER_BASE.items():
            probe_kwargs = probe.copy()
            run_probe = getattr(self, probe_kwargs.pop("dispatch"))
            accuracy = run_probe(**probe_kwargs)
            if not accuracy:
                continue
            self.potential_matches[device_type] = accuracy
            if accuracy >= 99:
                # Certain enough of the match -- stop probing early.
                return self._best_match()
        if not self.potential_matches:
            self.connection.disconnect()
            return None
        return self._best_match()

    def _best_match(self):
        """Disconnect and return the device_type with the highest accuracy."""
        ranked = sorted(self.potential_matches.items(),
                        key=lambda item: item[1], reverse=True)
        self.connection.disconnect()
        return ranked[0][0]

    def _send_command(self, cmd=""):
        """Write *cmd* to the channel and return the sanitized output.

        Strips ANSI escape codes and backspaces from the raw response.
        """
        self.connection.write_channel(cmd + "\n")
        time.sleep(1)
        raw = self.connection._read_channel_timing()
        without_ansi = self.connection.strip_ansi_escape_codes(raw)
        return self.connection.strip_backspaces(without_ansi)

    def _send_command_wrapper(self, cmd):
        """Send *cmd*, caching non-empty responses keyed on the command."""
        cached = self._results_cache.get(cmd)
        if cached:
            return cached
        response = self._send_command(cmd)
        self._results_cache[cmd] = response
        return response

    def _autodetect_std(self, cmd="", search_patterns=None, re_flags=re.I, priority=99):
        """Standard probe: send *cmd* and look for *search_patterns*.

        Returns *priority* on a match; 0 on no match, on a recognized CLI
        error banner, or on any exception while talking to the device.
        """
        invalid_responses = [
            r'% Invalid input detected',
            r'syntax error, expecting',
            r'Error: Unrecognized command',
            r'%Error'
        ]
        if not cmd or not search_patterns:
            return 0
        try:
            response = self._send_command_wrapper(cmd)
            # A recognized CLI error banner means the command is unsupported
            # on this platform, so this device_type cannot match.
            for bad_pattern in invalid_responses:
                if re.search(bad_pattern, response, flags=re.I):
                    return 0
            for pattern in search_patterns:
                if re.search(pattern, response, flags=re_flags):
                    return priority
        except Exception:
            return 0
        return 0
| {
"content_hash": "13d53baf61f77a04734380333540461d",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 100,
"avg_line_length": 36.573584905660375,
"alnum_prop": 0.6025588113908378,
"repo_name": "michaelrosejr/pyaos6",
"id": "88c9093420e3244d7412f0e408a55ba81798520b",
"size": "9692",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "netmiko/ssh_autodetect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "186245"
}
],
"symlink_target": ""
} |
from external_program_test_framework import *
class Bash(ExternalProgramTestSuite):
    """Bash test suite: exercises shell commands through the framework's
    check_subprocess helper on both Windows ('nt') and POSIX hosts.
    """
    def setup(self):
        print('setup')
    def teardown(self):
        print('teardown')
    def valid_pwd_command(self):
        """A directory-listing/pwd command must exit with status 0."""
        # BUG FIX: 'os.name is "nt"' compared identity with a string literal,
        # which only worked via CPython string interning; compare by value.
        if os.name == "nt":
            self.check_subprocess("cmd", ["/c", "dir"], 0)
        else:
            self.check_subprocess("pwd", [], 0)
    def invalid_ls_command(self):
        """Listing a nonexistent path must exit with status 1."""
        self.skip_setup = True
        self.skip_teardown = True
        # BUG FIX: same identity-vs-equality comparison as above.
        if os.name == "nt":
            self.check_subprocess("cmd", ["/c", "0"], 1)
        else:
            self.check_subprocess("ls", ["0"], 1)
def example_fixture():
    """Return a (setup, teardown) callable pair for the @fixture decorator."""
    def setup():
        print 'fixture setup'
    def teardown():
        print 'fixture teardown'
    return setup, teardown
class Dummy(ExternalProgramTestSuite):
    """Demo suite showing the framework decorators (@name, @timelimit,
    @description, @fixture).
    """
    def setup(self):
        print('setup')
    def teardown(self):
        print('teardown')
    #@skip_teardown
    @name(1)
    # NOTE(review): non-numeric timelimit 'a' looks like a deliberate demo of
    # bad decorator input -- confirm before "fixing" it.
    @timelimit('a')
    @description("launch the external SUT program and verify that it returns 0")
    @fixture(example_fixture)
    def valid_sut_launch(self):
        self.check_subprocess("pwd", [], 0)
    @fixture(example_fixture)
    @timelimit(1)
    def valid_pwd_command(self):
        """A cd/pwd command must exit with status 0."""
        # BUG FIX: 'os.name is "nt"' compared identity with a string literal,
        # which only worked via CPython string interning; compare by value.
        if os.name == "nt":
            self.check_subprocess("cmd", ["/c", "cd"], 0)
        else:
            self.check_subprocess("pwd", [], 0)
class HttpGet(ExternalProgramTestSuite):
    """Network test suite: fetches a URL with curl via check_subprocess."""
    def google(self):
        # Expect curl to exit 0 when http://www.google.com is reachable.
        self.check_subprocess("curl", ["http://www.google.com"], 0)
def main():
    """Build the demo suites and run them.

    NOTE(review): Bash and Dummy are constructed but never .run() directly;
    presumably construction registers them so run_all() executes them --
    confirm against the framework.
    """
    #ExternalProgramTestSuite.color_output_text = False
    #Dummy(suite_name='dummy1').run()
    Bash(stdout_log_file='run.log', suite_description="Bash Unit Tests", suite_name="Bash")
    HttpGet().run()
    Dummy(suite_description="dummy unit tests", suite_timelimit=1)
    ExternalProgramTestSuite.run_all()
"""
import xhtml2pdf
import six
import html5lib
import markupsafe
import jinja2
from jinja2 import Template
from xhtml2pdf import pisa # import python module
# Define your data
sourceHtml = "<html><body><p>To PDF or not to PDF<p></body></html>"
outputFilename = "test.pdf"
# Utility function
def convertHtmlToPdf(sourceHtml, outputFilename):
# open output file for writing (truncated binary)
resultFile = open(outputFilename, "w+b")
# convert HTML to PDF
pisaStatus = pisa.CreatePDF(
sourceHtml, # the HTML to convert
dest=resultFile) # file handle to recieve result
# close output file
resultFile.close() # close output file
# return True on success and False on errors
return pisaStatus.err
"""
if __name__ == "__main__":
main()
"""
template = Template('Hello {{ name }}!')
print template.render(name='John Doe')
pisa.showLogging()
convertHtmlToPdf(sourceHtml, outputFilename)
""" | {
"content_hash": "68fe8bece01cd790ddd6708db361ccbb",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 91,
"avg_line_length": 28.42718446601942,
"alnum_prop": 0.6058743169398907,
"repo_name": "agramian/PythonExternalProgramTestFramework",
"id": "c27e1a8dfc4042eb727c90649a41d26a637e25ae",
"size": "2928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "Python",
"bytes": "4225034"
}
],
"symlink_target": ""
} |
from _rawffi import alt as _ffi
import _rawffi
from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof
from _ctypes.basics import keepalive_key, store_reference, ensure_objects
from _ctypes.basics import CArgObject, as_ffi_pointer
class ArrayMeta(_CDataMeta):
    """Metaclass for ctypes Array types.

    Attaches the low-level _rawffi array constructor to each concrete
    array class and, for char/wchar element types, installs the
    convenience ``value``/``raw`` properties.
    """
    def __new__(self, name, cls, typedict):
        res = type.__new__(self, name, cls, typedict)
        if cls == (_CData,):  # this is the Array class defined below
            return res
        ffiarray = res._ffiarray = _rawffi.Array(res._type_._ffishape_)
        subletter = getattr(res._type_, '_type_', None)
        if subletter == 'c':
            # c_char arrays: .value is the bytes content, .raw the raw buffer.
            def getvalue(self):
                return _rawffi.charp2string(self._buffer.buffer,
                                            self._length_)
            def setvalue(self, val):
                # we don't want to have buffers here
                if len(val) > self._length_:
                    raise ValueError("%r too long" % (val,))
                if isinstance(val, str):
                    _rawffi.rawstring2charp(self._buffer.buffer, val)
                else:
                    for i in range(len(val)):
                        self[i] = val[i]
                if len(val) < self._length_:
                    # NUL-terminate when the value is shorter than the array.
                    self._buffer[len(val)] = b'\x00'
            res.value = property(getvalue, setvalue)
            def getraw(self):
                return _rawffi.charp2rawstring(self._buffer.buffer,
                                               self._length_)
            def setraw(self, buffer):
                if len(buffer) > self._length_:
                    raise ValueError("%r too long" % (buffer,))
                _rawffi.rawstring2charp(self._buffer.buffer, buffer)
            res.raw = property(getraw, setraw)
        elif subletter == 'u':
            # c_wchar arrays: .value is the unicode content.
            def getvalue(self):
                return _rawffi.wcharp2unicode(self._buffer.buffer,
                                              self._length_)
            def setvalue(self, val):
                # we don't want to have buffers here
                if len(val) > self._length_:
                    raise ValueError("%r too long" % (val,))
                if isinstance(val, str):
                    # Strings are written character-by-character into the
                    # raw buffer; other sequences go through __setitem__.
                    target = self._buffer
                else:
                    target = self
                for i in range(len(val)):
                    target[i] = val[i]
                if len(val) < self._length_:
                    target[len(val)] = '\x00'
            res.value = property(getvalue, setvalue)
        res._ffishape_ = (ffiarray, res._length_)
        res._fficompositesize_ = res._sizeofinstances()
        return res
    from_address = cdata_from_address
    def _sizeofinstances(self):
        # Total byte size of one array instance.
        size, alignment = self._ffiarray.size_alignment(self._length_)
        return size
    def _alignmentofinstances(self):
        return self._type_._alignmentofinstances()
    def _CData_output(self, resarray, base=None, index=-1):
        # this seems to be a string if we're array of char, surprise!
        from ctypes import c_char, c_wchar
        if self._type_ is c_char:
            return _rawffi.charp2string(resarray.buffer, self._length_)
        if self._type_ is c_wchar:
            return _rawffi.wcharp2unicode(resarray.buffer, self._length_)
        # Wrap the existing buffer address instead of allocating a new one.
        res = self.__new__(self)
        ffiarray = self._ffiarray.fromaddress(resarray.buffer, self._length_)
        res._buffer = ffiarray
        res._base = base
        res._index = index
        return res
    def _CData_retval(self, resbuffer):
        raise NotImplementedError
    def from_param(self, value):
        # array accepts very strange parameters as part of structure
        # or function argument...
        from ctypes import c_char, c_wchar
        if issubclass(self._type_, (c_char, c_wchar)):
            # XXX: this should maybe be stricer for py3 (c_char disallowing str?)
            if isinstance(value, (bytes, str)):
                if len(value) > self._length_:
                    raise ValueError("Invalid length")
                value = self(*value)
            elif not isinstance(value, self):
                raise TypeError("expected string, %s found"
                                % (value.__class__.__name__,))
        else:
            if isinstance(value, tuple):
                if len(value) > self._length_:
                    raise RuntimeError("Invalid length")
                value = self(*value)
        return _CDataMeta.from_param(self, value)
def array_get_slice_params(self, index):
    """Normalize a slice against this array, returning (start, stop, step).

    Arrays with a known ``_length_`` delegate to ``slice.indices``; for
    open-ended arrays the slice itself must provide enough bounds.
    """
    if hasattr(self, '_length_'):
        return index.indices(self._length_)
    step = 1 if index.step is None else index.step
    start = index.start
    if start is None:
        if step <= 0:
            raise ValueError("slice start is required for step < 0")
        start = 0
    if index.stop is None:
        raise ValueError("slice stop is required")
    return start, index.stop, step
def array_slice_setitem(self, index, value):
    """Assign ``value`` elementwise to the slice ``index`` of this array.

    Raises ValueError when ``value`` does not contain exactly as many
    items as the slice selects.
    """
    start, stop, step = self._get_slice_params(index)
    # Count the indices produced by range(start, stop, step).  Fix: use
    # floor division -- '/' returns a float on Python 3, which made the
    # length check below fail whenever the slice span was not an exact
    # multiple of the step (e.g. slice(0, 5, 3)).
    if ((step < 0 and stop >= start) or
            (step > 0 and start >= stop)):
        slicelength = 0
    elif step < 0:
        slicelength = (stop - start + 1) // step + 1
    else:
        slicelength = (stop - start - 1) // step + 1
    if slicelength != len(value):
        raise ValueError("Can only assign slices of the same length")
    for i, j in enumerate(range(start, stop, step)):
        self[j] = value[i]
def array_slice_getitem(self, index):
    """Read the slice ``index`` of this array.

    Char arrays collapse to ``bytes`` and wchar arrays to ``str``; any
    other element type yields a plain list of items.
    """
    start, stop, step = self._get_slice_params(index)
    items = list(map(self.__getitem__, range(start, stop, step)))
    joiner = {'c': b"".join, 'u': "".join}.get(
        getattr(self._type_, '_type_', None))
    if joiner is not None:
        return joiner(items)
    return items
class Array(_CData, metaclass=ArrayMeta):
    """Base class for ctypes array instances (fixed length, typed items)."""
    _ffiargshape_ = 'P'
    def __init__(self, *args):
        # A fresh buffer is only allocated when one was not already attached
        # (e.g. by ArrayMeta._CData_output wrapping an existing address).
        if not hasattr(self, '_buffer'):
            self._buffer = self._ffiarray(self._length_, autofree=True)
        for i, arg in enumerate(args):
            self[i] = arg
    def _fix_index(self, index):
        # Normalize a possibly-negative index and bounds-check it.
        if index < 0:
            index += self._length_
        if 0 <= index < self._length_:
            return index
        else:
            raise IndexError
    _get_slice_params = array_get_slice_params
    _slice_getitem = array_slice_getitem
    _slice_setitem = array_slice_setitem
    def _subarray(self, index):
        """Return a _rawffi array of length 1 whose address is the same as
        the index'th item of self."""
        address = self._buffer.itemaddress(index)
        return self._ffiarray.fromaddress(address, 1)
    def __setitem__(self, index, value):
        if isinstance(index, slice):
            self._slice_setitem(index, value)
            return
        index = self._fix_index(index)
        cobj = self._type_.from_param(value)
        # Keep any Python objects referenced by the stored item alive for
        # as long as this array holds it.
        if ensure_objects(cobj) is not None:
            store_reference(self, index, cobj._objects)
        arg = cobj._get_buffer_value()
        if self._type_._fficompositesize_ is None:
            self._buffer[index] = arg
        # something more sophisticated, cannot set field directly
        else:
            from ctypes import memmove
            dest = self._buffer.itemaddress(index)
            memmove(dest, arg, self._type_._fficompositesize_)
    def __getitem__(self, index):
        if isinstance(index, slice):
            return self._slice_getitem(index)
        index = self._fix_index(index)
        return self._type_._CData_output(self._subarray(index), self, index)
    def __len__(self):
        return self._length_
    def _get_buffer_for_param(self):
        return CArgObject(self, self._buffer.byptr())
    def _get_buffer_value(self):
        return self._buffer.buffer
    def _to_ffi_param(self):
        return self._get_buffer_value()
    def _as_ffi_pointer_(self, ffitype):
        return as_ffi_pointer(self, ffitype)
ARRAY_CACHE = {}
def create_array_type(base, length):
    """Build (or fetch from cache) the Array subclass of ``base`` with
    ``length`` elements, as produced by e.g. ``c_int * 10``.

    Raises TypeError for a non-integer length and ValueError for a
    negative one.
    """
    if not isinstance(length, int):
        raise TypeError("Can't multiply a ctypes type by a non-integer")
    if length < 0:
        raise ValueError("Array length must be >= 0")
    key = (base, length)
    cached = ARRAY_CACHE.get(key)
    if cached is not None:
        return cached
    # Values in the cache are classes, never None, so .get() is safe here.
    tpdict = {'_length_': length, '_type_': base}
    cls = ArrayMeta("%s_Array_%d" % (base.__name__, length), (Array,), tpdict)
    cls._ffiargtype = _ffi.types.Pointer(base.get_ffi_argtype())
    ARRAY_CACHE[key] = cls
    return cls
| {
"content_hash": "e815ce3e21c31b3e110ad9d185d9ed76",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 82,
"avg_line_length": 36.13333333333333,
"alnum_prop": 0.5486623616236163,
"repo_name": "timm/timmnix",
"id": "ef315bb79e2f51d0fa16103132c9b5b624a01953",
"size": "8672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pypy3-v5.5.0-linux64/lib_pypy/_ctypes/array.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1641"
},
{
"name": "Batchfile",
"bytes": "1234"
},
{
"name": "C",
"bytes": "436685"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "Common Lisp",
"bytes": "4"
},
{
"name": "Emacs Lisp",
"bytes": "290698"
},
{
"name": "HTML",
"bytes": "111577"
},
{
"name": "Makefile",
"bytes": "1681"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "1540"
},
{
"name": "Prolog",
"bytes": "14301"
},
{
"name": "Python",
"bytes": "21267592"
},
{
"name": "Roff",
"bytes": "21080"
},
{
"name": "Shell",
"bytes": "27687"
},
{
"name": "TeX",
"bytes": "3052861"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: memset_server_info
author: "Simon Weald (@glitchcrab)"
version_added: "2.8"
short_description: Retrieve server information.
notes:
- An API key generated via the Memset customer control panel is needed with the
following minimum scope - I(server.info).
description:
- Retrieve server information.
- This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change.
options:
api_key:
required: true
description:
- The API key obtained from the Memset control panel.
name:
required: true
description:
- The server product name (i.e. C(testyaa1)).
'''
EXAMPLES = '''
- name: get details for testyaa1
memset_server_info:
name: testyaa1
api_key: 5eb86c9896ab03919abcf03857163741
delegate_to: localhost
'''
RETURN = '''
---
memset_api:
description: Info from the Memset API
returned: always
type: complex
contains:
backups:
description: Whether this server has a backup service.
returned: always
type: bool
sample: true
control_panel:
description: Whether the server has a control panel (i.e. cPanel).
returned: always
type: str
sample: 'cpanel'
data_zone:
description: The data zone the server is in.
returned: always
type: str
sample: 'Memset Public Cloud'
expiry_date:
description: Current expiry date of the server.
returned: always
type: str
sample: '2018-08-10'
firewall_rule_group:
description: Details about the firewall group this server is in.
returned: always
type: dict
sample: {
"default_outbound_policy": "RETURN",
"name": "testyaa-fw1",
"nickname": "testyaa cPanel rules",
"notes": "",
"public": false,
"rules": {
"51d7db54d39c3544ef7c48baa0b9944f": {
"action": "ACCEPT",
"comment": "",
"dest_ip6s": "any",
"dest_ips": "any",
"dest_ports": "any",
"direction": "Inbound",
"ip_version": "any",
"ordering": 2,
"protocols": "icmp",
"rule_group_name": "testyaa-fw1",
"rule_id": "51d7db54d39c3544ef7c48baa0b9944f",
"source_ip6s": "any",
"source_ips": "any",
"source_ports": "any"
}
}
}
firewall_type:
description: The type of firewall the server has (i.e. self-managed, managed).
returned: always
type: str
sample: 'managed'
host_name:
description: The server's hostname.
returned: always
type: str
sample: 'testyaa1.miniserver.com'
ignore_monitoring_off:
description: When true, Memset won't remind the customer that monitoring is disabled.
returned: always
type: bool
sample: true
ips:
description: List of dictionaries of all IP addresses assigned to the server.
returned: always
type: list
sample: [
{
"address": "1.2.3.4",
"bytes_in_today": 1000.0,
"bytes_in_yesterday": 2000.0,
"bytes_out_today": 1000.0,
"bytes_out_yesterday": 2000.0
}
]
monitor:
description: Whether the server has monitoring enabled.
returned: always
type: bool
sample: true
monitoring_level:
description: The server's monitoring level (i.e. basic).
returned: always
type: str
sample: 'basic'
name:
description: Server name (same as the service name).
returned: always
type: str
sample: 'testyaa1'
network_zones:
description: The network zone(s) the server is in.
returned: always
type: list
sample: [ 'reading' ]
nickname:
description: Customer-set nickname for the server.
returned: always
type: str
sample: 'database server'
no_auto_reboot:
description: Whether or not to reboot the server if monitoring detects it down.
returned: always
type: bool
sample: true
no_nrpe:
description: Whether Memset should use NRPE to monitor this server.
returned: always
type: bool
sample: true
os:
description: The server's Operating System.
returned: always
type: str
sample: 'debian_stretch_64'
penetration_patrol:
description: Intrusion detection support level for this server.
returned: always
type: str
sample: 'managed'
penetration_patrol_alert_level:
description: The alert level at which notifications are sent.
returned: always
type: int
sample: 10
primary_ip:
description: Server's primary IP.
returned: always
type: str
sample: '1.2.3.4'
renewal_price_amount:
description: Renewal cost for the server.
returned: always
type: str
sample: '30.00'
renewal_price_currency:
description: Currency for renewal payments.
returned: always
type: str
sample: 'GBP'
renewal_price_vat:
description: VAT rate for renewal payments
returned: always
type: str
sample: '20'
start_date:
description: Server's start date.
returned: always
type: str
sample: '2013-04-10'
status:
description: Current status of the server (i.e. live, onhold).
returned: always
type: str
sample: 'LIVE'
support_level:
description: Support level included with the server.
returned: always
type: str
sample: 'managed'
type:
description: What this server is (i.e. dedicated)
returned: always
type: str
sample: 'miniserver'
vlans:
description: Dictionary of tagged and untagged VLANs this server is in.
returned: always
type: dict
sample: {
tagged: [],
untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ]
}
vulnscan:
description: Vulnerability scanning level.
returned: always
type: str
sample: 'basic'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.memset import memset_api_call
def get_facts(args=None):
    '''
    Performs a simple API call and returns a JSON blob.

    Returns a dict with 'changed'/'failed' flags plus, when available,
    'memset_api' (the server info) and 'msg'/'stderr' on failure.
    '''
    retvals, payload = dict(), dict()
    has_changed, has_failed = False, False
    msg, memset_api = None, None
    payload['name'] = args['name']
    api_method = 'server.info'
    has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
    if has_failed:
        # this is the first time the API is called; incorrect credentials will
        # manifest themselves at this point so we need to ensure the user is
        # informed of the reason.
        retvals['failed'] = has_failed
        retvals['msg'] = msg
        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
        return retvals
    # we don't want to return the same thing twice
    msg = None
    memset_api = response.json()
    retvals['changed'] = has_changed
    retvals['failed'] = has_failed
    # Only include optional keys whose values are actually set.  Fix: the
    # previous loop tested the *name* string against None (always true) and
    # used eval(); iterate over the values directly instead.
    for key, value in (('msg', msg), ('memset_api', memset_api)):
        if value is not None:
            retvals[key] = value
    return retvals
def main():
    """Ansible module entry point: parse params, query the API, exit JSON."""
    global module
    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(required=True, type='str', no_log=True),
            name=dict(required=True, type='str')
        ),
        supports_check_mode=False
    )
    # Warn users still invoking this module under its pre-2.9 name.
    if module._name == 'memset_server_facts':
        module.deprecate("The 'memset_server_facts' module has been renamed to 'memset_server_info'", version='2.13')
    # populate the dict with the user-provided vars.
    args = dict()
    for key, arg in module.params.items():
        args[key] = arg
    retvals = get_facts(args)
    if retvals['failed']:
        module.fail_json(**retvals)
    else:
        module.exit_json(**retvals)
if __name__ == '__main__':
    main()
| {
"content_hash": "32543ee542d8016a8724c47eb4ed90b2",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 117,
"avg_line_length": 28.138983050847457,
"alnum_prop": 0.6060715576436574,
"repo_name": "thaim/ansible",
"id": "7642a053922a56b24de890c0f42a9002ea256b14",
"size": "8498",
"binary": false,
"copies": "24",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/cloud/memset/memset_server_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
from google.appengine.ext import ndb
from user.models import User
class Organization(ndb.Model):
    """Datastore model for a customer organization."""
    name = ndb.StringProperty()
    # Key of the User who owns this organization.
    owner = ndb.KeyProperty(kind=User)
    address1 = ndb.StringProperty()
    address2 = ndb.StringProperty()
    zipcode = ndb.StringProperty()
    city = ndb.StringProperty()
    state = ndb.StringProperty()
    country = ndb.StringProperty()
    phone = ndb.StringProperty()
    notes = ndb.TextProperty()
    created = ndb.DateTimeProperty(auto_now_add=True)
    @classmethod
    def get_by_name(cls, name):
        # NOTE(review): returns the Query object, not an entity (no .get());
        # callers presumably iterate or call .get() themselves -- confirm.
        return cls.query(cls.name == name)
    @classmethod
    def get_by_key(cls, key):
        """Return the Organization whose key matches the urlsafe string."""
        return cls.query(cls.key == ndb.Key(urlsafe = key)).get()
class OrganizationUser(ndb.Model):
    """Join model linking a User to an Organization (many-to-many)."""
    user = ndb.KeyProperty(kind=User)
    organization = ndb.KeyProperty(kind=Organization)
    @classmethod
    def fetch_organizations_by_user(cls, user):
        """Return all membership rows for the given user key."""
        return cls.query(cls.user == user).fetch()
    @classmethod
    def fetch_users_by_organization(cls, organization):
        """Return all membership rows for the given organization key."""
        return cls.query(cls.organization == organization).fetch()
    @classmethod
    def get_combo(cls, user, organization):
        """Return the membership row for (user, organization), or None.

        Fix: the previous filter used the Python ``and`` operator, which
        evaluates to its second operand only, so the user condition was
        silently dropped.  ndb ANDs filters passed as separate arguments.
        """
        return cls.query(cls.user == user, cls.organization == organization).get()
class OU(ndb.Model):
    """Organizational unit; ``parent`` points at its containing entity."""
    name = ndb.StringProperty()
    parent = ndb.KeyProperty()
    @classmethod
    def get_by_name(cls, name):
        # Returns the Query (not an entity), mirroring Organization.get_by_name.
        return cls.query(cls.name == name)
    @classmethod
    def get_by_key(cls, key):
        """Return the OU whose key matches the urlsafe string ``key``."""
        return cls.query(cls.key == ndb.Key(urlsafe = key)).get()
    @classmethod
    def fetch_by_parent(cls, organization):
        """Return all OUs whose parent is the given urlsafe-encoded key.

        Fix: the filter referenced ``cls.organization``, a property this
        model does not define (the field is named ``parent``), so the query
        raised AttributeError.  The parameter keeps its original name for
        backward compatibility with keyword callers.
        """
        return cls.query(cls.parent == ndb.Key(urlsafe = organization)).fetch()
| {
"content_hash": "829e8855fa9d2322568383e0a6dcffd4",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 93,
"avg_line_length": 30.160714285714285,
"alnum_prop": 0.6642984014209592,
"repo_name": "ThomasMarcel/ana-tool",
"id": "f73afcacec3b24698cf05b06a1dafb1a37054988",
"size": "1689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "organization/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "668"
},
{
"name": "Python",
"bytes": "52135"
},
{
"name": "Shell",
"bytes": "6406"
}
],
"symlink_target": ""
} |
import nose
from webium.plugins.browser_closer import BrowserCloserPlugin
# Run the 'tests' package under nose with verbose output, xunit XML
# reporting, and the browser-closer plugin ('--whenclose=after_all'
# defers closing the browser until the whole run finishes).
if __name__ == '__main__':
    nose.run_exit(argv=['nosetests', '-v', '--exe',
                        'tests',
                        '--with-xunit',
                        '--xunit-file=webium_xunit_output.xml',
                        '--whenclose=after_all',
                        ], addplugins=[BrowserCloserPlugin()],)
| {
"content_hash": "013b0dfcf0423ff55345c008cb55794e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 63,
"avg_line_length": 36.81818181818182,
"alnum_prop": 0.4740740740740741,
"repo_name": "drptbl/webium",
"id": "db173160bce47887b63ae0ffd1a994a76125af23",
"size": "405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "6616"
},
{
"name": "PowerShell",
"bytes": "519"
},
{
"name": "Python",
"bytes": "34538"
},
{
"name": "Shell",
"bytes": "415"
}
],
"symlink_target": ""
} |
""" Setuptools project configuration for merkletree. """
from os.path import exists
from setuptools import setup
# Read the long description from README.md when present.
LONG_DESC = None
if exists('README.md'):
    with open('README.md', 'r') as readme:  # renamed: don't shadow builtin 'file'
        LONG_DESC = readme.read()
setup(name='merkletree',
      version='5.4.0',
      author='Jim Dixon',
      author_email='jddixon@gmail.com',
      long_description=LONG_DESC,
      packages=['merkletree'],
      package_dir={'': 'src'},
      py_modules=[],
      include_package_data=False,
      zip_safe=False,
      scripts=['src/merkleize'],
      description='merkle tree library',
      url='https://jddixon.github.io/merkletree',
      classifiers=[
          'Development Status :: 4 - Beta',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          # Fix: trove classifiers require the ':: <version>' segment;
          # 'Programming Language :: Python 2.7' is not a valid classifier
          # and is rejected/ignored by PyPI.
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Topic :: Software Development :: Libraries :: Python Modules',
      ],)
| {
"content_hash": "27bdad88c5ec83e7d4a7ee42fc41685a",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 73,
"avg_line_length": 32.911764705882355,
"alnum_prop": 0.5960679177837355,
"repo_name": "jddixon/merkletree",
"id": "51ad061c25a29d5500a6f9bca8c3ebe4f5e9a379",
"size": "1161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6648"
},
{
"name": "Python",
"bytes": "84450"
},
{
"name": "Shell",
"bytes": "1750"
}
],
"symlink_target": ""
} |
import datetime
import json
from django import forms
from django.core.urlresolvers import reverse
from django.forms import Widget
from django.utils import formats
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from googleapiclient.discovery import build
import httplib2
from jet.dashboard.modules import DashboardModule
from oauth2client.client import flow_from_clientsecrets, OAuth2Credentials, AccessTokenRefreshError, Storage
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.utils.encoding import force_text
try:
from django.utils.encoding import force_unicode
except ImportError:
from django.utils.encoding import force_text as force_unicode
try:
from django.forms.utils import flatatt
except ImportError:
from django.forms.util import flatatt
JET_MODULE_GOOGLE_ANALYTICS_CLIENT_SECRETS_FILE = getattr(
settings,
'JET_MODULE_GOOGLE_ANALYTICS_CLIENT_SECRETS_FILE',
''
)
class ModuleCredentialStorage(Storage):
    """oauth2client Storage backed by a dashboard module's settings JSON.

    The credential is persisted inside the module's ``settings`` field
    under the ``credential`` key instead of a separate file or model.
    """
    def __init__(self, module):
        self.module = module
    def locked_get(self):
        # No-op: this storage does not implement the locked_* protocol.
        pass
    def locked_put(self, credentials):
        pass
    def locked_delete(self):
        pass
    def get(self):
        """Return the stored OAuth2Credentials, or None if absent/corrupt."""
        try:
            settings = json.loads(self.module.settings)
            credential = settings['credential']
            return OAuth2Credentials.from_json(credential)
        except (ValueError, KeyError):
            # Settings not valid JSON, or no credential stored yet.
            return None
    def put(self, credentials):
        # Serialize the credential into the module's settings blob.
        self.module.update_settings({'credential': credentials.to_json()})
    def delete(self):
        self.module.pop_settings(('credential',))
class GoogleAnalyticsClient:
    """Thin wrapper around the Google Analytics v3 API with OAuth2 flow."""
    credential = None
    analytics_service = None
    def __init__(self, storage=None, redirect_uri=None):
        # The client_secrets file location comes from Django settings.
        self.FLOW = flow_from_clientsecrets(
            JET_MODULE_GOOGLE_ANALYTICS_CLIENT_SECRETS_FILE,
            scope='https://www.googleapis.com/auth/analytics.readonly',
            redirect_uri=redirect_uri
        )
        if storage is not None:
            credential = storage.get()
            # NOTE(review): storage.get() may return None (corrupt settings),
            # in which case set_store raises AttributeError -- confirm callers
            # only pass a storage holding a valid credential.
            credential.set_store(storage)
            self.set_credential(credential)
    def get_oauth_authorize_url(self, state=''):
        """Step 1 of the OAuth2 flow: URL the user visits to grant access."""
        self.FLOW.params['state'] = state
        authorize_url = self.FLOW.step1_get_authorize_url()
        return authorize_url
    def set_credential(self, credential):
        self.credential = credential
        self.set_analytics_service(self.credential)
    def set_credential_from_request(self, request):
        # Step 2: exchange the auth code in the callback request for tokens.
        self.set_credential(self.FLOW.step2_exchange(request.GET))
    def set_analytics_service(self, credential):
        http = httplib2.Http()
        http = credential.authorize(http)
        self.analytics_service = build('analytics', 'v3', http=http)
    def api_profiles(self):
        """Return (profiles, error): all GA profiles across all accounts."""
        if self.analytics_service is None:
            return None, None
        try:
            profiles = self.analytics_service.management().profiles().list(
                accountId='~all',
                webPropertyId='~all'
            ).execute()
            return profiles['items'], None
        except (TypeError, KeyError) as e:
            return None, e
    def api_ga(self, profile_id, date1, date2, group=None):
        """Return (data, error): users/sessions/pageviews between date1/date2.

        ``group`` selects the GA dimension used for bucketing: 'day',
        'week', 'month', or None for un-grouped totals.
        """
        if self.analytics_service is None:
            return None, None
        if group == 'day':
            dimensions = 'ga:date'
        elif group == 'week':
            dimensions = 'ga:year,ga:week'
        elif group == 'month':
            dimensions = 'ga:year,ga:month'
        else:
            dimensions = ''
        try:
            data = self.analytics_service.data().ga().get(
                ids='ga:' + profile_id,
                start_date=date1.strftime('%Y-%m-%d'),
                end_date=date2.strftime('%Y-%m-%d'),
                metrics='ga:users,ga:sessions,ga:pageviews',
                dimensions=dimensions
            ).execute()
            return data, None
        except TypeError as e:
            return None, e
class CredentialWidget(Widget):
    """Form widget rendering a hidden credential input plus a grant/revoke link."""
    # Set by GoogleAnalyticsSettingsForm.set_module before rendering.
    module = None
    def render(self, name, value, attrs=None):
        # Offer "revoke" when a credential is already stored, else "grant".
        if value and len(value) > 0:
            link = '<a href="%s">%s</a>' % (
                reverse('jet-dashboard:google-analytics-revoke', kwargs={'pk': self.module.model.pk}),
                force_text(_('Revoke access'))
            )
        else:
            link = '<a href="%s">%s</a>' % (
                reverse('jet-dashboard:google-analytics-grant', kwargs={'pk': self.module.model.pk}),
                force_text(_('Grant access'))
            )
        attrs = self.build_attrs({
            'type': 'hidden',
            'name': 'credential',
        })
        attrs['value'] = force_unicode(value) if value else ''
        # The link HTML is interpolated before format_html, so only the
        # input attributes go through format_html's escaping.
        return format_html('%s<input{} />' % link, flatatt(attrs))
class GoogleAnalyticsSettingsForm(forms.Form):
    """Settings form shared by all Google Analytics dashboard widgets."""
    credential = forms.CharField(label=_('Access'), widget=CredentialWidget)
    counter = forms.ChoiceField(label=_('Counter'))
    # Period choices are day offsets back from today (0 = today only).
    period = forms.ChoiceField(label=_('Statistics period'), choices=(
        (0, _('Today')),
        (6, _('Last week')),
        (30, _('Last month')),
        (31 * 3 - 1, _('Last quarter')),
        (364, _('Last year')),
    ))
    def set_module(self, module):
        # Wire the widget to the module and refresh the counter dropdown.
        self.fields['credential'].widget.module = module
        self.set_counter_choices(module)
    def set_counter_choices(self, module):
        """Populate the counter dropdown from the module's GA profiles."""
        counters = module.counters()
        if counters is not None:
            self.fields['counter'].choices = (('', '-- %s --' % force_text(_('none'))),)
            # NOTE(review): relies on ChoiceField's choices setter
            # normalizing the tuple to a list so extend() works -- confirm
            # for the Django version in use.
            self.fields['counter'].choices.extend(map(lambda x: (x['id'], x['websiteUrl']), counters))
        else:
            # Distinguish "not yet authorized" from "profile listing failed".
            label = force_text(_('grant access first')) if module.credential is None else force_text(_('counters loading failed'))
            self.fields['counter'].choices = (('', '-- %s -- ' % label),)
class GoogleAnalyticsChartSettingsForm(GoogleAnalyticsSettingsForm):
    """Chart widget settings: adds metric ('show') and grouping selectors."""
    show = forms.ChoiceField(label=_('Show'), choices=(
        ('ga:users', capfirst(_('users'))),
        ('ga:sessions', capfirst(_('sessions'))),
        ('ga:pageviews', capfirst(_('views'))),
    ))
    group = forms.ChoiceField(label=_('Group'), choices=(
        ('day', _('By day')),
        ('week', _('By week')),
        ('month', _('By month')),
    ))
class GoogleAnalyticsPeriodVisitorsSettingsForm(GoogleAnalyticsSettingsForm):
    """Period-visitors widget settings: adds only a grouping selector."""
    group = forms.ChoiceField(label=_('Group'), choices=(
        ('day', _('By day')),
        ('week', _('By week')),
        ('month', _('By month')),
    ))
class GoogleAnalyticsBase(DashboardModule):
    """Base dashboard module for the Google Analytics widgets.

    Handles credential storage, settings (de)serialization, counter
    discovery and the GA API calls; subclasses render the fetched data.
    """
    settings_form = GoogleAnalyticsSettingsForm
    ajax_load = True
    contrast = True
    period = None      # statistics period, in days back from today
    credential = None  # serialized OAuth2 credential (JSON string) or None
    counter = None     # selected GA profile id or None
    error = None       # safe-HTML error message shown in the widget
    storage = None     # ModuleCredentialStorage bound to self.model
    def __init__(self, title=None, period=None, **kwargs):
        kwargs.update({'period': period})
        super(GoogleAnalyticsBase, self).__init__(title, **kwargs)
    def settings_dict(self):
        """Serialize widget settings for persistence."""
        return {
            'period': self.period,
            'credential': self.credential,
            'counter': self.counter
        }
    def load_settings(self, settings):
        """Restore widget settings from the persisted dict."""
        try:
            self.period = int(settings.get('period'))
        except TypeError:
            # period missing (None).  NOTE(review): a malformed string would
            # raise ValueError, which is not caught here -- confirm intended.
            self.period = 0
        self.credential = settings.get('credential')
        self.storage = ModuleCredentialStorage(self.model)
        self.counter = settings.get('counter')
    def init_with_context(self, context):
        raise NotImplementedError('subclasses of GoogleAnalytics must provide a init_with_context() method')
    def counters(self):
        """Return the available GA profiles, or None when unavailable."""
        try:
            client = GoogleAnalyticsClient(self.storage)
            profiles, exception = client.api_profiles()
            return profiles
        except Exception:
            # Any client/auth failure degrades to "no counters".
            return None
    def get_grouped_date(self, data, group):
        """Parse a GA row's date columns back into a datetime for ``group``."""
        if group == 'week':
            # GA reports year + week number; weekday '0' (Sunday, %w) is
            # appended so strptime can resolve them to a concrete date.
            date = datetime.datetime.strptime(
                '%s-%s-%s' % (data['ga_year'], data['ga_week'], '0'),
                '%Y-%W-%w'
            )
        elif group == 'month':
            date = datetime.datetime.strptime(data['ga_year'] + data['ga_month'], '%Y%m')
        else:
            date = datetime.datetime.strptime(data['ga_date'], '%Y%m%d')
        return date
    def format_grouped_date(self, data, group):
        """Human-readable label for a grouped GA row (range, month, or day)."""
        date = self.get_grouped_date(data, group)
        if group == 'week':
            # Render the week as a "start — end" day range.
            date = u'%s — %s' % (
                (date - datetime.timedelta(days=6)).strftime('%d.%m'),
                date.strftime('%d.%m')
            )
        elif group == 'month':
            date = date.strftime('%b, %Y')
        else:
            date = formats.date_format(date, 'DATE_FORMAT')
        return date
    def counter_attached(self):
        """Return True when a credential and counter are both configured.

        Otherwise set ``self.error`` to a message linking to the widget's
        settings page and return False.
        """
        if self.credential is None:
            self.error = mark_safe(_('Please <a href="%s">attach Google account and choose Google Analytics counter</a> to start using widget') % reverse('jet-dashboard:update_module', kwargs={'pk': self.model.pk}))
            return False
        elif self.counter is None:
            self.error = mark_safe(_('Please <a href="%s">select Google Analytics counter</a> to start using widget') % reverse('jet-dashboard:update_module', kwargs={'pk': self.model.pk}))
            return False
        else:
            return True
    def api_ga(self, group=None):
        """Fetch GA data for the configured counter/period; None on failure
        (with ``self.error`` set)."""
        if self.counter_attached():
            date1 = datetime.datetime.now() - datetime.timedelta(days=self.period)
            date2 = datetime.datetime.now()
            try:
                client = GoogleAnalyticsClient(self.storage)
                result, exception = client.api_ga(self.counter, date1, date2, group)
                if exception is not None:
                    raise exception
                return result
            except Exception as e:
                error = _('API request failed.')
                if isinstance(e, AccessTokenRefreshError):
                    # Expired/revoked token: suggest re-authorizing.
                    error += _(' Try to <a href="%s">revoke and grant access</a> again') % reverse('jet-dashboard:update_module', kwargs={'pk': self.model.pk})
                self.error = mark_safe(error)
class GoogleAnalyticsVisitorsTotals(GoogleAnalyticsBase):
    """
    Google Analytics widget that shows total number of users, sessions and viewers for a particular period of time.
    Period may be following: Today, Last week, Last month, Last quarter, Last year
    """
    title = _('Google Analytics visitors totals')
    template = 'jet.dashboard/modules/google_analytics_visitors_totals.html'
    #: Which period should be displayed. Allowed values - integer of days
    period = None
    def __init__(self, title=None, period=None, **kwargs):
        kwargs.update({'period': period})
        super(GoogleAnalyticsVisitorsTotals, self).__init__(title, **kwargs)
    def init_with_context(self, context):
        # Un-grouped API call: only the grand totals are rendered.
        result = self.api_ga()
        if result is not None:
            try:
                self.children.append({'title': _('users'), 'value': result['totalsForAllResults']['ga:users']})
                self.children.append({'title': _('sessions'), 'value': result['totalsForAllResults']['ga:sessions']})
                self.children.append({'title': _('views'), 'value': result['totalsForAllResults']['ga:pageviews']})
            except KeyError:
                self.error = _('Bad server response')
class GoogleAnalyticsVisitorsChart(GoogleAnalyticsBase):
    """
    Google Analytics widget that shows users/sessions/viewer chart for a particular period of time.
    Data is grouped by day, week or month
    Period may be following: Today, Last week, Last month, Last quarter, Last year
    """
    title = _('Google Analytics visitors chart')
    template = 'jet.dashboard/modules/google_analytics_visitors_chart.html'
    style = 'overflow-x: auto;'
    #: Which period should be displayed. Allowed values - integer of days
    period = None
    #: What data should be shown. Possible values: ``ga:users``, ``ga:sessions``, ``ga:pageviews``
    show = None
    #: Sets grouping of data. Possible values: ``day``, ``week``, ``month``
    group = None
    settings_form = GoogleAnalyticsChartSettingsForm
    class Media:
        # Chart.js plus the module's chart bootstrap script.
        js = ('jet.dashboard/vendor/chart.js/Chart.min.js', 'jet.dashboard/dashboard_modules/google_analytics.js')
    def __init__(self, title=None, period=None, show=None, group=None, **kwargs):
        kwargs.update({'period': period, 'show': show, 'group': group})
        super(GoogleAnalyticsVisitorsChart, self).__init__(title, **kwargs)
    def settings_dict(self):
        # Persist chart-specific options on top of the base settings.
        settings = super(GoogleAnalyticsVisitorsChart, self).settings_dict()
        settings['show'] = self.show
        settings['group'] = self.group
        return settings
    def load_settings(self, settings):
        super(GoogleAnalyticsVisitorsChart, self).load_settings(settings)
        self.show = settings.get('show')
        self.group = settings.get('group')
    def init_with_context(self, context):
        result = self.api_ga(self.group)
        if result is not None:
            try:
                for data in result['rows']:
                    # Rebuild each row as {column_name: value}; GA column
                    # names contain ':' which is normalized to '_'.
                    row_data = {}
                    i = 0
                    for column in result['columnHeaders']:
                        row_data[column['name'].replace(':', '_')] = data[i]
                        i += 1
                    date = self.get_grouped_date(row_data, self.group)
                    # Each child is a (date, metric value) chart point.
                    self.children.append((date, row_data[self.show.replace(':', '_')]))
            except KeyError:
                self.error = _('Bad server response')
class GoogleAnalyticsPeriodVisitors(GoogleAnalyticsBase):
    """
    Google Analytics widget that shows users, sessions and viewers for a particular period of time.
    Data is grouped by day, week or month
    Period may be following: Today, Last week, Last month, Last quarter, Last year
    """
    title = _('Google Analytics period visitors')
    template = 'jet.dashboard/modules/google_analytics_period_visitors.html'
    #: Which period should be displayed. Allowed values - integer of days
    period = None
    #: Sets grouping of data. Possible values: ``day``, ``week``, ``month``
    group = None
    contrast = False
    settings_form = GoogleAnalyticsPeriodVisitorsSettingsForm
    def __init__(self, title=None, period=None, group=None, **kwargs):
        kwargs.update({'period': period, 'group': group})
        super(GoogleAnalyticsPeriodVisitors, self).__init__(title, **kwargs)
    def settings_dict(self):
        # Persist the grouping option on top of the base settings.
        settings = super(GoogleAnalyticsPeriodVisitors, self).settings_dict()
        settings['group'] = self.group
        return settings
    def load_settings(self, settings):
        super(GoogleAnalyticsPeriodVisitors, self).load_settings(settings)
        self.group = settings.get('group')
    def init_with_context(self, context):
        result = self.api_ga(self.group)
        if result is not None:
            try:
                # Iterate newest-first for the table rendering.
                for data in reversed(result['rows']):
                    # Rebuild each row as {column_name: value}; GA column
                    # names contain ':' which is normalized to '_'.
                    row_data = {}
                    i = 0
                    for column in result['columnHeaders']:
                        row_data[column['name'].replace(':', '_')] = data[i]
                        i += 1
                    date = self.format_grouped_date(row_data, self.group)
                    self.children.append((date, row_data))
            except KeyError:
                self.error = _('Bad server response')
| {
"content_hash": "d21ec3ec379fc96243411141ed35ed13",
"timestamp": "",
"source": "github",
"line_count": 437,
"max_line_length": 215,
"avg_line_length": 35.64302059496568,
"alnum_prop": 0.5970724191063174,
"repo_name": "huanpc/IoT-1",
"id": "2e6c43efe9a1d371445efd13883c64db07e55418",
"size": "15596",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gui/controller/.venv/lib/python3.5/site-packages/jet/dashboard/dashboard_modules/google_analytics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "447"
},
{
"name": "CSS",
"bytes": "175618"
},
{
"name": "HTML",
"bytes": "225304"
},
{
"name": "Java",
"bytes": "1746124"
},
{
"name": "JavaScript",
"bytes": "249520"
},
{
"name": "Python",
"bytes": "6778003"
},
{
"name": "Shell",
"bytes": "16840"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import mock
from django.core import mail
from django.utils import timezone
from exam import fixture
from mock import Mock
from sentry.interfaces.stacktrace import Stacktrace
from sentry.models import Event, Group, Rule
from sentry.plugins import Notification
from sentry.plugins.sentry_mail.models import MailPlugin
from sentry.testutils import TestCase
class MailPluginTest(TestCase):
    """Tests for MailPlugin: notification gating, subject rendering,
    interface rendering with non-ASCII content, and recipient selection."""

    @fixture
    def plugin(self):
        return MailPlugin()

    @mock.patch('sentry.models.ProjectOption.objects.get_value', Mock(side_effect=lambda p, k, d: d))
    @mock.patch('sentry.plugins.sentry_mail.models.MailPlugin.get_sendable_users', Mock(return_value=[]))
    def test_should_notify_no_sendable_users(self):
        # With no sendable users the plugin must decline to notify.
        assert not self.plugin.should_notify(group=Mock(), event=Mock())

    def test_simple_notification(self):
        group = self.create_group(message='Hello world')
        event = self.create_event(group=group, message='Hello world')

        rule = Rule.objects.create(project=self.project, label='my rule')
        notification = Notification(event=event, rule=rule)

        with self.settings(SENTRY_URL_PREFIX='http://example.com'):
            self.plugin.notify(notification)

        msg = mail.outbox[0]
        assert msg.subject == '[Sentry] [foo Bar] ERROR: Hello world'
        # The triggering rule label must appear in the HTML alternative.
        # (Removed a leftover Python-2 debug statement: `print dir(msg)`.)
        assert 'my rule' in msg.alternatives[0][0]

    @mock.patch('sentry.plugins.sentry_mail.models.MailPlugin._send_mail')
    def test_notify_users_renders_interfaces_with_utf8(self, _send_mail):
        group = Group(
            id=2,
            first_seen=timezone.now(),
            last_seen=timezone.now(),
            project=self.project,
        )

        # Interface rendering returns non-ASCII text; the plugin must cope.
        stacktrace = Mock(spec=Stacktrace)
        stacktrace.to_email_html.return_value = u'רונית מגן'
        stacktrace.get_title.return_value = 'Stacktrace'

        event = Event()
        event.group = group
        event.project = self.project
        event.message = 'hello world'
        event.interfaces = {'sentry.interfaces.Stacktrace': stacktrace}

        notification = Notification(event=event)

        with self.settings(SENTRY_URL_PREFIX='http://example.com'):
            self.plugin.notify(notification)

        stacktrace.get_title.assert_called_once_with()
        stacktrace.to_email_html.assert_called_once_with(event)

    @mock.patch('sentry.plugins.sentry_mail.models.MailPlugin._send_mail')
    def test_notify_users_renders_interfaces_with_utf8_fix_issue_422(self, _send_mail):
        group = Group(
            id=2,
            first_seen=timezone.now(),
            last_seen=timezone.now(),
            project=self.project,
        )

        stacktrace = Mock(spec=Stacktrace)
        stacktrace.to_email_html.return_value = u'רונית מגן'
        stacktrace.get_title.return_value = 'Stacktrace'

        event = Event()
        event.group = group
        event.project = self.project
        # Regression input for issue 422: byte-escaped (non-UTF8-decoded) text.
        event.message = 'Soubor ji\xc5\xbe existuje'
        event.interfaces = {'sentry.interfaces.Stacktrace': stacktrace}

        notification = Notification(event=event)

        with self.settings(SENTRY_URL_PREFIX='http://example.com'):
            self.plugin.notify(notification)

        stacktrace.get_title.assert_called_once_with()
        stacktrace.to_email_html.assert_called_once_with(event)

    @mock.patch('sentry.plugins.sentry_mail.models.MailPlugin._send_mail')
    def test_notify_users_does_email(self, _send_mail):
        group = Group(
            id=2,
            first_seen=timezone.now(),
            last_seen=timezone.now(),
            project=self.project,
            message='hello world',
            logger='root',
        )

        event = Event(
            group=group,
            message=group.message,
            project=self.project,
            datetime=group.last_seen,
        )

        notification = Notification(event=event)

        with self.settings(SENTRY_URL_PREFIX='http://example.com'):
            self.plugin.notify(notification)

        # Compare with ==, not `is`: identity of small ints is a CPython
        # implementation detail and not a valid equality test.
        assert _send_mail.call_count == 1
        args, kwargs = _send_mail.call_args
        self.assertEqual(kwargs.get('project'), self.project)
        self.assertEqual(kwargs.get('group'), group)
        assert kwargs.get('subject') == u"[{0} {1}] ERROR: hello world".format(
            self.team.name, self.project.name)

    @mock.patch('sentry.plugins.sentry_mail.models.MailPlugin._send_mail')
    def test_multiline_error(self, _send_mail):
        group = Group(
            id=2,
            first_seen=timezone.now(),
            last_seen=timezone.now(),
            project=self.project,
            message='hello world\nfoo bar',
            logger='root',
        )

        event = Event(
            group=group,
            message=group.message,
            project=self.project,
            datetime=group.last_seen,
        )

        notification = Notification(event=event)

        with self.settings(SENTRY_URL_PREFIX='http://example.com'):
            self.plugin.notify(notification)

        # Only the first line of a multi-line message goes into the subject.
        assert _send_mail.call_count == 1
        args, kwargs = _send_mail.call_args
        assert kwargs.get('subject') == u"[{0} {1}] ERROR: hello world".format(
            self.team.name, self.project.name)

    def test_get_sendable_users(self):
        from sentry.models import UserOption, User

        user = self.create_user(email='foo@example.com', is_active=True)
        user2 = self.create_user(email='baz@example.com', is_active=True)
        self.create_user(email='baz2@example.com', is_active=True)

        # user with inactive account
        self.create_user(email='bar@example.com', is_active=False)

        # user not in any groups
        self.create_user(email='bar2@example.com', is_active=True)

        organization = self.create_organization(owner=user)
        team = self.create_team(organization=organization)
        project = self.create_project(name='Test', team=team)
        organization.member_set.get_or_create(user=user)
        organization.member_set.get_or_create(user=user2)

        # all members
        assert (sorted(set([user.pk, user2.pk])) ==
                sorted(self.plugin.get_sendable_users(project)))

        # disabled user2
        UserOption.objects.create(key='mail:alert', value=0,
                                  project=project, user=user2)
        assert user2.pk not in self.plugin.get_sendable_users(project)

        user4 = User.objects.create(username='baz4', email='bar@example.com',
                                    is_active=True)
        organization.member_set.get_or_create(user=user4)
        assert user4.pk in self.plugin.get_sendable_users(project)

        # disabled by default user4
        uo1 = UserOption.objects.create(key='subscribe_by_default', value='0',
                                        project=project, user=user4)
        assert user4.pk not in self.plugin.get_sendable_users(project)

        uo1.delete()

        # The option value may also arrive as unicode; must still disable.
        UserOption.objects.create(key='subscribe_by_default', value=u'0',
                                  project=project, user=user4)
        assert user4.pk not in self.plugin.get_sendable_users(project)

    def test_notify_users_with_utf8_subject(self):
        group = self.create_group(message=u'רונית מגן')
        event = self.create_event(group=group, message='Hello world')

        notification = Notification(event=event)

        with self.settings(SENTRY_URL_PREFIX='http://example.com'):
            self.plugin.notify(notification)

        msg = mail.outbox[0]
        assert msg.subject == u'[Sentry] [foo Bar] ERROR: רונית מגן'
| {
"content_hash": "6ec1f48b2e805755b2c5aa5fb86cddfb",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 105,
"avg_line_length": 35.840375586854464,
"alnum_prop": 0.6266701598113702,
"repo_name": "BayanGroup/sentry",
"id": "24127a016475c88047548a20ac09793cf9c94977",
"size": "7691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/plugins/mail/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "156607"
},
{
"name": "HTML",
"bytes": "188852"
},
{
"name": "JavaScript",
"bytes": "443758"
},
{
"name": "Makefile",
"bytes": "4647"
},
{
"name": "Python",
"bytes": "7069971"
}
],
"symlink_target": ""
} |
"""tensorflow_io.experimental.image"""
from tensorflow_io.python.experimental.image_ops import ( # pylint: disable=unused-import
draw_bounding_boxes,
decode_jpeg_exif,
decode_tiff_info,
decode_tiff,
decode_exr_info,
decode_exr,
decode_pnm,
decode_hdr,
decode_nv12,
decode_yuy2,
decode_avif,
decode_jp2,
decode_obj,
)
| {
"content_hash": "ffd42e905988f9d7bc2698e624302446",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 90,
"avg_line_length": 21.823529411764707,
"alnum_prop": 0.660377358490566,
"repo_name": "tensorflow/io",
"id": "aa678ee0cc4fbd932168895490bc736e0e662387",
"size": "1060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_io/python/api/experimental/image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1583693"
},
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "Dockerfile",
"bytes": "3938"
},
{
"name": "Go",
"bytes": "3757"
},
{
"name": "JavaScript",
"bytes": "6794"
},
{
"name": "Python",
"bytes": "1380386"
},
{
"name": "R",
"bytes": "82002"
},
{
"name": "Shell",
"bytes": "36295"
},
{
"name": "Starlark",
"bytes": "74322"
},
{
"name": "Swift",
"bytes": "19103"
}
],
"symlink_target": ""
} |
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from functools import partial
from image_transformation import *
from kalman_filter import KalmanFilter1D
FONT = cv2.FONT_HERSHEY_SIMPLEX
def identify_points(masked_image):
    """Split the nonzero pixels of a binary lane mask into left/right points.

    Pixels left of the horizontal midpoint belong to the left lane, the rest
    to the right lane.

    :param masked_image: 2-D array; nonzero entries mark lane pixels.
    :return: tuple ``(left_points, right_points)`` of arrays of ``(x, y)``
        coordinate pairs (empty 1-D arrays when no pixels fall on a side).
    """
    mid_point = masked_image.shape[1] // 2
    # nonzero() yields (row_indices, col_indices); store points as (x, y).
    rows, cols = masked_image.nonzero()
    points = [(x, y) for y, x in zip(rows, cols)]
    # Comprehensions instead of filter()+lambda for clarity.
    left_points = np.array([p for p in points if p[0] < mid_point])
    right_points = np.array([p for p in points if p[0] >= mid_point])
    return left_points, right_points
def draw_lanes(image, lines, color= [255, 0, 0], thickness=10):
    """Render the detected lane polygon and warp it back to image space.

    NOTE(review): this function references names that are not defined in its
    scope (``warped``, ``left_fitx``, ``right_fitx``, ``ploty``, ``Minv``)
    and never uses its ``lines``, ``color`` and ``thickness`` parameters —
    calling it raises NameError unless those globals happen to exist.
    It appears to be superseded by ``overlay_detected_lane`` below; confirm
    before use. Also note the mutable default ``color=[255, 0, 0]``.
    """
    # Create an image to draw the lines on
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0]))
    return newwarp
class Line(KalmanFilter1D):
    """
    Line is a state that keeps track of lane elements using 1D Kalman Filters
    Computes the radius of the curvature after fitting polynomial lanes.
    """
    def __init__(self, *args, **kwargs):
        # was the line detected in the last iteration?
        self.detected = False
        # fitted x values of the last n fits of the line
        self.x = []
        # y. does not change (one sample per pixel row, 0..720 inclusive)
        self.y = np.linspace(0, 720, 721)
        # average x values of the fitted line over the last n iterations
        self.wx = None
        # polynomial coefficients for the most recent fit
        self.current_fit = [np.array([False])]
        # radius of curvature of the line in pixel units
        self.radius_of_curvature = None
        # the distance of the line to corner of the image
        self.base_position = None
        # detected line pixels coordinates; this is the raw data
        self.pts = []
        # For real world transformation:
        # meters per pixel in y dimension
        self.ym_per_pix = 30 / 720
        # meters per pixel in x dimension (700 px is the distance
        # between lanes)
        self.xm_per_pix = 3.7 / 700
        # (successful detections, failed detections)
        self.detection_count = (0, 0)  # True, False
        # Number of frames rejected by the sanity checks in `evaluate`.
        self.rejected_images = 0
        ## Kalman Filter
        super(Line, self).__init__(*args, **kwargs)

    def set_curve_radius(self, order=2):
        """Curve Radius Based on Smoothed Lane Lines"""
        # Refit in real-world units (meters) using the smoothed Kalman state.
        fit_cr = np.polyfit(self.y * self.ym_per_pix, self.state * self.xm_per_pix, order)
        y_max = np.max(self.y)
        # Standard radius-of-curvature formula for a 2nd-order polynomial,
        # evaluated at the bottom of the image (y_max).
        curverad = ((1 + (2 * fit_cr[0] * y_max + fit_cr[1]) ** 2) ** 1.5) / abs(2 * fit_cr[0])
        self.radius_of_curvature = curverad

    def fit_poly_lanes(self, order=2, pixel_curvature=True):
        """Fit a polynomial based on current points"""
        # NOTE(review): the `pixel_curvature` parameter is unused here.
        # poly fit fit(y,x) - reversed for this particular problem
        _fit = np.polyfit(self.pts[:, 1], self.pts[:, 0], order)
        self.current_fit = _fit

    def set_fitted_x(self):
        # Evaluate the fitted 2nd-order polynomial x = a*y^2 + b*y + c
        # over the fixed y grid.
        a, b, c = self.current_fit
        self.x = a * self.y ** 2 + b * self.y + c

    def set_base_position(self):
        """ Base position with respect to zero value on x-axis"""
        # Last state x corresponds to a y-value at max.
        self.base_position = self.state[-1]

    def increment_detection_count(self):
        """Increment the detection count - times input was available"""
        _cnt = self.detection_count
        if len(self.pts) > 0:
            # Success
            self.detection_count = (_cnt[0] + 1, _cnt[1])
            self.detected = True
        else:
            # Failure
            self.detection_count = (_cnt[0], _cnt[1] + 1)
            self.detected = False

    def reject_crossing_pixels(self, next_filter):
        # Compare fits. If crossing occurs reject.
        # Returns True when the two lines are NOT strictly separated (i.e.
        # the proposal crosses the other line's state somewhere).
        if (next_filter.state < self.x).all() or (next_filter.state > self.x).all():
            return False
        else:
            return True

    def evaluate(self, next_filter, threshold=100):
        """Sanity-check the proposed fit against the smoothed state and the
        other lane line; sets ``self.detected = False`` on rejection."""
        # Account for initial pixels. warmup. 1 sec.
        # Proposed regions should stay inside the image.
        if self.detection_count[0] >= 25:
            # Reject if baseline and top are pixels too far away from current state
            # Reject if x coordinates cross
            if abs(self.x[-1] - self.state[-1]) > threshold or abs(self.x[0] - self.state[0]) > threshold:
                print("reject proposal {} state {} ".format(self.x[-1], self.state[-1]))
                print("reject proposal {} state {} ".format(self.x[0], self.state[0]))
                #print("kalman noise {}".format(self.noise))
                self.rejected_images += 1
                self.detected = False
            if self.reject_crossing_pixels(next_filter):
                print("reject crossing pixels")
                self.rejected_images += 1
                self.detected = False
            if min(self.x) < 0 or max(self.x) > 1280:
                print("proposed region outside")
                self.rejected_images += 1
                self.detected = False

    def set_weighted_x(self, w=0.9):
        """Exponentially-weighted blend of the new fit into the state.

        NOTE(review): not called from within this class; confirm callers
        before removing.
        """
        # Initial state
        if self.detection_count[0] <= 10:
            self.state = self.x
        else:
            self.state = w * self.state + (1 - w) * self.x

    def process_image(self, pts, next_filter):
        """Consume one frame's lane pixels: fit, validate, and either update
        the Kalman state or fall back to prediction."""
        self.pts = pts
        self.increment_detection_count()
        if len(self.pts) > 0:
            self.fit_poly_lanes()
            self.set_fitted_x()
            # Reject if incoming polynomial fit is too far away from current state
            # or pixels from current proposal crosses the state of
            # the previous line state
            self.evaluate(next_filter)
            if self.detected:
                # kalman update baseline next step
                self.update(self.x)
                # Using updated step, calculate the following:
                # curvature update
                self.set_curve_radius()
                # base position update
                self.set_base_position()
            else:
                # If no points found, predict the next step
                self.predict()
        else:
            # If no points found, predict the next step
            self.predict()
        print(np.mean(self.state))
        print("*****")
def overlay_detected_lane(img, transformer, warped, left, right, show_weighted=True):
    """Project the detected lane polygon back onto the undistorted frame and
    annotate it with curvature, center offset and detection status."""
    blank = np.zeros_like(warped).astype(np.uint8)
    overlay = np.dstack((blank, blank, blank))

    # Lane polygon: left boundary top-down, right boundary bottom-up.
    left_boundary = np.array([np.transpose(np.vstack([left.state, left.y]))])
    right_boundary = np.array([np.flipud(np.transpose(np.vstack([right.state, right.y])))])
    lane_polygon = np.hstack((left_boundary, right_boundary))
    cv2.fillPoly(overlay, np.int_([lane_polygon]), (0, 255, 0))

    # Inverse perspective transform back to camera view.
    M_inv = transformer.perspective_transformer.M_inv
    unwarped = cv2.warpPerspective(overlay, M_inv, (warped.shape[1], warped.shape[0]))
    annotated = cv2.addWeighted(img, 1, unwarped, 0.4, 0)

    # Text annotations, stacked top-down on the frame.
    mean_curvature = np.mean([left.radius_of_curvature, right.radius_of_curvature])
    center_offset = abs((img.shape[1] / 2) - ((left.base_position + right.base_position) / 2)) * left.xm_per_pix
    annotations = [
        ("radius of curvature: {} km".format(round(mean_curvature / 1e3, 2)), 40),
        ("distance from center: {} m".format(round(center_offset, 3)), 70),
        ("lane detected? Left: {} Right: {}".format(left.detected, right.detected), 100),
    ]
    for text, y_offset in annotations:
        cv2.putText(annotated, text, (10, y_offset), FONT, 1, (255, 255, 255), 2, cv2.LINE_AA)
    return annotated
def process(img, transformer, left, right):
    """Run the full lane-detection pipeline on one video frame and return
    the annotated, undistorted frame."""
    warped = transformer.transform(img)
    # Separate the lane pixels into the two image halves.
    left_pts, right_pts = identify_points(warped)
    # Update each line's filter state, passing the other line for the
    # crossing sanity check.
    left.process_image(left_pts, right)
    right.process_image(right_pts, left)
    # Draw the result on the undistorted original frame.
    undistorted = transformer.undistort_image(img)
    return overlay_detected_lane(undistorted, transformer, warped, left, right)
if __name__ == '__main__':
    # NOTE(review): `arguments` (docopt?) and `VideoFileClip` (moviepy) are
    # not defined or imported anywhere in this module — confirm the missing
    # imports before running.
    in_file = arguments['<input_video>']
    out_file = arguments['<output_video>']
    left, right = Line(), Line()
    print("Prepare the transformation pipeline for image preprocessing")
    transformer = setup_transformation_pipeline()
    fun = lambda x: process(x, transformer, left, right)
    print('Processing video ...')
    clip2 = VideoFileClip(in_file)
    # Bug fix: the per-frame callback is `fun` (defined above); the previous
    # code passed an undefined name `warp`, which raised NameError.
    vid_clip = clip2.fl_image(fun)
    vid_clip.write_videofile(out_file, audio=False)
| {
"content_hash": "bec21871ee63889d584d1850dbfc73a8",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 106,
"avg_line_length": 41.38528138528139,
"alnum_prop": 0.6106694560669456,
"repo_name": "dzorlu/sdc",
"id": "a59a28aa34693b5bf392e7023133fd96f1081e08",
"size": "9560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "advanced_lane_detection/lane_detection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "351494"
},
{
"name": "C++",
"bytes": "16386318"
},
{
"name": "CMake",
"bytes": "270470"
},
{
"name": "CSS",
"bytes": "5383"
},
{
"name": "Cuda",
"bytes": "131738"
},
{
"name": "Fortran",
"bytes": "1326303"
},
{
"name": "HTML",
"bytes": "1504171"
},
{
"name": "JavaScript",
"bytes": "7839"
},
{
"name": "Jupyter Notebook",
"bytes": "18788347"
},
{
"name": "Makefile",
"bytes": "224292"
},
{
"name": "Python",
"bytes": "85708"
},
{
"name": "Shell",
"bytes": "23876"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from kitsune.sumo.models import LocaleField, ModelBase
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import auto_delete_files
class Media(ModelBase):
    """Generic model for media: shared fields for gallery Image and Video."""
    title = models.CharField(max_length=255, db_index=True)
    created = models.DateTimeField(default=datetime.now, db_index=True)
    updated = models.DateTimeField(default=datetime.now, db_index=True)
    updated_by = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    description = models.TextField(max_length=10000)
    locale = LocaleField(default=settings.GALLERY_DEFAULT_LANGUAGE, db_index=True)
    # Nullable flag: drafts are marked, published rows presumably hold the
    # default (None) — confirm against the gallery views.
    is_draft = models.BooleanField(default=None, null=True, editable=False)

    class Meta(object):
        abstract = True
        ordering = ["-created"]
        # NOTE: "creator" is not declared on this abstract base; concrete
        # subclasses (Image, Video) define it, which lets this constraint
        # resolve. (is_draft, creator) limits each user to one draft.
        unique_together = (("locale", "title"), ("is_draft", "creator"))

    def __str__(self):
        # e.g. "[en-US] My title"
        return "[%s] %s" % (self.locale, self.title)
# auto_delete_files: presumably removes the stored files when the row is
# deleted — confirm in kitsune.sumo.utils.
@auto_delete_files
class Image(Media):
    """An image in the gallery."""
    creator = models.ForeignKey(User, on_delete=models.CASCADE, related_name="gallery_images")
    # Original upload; Django denormalizes dimensions into width/height below.
    file = models.ImageField(
        upload_to=settings.GALLERY_IMAGE_PATH,
        max_length=settings.MAX_FILEPATH_LENGTH,
        width_field="width",
        height_field="height",
    )
    thumbnail = models.ImageField(
        upload_to=settings.GALLERY_IMAGE_THUMBNAIL_PATH,
        null=True,
        max_length=settings.MAX_FILEPATH_LENGTH,
    )
    height = models.IntegerField(null=True)
    width = models.IntegerField(null=True)

    def get_absolute_url(self):
        """Return the canonical gallery URL for this image."""
        return reverse("gallery.media", args=["image", self.id])

    def thumbnail_url_if_set(self):
        """Returns self.thumbnail, if set, else self.file"""
        return self.thumbnail.url if self.thumbnail else self.file.url

    @property
    def documents(self):
        """Get the documents that include this image."""
        # Imported here to avoid a circular import with the wiki app.
        from kitsune.wiki.models import Document

        return Document.objects.filter(documentimage__image=self)
# auto_delete_files: presumably removes the stored files when the row is
# deleted — confirm in kitsune.sumo.utils.
@auto_delete_files
class Video(Media):
    """A video in the gallery, stored in multiple formats (webm/ogv/flv)."""
    creator = models.ForeignKey(User, on_delete=models.CASCADE, related_name="gallery_videos")
    webm = models.FileField(
        upload_to=settings.GALLERY_VIDEO_PATH, null=True, max_length=settings.MAX_FILEPATH_LENGTH
    )
    ogv = models.FileField(
        upload_to=settings.GALLERY_VIDEO_PATH, null=True, max_length=settings.MAX_FILEPATH_LENGTH
    )
    flv = models.FileField(
        upload_to=settings.GALLERY_VIDEO_PATH, null=True, max_length=settings.MAX_FILEPATH_LENGTH
    )
    # Optional still image shown before playback.
    poster = models.ImageField(
        upload_to=settings.GALLERY_VIDEO_THUMBNAIL_PATH,
        max_length=settings.MAX_FILEPATH_LENGTH,
        null=True,
    )
    thumbnail = models.ImageField(
        upload_to=settings.GALLERY_VIDEO_THUMBNAIL_PATH,
        null=True,
        max_length=settings.MAX_FILEPATH_LENGTH,
    )

    def get_absolute_url(self):
        """Return the canonical gallery URL for this video."""
        return reverse("gallery.media", args=["video", self.id])

    def thumbnail_url_if_set(self):
        """Returns self.thumbnail.url, if set, else default thumbnail URL"""
        progress_url = settings.GALLERY_VIDEO_THUMBNAIL_PROGRESS_URL
        return self.thumbnail.url if self.thumbnail else progress_url
| {
"content_hash": "14fb6ecfa39d291f57b89fbf4637a7b9",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 97,
"avg_line_length": 36,
"alnum_prop": 0.6905615292712067,
"repo_name": "mozilla/kitsune",
"id": "d05859590a0617a0d005153adfe4f2a0ec638f13",
"size": "3348",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "kitsune/gallery/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1156"
},
{
"name": "Dockerfile",
"bytes": "3027"
},
{
"name": "HTML",
"bytes": "535448"
},
{
"name": "JavaScript",
"bytes": "658477"
},
{
"name": "Jinja",
"bytes": "4837"
},
{
"name": "Makefile",
"bytes": "2193"
},
{
"name": "Nunjucks",
"bytes": "68656"
},
{
"name": "Python",
"bytes": "2827116"
},
{
"name": "SCSS",
"bytes": "240092"
},
{
"name": "Shell",
"bytes": "10759"
},
{
"name": "Svelte",
"bytes": "26864"
}
],
"symlink_target": ""
} |
"""
CloudPipe - Build a user-data payload zip file, and launch
an instance with it.
"""
import os
import string
import zipfile
from nova import compute
from nova.compute import instance_types
from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
# Configuration options for the cloudpipe service; registered on FLAGS below.
cloudpipe_opts = [
    cfg.StrOpt('vpn_instance_type',
               default='m1.tiny',
               help=_('Instance type for vpn instances')),
    cfg.StrOpt('boot_script_template',
               default='$pybasedir/nova/cloudpipe/bootscript.template',
               help=_('Template for cloudpipe instance boot script')),
    cfg.StrOpt('dmz_net',
               default='10.0.0.0',
               help=_('Network to push into openvpn config')),
    cfg.StrOpt('dmz_mask',
               default='255.255.255.0',
               help=_('Netmask to push into openvpn config')),
]

# cnt_vpn_clients is registered by the network manager; declare it so it can
# be referenced here even if that module has not been imported yet.
flags.DECLARE('cnt_vpn_clients', 'nova.network.manager')

FLAGS = flags.FLAGS
FLAGS.register_opts(cloudpipe_opts)

LOG = logging.getLogger(__name__)
class CloudPipe(object):
    """Builds the cloudpipe user-data payload and launches VPN instances.

    The payload is a zip archive (boot script, CRL, server key/cert, CA cert)
    handed to the instance as user-data.
    """

    def __init__(self):
        self.compute_api = compute.API()

    def get_encoded_zip(self, project_id):
        """Build payload.zip for *project_id* and return it double-base64-encoded.

        :param project_id: project whose VPN credentials go into the payload
        :return: doubly base64-encoded zip contents
        """
        # Make a payload.zip
        with utils.tempdir() as tmpdir:
            filename = "payload.zip"
            zippath = os.path.join(tmpdir, filename)

            # Fix: use `with` so the template file is closed even if reading
            # or substitution fails (previously leaked on exception).
            with open(FLAGS.boot_script_template, "r") as shellfile:
                s = string.Template(shellfile.read())
            boot_script = s.substitute(cc_dmz=FLAGS.ec2_dmz_host,
                                       cc_port=FLAGS.ec2_port,
                                       dmz_net=FLAGS.dmz_net,
                                       dmz_mask=FLAGS.dmz_mask,
                                       num_vpn=FLAGS.cnt_vpn_clients)
            # genvpn, sign csr
            crypto.generate_vpn_files(project_id)

            z = zipfile.ZipFile(zippath, "w", zipfile.ZIP_DEFLATED)
            try:
                z.writestr('autorun.sh', boot_script)
                crl = os.path.join(crypto.ca_folder(project_id), 'crl.pem')
                z.write(crl, 'crl.pem')
                server_key = os.path.join(crypto.ca_folder(project_id),
                                          'server.key')
                z.write(server_key, 'server.key')
                ca_crt = os.path.join(crypto.ca_path(project_id))
                z.write(ca_crt, 'ca.crt')
                server_crt = os.path.join(crypto.ca_folder(project_id),
                                          'server.crt')
                z.write(server_crt, 'server.crt')
            finally:
                # Fix: guarantee the archive is finalized/closed on error.
                z.close()

            # NOTE(vish): run instances expects encoded userdata, it is decoded
            # in the get_metadata_call. autorun.sh also decodes the zip file,
            # hence the double encoding.
            # Fix: read the archive in binary mode ("rb", was "r") — zip data
            # is binary — and close the handle deterministically.
            with open(zippath, "rb") as zippy:
                encoded = zippy.read().encode("base64").encode("base64")
            return encoded

    def launch_vpn_instance(self, context):
        """Launch the per-project VPN instance with the generated payload."""
        LOG.debug(_("Launching VPN for %s") % (context.project_id))
        key_name = self.setup_key_pair(context)
        group_name = self.setup_security_group(context)
        instance_type = instance_types.get_instance_type_by_name(
            FLAGS.vpn_instance_type)
        instance_name = '%s%s' % (context.project_id, FLAGS.vpn_key_suffix)
        user_data = self.get_encoded_zip(context.project_id)
        return self.compute_api.create(context,
                                       instance_type,
                                       FLAGS.vpn_image_id,
                                       display_name=instance_name,
                                       user_data=user_data,
                                       key_name=key_name,
                                       security_group=[group_name])

    def setup_security_group(self, context):
        """Create (if missing) the VPN security group and return its name.

        Opens UDP 1194 (OpenVPN) and ICMP from anywhere.
        """
        group_name = '%s%s' % (context.project_id, FLAGS.vpn_key_suffix)
        if db.security_group_exists(context, context.project_id, group_name):
            return group_name
        group = {'user_id': context.user_id,
                 'project_id': context.project_id,
                 'name': group_name,
                 'description': 'Group for vpn'}
        group_ref = db.security_group_create(context, group)
        rule = {'parent_group_id': group_ref['id'],
                'cidr': '0.0.0.0/0',
                'protocol': 'udp',
                'from_port': 1194,
                'to_port': 1194}
        db.security_group_rule_create(context, rule)
        rule = {'parent_group_id': group_ref['id'],
                'cidr': '0.0.0.0/0',
                'protocol': 'icmp',
                'from_port': -1,
                'to_port': -1}
        db.security_group_rule_create(context, rule)
        # NOTE(vish): No need to trigger the group since the instance
        #             has not been run yet.
        return group_name

    def setup_key_pair(self, context):
        """Create (best-effort) the VPN keypair and return its name.

        The private key is written under FLAGS.keys_path. Failures
        (duplicate keypair, filesystem errors) are deliberately swallowed:
        the keypair may already exist from a previous run.
        """
        key_name = '%s%s' % (context.project_id, FLAGS.vpn_key_suffix)
        try:
            keypair_api = compute.api.KeypairAPI()
            result = keypair_api.create_key_pair(context,
                                                 context.user_id,
                                                 key_name)
            private_key = result['private_key']

            key_dir = os.path.join(FLAGS.keys_path, context.user_id)
            utils.ensure_tree(key_dir)
            key_path = os.path.join(key_dir, '%s.pem' % key_name)
            with open(key_path, 'w') as f:
                f.write(private_key)
        except (exception.Duplicate, os.error, IOError):
            pass
        return key_name
| {
"content_hash": "f3a697fa47cfe44c40aa4cbd1aecceae",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 79,
"avg_line_length": 40.5,
"alnum_prop": 0.5322552599547905,
"repo_name": "paulmathews/nova",
"id": "206ce1835935d6b7b5c48d6935506126bd97a0dd",
"size": "6528",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/folsom",
"path": "nova/cloudpipe/pipelib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7293434"
},
{
"name": "Shell",
"bytes": "16910"
}
],
"symlink_target": ""
} |
"""Constant strings used throughout the package."""
import re
from typing import Any, List, Mapping, Optional, Sequence, Tuple, Union
import tensorflow as tf
# TODO(b/245729353) Migrate string constants to enums where appropriate.

# Formatting string for feature names.
CONTEXT_FMT = '{stype}/{fname}'
FEATURE_FMT = '{stype}/{sname}.{fname}'

# Names of set types.
CONTEXT = 'context'
NODES = 'nodes'
EDGES = 'edges'
SetType = str  # A value of CONTEXT, NODES or EDGES.

# Name of the implicitly-defined set size feature, source and target index
# features, for serialization.
SIZE_NAME = '#size'
SOURCE_NAME = '#source'
TARGET_NAME = '#target'
RESERVED_FEATURES = frozenset({SIZE_NAME, SOURCE_NAME, TARGET_NAME})

# The conventional feature name for the hidden state (neuron activations) of
# an edge set, a node set or the context. Not special in GraphTensor, but used
# in some modeling libraries on top if explicit names are not needed.
HIDDEN_STATE = 'hidden_state'

# The pattern of feature names that are not allowed on a graph tensor and
# schema.
RESERVED_REGEX = re.compile(r'#.*')

# The internal metadata key prefix to use for hyper adjacency.
INDEX_KEY_PREFIX = '#index.'

# All edges in an EdgeSet have the same number of incident nodes. Each incident
# node is identified by a unique tag, a small integer. For ordinary graphs,
# these are SOURCE and TARGET, by convention. Other or additional
# numbers can be used, e.g., for hypergraphs.
IncidentNodeTag = int

# Integer tags for selecting the specific endpoints of an edge in a
# HyperAdjacency.
SOURCE: IncidentNodeTag = 0
TARGET: IncidentNodeTag = 1

# Generic pool and broadcast ops allow the special case tfgnn.CONTEXT (a str)
# in addition to pooling from or broadcasting to tfgnn.SOURCE and tfgnn.TARGET.
IncidentNodeOrContextTag = Union[IncidentNodeTag, str]

# Type aliases for the names of graph pieces and their features.
FieldName = str  # Name of a context, node set or edge set field
NodeSetName = str  # Name of a NodeSet within a GraphTensor
EdgeSetName = str  # Name of an EdgeSet within a GraphTensor
SetName = str  # A NodeSetName or EdgeSetName
FieldNameOrNames = Union[FieldName, Sequence[FieldName]]

ShapeLike = Union[tf.TensorShape, Tuple[Optional[int], ...],
                  List[Optional[int]]]

# A feature value is a dense or ragged tensor; the *Spec aliases mirror that.
Field = Union[tf.Tensor, tf.RaggedTensor]
FieldSpec = Union[tf.TensorSpec, tf.RaggedTensorSpec]
Fields = Mapping[FieldName, Field]
FieldsSpec = Mapping[FieldName, FieldSpec]
FieldOrFields = Union[Field, Fields]

# An arbitrarily deep nest of fields. Pytype cannot express this.
FieldsNest = Union[Field, List[Any], Tuple[Any], Mapping[str, Any]]

# If set, enables validation for objects contructed within the library. This
# flag does not interfere with validation flags controlled by user. It is used
# to better control library self-consistency.
#
# TODO(aferludin): disable in prod as those checks may be expensive.
validate_internal_results = True

# Default representation type for indices and size integers.
# Can be either tf.int32 or tf.int64.
#
# IMPORTANT: On TPUs tf.int64 is not implemented.
default_indices_dtype = tf.int32

# DEPRECATED
# An older name used before tensorflow_gnn 0.2.
DEFAULT_STATE_NAME = HIDDEN_STATE
| {
"content_hash": "cc9cbb86300ce4f19bdaedfc03220057",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 79,
"avg_line_length": 35.95454545454545,
"alnum_prop": 0.7550568900126422,
"repo_name": "tensorflow/gnn",
"id": "2ec302dce3be90ff522e9ddca18ed6d1b6249662",
"size": "3857",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_gnn/graph/graph_constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2491"
},
{
"name": "Python",
"bytes": "1770047"
},
{
"name": "Shell",
"bytes": "3120"
},
{
"name": "Starlark",
"bytes": "47061"
}
],
"symlink_target": ""
} |
from flask import render_template, request, redirect, \
url_for, flash, session, abort, current_app, jsonify
from flask.blueprints import Blueprint
from flask.ext.babel import gettext
from flask.ext.login import current_user, login_user, logout_user, \
login_required
from sqlalchemy.exc import OperationalError
from ldap3 import LDAPCommunicationError
from model import dormitory_from_name, user_from_ip, premature_dormitories
from model.default import BaseUser
from sipa.forms import flash_formerrors, LoginForm, AnonymousContactForm
from sipa.utils import current_user_name
from sipa.utils.exceptions import UserNotFound, PasswordInvalid
from sipa.utils.mail_utils import send_mail
import logging
# Module-level logger named after this module (sipa convention).
logger = logging.getLogger(__name__)
# Blueprint collecting the generic (non-usersuite) views of the app.
bp_generic = Blueprint('generic', __name__)
@bp_generic.before_app_request
def log_request():
    """Log every incoming request and enrich Sentry context when present.

    If the Sentry extension is registered on the app, attach the
    current user and the user resolved from the client IP as extra
    context before logging the request line.
    """
    sentry = current_app.extensions.get('sentry')
    if sentry is not None:
        sentry.client.extra_context({
            'current_user': current_user,
            'ip_user': user_from_ip(request.remote_addr)
        })

    http_logger = logging.getLogger(__name__ + '.http')
    http_logger.debug(
        'Incoming request: %s %s', request.method, request.path,
        extra={'tags': {'user': current_user_name(),
                        'ip': request.remote_addr}}
    )
@bp_generic.app_errorhandler(401)
@bp_generic.app_errorhandler(403)
@bp_generic.app_errorhandler(404)
def error_handler_redirection(e):
    """Render a friendly error page for common HTTP errors.

    :param e: The error
    :return: A flask response with the according HTTP error code
    """
    # Map the handled status codes to their user-facing messages;
    # gettext() is a pure lookup, so evaluating all entries is harmless.
    messages = {
        401: gettext("Bitte melde Dich an, um die Seite zu sehen."),
        403: gettext("Diese Funktion wird in deinem Wohnheim "
                     "nicht unterstützt."),
        404: gettext("Das von Dir angeforderte Dokument gibt es nicht."),
    }
    fallback = gettext("Es ist ein Fehler aufgetreten!")
    return render_template(
        'error.html',
        errorcode=e.code,
        message=messages.get(e.code, fallback)
    ), e.code
@bp_generic.app_errorhandler(OperationalError)
def exceptionhandler_sql(ex):
    """Handle global MySQL errors (server down).

    Flashes an error message and sends the user back to the index.
    """
    message = gettext("Verbindung zum SQL-Server konnte nicht "
                      "hergestellt werden!")
    flash(message, "error")
    logger.critical(
        'Unable to connect to MySQL server',
        extra={'data': {'exception_args': ex.args}}
    )
    return redirect(url_for('generic.index'))
@bp_generic.app_errorhandler(LDAPCommunicationError)
def exceptionhandler_ldap(ex):
    """Handle global LDAPCommunicationError exceptions.

    The session must be reset: if a logged-in user's server fails
    during his session, it would otherwise cause a redirect loop.
    This also resets the language choice, btw.

    The alternative would be a try-except catch block in load_user,
    but login also needs a handler.
    """
    # Drop all session state first to break any redirect loop.
    session.clear()
    message = gettext(
        "Verbindung zum LDAP-Server konnte nicht hergestellt werden!")
    flash(message, "error")
    logger.critical(
        'Unable to connect to LDAP server',
        extra={'data': {'exception_args': ex.args}}
    )
    return redirect(url_for('generic.index'))
@bp_generic.route("/language/<string:lang>")
def set_language(lang='de'):
"""Set the session language via URL
"""
session['locale'] = lang
return redirect(request.referrer)
@bp_generic.route('/index.php')
@bp_generic.route('/')
def index():
    # The landing page simply redirects to the news feed.
    return redirect(url_for('news.display'))
@bp_generic.route("/login", methods=['GET', 'POST'])
def login():
"""Login page for users
"""
form = LoginForm()
if form.validate_on_submit():
dormitory = dormitory_from_name(form.dormitory.data)
username = form.username.data
password = form.password.data
remember = form.remember.data
User = dormitory.datasource.user_class
valid_suffix = "@{}".format(dormitory.datasource.mail_server)
if username.endswith(valid_suffix):
username = username[:-len(valid_suffix)]
try:
user = User.authenticate(username, password)
except (UserNotFound, PasswordInvalid) as e:
cause = "username" if isinstance(e, UserNotFound) else "password"
logger.info("Authentication failed: Wrong %s", cause, extra={
'tags': {'user': username, 'rate_critical': True}
})
flash(gettext("Anmeldedaten fehlerhaft!"), "error")
else:
if isinstance(user, User):
session['dormitory'] = dormitory.name
login_user(user, remember=remember)
logger.info('Authentication successful',
extra={'tags': {'user': username}})
flash(gettext("Anmeldung erfolgreich!"), "success")
elif form.is_submitted():
flash_formerrors(form)
if current_user.is_authenticated:
return redirect(url_for('usersuite.usersuite'))
return render_template('login.html', form=form,
unsupported=premature_dormitories)
@bp_generic.route("/logout")
@login_required
def logout():
logger.info("Logging out",
extra={'tags': {'user': current_user.uid}})
logout_user()
flash(gettext("Abmeldung erfolgreich!"), 'success')
return redirect(url_for('.index'))
@bp_generic.route("/usertraffic")
def usertraffic():
"""Show a user's traffic on a static site just as in the usersuite.
If a user is logged but the ip corresponds to another user, a hint
is flashed and the traffic of the `ip_user` is displayed.
"""
ip_user = user_from_ip(request.remote_addr)
chosen_user = None
if current_user.is_authenticated:
chosen_user = current_user
if ip_user.is_authenticated:
chosen_user = ip_user
if current_user.is_authenticated:
if current_user != ip_user:
flash(gettext("Ein anderer Nutzer als der für diesen "
"Anschluss Eingetragene ist angemeldet!"),
'warning')
flash(gettext("Hier werden die Trafficdaten "
"dieses Anschlusses angezeigt."), "info")
if chosen_user:
user_id = chosen_user.id.value if chosen_user.id.supported else None
return render_template("usertraffic.html",
user_id=user_id,
traffic_user=chosen_user)
abort(401)
@bp_generic.route('/usertraffic/json')
def traffic_api():
    """Return the requesting user's credit and traffic history as JSON."""
    if current_user.is_authenticated:
        user = current_user
    else:
        # Fall back to the user registered for the client's connection.
        user = user_from_ip(request.remote_addr)

    history = [{'day': entry['day'],
                'in': entry['input'],
                'out': entry['output']}
               for entry in user.traffic_history]

    return jsonify(version=3, quota=user.credit, history=history)
@bp_generic.route('/contact', methods=['GET', 'POST'])
def contact():
    """Anonymous contact form.

    On a valid submit, forward the message to the support address of
    the selected dormitory's datasource and redirect to the index.
    Already-authenticated users get a hint that the anonymous form is
    only needed to reach administrators of another dormitory.
    """
    form = AnonymousContactForm()

    if form.validate_on_submit():
        sender = form.email.data
        subject = "[Kontakt] {}".format(form.subject.data)
        body = form.message.data

        dormitory = dormitory_from_name(form.dormitory.data)
        recipient = dormitory.datasource.support_mail

        if send_mail(sender, recipient, subject, body):
            flash(gettext("Nachricht wurde versandt."), "success")
        else:
            flash(gettext("Es gab einen Fehler beim Versenden der Nachricht."),
                  'error')
        return redirect(url_for(".index"))

    if form.is_submitted():
        flash_formerrors(form)
    elif current_user.is_authenticated:
        flash(gettext("Sicher, dass Du das anonyme Formular "
                      "benutzen möchtest? Dies ist nur erforderlich, wenn Du "
                      "Administratoren eines anderen Wohnheims "
                      "kontaktieren willst."), 'info')

    return render_template('anonymous_contact.html', form=form)
| {
"content_hash": "d977891db396bdce6016dc2e1e5fcbe7",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 84,
"avg_line_length": 34.13502109704641,
"alnum_prop": 0.6292954264524104,
"repo_name": "fgrsnau/sipa",
"id": "55b88243fa39ac6db420f2b834460bc61acc6412",
"size": "8119",
"binary": false,
"copies": "1",
"ref": "refs/heads/issue-150",
"path": "sipa/blueprints/generic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2227"
},
{
"name": "HTML",
"bytes": "39400"
},
{
"name": "JavaScript",
"bytes": "279497"
},
{
"name": "Makefile",
"bytes": "265"
},
{
"name": "Nginx",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "118302"
}
],
"symlink_target": ""
} |
"""
@package ion.services.sa.observatory.test.activation_test_resources
@file ion/services/sa/observatory/test/activation_test_resources.py
@author Edward Hunter
@brief Object configurations for activation tests. Unclutters test file.
"""
__author__ = 'Edward Hunter'
# Pyon object and resource imports.
from pyon.public import IonObject, log, RT, PRED, LCS, OT, CFG
from interface.objects import PlatformSite
from interface.objects import InstrumentSite
from interface.objects import Deployment
from interface.objects import CabledInstrumentDeploymentContext
from interface.objects import CabledNodeDeploymentContext
from interface.objects import RemotePlatformDeploymentContext
from interface.objects import GeospatialBounds
from interface.objects import GeospatialCoordinateReferenceSystem
from interface.objects import GeospatialIndex
from interface.objects import TemporalBounds
from interface.objects import PlatformPort
from interface.objects import SiteEnvironmentType
from interface.objects import CommissionedStatusType
from interface.objects import ContactInformation
from interface.objects import DriverTypeEnum
from interface.objects import DeploymentTypeEnum
# The following definitions describe resources loaded on top of preload,
# until preload is augmented to include them. We assume the system
# exists with all sites and deployments already present.

# Human-readable names / preload alt-ids of the RSN facility resources.
RSN_FACILITY_NAME = 'RSN Facility'
RSN_FACILITY_ALT_ID = 'MF_RSN'
RSN_PLATFORM_SITE = 'Medium Power JBox 01A - Regional Continental Margin Base'
RSN_INSTRUMENT_SITE = 'Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base'
# NOTE(review): RSN_INSTRUMENT_01 / RSN_INSTRUMENT_02 are rebound below to
# resource-configuration dicts, shadowing these name strings — confirm the
# string values are not needed after that point.
RSN_INSTRUMENT_01 = 'Instrument RS01SLBS-MJ01A-02-PRESTA999 device #01'
RSN_INSTRUMENT_02 = 'Instrument RS01SLBS-MJ01A-02-PRESTA999 device #02'
RSN_PLATFORM_ALT_ID = 'RS01SLBS-MJ01A_PD'
RSN_PLATFORM_DEPLOYMENT_ALT_ID = 'RS01SLBS-MJ01A_DEP'
RSN_PLATFORM_SITE_ALT_ID = 'RS01SLBS-MJ01A'
# 3-Wavelength Fluorometer on Mooring Riser 003 - Coastal Pioneer Central
# Instrument CP01CNSM-RI003-05-FLORTD999 device #01
EXAMPLE_DEVICE_ALT_ID = 'CP01CNSM-RI003-05-FLORTD999_ID'
# Preload alt-ids of the CGSN facility resources (mooring, riser, models).
CGSN_MOORING_PLATFORM_ALT_ID = 'GP03FLMA-FM001_PD'
CGSN_MOORING_DEPLOYMENT_ALT_ID = 'GP03FLMA-FM001_DEP'
CGSN_RISER_PLATFORM_ALT_ID = 'GP03FLMA-RI001_PD'
CGSN_RISER_INSTRUMENT_ALT_ID = 'GP03FLMA-RI001-16-CTDMOH999_ID'
#CGSN_MOORING_MODEL_ALT_ID = 'LM_PM'
CGSN_MOORING_MODEL_ALT_ID = 'FM_PM'
CGSN_RISER_MODEL_ALT_ID = 'RI_PM'
CGSN_MOORING_SITE_ALT_ID = 'GP03FLMA-FM001'
CGSN_RISER_SITE_ALT_ID = 'GP03FLMA-RI001'
#CGSN_INSTRUMENT_MODEL_ALT_ID = 'CTDMOG'
CGSN_INSTRUMENT_MODEL_ALT_ID = 'CTDMOG'
CGSN_FACILITY_ALT_ID = 'MF_CGSN'
# Configuration for the first RSN PRESTA instrument device; rebinds the
# name string defined above to a dict of InstrumentDevice attributes.
RSN_INSTRUMENT_01 = dict(
    org='rsn',
    instrument_model = 'PRESTA',
    platform_device = 'RS01SLBS-MJ01A_PD',
    site = 'RS01SLBS-MJ01A-02-PRESTA999',
    name='Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base',
    description='Instrument RS01SLBS-MJ01A-02-PRESTA999 device #01',
    alt_ids=["PRE:RS01SLBS-MJ01A-02-PRESTA999_ID"],
    serial_number='123123',
    monitorable=True,
    controllable=True,
    message_controllable=True,
    custom_attributes={},
    contacts=[ContactInformation()],
    reference_urls=[],
    commissioned=CommissionedStatusType.COMMISSIONED,
    last_calibration_datetime='',
    hardware_version='',
    firmware_version='',
    #lcstate='DEPLOYED',
    #availability='AVAILABLE',
    #ts_created='',
    #ts_updated='',
    #addl={},
    )
# Second PRESTA device for the same site; carries no alt-ids and no
# org/model/site wiring (those are supplied by the test code itself).
RSN_INSTRUMENT_02 = dict (
    name='Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base',
    description='Instrument RS01SLBS-MJ01A-02-PRESTA999 device #02',
    alt_ids=[],
    serial_number='456456',
    monitorable=True,
    controllable=True,
    message_controllable=True,
    custom_attributes={},
    contacts=[ContactInformation()],
    reference_urls=[],
    commissioned=CommissionedStatusType.COMMISSIONED,
    last_calibration_datetime='',
    hardware_version='',
    firmware_version='',
    #lcstate='DEPLOYED',
    #availability='AVAILABLE',
    #ts_created='',
    #ts_updated='',
    #addl={},
    )
# Agent instance configuration for the first PRESTA device, including the
# org/agent/device wiring used when loading the resource.
RSN_AGENT_01 = dict(
    org='rsn',
    agent='Agent-0.1-PRESTA',
    device = 'RS01SLBS-MJ01A-02-PRESTA999_ID',
    name='PRESTA Agent Instance',
    description='Instrument Agent Instance for PRESTA Device',
    alt_ids=['PRE:AgentInstance-PRESTA'],
    agent_config={},
    startup_config={},
    agent_spawn_config={},
    saved_agent_state={},
    driver_config={},
    port_agent_config={},
    alerts=[],
    agent_process_id='',
    deployment_type=DeploymentTypeEnum.PROCESS,
    #lcstate='DEPLOYED',
    #availability='AVAILABLE',
    #type_='InstrumentAgentInstance',
    #ts_created='',
    #ts_updated='',
    #addl={},
    )
# Same agent instance without the org/agent/device wiring (left commented
# out); used when the association is created by the test itself.
RSN_AGENT_02 = dict(
    #org='rsn',
    #agent='Agent-0.1-PRESTA',
    #device = 'RS01SLBS-MJ01A-02-PRESTA999_ID',
    name='PRESTA Agent Instance',
    description='Instrument Agent Instance for PRESTA Device',
    alt_ids=['PRE:AgentInstance-PRESTA'],
    agent_config={},
    startup_config={},
    agent_spawn_config={},
    saved_agent_state={},
    driver_config={},
    port_agent_config={},
    alerts=[],
    agent_process_id='',
    deployment_type=DeploymentTypeEnum.PROCESS,
    #lcstate='DEPLOYED',
    #availability='AVAILABLE',
    #type_='InstrumentAgentInstance',
    #ts_created='',
    #ts_updated='',
    #addl={},
    )
# Instrument sites to load on top of preload for the RSN scenario.
AUGMENT_INSTRUMENT_SITES = [
    dict(
        org='rsn',
        parent_site = 'RS01SLBS-MJ01A',
        instrument_models = ['PRESTA'],
        name='Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base',
        description='Instrument: RS01SLBS-MJ01A-02-PRESTA999',
        alt_ids=["OOI:RS01SLBS-MJ01A-02-PRESTA999", "PRE:RS01SLBS-MJ01A-02-PRESTA999"],
        local_name='Tidal Seafloor Pressure (PRESTA)',
        reference_designator='RS01SLBS-MJ01A-02-PRESTA999',
        environment=SiteEnvironmentType.FIELD,
        constraint_list=[GeospatialBounds(), TemporalBounds()],
        coordinate_reference_system=GeospatialCoordinateReferenceSystem(),
        geospatial_point_center=GeospatialIndex(),
        planned_uplink_port=PlatformPort(),
        #lcstate='DEPLOYED',
        #availability='AVAILABLE',
        #ts_created='',
        #ts_updated='',
        #addl=None,
        #alt_resource_type=''
        )
    ]
# Platform devices to load (the medium-power JBox).
AUGMENT_PLATFORM_DEVICES =[
    dict(
        org='rsn',
        platform_model='MJ_PM',
        parent_device='RS01SLOP-PN01A_PD',
        network_parent='RS01SLOP-PN01A_PD',
        name='Medium Power JBox 01A - Regional Continental Margin Base device #01',
        description='Platform RS01SLBS-MJ01A device #01',
        alt_ids=["PRE:RS01SLBS-MJ01A_PD"],
        serial_number='464646',
        monitorable=True,
        controllable=True,
        message_controllable=True,
        platform_monitor_attributes=[],
        custom_attributes={},
        ports=[],
        contacts=[ContactInformation()],
        index_location=GeospatialIndex(),
        reference_urls=[],
        commissioned=CommissionedStatusType.COMMISSIONED,
        #lcstate='DEPLOYED',
        #availability='AVAILABLE',
        #ts_created='',
        #ts_updated='',
        #addl={},
        )
    ]
# Platform agent definitions for the JBox platform model.
AUGMENT_PLATFORM_AGENTS =[
    dict(
        org='rsn',
        models = ['MJ_PM'],
        name='MJ_PM Agent 0.1',
        description='Instrument Agent for Medium Power JBox Device',
        alt_ids=['PRE:Agent-0.1-MJ_PM'],
        agent_module='path.to.agent.mod',
        agent_class='agent_class',
        agent_uri='agent_uri',
        agent_version='0.1',
        agent_default_config={},
        stream_configurations=[],
        driver_module='path.to.driver.mod',
        driver_class='driver_class',
        driver_uri='driver_uri',
        driver_version='0.1',
        driver_type=DriverTypeEnum.CLASS,
        commissioned=CommissionedStatusType.COMMISSIONED,
        #lcstate='DEPLOYED',
        #availability='AVAILABLE',
        #type_=InstrumentAgent,
        #ts_created='',
        #ts_updated='',
        #addl={},
        )
    ]
# Platform agent instances bound to the JBox device.
AUGMENT_PLATFORM_AGENT_INSTANCES =[
    dict(
        org='rsn',
        agent='Agent-0.1-MJ_PM',
        device = 'RS01SLBS-MJ01A_PD',
        name='MJ_PM Agent Instance',
        description='Instrument Agent Instance for Medium Power JBox Device',
        alt_ids=['PRE:AgentInstance-MJ_PM'],
        agent_config={},
        agent_spawn_config={},
        saved_agent_state={},
        driver_config={},
        alerts=[],
        agent_process_id='',
        deployment_type=DeploymentTypeEnum.PROCESS,
        #lcstate='DEPLOYED',
        #availability='AVAILABLE',
        #type_='InstrumentAgentInstance',
        #ts_created='',
        #ts_updated='',
        #addl={},
        )
    ]
# Instrument devices to load (reuses the PRESTA device dict above).
AUGMENT_INSTRUMENT_DEVICES = [
    RSN_INSTRUMENT_01
    ]
# Instrument agent definitions for the PRESTA model.
AUGMENT_INSTRUMENT_AGENTS =[
    dict(
        org='rsn',
        models = ['PRESTA'],
        name='PRESTA Agent 0.1',
        description='Instrument Agent for PRESTA Device',
        alt_ids=['PRE:Agent-0.1-PRESTA'],
        agent_module='path.to.agent.mod',
        agent_class='agent_class',
        agent_uri='agent_uri',
        agent_version='0.1',
        agent_default_config={},
        stream_configurations=[],
        driver_module='path.to.driver.mod',
        driver_class='driver_class',
        driver_uri='driver_uri',
        driver_version='0.1',
        driver_type=DriverTypeEnum.EXT_PROCESS,
        commissioned=CommissionedStatusType.COMMISSIONED,
        lcstate='DEPLOYED',
        availability='AVAILABLE',
        #type_=InstrumentAgent,
        #ts_created='',
        #ts_updated='',
        #addl={},
        )
    ]
# Instrument agent instances (reuses RSN_AGENT_01 above).
AUGMENT_INSTRUMENT_AGENT_INSTANCES =[
    RSN_AGENT_01
    ]
# No dataset agents / instances are loaded for these tests.
AUGMENT_DATASET_AGENTS =[
    ]
AUGMENT_DATASET_AGENT_INSTANCES =[
    ]
# Deployment of the JBox platform at its site (cabled-node context).
AUGMENT_PLATFORM_DEPLOYMENTS = [
    dict(
        org='rsn',
        platform_site='RS01SLBS-MJ01A',
        platform_device='RS01SLBS-MJ01A_PD',
        name='Deployment of platform RS01SLBS-MJ01A',
        description='Deployment: RS01SLBS-MJ01A_DEP',
        alt_ids=["PRE:RS01SLBS-MJ01A_DEP"],
        constraint_list=[TemporalBounds()],
        coordinate_reference_system=GeospatialCoordinateReferenceSystem(),
        geospatial_point_center=GeospatialIndex(),
        port_assignments={},
        context=CabledNodeDeploymentContext(),
        auxiliary_name=None,
        auxiliary_identifier=None,
        #lcstate='DEPLOYED',
        #availability='AVAILABLE',
        #ts_created='',
        #ts_updated='',
        #addl=None,
        )
    ]
# Deployment of the first PRESTA instrument, with full site/device wiring.
RSN_INST_DEPLOYMENT_1 = dict(
    org='rsn',
    instrument_site='RS01SLBS-MJ01A-02-PRESTA999',
    instrument_device='RS01SLBS-MJ01A-02-PRESTA999_ID',
    name='Deployment of instrument RS01SLBS-MJ01A-02-PRESTA999_ID',
    description='Deployment: RS01SLBS-MJ01A-02-PRESTA999_DEP',
    alt_ids=['PRE:RS01SLBS-MJ01A-02-PRESTA999_DEP'],
    constraint_list=[TemporalBounds()],
    coordinate_reference_system=GeospatialCoordinateReferenceSystem(),
    geospatial_point_center=GeospatialIndex(),
    port_assignments={},
    context=CabledNodeDeploymentContext(),
    auxiliary_name=None,
    auxiliary_identifier=None,
    #lcstate='DEPLOYED',
    #availability='AVAILABLE',
    #ts_created='',
    #ts_updated='',
    #addl=None,
    )
# Same deployment without the wiring (commented out); the test code
# supplies the associations itself.
RSN_INST_DEPLOYMENT_2 = dict(
    #org='rsn',
    #instrument_site='RS01SLBS-MJ01A-02-PRESTA999',
    #instrument_device='RS01SLBS-MJ01A-02-PRESTA999_ID',
    name='Deployment of instrument RS01SLBS-MJ01A-02-PRESTA999_ID',
    description='Deployment: RS01SLBS-MJ01A-02-PRESTA999_DEP',
    alt_ids=['PRE:RS01SLBS-MJ01A-02-PRESTA999_DEP'],
    constraint_list=[TemporalBounds()],
    coordinate_reference_system=GeospatialCoordinateReferenceSystem(),
    geospatial_point_center=GeospatialIndex(),
    port_assignments={},
    context=CabledNodeDeploymentContext(),
    auxiliary_name=None,
    auxiliary_identifier=None,
    #lcstate='DEPLOYED',
    #availability='AVAILABLE',
    #ts_created='',
    #ts_updated='',
    #addl=None,
    )
AUGMENT_INSTRUMENT_DEPLOYMENTS = [
    RSN_INST_DEPLOYMENT_1
    ]
# CGSN mooring platform deployment (remote-platform context).
CGSN_DEPLOYMENT_2 = dict(
    name='Deployment of platform GP03FLMA-FM001',
    description='Deployment: GP03FLMA-FM001_DEP',
    coordinate_reference_system=GeospatialCoordinateReferenceSystem(),
    constraint_list=[TemporalBounds()],
    context=RemotePlatformDeploymentContext({'device_mounting_positions': []}),
    geospatial_point_center=GeospatialIndex({'lat': 0.0, 'lon': 0.0}),
    addl={},
    port_assignments={},
    )
# Second device instance for the CGSN mooring platform.
CGSN_MOORING_PLATFORM_2 = dict(
    name='Low Power Sub-surface Mooring A - Global Station Papa Mesoscale Flanking A',
    description='Platform GP03FLMA-FM001 device #02',
    serial_number='111333',
    monitorable=True,
    controllable=True,
    message_controllable=True,
    platform_monitor_attributes=[],
    custom_attributes={},
    ports=[],
    contacts=[ContactInformation()],
    index_location=GeospatialIndex(),
    reference_urls=[],
    commissioned=CommissionedStatusType.COMMISSIONED,
    )
# Second device instance for the CGSN mooring riser platform.
CGSN_RISER_PLATFORM_2 = dict(
    name='Mooring Riser 001 - Global Station Papa Mesoscale Flanking A',
    description='Platform GP03FLMA-RI001 device #02',
    serial_number='222333',
    monitorable=True,
    controllable=True,
    message_controllable=True,
    platform_monitor_attributes=[],
    custom_attributes={},
    ports=[],
    contacts=[ContactInformation()],
    index_location=GeospatialIndex(),
    reference_urls=[],
    commissioned=CommissionedStatusType.COMMISSIONED,
    )
# Three CTDMO instrument devices on the riser (distinct serial numbers
# and reference designators).
CGSN_INSTRUMENTS_2 = [
    dict(
        name='CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A',
        description='Instrument GP03FLMA-RI001-09-CTDMOG999 device #02',
        alt_ids=[],
        serial_number='2223',
        monitorable=True,
        controllable=True,
        message_controllable=True,
        custom_attributes={},
        contacts=[ContactInformation()],
        reference_urls=[],
        commissioned=CommissionedStatusType.COMMISSIONED,
        last_calibration_datetime='',
        hardware_version='',
        firmware_version='',
        ),
    dict(
        name='CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A',
        description='Instrument GP03FLMA-RI001-17-CTDMOG999 device #02',
        alt_ids=[],
        serial_number='2224',
        monitorable=True,
        controllable=True,
        message_controllable=True,
        custom_attributes={},
        contacts=[ContactInformation()],
        reference_urls=[],
        commissioned=CommissionedStatusType.COMMISSIONED,
        last_calibration_datetime='',
        hardware_version='',
        firmware_version='',
        ),
    dict(
        name='CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A',
        description='Instrument GP03FLMA-RI001-13-CTDMOG999 device #02',
        alt_ids=[],
        serial_number='2225',
        monitorable=True,
        controllable=True,
        message_controllable=True,
        custom_attributes={},
        contacts=[ContactInformation()],
        reference_urls=[],
        commissioned=CommissionedStatusType.COMMISSIONED,
        last_calibration_datetime='',
        hardware_version='',
        firmware_version='',
        )
    ]
# Agent definition for the CGSN mooring platform.
CGSN_MOORING_AGENT = dict(
    name='Low Power Sub-surface Mooring Agent 0.1',
    description='Instrument Agent for GP03FLMA-FM001 Device',
    alt_ids=[],
    agent_module='path.to.agent.mod',
    agent_class='agent_class',
    agent_uri='agent_uri',
    agent_version='0.1',
    agent_default_config={},
    stream_configurations=[],
    driver_module='path.to.driver.mod',
    driver_class='driver_class',
    driver_uri='driver_uri',
    driver_version='0.1',
    driver_type=DriverTypeEnum.EXT_PROCESS,
    commissioned=CommissionedStatusType.COMMISSIONED,
    lcstate='DEPLOYED',
    availability='AVAILABLE',
    #type_=InstrumentAgent,
    #ts_created='',
    #ts_updated='',
    #addl={},
    )
# Agent definition for the CGSN mooring riser.
CGSN_RISER_AGENT = dict(
    name='Mooring Riser Agent 0.1',
    description='Instrument Agent for Mooring Riser Device',
    alt_ids=[],
    agent_module='path.to.agent.mod',
    agent_class='agent_class',
    agent_uri='agent_uri',
    agent_version='0.1',
    agent_default_config={},
    stream_configurations=[],
    driver_module='path.to.driver.mod',
    driver_class='driver_class',
    driver_uri='driver_uri',
    driver_version='0.1',
    driver_type=DriverTypeEnum.EXT_PROCESS,
    commissioned=CommissionedStatusType.COMMISSIONED,
    lcstate='DEPLOYED',
    availability='AVAILABLE',
    #type_=InstrumentAgent,
    #ts_created='',
    #ts_updated='',
    #addl={},
    )
# Agent instance for the mooring platform.
CGSN_MOORING_AGENT_INSTANCE = dict(
    name='Low Power Sub-surface Mooring Agent Instance',
    description='Instrument Agent Instance for Low Power Sub-surface Mooring Device',
    alt_ids=[],
    agent_config={},
    agent_spawn_config={},
    saved_agent_state={},
    driver_config={},
    alerts=[],
    agent_process_id='',
    deployment_type=DeploymentTypeEnum.PROCESS,
    #lcstate='DEPLOYED',
    #availability='AVAILABLE',
    #type_='InstrumentAgentInstance',
    #ts_created='',
    #ts_updated='',
    #addl={},
    )
# Agent instance for the mooring riser.
CGSN_RISER_AGENT_INSTANCE = dict(
    name='Mooring Riser Agent Instance',
    description='Instrument Agent Instance for Mooring Riser Device',
    alt_ids=[],
    agent_config={},
    agent_spawn_config={},
    saved_agent_state={},
    driver_config={},
    alerts=[],
    agent_process_id='',
    deployment_type=DeploymentTypeEnum.PROCESS,
    #lcstate='DEPLOYED',
    #availability='AVAILABLE',
    #type_='InstrumentAgentInstance',
    #ts_created='',
    #ts_updated='',
    #addl={},
    )
# Agent definition shared by all CTDMO instruments.
CGSN_INSTRUMENT_AGENT = dict(
    name='CTDMO Agent 0.1',
    description='Instrument Agent for CTDMO Device',
    alt_ids=[],
    agent_module='path.to.agent.mod',
    agent_class='agent_class',
    agent_uri='agent_uri',
    agent_version='0.1',
    agent_default_config={},
    stream_configurations=[],
    driver_module='path.to.driver.mod',
    driver_class='driver_class',
    driver_uri='driver_uri',
    driver_version='0.1',
    driver_type=DriverTypeEnum.EXT_PROCESS,
    commissioned=CommissionedStatusType.COMMISSIONED,
    lcstate='DEPLOYED',
    availability='AVAILABLE',
    #type_=InstrumentAgent,
    #ts_created='',
    #ts_updated='',
    #addl={},
    )
# One agent instance per CTDMO instrument in CGSN_INSTRUMENTS_2.
# NOTE(review): "INSTURMENT" is a misspelling of "INSTRUMENT"; the name is
# kept as-is because callers reference it — confirm before renaming.
CGSN_INSTURMENT_AGENT_INSTANCES = [
    dict(
        name='CTDMO Agent Instance',
        description='Instrument Agent Instance for CTDMO Device',
        alt_ids=[],
        agent_config={},
        startup_config={},
        agent_spawn_config={},
        saved_agent_state={},
        driver_config={},
        port_agent_config={},
        alerts=[],
        agent_process_id='',
        deployment_type=DeploymentTypeEnum.PROCESS,
        #lcstate='DEPLOYED',
        #availability='AVAILABLE',
        #type_='InstrumentAgentInstance',
        #ts_created='',
        #ts_updated='',
        #addl={},
        ),
    dict(
        name='CTDMO Agent Instance',
        description='Instrument Agent Instance for CTDMO Device',
        alt_ids=[],
        agent_config={},
        startup_config={},
        agent_spawn_config={},
        saved_agent_state={},
        driver_config={},
        port_agent_config={},
        alerts=[],
        agent_process_id='',
        deployment_type=DeploymentTypeEnum.PROCESS,
        #lcstate='DEPLOYED',
        #availability='AVAILABLE',
        #type_='InstrumentAgentInstance',
        #ts_created='',
        #ts_updated='',
        #addl={},
        ),
    dict(
        name='CTDMO Agent Instance',
        description='Instrument Agent Instance for CTDMO Device',
        alt_ids=[],
        agent_config={},
        startup_config={},
        agent_spawn_config={},
        saved_agent_state={},
        driver_config={},
        port_agent_config={},
        alerts=[],
        agent_process_id='',
        deployment_type=DeploymentTypeEnum.PROCESS,
        #lcstate='DEPLOYED',
        #availability='AVAILABLE',
        #type_='InstrumentAgentInstance',
        #ts_created='',
        #ts_updated='',
        #addl={},
        ),
    ]
"""
The following comments show the object and association configurations as
elements are cycled through the activation cycle and verified.
RSN case: follows a single instrument and instrument deployment objects
(only instrument shown below).
CGSN case: follows the mooring platform and deployment objects.
(shows both platform and deployment objects).
In both cases both objects are verified in tests.
"""
"""
**RSN CASE
INITIAL DEPLOYED:
========================================================================================================================================================================================================
lcstate DEPLOYED
_rev 3
firmware_version
availability AVAILABLE
controllable True
uuid
contacts [ContactInformation({'individual_names_given': '', 'city': '', 'roles': [], 'administrative_area': '', 'url': '', 'country': '', 'variables': [{'name': '', 'value': ''}], 'organization_name': '', 'postal_code': '', 'individual_name_family': '', 'phones': [], 'position_name': '', 'email': '', 'street_address': ''})]
custom_attributes {}
monitorable True
serial_number
addl {}
message_controllable True
description Instrument RS01SLBS-MJ01A-02-PRESTA999 device #01
reference_urls []
ts_updated 1393345380674
commissioned 2
ts_created 1393345380539
last_calibration_datetime
name Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base
alt_ids ['PRE:RS01SLBS-MJ01A-02-PRESTA999_ID']
hardware_version
type_ InstrumentDevice
_id 62d3fbcbeb4f4e09886e36de0608abe0
========================================================================================================================================================================================================
InstrumentSite Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base hasDevice this InstrumentDevice
PlatformDevice Medium Power JBox 01A - Regional Continental Margin Base device #01 hasDevice this InstrumentDevice
Org RSN Facility hasResource this InstrumentDevice
========================================================================================================================================================================================================
this InstrumentDevice hasAgentInstance InstrumentAgentInstance PRESTA Agent Instance
this InstrumentDevice hasDataProducer DataProducer Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base
this InstrumentDevice hasDeployment Deployment Deployment of instrument RS01SLBS-MJ01A-02-PRESTA999_ID
this InstrumentDevice hasModel InstrumentModel Tidal Seafloor Pressure (PREST-A)
========================================================================================================================================================================================================
INITIAL DEACTIVATED:
========================================================================================================================================================================================================
lcstate INTEGRATED
_rev 4
firmware_version
availability AVAILABLE
controllable True
uuid
contacts [ContactInformation({'individual_names_given': '', 'city': '', 'roles': [], 'administrative_area': '', 'url': '', 'country': '', 'variables': [{'name': '', 'value': ''}], 'organization_name': '', 'postal_code': '', 'individual_name_family': '', 'phones': [], 'position_name': '', 'email': '', 'street_address': ''})]
custom_attributes {}
monitorable True
serial_number
addl {}
message_controllable True
description Instrument RS01SLBS-MJ01A-02-PRESTA999 device #01
reference_urls []
ts_updated 1393345384693
commissioned 2
ts_created 1393345380539
last_calibration_datetime
name Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base
alt_ids ['PRE:RS01SLBS-MJ01A-02-PRESTA999_ID']
hardware_version
type_ InstrumentDevice
_id 62d3fbcbeb4f4e09886e36de0608abe0
========================================================================================================================================================================================================
PlatformDevice Medium Power JBox 01A - Regional Continental Margin Base device #01 hasDevice this InstrumentDevice
Org RSN Facility hasResource this InstrumentDevice
========================================================================================================================================================================================================
this InstrumentDevice hasAgentInstance InstrumentAgentInstance PRESTA Agent Instance
this InstrumentDevice hasDataProducer DataProducer Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base
this InstrumentDevice hasDeployment Deployment Deployment of instrument RS01SLBS-MJ01A-02-PRESTA999_ID
this InstrumentDevice hasModel InstrumentModel Tidal Seafloor Pressure (PREST-A)
========================================================================================================================================================================================================
FINAL DEACTIVATED:
========================================================================================================================================================================================================
lcstate DEVELOPED
_rev 6
firmware_version
availability PRIVATE
controllable True
uuid
contacts [ContactInformation({'individual_names_given': '', 'city': '', 'roles': [], 'administrative_area': '', 'url': '', 'country': '', 'variables': [{'name': '', 'value': ''}], 'organization_name': '', 'postal_code': '', 'individual_name_family': '', 'phones': [], 'position_name': '', 'email': '', 'street_address': ''})]
custom_attributes {}
monitorable True
serial_number
addl {}
message_controllable True
description Instrument RS01SLBS-MJ01A-02-PRESTA999 device #01
reference_urls []
ts_updated 1393345917044
commissioned 2
ts_created 1393345912849
last_calibration_datetime
name Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base
alt_ids ['PRE:RS01SLBS-MJ01A-02-PRESTA999_ID']
hardware_version
type_ InstrumentDevice
_id ec36a44227a04668bef3a509f276f22e
========================================================================================================================================================================================================
Org RSN Facility hasResource this InstrumentDevice
========================================================================================================================================================================================================
this InstrumentDevice hasDataProducer DataProducer Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base
this InstrumentDevice hasDeployment Deployment Deployment of instrument RS01SLBS-MJ01A-02-PRESTA999_ID
this InstrumentDevice hasModel InstrumentModel Tidal Seafloor Pressure (PREST-A)
========================================================================================================================================================================================================
DEVELOPED:
========================================================================================================================================================================================================
lcstate DEVELOPED
_rev 2
firmware_version
availability PRIVATE
controllable True
uuid
contacts [ContactInformation({'individual_names_given': '', 'city': '', 'roles': [], 'administrative_area': '', 'url': '', 'country': '', 'variables': [{'name': '', 'value': ''}], 'organization_name': '', 'postal_code': '', 'individual_name_family': '', 'phones': [], 'position_name': '', 'email': '', 'street_address': ''})]
custom_attributes {}
monitorable True
serial_number
addl {}
message_controllable True
description Instrument RS01SLBS-MJ01A-02-PRESTA999 device #02
reference_urls []
ts_updated 1393346414677
commissioned 2
ts_created 1393346414367
last_calibration_datetime
name Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base
alt_ids []
hardware_version
type_ InstrumentDevice
_id 9fdcaa1b73054bf795b113988f5d73d8
========================================================================================================================================================================================================
Org RSN Facility hasResource this InstrumentDevice
========================================================================================================================================================================================================
this InstrumentDevice hasAgentInstance InstrumentAgentInstance PRESTA Agent Instance
this InstrumentDevice hasDataProducer DataProducer Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base
this InstrumentDevice hasModel InstrumentModel Tidal Seafloor Pressure (PREST-A)
========================================================================================================================================================================================================
INTEGRATED:
========================================================================================================================================================================================================
lcstate INTEGRATED
_rev 3
firmware_version
availability PRIVATE
controllable True
uuid
contacts [ContactInformation({'individual_names_given': '', 'city': '', 'roles': [], 'administrative_area': '', 'url': '', 'country': '', 'variables': [{'name': '', 'value': ''}], 'organization_name': '', 'postal_code': '', 'individual_name_family': '', 'phones': [], 'position_name': '', 'email': '', 'street_address': ''})]
custom_attributes {}
monitorable True
serial_number
addl {}
message_controllable True
description Instrument RS01SLBS-MJ01A-02-PRESTA999 device #02
reference_urls []
ts_updated 1393346415054
commissioned 2
ts_created 1393346414367
last_calibration_datetime
name Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base
alt_ids []
hardware_version
type_ InstrumentDevice
_id 9fdcaa1b73054bf795b113988f5d73d8
========================================================================================================================================================================================================
PlatformDevice Medium Power JBox 01A - Regional Continental Margin Base device #01 hasDevice this InstrumentDevice
Org RSN Facility hasResource this InstrumentDevice
========================================================================================================================================================================================================
this InstrumentDevice hasAgentInstance InstrumentAgentInstance PRESTA Agent Instance
this InstrumentDevice hasDataProducer DataProducer Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base
this InstrumentDevice hasModel InstrumentModel Tidal Seafloor Pressure (PREST-A)
========================================================================================================================================================================================================
DEPLOYED:
========================================================================================================================================================================================================
lcstate DEPLOYED
_rev 5
firmware_version
availability AVAILABLE
controllable True
uuid
contacts [ContactInformation({'individual_names_given': '', 'city': '', 'roles': [], 'administrative_area': '', 'url': '', 'country': '', 'variables': [{'name': '', 'value': ''}], 'organization_name': '', 'postal_code': '', 'individual_name_family': '', 'phones': [], 'position_name': '', 'email': '', 'street_address': ''})]
custom_attributes {}
monitorable True
serial_number
addl {}
message_controllable True
description Instrument RS01SLBS-MJ01A-02-PRESTA999 device #02
reference_urls []
ts_updated 1393346415223
commissioned 2
ts_created 1393346414367
last_calibration_datetime
name Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base
alt_ids []
hardware_version
type_ InstrumentDevice
_id 9fdcaa1b73054bf795b113988f5d73d8
========================================================================================================================================================================================================
InstrumentSite Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base hasDevice this InstrumentDevice
PlatformDevice Medium Power JBox 01A - Regional Continental Margin Base device #01 hasDevice this InstrumentDevice
Org RSN Facility hasResource this InstrumentDevice
========================================================================================================================================================================================================
this InstrumentDevice hasAgentInstance InstrumentAgentInstance PRESTA Agent Instance
this InstrumentDevice hasDataProducer DataProducer Tidal Seafloor Pressure on Medium Power JBox 01A - Regional Continental Margin Base
this InstrumentDevice hasDeployment Deployment Deployment of instrument RS01SLBS-MJ01A-02-PRESTA999_ID
this InstrumentDevice hasModel InstrumentModel Tidal Seafloor Pressure (PREST-A)
========================================================================================================================================================================================================
"""
"""
** CGSN CASE
INITIAL DEPLOYED
========================================================================================================================================================================================================
lcstate DEPLOYED
_rev 3
availability AVAILABLE
controllable True
uuid
contacts [ContactInformation({'individual_names_given': 'John', 'city': 'Falmouth', 'roles': ['primary'], 'administrative_area': '', 'url': '', 'country': 'USA', 'variables': [{'name': '', 'value': ''}], 'organization_name': 'Woods Hole Oceanographic (Raytheon)', 'postal_code': '', 'individual_name_family': 'Cosgrove', 'phones': [Phone({'phone_number': '508-289-4904', 'phone_type': 'office'})], 'position_name': 'OOI OMC admin', 'email': 'jcosgrove@whoi.edu', 'street_address': ''}), ContactInformation({'individual_names_given': 'Kurt', 'city': '', 'roles': ['alternate'], 'administrative_area': 'MA', 'url': '', 'country': 'USA', 'variables': [{'name': '', 'value': ''}], 'organization_name': 'WHOI-OOI', 'postal_code': '', 'individual_name_family': 'Stiffel', 'phones': [Phone({'phone_number': '508-289-3920', 'phone_type': 'office'})], 'position_name': 'Instrument Lead', 'email': 'Kurt_Stiffel@raytheon.com', 'street_address': ''})]
index_location GeospatialIndex({'lat': 0.0, 'lon': 0.0})
custom_attributes {}
platform_monitor_attributes []
serial_number
addl {}
monitorable True
message_controllable True
description Platform GP03FLMA-FM001 device #01
reference_urls []
ts_updated 1393369548413
commissioned 1
ts_created 1393369548265
name Low Power Sub-surface Mooring A - Global Station Papa Mesoscale Flanking A
alt_ids ['PRE:GP03FLMA-FM001_PD']
type_ PlatformDevice
_id f89ebf56a36c4bc9bb62f564ebc79dee
ports []
========================================================================================================================================================================================================
PlatformSite Low Power Sub-surface Mooring A - Global Station Papa Mesoscale Flanking A hasDevice this PlatformDevice
Org CGSN Facility hasResource this PlatformDevice
========================================================================================================================================================================================================
this PlatformDevice hasDataProducer DataProducer Low Power Sub-surface Mooring A - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDeployment Deployment Deployment of platform GP03FLMA-FM001
this PlatformDevice hasDevice PlatformDevice Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasModel PlatformModel Low Power Sub-surface Mooring
this PlatformDevice hasOwner ActorIdentity ionsystem
========================================================================================================================================================================================================
========================================================================================================================================================================================================
lcstate DEPLOYED
_rev 3
availability AVAILABLE
controllable True
uuid
contacts [ContactInformation({'individual_names_given': 'John', 'city': 'Falmouth', 'roles': ['primary'], 'administrative_area': '', 'url': '', 'country': 'USA', 'variables': [{'name': '', 'value': ''}], 'organization_name': 'Woods Hole Oceanographic (Raytheon)', 'postal_code': '', 'individual_name_family': 'Cosgrove', 'phones': [Phone({'phone_number': '508-289-4904', 'phone_type': 'office'})], 'position_name': 'OOI OMC admin', 'email': 'jcosgrove@whoi.edu', 'street_address': ''}), ContactInformation({'individual_names_given': 'Kurt', 'city': '', 'roles': ['alternate'], 'administrative_area': 'MA', 'url': '', 'country': 'USA', 'variables': [{'name': '', 'value': ''}], 'organization_name': 'WHOI-OOI', 'postal_code': '', 'individual_name_family': 'Stiffel', 'phones': [Phone({'phone_number': '508-289-3920', 'phone_type': 'office'})], 'position_name': 'Instrument Lead', 'email': 'Kurt_Stiffel@raytheon.com', 'street_address': ''})]
index_location GeospatialIndex({'lat': 0.0, 'lon': 0.0})
custom_attributes {}
platform_monitor_attributes []
serial_number
addl {}
monitorable True
message_controllable True
description Platform GP03FLMA-RI001 device #01
reference_urls []
ts_updated 1393369542827
commissioned 1
ts_created 1393369542677
name Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
alt_ids ['PRE:GP03FLMA-RI001_PD']
type_ PlatformDevice
_id 6af9f084229d4cca9ecf90551bd7bf3f
ports []
========================================================================================================================================================================================================
PlatformSite Mooring Riser 001 - Global Station Papa Mesoscale Flanking A hasDevice this PlatformDevice
PlatformDevice Low Power Sub-surface Mooring A - Global Station Papa Mesoscale Flanking A hasDevice this PlatformDevice
Org CGSN Facility hasResource this PlatformDevice
========================================================================================================================================================================================================
this PlatformDevice hasDataProducer DataProducer Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasOwner ActorIdentity ionsystem
this PlatformDevice hasModel PlatformModel Mooring Riser
this PlatformDevice hasDevice InstrumentDevice Velocity Profiler (long range) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice Dissolved Oxygen Stable Response on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice Seawater pH on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice 3-Wavelength Fluorometer on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
========================================================================================================================================================================================================
DEACTIVATED
========================================================================================================================================================================================================
lcstate DEVELOPED
_rev 6
availability PRIVATE
controllable True
uuid
contacts [ContactInformation({'individual_names_given': 'John', 'city': 'Falmouth', 'roles': ['primary'], 'administrative_area': '', 'url': '', 'country': 'USA', 'variables': [{'name': '', 'value': ''}], 'organization_name': 'Woods Hole Oceanographic (Raytheon)', 'postal_code': '', 'individual_name_family': 'Cosgrove', 'phones': [Phone({'phone_number': '508-289-4904', 'phone_type': 'office'})], 'position_name': 'OOI OMC admin', 'email': 'jcosgrove@whoi.edu', 'street_address': ''}), ContactInformation({'individual_names_given': 'Kurt', 'city': '', 'roles': ['alternate'], 'administrative_area': 'MA', 'url': '', 'country': 'USA', 'variables': [{'name': '', 'value': ''}], 'organization_name': 'WHOI-OOI', 'postal_code': '', 'individual_name_family': 'Stiffel', 'phones': [Phone({'phone_number': '508-289-3920', 'phone_type': 'office'})], 'position_name': 'Instrument Lead', 'email': 'Kurt_Stiffel@raytheon.com', 'street_address': ''})]
index_location GeospatialIndex({'lat': 0.0, 'lon': 0.0})
custom_attributes {}
platform_monitor_attributes []
serial_number
addl {}
monitorable True
message_controllable True
description Platform GP03FLMA-FM001 device #01
reference_urls []
ts_updated 1393374834914
commissioned 1
ts_created 1393374783492
name Low Power Sub-surface Mooring A - Global Station Papa Mesoscale Flanking A
alt_ids ['PRE:GP03FLMA-FM001_PD']
type_ PlatformDevice
_id cdf3238286224bc88e73e8158044ddcc
ports []
========================================================================================================================================================================================================
Org CGSN Facility hasResource this PlatformDevice
========================================================================================================================================================================================================
this PlatformDevice hasDataProducer DataProducer Low Power Sub-surface Mooring A - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDeployment Deployment Deployment of platform GP03FLMA-FM001
this PlatformDevice hasDevice PlatformDevice Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasModel PlatformModel Low Power Sub-surface Mooring
this PlatformDevice hasOwner ActorIdentity ionsystem
========================================================================================================================================================================================================
========================================================================================================================================================================================================
lcstate DEVELOPED
_rev 6
availability PRIVATE
controllable True
uuid
contacts [ContactInformation({'individual_names_given': 'John', 'city': 'Falmouth', 'roles': ['primary'], 'administrative_area': '', 'url': '', 'country': 'USA', 'variables': [{'name': '', 'value': ''}], 'organization_name': 'Woods Hole Oceanographic (Raytheon)', 'postal_code': '', 'individual_name_family': 'Cosgrove', 'phones': [Phone({'phone_number': '508-289-4904', 'phone_type': 'office'})], 'position_name': 'OOI OMC admin', 'email': 'jcosgrove@whoi.edu', 'street_address': ''}), ContactInformation({'individual_names_given': 'Kurt', 'city': '', 'roles': ['alternate'], 'administrative_area': 'MA', 'url': '', 'country': 'USA', 'variables': [{'name': '', 'value': ''}], 'organization_name': 'WHOI-OOI', 'postal_code': '', 'individual_name_family': 'Stiffel', 'phones': [Phone({'phone_number': '508-289-3920', 'phone_type': 'office'})], 'position_name': 'Instrument Lead', 'email': 'Kurt_Stiffel@raytheon.com', 'street_address': ''})]
index_location GeospatialIndex({'lat': 0.0, 'lon': 0.0})
custom_attributes {}
platform_monitor_attributes []
serial_number
addl {}
monitorable True
message_controllable True
description Platform GP03FLMA-RI001 device #01
reference_urls []
ts_updated 1393374834939
commissioned 1
ts_created 1393374777646
name Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
alt_ids ['PRE:GP03FLMA-RI001_PD']
type_ PlatformDevice
_id 25259df974bb4807bb97899bb8edafce
ports []
========================================================================================================================================================================================================
PlatformDevice Low Power Sub-surface Mooring A - Global Station Papa Mesoscale Flanking A hasDevice this PlatformDevice
Org CGSN Facility hasResource this PlatformDevice
========================================================================================================================================================================================================
this PlatformDevice hasOwner ActorIdentity ionsystem
this PlatformDevice hasDataProducer DataProducer Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasModel PlatformModel Mooring Riser
========================================================================================================================================================================================================
DEVELOPED
========================================================================================================================================================================================================
lcstate DEVELOPED
_rev 2
availability PRIVATE
controllable True
uuid
contacts [ContactInformation({'individual_names_given': '', 'city': '', 'roles': [], 'administrative_area': '', 'url': '', 'country': '', 'variables': [{'name': '', 'value': ''}], 'organization_name': '', 'postal_code': '', 'individual_name_family': '', 'phones': [], 'position_name': '', 'email': '', 'street_address': ''})]
index_location GeospatialIndex({'lat': 0.0, 'lon': 0.0})
custom_attributes {}
platform_monitor_attributes []
serial_number
addl {}
monitorable True
message_controllable True
description Platform GP03FLMA-FM001 device #02
reference_urls []
ts_updated 1393463134032
commissioned 2
ts_created 1393463133959
name Low Power Sub-surface Mooring A - Global Station Papa Mesoscale Flanking A
alt_ids []
type_ PlatformDevice
_id 0209958f033b4cecada3184243a5f289
ports []
========================================================================================================================================================================================================
Org CGSN Facility hasResource this PlatformDevice
========================================================================================================================================================================================================
this PlatformDevice hasDataProducer DataProducer Low Power Sub-surface Mooring A - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDeployment Deployment Deployment of platform GP03FLMA-FM001
this PlatformDevice hasDevice PlatformDevice Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasModel PlatformModel Low Power Surface Mooring
========================================================================================================================================================================================================
========================================================================================================================================================================================================
lcstate DEVELOPED
_rev 2
availability PRIVATE
controllable True
uuid
contacts [ContactInformation({'individual_names_given': '', 'city': '', 'roles': [], 'administrative_area': '', 'url': '', 'country': '', 'variables': [{'name': '', 'value': ''}], 'organization_name': '', 'postal_code': '', 'individual_name_family': '', 'phones': [], 'position_name': '', 'email': '', 'street_address': ''})]
index_location GeospatialIndex({'lat': 0.0, 'lon': 0.0})
custom_attributes {}
platform_monitor_attributes []
serial_number
addl {}
monitorable True
message_controllable True
description Platform GP03FLMA-RI001 device #02
reference_urls []
ts_updated 1393463134256
commissioned 2
ts_created 1393463134160
name Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
alt_ids []
type_ PlatformDevice
_id 090ca97279d542ba8f0db28e9902f282
ports []
========================================================================================================================================================================================================
PlatformDevice Low Power Sub-surface Mooring A - Global Station Papa Mesoscale Flanking A hasDevice this PlatformDevice
Org CGSN Facility hasResource this PlatformDevice
========================================================================================================================================================================================================
this PlatformDevice hasDataProducer DataProducer Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasModel PlatformModel Mooring Riser
========================================================================================================================================================================================================
DEPLOYED NEW:
========================================================================================================================================================================================================
lcstate INTEGRATED
_rev 3
availability PRIVATE
controllable True
uuid
contacts [ContactInformation({'individual_names_given': '', 'city': '', 'roles': [], 'administrative_area': '', 'url': '', 'country': '', 'variables': [{'name': '', 'value': ''}], 'organization_name': '', 'postal_code': '', 'individual_name_family': '', 'phones': [], 'position_name': '', 'email': '', 'street_address': ''})]
index_location GeospatialIndex({'lat': 0.0, 'lon': 0.0})
custom_attributes {}
platform_monitor_attributes []
serial_number
addl {}
monitorable True
message_controllable True
description Platform GP03FLMA-FM001 device #02
reference_urls []
ts_updated 1393475036647
commissioned 2
ts_created 1393475035645
name Low Power Sub-surface Mooring A - Global Station Papa Mesoscale Flanking A
alt_ids []
type_ PlatformDevice
_id 8e7f5104d6204186955e28afe0914a4b
ports []
========================================================================================================================================================================================================
PlatformSite Low Power Sub-surface Mooring A - Global Station Papa Mesoscale Flanking A hasDevice this PlatformDevice
Org CGSN Facility hasResource this PlatformDevice
========================================================================================================================================================================================================
this PlatformDevice hasDataProducer DataProducer Low Power Sub-surface Mooring A - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDeployment Deployment Deployment of platform GP03FLMA-FM001
this PlatformDevice hasDevice PlatformDevice Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasModel PlatformModel Low Power Sub-surface Mooring
========================================================================================================================================================================================================
========================================================================================================================================================================================================
lcstate INTEGRATED
_rev 3
availability PRIVATE
controllable True
uuid
contacts [ContactInformation({'individual_names_given': '', 'city': '', 'roles': [], 'administrative_area': '', 'url': '', 'country': '', 'variables': [{'name': '', 'value': ''}], 'organization_name': '', 'postal_code': '', 'individual_name_family': '', 'phones': [], 'position_name': '', 'email': '', 'street_address': ''})]
index_location GeospatialIndex({'lat': 0.0, 'lon': 0.0})
custom_attributes {}
platform_monitor_attributes []
serial_number
addl {}
monitorable True
message_controllable True
description Platform GP03FLMA-RI001 device #02
reference_urls []
ts_updated 1393475036659
commissioned 2
ts_created 1393475035785
name Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
alt_ids []
type_ PlatformDevice
_id 78f6769696c44e79863f491a0829a92a
ports []
========================================================================================================================================================================================================
PlatformDevice Low Power Sub-surface Mooring A - Global Station Papa Mesoscale Flanking A hasDevice this PlatformDevice
PlatformSite Mooring Riser 001 - Global Station Papa Mesoscale Flanking A hasDevice this PlatformDevice
Org CGSN Facility hasResource this PlatformDevice
========================================================================================================================================================================================================
this PlatformDevice hasDataProducer DataProducer Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasDevice InstrumentDevice CTD Mooring (Inductive) on Mooring Riser 001 - Global Station Papa Mesoscale Flanking A
this PlatformDevice hasModel PlatformModel Mooring Riser
========================================================================================================================================================================================================
"""
| {
"content_hash": "21f9f2bb17981fe671357d5e1c43b8ab",
"timestamp": "",
"source": "github",
"line_count": 1184,
"max_line_length": 982,
"avg_line_length": 63.10557432432432,
"alnum_prop": 0.4347069609325856,
"repo_name": "ooici/coi-services",
"id": "8e78b5c8f5adf07dbe421efd9f2f58d825fb0971",
"size": "74755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ion/services/sa/observatory/test/activation_test_resources.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "403012"
},
{
"name": "C++",
"bytes": "251803"
},
{
"name": "CSS",
"bytes": "689"
},
{
"name": "Erlang",
"bytes": "532"
},
{
"name": "JavaScript",
"bytes": "11627"
},
{
"name": "Objective-C",
"bytes": "8918"
},
{
"name": "Python",
"bytes": "7964384"
},
{
"name": "Shell",
"bytes": "9221"
},
{
"name": "nesC",
"bytes": "57712131"
}
],
"symlink_target": ""
} |
import logging
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.models.fields.related import ManyToManyField, RelatedField, ManyToManyRel, ManyToOneRel, ForeignKey
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import RequestError, NotFoundError
from search.exceptions import ValueNotFoundError
logging.basicConfig()
logger = logging.getLogger(__name__)
class ElasticsearchIndexer:
    """
    Adds an index process for the indicator, workflowlevel1, workflowlevel2
    and collecteddata models.

    To separate indices of different servers a prefix can be defined in
    settings (``ELASTICSEARCH_INDEX_PREFIX``). Every public method is a
    no-op when Elasticsearch is disabled or no URL is configured.
    """
    # Class-level default; replaced by a client instance in ``__init__``
    # when Elasticsearch is enabled and configured.
    es = None

    def __init__(self):
        if not settings.ELASTICSEARCH_ENABLED:
            # ``self.prefix`` is not set in this case, but it is only read
            # after an ``self.es is None`` guard, so that is safe.
            return
        if settings.ELASTICSEARCH_URL is not None:
            self.es = Elasticsearch([settings.ELASTICSEARCH_URL], timeout=30,
                                    max_retries=10, retry_on_timeout=True)
        if settings.ELASTICSEARCH_INDEX_PREFIX is not None:
            self.prefix = settings.ELASTICSEARCH_INDEX_PREFIX + '_'
        else:
            self.prefix = ''

    def index_indicator(self, indicator):
        """Index an indicator in the index of every owning organization."""
        if self.es is None:
            return
        data = self.get_field_data(indicator)
        for wf1 in indicator.workflowlevel1.all():
            # The original condition tested ``wf1.organization`` twice;
            # a single check is sufficient.
            if wf1.organization is None:
                continue
            org_uuid = str(wf1.organization.organization_uuid)
            # aggregate related models
            data['workflowlevel1'] = [wf1.name]
            # index data with elasticsearch
            try:
                self.es.index(index=self.prefix + org_uuid + "_indicators",
                              id=data['id'], doc_type='indicator',
                              body=data)
            except RequestError:
                logger.error('Error indexing indicator', exc_info=True)

    def delete_indicator(self, indicator):
        """Delete an indicator from the index of every owning organization.

        :raises ValueNotFoundError: if the document is missing in the index
        """
        if self.es is None:
            return
        try:
            for org_uuid in indicator.workflowlevel1.values_list(
                    'organization__organization_uuid'):
                org_uuid = str(org_uuid[0])
                self.es.delete(index=self.prefix + org_uuid + "_indicators",
                               id=indicator.id, doc_type='indicator')
        except NotFoundError:
            logger.warning('Indicator not found in Elasticsearch',
                           exc_info=True)
            raise ValueNotFoundError

    def index_workflowlevel1(self, wf):
        """Index a workflowlevel1 document under its organization."""
        if self.es is None:
            return
        if wf.organization is not None:
            org_uuid = str(wf.organization.organization_uuid)
            # get model field data
            data = self.get_field_data(wf)
            # aggregate related models
            data['sectors'] = [s.sector for s in wf.sector.all()]
            data['country'] = [self.get_field_data(c)
                               for c in wf.country.all()]
            # index data with elasticsearch
            try:
                self.es.index(index=self.prefix + org_uuid + "_workflow_level1",
                              id=data['level1_uuid'], doc_type='workflow',
                              body=data)
            except RequestError:
                logger.error('Error indexing workflowlevel1', exc_info=True)

    def index_workflowlevel2(self, wf):
        """Index a workflowlevel2 document under its wf1's organization."""
        if self.es is None:
            return
        # The original condition tested ``wf.workflowlevel1.organization``
        # twice; checking the parent first also avoids an AttributeError
        # when ``workflowlevel1`` is missing.
        if wf.workflowlevel1 is not None \
                and wf.workflowlevel1.organization is not None:
            org_uuid = str(wf.workflowlevel1.organization.organization_uuid)
            # get model field data
            data = self.get_field_data(wf)
            # aggregate related models
            data['sector'] = wf.sector.sector if wf.sector is not None else None
            data['workflowlevel1'] = self.get_field_data(wf.workflowlevel1)
            data['indicators'] = [self.get_field_data(i)
                                  for i in wf.indicators.all()]
            data['stakeholder'] = [self.get_field_data(s)
                                   for s in wf.stakeholder.all()]
            data['site'] = [self.get_field_data(s) for s in wf.site.all()]
            # index data with elasticsearch
            try:
                self.es.index(index=self.prefix + org_uuid + "_workflow_level2",
                              id=data['level2_uuid'], doc_type='workflow',
                              body=data)
            except RequestError:
                logger.error('Error indexing workflowlevel2', exc_info=True)

    def delete_workflowlevel1(self, wf):
        """Remove a workflowlevel1 document from its organization's index."""
        if self.es is None:
            return
        try:
            # The original condition tested ``wf.organization`` twice.
            if wf.organization is not None:
                org_uuid = str(wf.organization.organization_uuid)
                self.es.delete(
                    index=self.prefix + org_uuid + "_workflow_level1",
                    id=wf.level1_uuid, doc_type='workflow')
        except RequestError:
            logger.error('Error deleting workflowlevel1 from index',
                         exc_info=True)

    def delete_workflowlevel2(self, wf):
        """Remove a workflowlevel2 document from its wf1 org's index."""
        if self.es is None:
            return
        try:
            if wf.workflowlevel1 is not None \
                    and wf.workflowlevel1.organization is not None:
                org_uuid = str(
                    wf.workflowlevel1.organization.organization_uuid)
                self.es.delete(
                    index=self.prefix + org_uuid + "_workflow_level2",
                    id=wf.level2_uuid, doc_type='workflow')
        except RequestError:
            logger.error('Error deleting workflowlevel2 from index',
                         exc_info=True)

    def index_collecteddata(self, d):
        """Index a collected-data record under its wf1's organization."""
        if self.es is None:
            return
        if d.workflowlevel1 is not None and \
                d.workflowlevel1.organization is not None:
            org_uuid = str(d.workflowlevel1.organization.organization_uuid)
            # get model field data
            data = self.get_field_data(d)
            # aggregate related models
            data['indicator'] = d.indicator.name
            # index data with elasticsearch
            try:
                self.es.index(index=self.prefix + org_uuid + "_collected_data",
                              id=data['data_uuid'], doc_type='data_collection',
                              body=data)
            except RequestError:
                logger.error('Error indexing collected data', exc_info=True)

    def delete_collecteddata(self, d):
        """Remove a collected-data record from its organization's index."""
        if self.es is None:
            return
        try:
            # Guard against a missing workflowlevel1 as well, mirroring
            # ``index_collecteddata``.
            if d.workflowlevel1 is not None and \
                    d.workflowlevel1.organization is not None:
                org_uuid = str(d.workflowlevel1.organization.organization_uuid)
                self.es.delete(
                    index=self.prefix + org_uuid + "_collected_data",
                    id=d.data_uuid, doc_type='data_collection')
        except RequestError:
            logger.error('Error deleting collected data from index',
                         exc_info=True)

    def get_field_data(self, obj):
        """
        Returns all field data that is *not* stored in a related model.

        :param obj: the object to retrieve data from
        :return: dict of object data with field names as keys, or ``None``
            when Elasticsearch is disabled
        """
        if self.es is None:
            return
        # Exact type comparisons (not isinstance) mirror the original
        # per-type ``is not`` checks, so subclasses such as OneToOneField
        # are still included. NOTE(review): confirm that is intended.
        excluded_types = (ManyToManyField, ManyToManyRel, ManyToOneRel,
                          ForeignKey, RelatedField)
        data = {}
        for f in obj._meta.get_fields():
            if type(f) not in excluded_types:
                data[f.name] = getattr(obj, f.name)
        return data
| {
"content_hash": "d8654c84227a910db706da4dbbafcf50",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 140,
"avg_line_length": 39.05291005291005,
"alnum_prop": 0.6012735401707086,
"repo_name": "toladata/TolaActivity",
"id": "9c5c8666ed0f70d566830bbf12d17b04a3a1e562",
"size": "7381",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "search/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "497127"
},
{
"name": "JavaScript",
"bytes": "114367"
},
{
"name": "Python",
"bytes": "786590"
},
{
"name": "Shell",
"bytes": "889"
}
],
"symlink_target": ""
} |
"""A source and a sink for reading from and writing to text files."""
# pytype: skip-file
from __future__ import absolute_import
import logging
from builtins import object
from builtins import range
from functools import partial
from typing import Optional
from past.builtins import long
from apache_beam.coders import coders
from apache_beam.io import filebasedsink
from apache_beam.io import filebasedsource
from apache_beam.io import iobase
from apache_beam.io.filebasedsource import ReadAllFiles
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Write
from apache_beam.transforms import PTransform
from apache_beam.transforms.display import DisplayDataItem
__all__ = [
'ReadFromText',
'ReadFromTextWithFilename',
'ReadAllFromText',
'WriteToText'
]
_LOGGER = logging.getLogger(__name__)
class _TextSource(filebasedsource.FileBasedSource):
  r"""A source for reading text files.

  Parses a text file as newline-delimited elements. Supports newline
  delimiters '\n' and '\r\n'.

  This implementation only supports reading text encoded using UTF-8 or
  ASCII.
  """

  DEFAULT_READ_BUFFER_SIZE = 8192

  class ReadBuffer(object):
    # A buffer that gives the buffered data and next position in the
    # buffer that should be read.

    def __init__(self, data, position):
      self._data = data
      self._position = position

    @property
    def data(self):
      return self._data

    @data.setter
    def data(self, value):
      assert isinstance(value, bytes)
      self._data = value

    @property
    def position(self):
      return self._position

    @position.setter
    def position(self, value):
      assert isinstance(value, (int, long))
      if value > len(self._data):
        raise ValueError(
            'Cannot set position to %d since it\'s larger than '
            'size of data %d.' % (value, len(self._data)))
      self._position = value

    def reset(self):
      self.data = b''
      self.position = 0

  def __init__(self,
               file_pattern,
               min_bundle_size,
               compression_type,
               strip_trailing_newlines,
               coder,  # type: coders.Coder
               buffer_size=DEFAULT_READ_BUFFER_SIZE,
               validate=True,
               skip_header_lines=0,
               header_processor_fns=(None, None)):
    """Initialize a _TextSource

    Args:
      header_processor_fns (tuple): a tuple of a `header_matcher` function
        and a `header_processor` function. The `header_matcher` should
        return `True` for all lines at the start of the file that are part
        of the file header and `False` otherwise. These header lines will
        not be yielded when reading records and instead passed into
        `header_processor` to be handled. If `skip_header_lines` and a
        `header_matcher` are both provided, the value of `skip_header_lines`
        lines will be skipped and the header will be processed from
        there.

    Raises:
      ValueError: if skip_lines is negative.

    Please refer to documentation in class `ReadFromText` for the rest
    of the arguments.
    """
    super(_TextSource, self).__init__(
        file_pattern,
        min_bundle_size,
        compression_type=compression_type,
        validate=validate)

    self._strip_trailing_newlines = strip_trailing_newlines
    self._compression_type = compression_type
    self._coder = coder
    self._buffer_size = buffer_size
    if skip_header_lines < 0:
      raise ValueError(
          'Cannot skip negative number of header lines: %d' % skip_header_lines)
    elif skip_header_lines > 10:
      # Fix: the original call omitted the argument for '%d', so the
      # literal placeholder was logged instead of the line count.
      _LOGGER.warning(
          'Skipping %d header lines. Skipping large number of header '
          'lines might significantly slow down processing.',
          skip_header_lines)
    self._skip_header_lines = skip_header_lines
    self._header_matcher, self._header_processor = header_processor_fns

  def display_data(self):
    parent_dd = super(_TextSource, self).display_data()
    parent_dd['strip_newline'] = DisplayDataItem(
        self._strip_trailing_newlines, label='Strip Trailing New Lines')
    parent_dd['buffer_size'] = DisplayDataItem(
        self._buffer_size, label='Buffer Size')
    parent_dd['coder'] = DisplayDataItem(self._coder.__class__, label='Coder')
    return parent_dd

  def read_records(self, file_name, range_tracker):
    start_offset = range_tracker.start_position()
    read_buffer = _TextSource.ReadBuffer(b'', 0)

    next_record_start_position = -1

    def split_points_unclaimed(stop_position):
      return (
          0 if stop_position <= next_record_start_position else
          iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)

    range_tracker.set_split_points_unclaimed_callback(split_points_unclaimed)

    with self.open_file(file_name) as file_to_read:
      position_after_processing_header_lines = (
          self._process_header(file_to_read, read_buffer))
      start_offset = max(start_offset, position_after_processing_header_lines)
      if start_offset > position_after_processing_header_lines:
        # Seeking to one position before the start index and ignoring the
        # current line. If start_position is at beginning of the line, that
        # line belongs to the current bundle, hence ignoring that is
        # incorrect. Seeking to one byte before prevents that.

        file_to_read.seek(start_offset - 1)
        read_buffer.reset()
        sep_bounds = self._find_separator_bounds(file_to_read, read_buffer)
        if not sep_bounds:
          # Could not find a separator after (start_offset - 1). This means
          # that none of the records within the file belongs to the current
          # source.
          return

        _, sep_end = sep_bounds
        read_buffer.data = read_buffer.data[sep_end:]
        next_record_start_position = start_offset - 1 + sep_end
      else:
        next_record_start_position = position_after_processing_header_lines

      while range_tracker.try_claim(next_record_start_position):
        record, num_bytes_to_next_record = self._read_record(file_to_read,
                                                            read_buffer)
        # For compressed text files that use an unsplittable OffsetRangeTracker
        # with infinity as the end position, above 'try_claim()' invocation
        # would pass for an empty record at the end of file that is not
        # followed by a new line character. Since such a record is at the last
        # position of a file, it should not be a part of the considered range.
        # We do this check to ignore such records.
        if len(record) == 0 and num_bytes_to_next_record < 0:  # pylint: disable=len-as-condition
          break

        # Record separator must be larger than zero bytes.
        assert num_bytes_to_next_record != 0
        if num_bytes_to_next_record > 0:
          next_record_start_position += num_bytes_to_next_record

        yield self._coder.decode(record)
        if num_bytes_to_next_record < 0:
          break

  def _process_header(self, file_to_read, read_buffer):
    # Returns a tuple containing the position in file after processing header
    # records and a list of decoded header lines that match
    # 'header_matcher'.
    header_lines = []
    position = self._skip_lines(
        file_to_read, read_buffer,
        self._skip_header_lines) if self._skip_header_lines else 0
    if self._header_matcher:
      while True:
        record, num_bytes_to_next_record = self._read_record(file_to_read,
                                                             read_buffer)
        decoded_line = self._coder.decode(record)
        if not self._header_matcher(decoded_line):
          # We've read past the header section at this point, so go back a
          # line.
          file_to_read.seek(position)
          read_buffer.reset()
          break
        header_lines.append(decoded_line)
        if num_bytes_to_next_record < 0:
          break
        position += num_bytes_to_next_record

      if self._header_processor:
        self._header_processor(header_lines)

    return position

  def _find_separator_bounds(self, file_to_read, read_buffer):
    # Determines the start and end positions within 'read_buffer.data' of the
    # next separator starting from position 'read_buffer.position'.
    # Currently supports following separators.
    # * '\n'
    # * '\r\n'
    # This method may increase the size of buffer but it will not decrease the
    # size of it.

    current_pos = read_buffer.position

    while True:
      if current_pos >= len(read_buffer.data):
        # Ensuring that there are enough bytes to determine if there is a '\n'
        # at current_pos.
        if not self._try_to_ensure_num_bytes_in_buffer(
            file_to_read, read_buffer, current_pos + 1):
          return

      # Using find() here is more efficient than a linear scan of the byte
      # array.
      next_lf = read_buffer.data.find(b'\n', current_pos)
      if next_lf >= 0:
        if next_lf > 0 and read_buffer.data[next_lf - 1:next_lf] == b'\r':
          # Found a '\r\n'. Accepting that as the next separator.
          return (next_lf - 1, next_lf + 1)
        else:
          # Found a '\n'. Accepting that as the next separator.
          return (next_lf, next_lf + 1)

      current_pos = len(read_buffer.data)

  def _try_to_ensure_num_bytes_in_buffer(
      self, file_to_read, read_buffer, num_bytes):
    # Tries to ensure that there are at least num_bytes bytes in the buffer.
    # Returns True if this can be fulfilled, returned False if this cannot be
    # fulfilled due to reaching EOF.
    while len(read_buffer.data) < num_bytes:
      read_data = file_to_read.read(self._buffer_size)
      if not read_data:
        return False

      read_buffer.data += read_data

    return True

  def _skip_lines(self, file_to_read, read_buffer, num_lines):
    """Skip num_lines from file_to_read, return num_lines+1 start position."""
    if file_to_read.tell() > 0:
      file_to_read.seek(0)
    position = 0
    for _ in range(num_lines):
      _, num_bytes_to_next_record = self._read_record(file_to_read, read_buffer)
      if num_bytes_to_next_record < 0:
        # We reached end of file. It is OK to just break here
        # because subsequent _read_record will return same result.
        break
      position += num_bytes_to_next_record
    return position

  def _read_record(self, file_to_read, read_buffer):
    # Returns a tuple containing the current_record and number of bytes to the
    # next record starting from 'read_buffer.position'. If EOF is
    # reached, returns a tuple containing the current record and -1.

    if read_buffer.position > self._buffer_size:
      # read_buffer is too large. Truncating and adjusting it.
      read_buffer.data = read_buffer.data[read_buffer.position:]
      read_buffer.position = 0

    record_start_position_in_buffer = read_buffer.position
    sep_bounds = self._find_separator_bounds(file_to_read, read_buffer)
    read_buffer.position = sep_bounds[1] if sep_bounds else len(
        read_buffer.data)

    if not sep_bounds:
      # Reached EOF. Bytes up to the EOF is the next record. Returning '-1' for
      # the starting position of the next record.
      return (read_buffer.data[record_start_position_in_buffer:], -1)

    if self._strip_trailing_newlines:
      # Current record should not contain the separator.
      return (
          read_buffer.data[record_start_position_in_buffer:sep_bounds[0]],
          sep_bounds[1] - record_start_position_in_buffer)
    else:
      # Current record should contain the separator.
      return (
          read_buffer.data[record_start_position_in_buffer:sep_bounds[1]],
          sep_bounds[1] - record_start_position_in_buffer)
class _TextSourceWithFilename(_TextSource):
  """A ``_TextSource`` that pairs each record with its file name."""

  def read_records(self, file_name, range_tracker):
    # Delegate the actual parsing to the parent and tag every record
    # with the name of the file it came from.
    for parent_record in super(_TextSourceWithFilename, self).read_records(
        file_name, range_tracker):
      yield (file_name, parent_record)
class _TextSink(filebasedsink.FileBasedSink):
  """A sink to a GCS or local text file or files."""

  def __init__(self,
               file_path_prefix,
               file_name_suffix='',
               append_trailing_newlines=True,
               num_shards=0,
               shard_name_template=None,
               coder=coders.ToBytesCoder(),  # type: coders.Coder
               compression_type=CompressionTypes.AUTO,
               header=None):
    """Initialize a _TextSink.

    Args:
      file_path_prefix: The file path to write to. Output files start with
        this prefix, followed by a shard identifier (see num_shards) and a
        common extension given by file_name_suffix, if any. Usually only
        this argument is specified and num_shards, shard_name_template and
        file_name_suffix keep their defaults.
      file_name_suffix: Suffix for the files written.
      append_trailing_newlines: Whether an additional newline char should
        be written after each element.
      num_shards: Number of output files (shards). Leave unset to let the
        service pick an optimal value; constraining it is likely to reduce
        pipeline performance and is not recommended unless a specific
        number of output files is required.
      shard_name_template: Template string with placeholders for the shard
        number and shard count. When building a file name for a particular
        shard, the upper-case letters 'S' and 'N' are replaced with the
        0-padded shard number and shard count respectively. Passing ''
        behaves as if num_shards were set to 1, so only one file is
        generated. When None, the default pattern '-SSSSS-of-NNNNN' is
        used.
      coder: Coder used to encode each line.
      compression_type: How to handle compressed output files. Typical
        value is CompressionTypes.AUTO, in which case the final file
        path's extension (as determined by file_path_prefix,
        file_name_suffix, num_shards and shard_name_template) is used to
        detect the compression.
      header: String to write at beginning of file as a header. If not
        None and append_trailing_newlines is set, '\n' will be added.

    Returns:
      A _TextSink object usable for writing.
    """
    super(_TextSink, self).__init__(
        file_path_prefix,
        file_name_suffix=file_name_suffix,
        num_shards=num_shards,
        shard_name_template=shard_name_template,
        coder=coder,
        mime_type='text/plain',
        compression_type=compression_type)
    self._append_trailing_newlines = append_trailing_newlines
    self._header = header

  def open(self, temp_path):
    # Emit the header (plus optional newline) before any records are
    # written to the shard.
    handle = super(_TextSink, self).open(temp_path)
    if self._header is not None:
      handle.write(coders.ToBytesCoder().encode(self._header))
      if self._append_trailing_newlines:
        handle.write(b'\n')
    return handle

  def display_data(self):
    parent = super(_TextSink, self).display_data()
    parent['append_newline'] = DisplayDataItem(
        self._append_trailing_newlines, label='Append Trailing New Lines')
    return parent

  def write_encoded_record(self, file_handle, encoded_value):
    """Writes a single encoded record, plus a newline when configured."""
    file_handle.write(encoded_value)
    if self._append_trailing_newlines:
      file_handle.write(b'\n')
def _create_text_source(
    file_pattern=None,
    min_bundle_size=None,
    compression_type=None,
    strip_trailing_newlines=None,
    coder=None,
    skip_header_lines=None):
  """Builds a non-validating ``_TextSource`` for the given configuration."""
  source_options = dict(
      file_pattern=file_pattern,
      min_bundle_size=min_bundle_size,
      compression_type=compression_type,
      strip_trailing_newlines=strip_trailing_newlines,
      coder=coder,
      skip_header_lines=skip_header_lines)
  # Validation is skipped: file patterns handled here are only known at
  # pipeline execution time.
  return _TextSource(validate=False, **source_options)
class ReadAllFromText(PTransform):
  """A ``PTransform`` for reading a ``PCollection`` of text files.

  Reads a ``PCollection`` of text files or file patterns and produces a
  ``PCollection`` of strings.

  Parses a text file as newline-delimited elements, by default assuming
  UTF-8 encoding. Supports newline delimiters '\\n' and '\\r\\n'.

  This implementation only supports reading text encoded using UTF-8 or
  ASCII; other encodings such as UTF-16 or UTF-32 are not supported.
  """

  DEFAULT_DESIRED_BUNDLE_SIZE = 64 * 1024 * 1024  # 64MB

  def __init__(
      self,
      min_bundle_size=0,
      desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE,
      compression_type=CompressionTypes.AUTO,
      strip_trailing_newlines=True,
      coder=coders.StrUtf8Coder(),  # type: coders.Coder
      skip_header_lines=0,
      **kwargs):
    """Initialize the ``ReadAllFromText`` transform.

    Args:
      min_bundle_size: Minimum size of bundles generated when splitting a
        source into bundles; see ``FileBasedSource`` for details.
      desired_bundle_size: Desired size of bundles generated when splitting
        a source into bundles; see ``FileBasedSource`` for details.
      compression_type: How to handle compressed input files. Typical value
        is ``CompressionTypes.AUTO``, in which case the underlying file
        path's extension is used to detect the compression.
      strip_trailing_newlines: Whether the newline character should be
        removed from each line before decoding it.
      skip_header_lines: Number of header lines to skip; the same number is
        skipped from each source file. Must be 0 or higher. Large values
        might impact performance.
      coder: Coder used to decode each line.
    """
    super(ReadAllFromText, self).__init__(**kwargs)
    self._min_bundle_size = min_bundle_size
    self._desired_bundle_size = desired_bundle_size
    self._compression_type = compression_type
    # Each matched file is turned into its own (non-validating) source at
    # execution time.
    create_source = partial(
        _create_text_source,
        min_bundle_size=min_bundle_size,
        compression_type=compression_type,
        strip_trailing_newlines=strip_trailing_newlines,
        coder=coder,
        skip_header_lines=skip_header_lines)
    self._read_all_files = ReadAllFiles(
        True,  # splittable
        compression_type,
        desired_bundle_size,
        min_bundle_size,
        create_source)

  def expand(self, pvalue):
    return pvalue | 'ReadAllFiles' >> self._read_all_files
class ReadFromText(PTransform):
  r"""A :class:`~apache_beam.transforms.ptransform.PTransform` for reading
  text files.

  Parses a text file as newline-delimited elements, by default assuming
  ``UTF-8`` encoding. Supports newline delimiters ``\n`` and ``\r\n``.

  This implementation only supports reading text encoded using ``UTF-8`` or
  ``ASCII``; other encodings such as ``UTF-16`` or ``UTF-32`` are not
  supported.
  """

  _source_class = _TextSource

  def __init__(
      self,
      file_pattern=None,
      min_bundle_size=0,
      compression_type=CompressionTypes.AUTO,
      strip_trailing_newlines=True,
      coder=coders.StrUtf8Coder(),  # type: coders.Coder
      validate=True,
      skip_header_lines=0,
      **kwargs):
    """Initialize the :class:`ReadFromText` transform.

    Args:
      file_pattern (str): Local or GCS (``gs://``) file path to read from;
        may contain the glob characters ``*``, ``?`` and ``[...]``.
      min_bundle_size (int): Minimum size of bundles generated when
        splitting this source into bundles; see
        :class:`~apache_beam.io.filebasedsource.FileBasedSource`.
      compression_type (str): How to handle compressed input files. Typical
        value is :attr:`CompressionTypes.AUTO
        <apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case
        the underlying file path's extension is used to detect the
        compression.
      strip_trailing_newlines (bool): Whether the newline character should
        be removed from each line before decoding it.
      validate (bool): Whether to verify at pipeline-construction time that
        the files exist.
      skip_header_lines (int): Number of header lines to skip; the same
        number is skipped from each source file. Must be 0 or higher.
        Large values might impact performance.
      coder (~apache_beam.coders.coders.Coder): Coder used to decode each
        line.
    """
    super(ReadFromText, self).__init__(**kwargs)
    # ``_source_class`` is a class attribute so subclasses (e.g.
    # ReadFromTextWithFilename) can substitute their own source.
    self._source = self._source_class(
        file_pattern,
        min_bundle_size,
        compression_type,
        strip_trailing_newlines,
        coder,
        validate=validate,
        skip_header_lines=skip_header_lines)

  def expand(self, pvalue):
    return pvalue.pipeline | Read(self._source)
class ReadFromTextWithFilename(ReadFromText):
  r"""A :class:`~apache_beam.io.textio.ReadFromText` variant that emits
  ``(file name, line)`` pairs instead of bare lines.

  Identical to :class:`ReadFromText` except that it overrides the
  ``_source_class`` attribute with :class:`_TextSourceWithFilename`.
  """

  _source_class = _TextSourceWithFilename
class WriteToText(PTransform):
  """A :class:`~apache_beam.transforms.ptransform.PTransform` for writing to
  text files."""

  def __init__(
      self,
      file_path_prefix,  # type: str
      file_name_suffix='',
      append_trailing_newlines=True,
      num_shards=0,
      shard_name_template=None,  # type: Optional[str]
      coder=coders.ToBytesCoder(),  # type: coders.Coder
      compression_type=CompressionTypes.AUTO,
      header=None):
    r"""Initialize a :class:`WriteToText` transform.

    Args:
      file_path_prefix (str): The file path to write to. Output files start
        with this prefix, followed by a shard identifier (see
        **num_shards**) and a common extension given by
        **file_name_suffix**, if any. Usually only this argument is
        specified and **num_shards**, **shard_name_template** and
        **file_name_suffix** keep their defaults.
      file_name_suffix (str): Suffix for the files written.
      append_trailing_newlines (bool): Whether an additional newline char
        should be written after each element.
      num_shards (int): Number of output files (shards). Leave unset to
        let the service pick an optimal value; constraining it is likely
        to reduce pipeline performance and is not recommended unless a
        specific number of output files is required.
      shard_name_template (str): Template string with placeholders for the
        shard number and shard count. Currently only ``''`` and
        ``'-SSSSS-of-NNNNN'`` are patterns accepted by the service. When
        building a file name for a particular shard, the upper-case
        letters ``S`` and ``N`` are replaced with the ``0``-padded shard
        number and shard count respectively. Passing ``''`` behaves as if
        num_shards were set to 1, so only one file is generated. The
        default pattern used is ``'-SSSSS-of-NNNNN'``.
      coder (~apache_beam.coders.coders.Coder): Coder used to encode each
        line.
      compression_type (str): How to handle compressed output files.
        Typical value is :class:`CompressionTypes.AUTO
        <apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case
        the final file path's extension (as determined by
        **file_path_prefix**, **file_name_suffix**, **num_shards** and
        **shard_name_template**) is used to detect the compression.
      header (str): String to write at beginning of file as a header.
        If not :data:`None` and **append_trailing_newlines** is set,
        ``\n`` will be added.
    """
    # All configuration is forwarded straight to the underlying sink.
    self._sink = _TextSink(
        file_path_prefix,
        file_name_suffix=file_name_suffix,
        append_trailing_newlines=append_trailing_newlines,
        num_shards=num_shards,
        shard_name_template=shard_name_template,
        coder=coder,
        compression_type=compression_type,
        header=header)

  def expand(self, pcoll):
    return pcoll | Write(self._sink)
| {
"content_hash": "15d2bebbb610a8cb0f860b5da96b5075",
"timestamp": "",
"source": "github",
"line_count": 623,
"max_line_length": 97,
"avg_line_length": 39.47833065810594,
"alnum_prop": 0.6627363285220573,
"repo_name": "iemejia/incubator-beam",
"id": "0660b8747f3a133ae8d36828c8cb6092b0040a04",
"size": "25380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/textio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22216"
},
{
"name": "Java",
"bytes": "9687045"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
} |
import torch
from .. import settings
from .broadcasting import _mul_broadcast_shape, _pad_with_singletons
# A slice that does nothing to a dimension
_noop_index = slice(None, None, None)
def _compute_getitem_size(obj, indices):
"""
Given an object and a tuple of indices, computes the final size of the
Indices is a tuple containing ints, slices, and tensors
.. note::
The length of indices must match the dimensionality of obj
Args:
obj - tensor or LazyTensor
indices - tuple of ints, slices, tensors
Returns:
:class:`torch.Size`
"""
if obj.dim() != len(indices):
raise RuntimeError(
"_compute_getitem_size assumes that obj (size: {}) and indices (len: {}) have the "
"same dimensionality.".format(obj.shape, len(indices))
)
final_shape = []
tensor_idx = None
tensor_idx_shape = None
slice_after_tensor_idx = False
for i, (size, idx) in enumerate(zip(obj.shape, indices)):
# Handle slice: that dimension gets downsized
if isinstance(idx, slice):
if idx == _noop_index:
final_shape.append(size)
else:
final_shape.append(len(range(*idx.indices(size))))
# If we don't have a continuous set of tensor indices, then the tensor indexed part
# goes to the front
if tensor_idx is not None:
slice_after_tensor_idx = True
# Handle int: we "lose" that dimension
elif isinstance(idx, int):
if settings.debug.on():
try:
range(size)[idx]
except IndexError:
raise IndexError(
"index element {} ({}) is invalid: out of range for obj of size "
"{}.".format(i, idx, obj.shape)
)
# Handle tensor index - this one is complicated
elif torch.is_tensor(idx):
if tensor_idx_shape is None:
tensor_idx_shape = idx.shape
tensor_idx = len(final_shape)
# If we don't have a continuous set of tensor indices, then the tensor indexed part
# goes to the front
else:
try:
tensor_idx_shape = _mul_broadcast_shape(tensor_idx_shape, idx.shape)
except RuntimeError:
raise IndexError(
"Incompatible tensor indices in index - got shapes of {} .".format(
[idx.shape for idx in indices if torch.is_tensor(idx)]
)
)
if slice_after_tensor_idx:
tensor_idx = 0
# If we don't have a continuous set of tensor indices, then the tensor indexed part
# goes to the front
if tensor_idx is not None:
final_shape = final_shape[:tensor_idx] + list(tensor_idx_shape) + final_shape[tensor_idx:]
return torch.Size(final_shape)
def _convert_indices_to_tensors(obj, indices):
    """
    Given an index made up of tensors/slices/ints, returns a tensor-only index that has the
    same outcome as the original index (when applied to the obj)

    .. note::
        The length of indices must match the dimensionality of obj

    Args:
        obj - tensor or LazyTensor
        indices - tuple of slices, tensors, ints

    Returns:
        tuple of tensor indices (shapes of tensors will involve broadcasting)

    Example:
        >>> x = torch.randn(3, 6, 4)
        >>> _convert_indices_to_tensors(x, (torch.tensor([0, 1]), 2, slice(None, None, None)))
        >>> # (torch.tensor([[[0]], [[1]]]), torch.tensor([[[2]]]), torch.tensor([[[0, 1, 2, 3]]]))
    """
    slice_indices = tuple(index for index in indices if isinstance(index, slice))
    tensor_indices = tuple(index for index in indices if torch.is_tensor(index))
    tensor_index_shape = _mul_broadcast_shape(*[tensor_index.shape for tensor_index in tensor_indices])
    # How many dimensions will the new tensor index have?
    num_final_dims = len(slice_indices) + len(tensor_index_shape)
    # Determine if the tensor index is being moved to the front
    # (advanced-indexing semantics when tensor indices are non-contiguous).
    tensor_index_moved_to_start = _is_tensor_index_moved_to_start(indices)
    # These are counters of the number of singleton dimensions that we need to append to
    # the left and right of the indices that we're converting to tensor indices
    num_singletons_before = len(tensor_index_shape) if tensor_index_moved_to_start else 0
    num_singletons_after = (num_final_dims - len(tensor_index_shape)) if tensor_index_moved_to_start else num_final_dims
    # These are counters of the number of singleton dimensions that we need to append to
    # the left and right of the indices that are currently tensor indices.
    # None here means "not yet determined": they get fixed when the first
    # tensor index is encountered in the loop below.
    num_singletons_before_tensor = 0 if tensor_index_moved_to_start else None
    num_singletons_after_tensor = (num_final_dims - len(tensor_index_shape)) if tensor_index_moved_to_start else None
    # Compute the size suggested by the tensor indices
    new_indices = []
    for dim, index in enumerate(indices):
        # slice - the tensor index will represent the slice
        if isinstance(index, slice):
            # Each slice consumes one output dim: shrink the right padding now,
            # and grow the left padding after this index is built (so padding
            # counters stay consistent across iterations — order matters here).
            num_singletons_after -= 1
            new_index = torch.arange(0, obj.size(dim), device=obj.device)[index]
            new_index = _pad_with_singletons(new_index, num_singletons_before, num_singletons_after)
            num_singletons_before += 1
        # int - the tensor index will have only one element
        elif isinstance(index, int):
            # Note: an int index contributes no output dim, so the padding
            # counters are intentionally left unchanged.
            new_index = torch.tensor(index, dtype=torch.long, device=obj.device)
            new_index = _pad_with_singletons(new_index, num_singletons_before, num_singletons_after)
        elif torch.is_tensor(index):
            # If this is the first tensor index we've seen, and we aren't moving all tensor indices to the start
            # Then let's mark how much padding we need for subsequent tensor indices
            if num_singletons_before_tensor is None:
                num_singletons_after -= len(tensor_index_shape)
                num_singletons_before_tensor = num_singletons_before
                num_singletons_after_tensor = num_singletons_after
                num_singletons_before += len(tensor_index_shape)
            # All tensor indices share the same padding so they broadcast
            # against each other at the same output position.
            new_index = _pad_with_singletons(index, num_singletons_before_tensor, num_singletons_after_tensor)
        new_indices.append(new_index)
    return tuple(new_indices)
def _equal_indices(a, b):
"""
Helper which checks whether two index components (int, slice, tensor) are equal
"""
if torch.is_tensor(a) and torch.is_tensor(b):
return torch.equal(a, b)
elif not torch.is_tensor(a) and not torch.is_tensor(b):
return a == b
else:
return False
def _is_noop_index(index):
    """
    Determine if a given index is a noop (e.g. ":")
    """
    if not isinstance(index, slice):
        return False
    return index == slice(None, None, None)
def _is_tensor_index_moved_to_start(indices):
"""
Given an index, determine if the indexed part of the getitem is moved to the zero'th dimension
"""
has_tensor_index = False
continuous_tensor_index = True
if torch.is_tensor(indices[0]):
return True
for index in indices[1:]:
if torch.is_tensor(index):
if not has_tensor_index:
has_tensor_index = True
elif not continuous_tensor_index:
return True
elif isinstance(index, slice):
if has_tensor_index:
continuous_tensor_index = False
return False
| {
"content_hash": "adf7308ba8804e34e65b97535a1b6655",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 120,
"avg_line_length": 38.81218274111675,
"alnum_prop": 0.6139157729531781,
"repo_name": "jrg365/gpytorch",
"id": "e47e93b3a01f8de931fdd91563183d8df9563cce",
"size": "7670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpytorch/utils/getitem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6005"
},
{
"name": "C++",
"bytes": "242"
},
{
"name": "Python",
"bytes": "338860"
}
],
"symlink_target": ""
} |
from flask import request, jsonify
from src.main import app
from src.main import db
from src.models.client import Client
from src.v1.auth_controller import validate_auth
@app.route("/v1/client", methods = ['GET', 'POST'])
@validate_auth
def clients():
    """Collection endpoint: list all clients (GET) or create one (POST)."""
    if request.method != 'POST':
        return get_all_clients()
    payload = request.get_json()
    return add_client(payload['name'])
def get_all_clients():
    """Return a JSON payload listing every stored client.

    Response shape: ``{"clients": [<client.jsonify()>, ...]}``.
    """
    # Renamed local: the original `clients` shadowed the `clients` view
    # function defined above in this module. Also dropped a stray semicolon.
    all_clients = Client.query.all()
    return jsonify(clients=[client.jsonify() for client in all_clients])
def add_client(name):
    """Create and persist a new client record, returning an empty 201 response."""
    record = Client(name)
    db.session.add(record)
    db.session.commit()
    return '', 201
| {
"content_hash": "79fa79b226d84a61ccc0c1eb63fd0221",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 70,
"avg_line_length": 27.695652173913043,
"alnum_prop": 0.6797488226059655,
"repo_name": "tomaszguzialek/flask-api",
"id": "e14149f0fd6c95304d8dc21657b3cef1eaebb6f9",
"size": "637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/v1/client_controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17467"
},
{
"name": "Ruby",
"bytes": "239"
},
{
"name": "Shell",
"bytes": "4286"
}
],
"symlink_target": ""
} |
"""Delimited path generation.
"""
from .ladder import Ladder
from .utils import delimitedpathjoin
class DelimitedPath(Ladder):
    """Build delimiter-joined path strings through the Ladder interface.

    ``+`` and ``/`` append a fragment to the path; the reflected variants
    (``__radd__`` etc.) prepend one. Fragments are joined using the
    configured delimiter.
    """

    # State attributes handled by the Ladder serialization machinery.
    __attrs__ = ['__pathway__', '__delimiter__']

    def __init__(self, pathway=None, delimiter=''):
        # Normalize the initial pathway against the delimiter up front.
        self.__pathway__ = delimitedpathjoin(delimiter, pathway)
        self.__delimiter__ = delimiter

    def __add__(self, other):
        """Append ``other`` to this path (``/`` behaves the same way)."""
        return self(other, delimiter=self.__delimiter__)

    __div__ = __add__
    __truediv__ = __add__

    def __radd__(self, other):
        """Prepend ``other`` by wrapping it in a fresh instance (``/`` too)."""
        return self.__class__(other, delimiter=self.__delimiter__)(self)

    __rdiv__ = __radd__
    __rtruediv__ = __radd__

    def __preparestate__(self, *paths, **params):
        """Return state with *paths* joined onto the pathway via the delimiter."""
        state = self.__getstate__()
        joined = delimitedpathjoin(state['delimiter'], state['pathway'], *paths)
        state['pathway'] = joined
        return state
| {
"content_hash": "6d1ed5e80363bf2b7117b09aad8c8627",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 72,
"avg_line_length": 30.176470588235293,
"alnum_prop": 0.5419103313840156,
"repo_name": "dgilland/ladder",
"id": "e819f044d995ee11ff9de2d1e3c6bd6460f3afcc",
"size": "1026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ladder/delimitedpath.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1432"
},
{
"name": "Python",
"bytes": "26879"
}
],
"symlink_target": ""
} |
"""A collection of decoders."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from language.labs.consistent_zero_shot_nmt.modules import attention_mechanisms
from language.labs.consistent_zero_shot_nmt.modules import attention_wrappers
from language.labs.consistent_zero_shot_nmt.modules import base
from language.labs.consistent_zero_shot_nmt.modules import helpers
from language.labs.consistent_zero_shot_nmt.utils import common_utils as U
from language.labs.consistent_zero_shot_nmt.utils import model_utils
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.layers import common_layers
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import seq2seq as contrib_seq2seq
from tensorflow.contrib import training as contrib_training
__all__ = [
"BasicRNNDecoder",
"AttentiveRNNDecoder",
"get",
]
class BasicRNNDecoder(base.AbstractNMTModule):
  """Basic multi-layer recurrent neural network decoder."""

  def __init__(self, name="BasicRNNDecoder"):
    super(BasicRNNDecoder, self).__init__(name=name)

  # pylint: disable=arguments-differ
  def _build(self, embeddings, inputs, inputs_length, hiddens, hiddens_length,
             enc_state, mode, hparams, decoder_hparams=None):
    """Builds the decoding graph: RNN cell, initial state, helper, decoder."""
    if decoder_hparams is None:
      decoder_hparams = contrib_training.HParams(auxiliary=False)
    batch_size = common_layers.shape_list(hiddens)[0]
    # Build RNN cell.
    rnn_cell = self._build_rnn_cell(
        embeddings=embeddings,
        sequences=hiddens,
        sequences_length=hiddens_length,
        mode=mode,
        hparams=hparams)
    # Build initial state.
    initial_state = self._build_init_state(
        batch_size=batch_size,
        enc_state=enc_state,
        rnn_cell=rnn_cell,
        mode=mode,
        hparams=hparams)
    # Build helper.
    helper = self._build_helper(
        batch_size=batch_size,
        embeddings=embeddings,
        inputs=inputs,
        inputs_length=inputs_length,
        mode=mode,
        hparams=hparams,
        decoder_hparams=decoder_hparams)
    # Build decoder.
    decoder = self._build_decoder(
        helper=helper,
        rnn_cell=rnn_cell,
        initial_state=initial_state,
        mode=mode,
        hparams=hparams)
    return decoder

  def _build_attention(self, memory, memory_sequence_length, mode, hparams):
    """Builds attention mechanism for attending over the hiddens."""
    del mode  # Unused.
    return attention_mechanisms.get(
        attention_type=hparams.attention_mechanism,
        num_units=hparams.attention_layer_size,
        memory=memory,
        memory_sequence_length=memory_sequence_length,
        scope="attention_over_hiddens")

  def _build_rnn_cell(self, embeddings, sequences, sequences_length,
                      mode, hparams):
    """Builds RNN cell for decoding."""
    del embeddings  # Unused.
    # Build attention.
    attention_mechanism = self._build_attention(
        memory=sequences,
        memory_sequence_length=sequences_length,
        mode=mode,
        hparams=hparams)
    # Choose attention architecture.
    if hparams.attention_gnmt:
      create_rnn_cell_fn = model_utils.create_gnmt_rnn_cell
    else:
      create_rnn_cell_fn = model_utils.create_rnn_cell
    # Create base RNN cell with attention.
    rnn_cell = create_rnn_cell_fn(
        attention_mechanism=attention_mechanism,
        attention_layer_size=hparams.attention_layer_size,
        output_attention=(hparams.output_attention == 1),
        unit_type=hparams.rnn_unit_type,
        num_units=hparams.hidden_size,
        num_layers=hparams.dec_num_layers,
        num_residual_layers=hparams.dec_num_residual_layers,
        forget_bias=hparams.rnn_forget_bias,
        dropout=hparams.dropout,
        mode=mode)
    return rnn_cell

  def _build_init_state(self, batch_size, enc_state, rnn_cell, mode, hparams):
    """Builds initial states for the given RNN cells."""
    del mode  # Unused.
    # Build init state.
    init_state = rnn_cell.zero_state(batch_size, tf.float32)
    if hparams.pass_hidden_state:
      # Non-GNMT RNN cell returns AttentionWrappedState.
      if isinstance(init_state, contrib_seq2seq.AttentionWrapperState):
        init_state = init_state.clone(cell_state=enc_state)
      # GNMT RNN cell returns a tuple state.
      elif isinstance(init_state, tuple):
        init_state = tuple(
            zs.clone(cell_state=es) if isinstance(
                zs, contrib_seq2seq.AttentionWrapperState) else es
            for zs, es in zip(init_state, enc_state))
      else:
        # Bug fix: the ValueError was previously constructed but never
        # raised, so unknown state types silently skipped the encoder-state
        # pass-through.
        raise ValueError("RNN cell returns zero states of unknown type: %s"
                         % str(type(init_state)))
    return init_state

  def _build_helper(self, batch_size, embeddings, inputs, inputs_length,
                    mode, hparams, decoder_hparams):
    """Builds a helper instance for BasicDecoder."""
    # Auxiliary decoding mode at training time.
    if decoder_hparams.auxiliary:
      start_tokens = tf.fill([batch_size], text_encoder.PAD_ID)
      # helper = helpers.FixedContinuousEmbeddingHelper(
      #     embedding=embeddings,
      #     start_tokens=start_tokens,
      #     end_token=text_encoder.EOS_ID,
      #     num_steps=hparams.aux_decode_length)
      helper = contrib_seq2seq.SampleEmbeddingHelper(
          embedding=embeddings,
          start_tokens=start_tokens,
          end_token=text_encoder.EOS_ID,
          softmax_temperature=None)
    # Continuous decoding.
    elif hparams.decoder_continuous:
      # Scheduled mixing.
      if mode == tf_estimator.ModeKeys.TRAIN and hparams.scheduled_training:
        helper = helpers.ScheduledContinuousEmbeddingTrainingHelper(
            inputs=inputs,
            sequence_length=inputs_length,
            mixing_concentration=hparams.scheduled_mixing_concentration)
      # Pure continuous decoding (hard to train!).
      elif mode == tf_estimator.ModeKeys.TRAIN:
        helper = helpers.ContinuousEmbeddingTrainingHelper(
            inputs=inputs,
            sequence_length=inputs_length)
      # EVAL and PREDICT expect teacher forcing behavior.
      else:
        helper = contrib_seq2seq.TrainingHelper(
            inputs=inputs, sequence_length=inputs_length)
    # Standard decoding.
    else:
      # Scheduled sampling.
      if mode == tf_estimator.ModeKeys.TRAIN and hparams.scheduled_training:
        helper = contrib_seq2seq.ScheduledEmbeddingTrainingHelper(
            inputs=inputs,
            sequence_length=inputs_length,
            embedding=embeddings,
            sampling_probability=hparams.scheduled_sampling_probability)
      # Teacher forcing (also for EVAL and PREDICT).
      else:
        helper = contrib_seq2seq.TrainingHelper(
            inputs=inputs, sequence_length=inputs_length)
    return helper

  def _build_decoder(self, helper, rnn_cell, initial_state, mode, hparams):
    """Builds a decoder instance."""
    del mode  # Unused.
    del hparams  # Unused.
    decoder = contrib_seq2seq.BasicDecoder(
        cell=rnn_cell, helper=helper, initial_state=initial_state)
    return decoder
class AttentiveRNNDecoder(BasicRNNDecoder):
  """Decodes by attending over the embedding vocabulary."""

  def __init__(self, name="AttentiveRNNDecoder"):
    super(AttentiveRNNDecoder, self).__init__(name=name)

  def _wrap_with_attention(self, cell, memory, hparams):
    """Wraps RNN cell and memory with attention."""
    # Get decoding attention mechanism.
    with tf.variable_scope("decoding_attention", reuse=tf.AUTO_REUSE):
      attention_mechanism = attention_wrappers.FixedMemoryLuongAttention(
          num_units=hparams.hidden_size,
          memory=memory,
          memory_sequence_length=None,
          scale=True,
          name="FixedMemoryAttention")
    # Wrap RNN cell.
    wrapped_cell = attention_wrappers.FixedMemoryAttentionWrapper(
        cell, attention_mechanism,
        attention_layer_size=None,
        cell_input_fn=lambda inputs, attention: inputs,
        output_attention=True)
    return wrapped_cell

  def _build_rnn_cell(self, embeddings, sequences, sequences_length,
                      mode, hparams):
    """Builds attentive RNN cell for decoding."""
    # Build RNN cell.
    rnn_cell = super(AttentiveRNNDecoder, self)._build_rnn_cell(
        embeddings=embeddings,
        sequences=sequences,
        sequences_length=sequences_length,
        mode=mode,
        hparams=hparams)
    # Wrap cell with attention over the target embedding vocabulary.
    memory = tf.expand_dims(embeddings, 0)
    rnn_cell = self._wrap_with_attention(
        cell=rnn_cell,
        memory=memory,
        hparams=hparams)
    return rnn_cell

  def _build_init_state(self, batch_size, enc_state, rnn_cell, mode, hparams):
    """Builds initial states for the given RNN cells."""
    # Build init state.
    init_state = rnn_cell.zero_state(batch_size, tf.float32)
    inner_state = init_state.cell_state
    if hparams.pass_hidden_state:
      # Non-GNMT RNN cell returns AttentionWrappedState.
      if isinstance(inner_state, contrib_seq2seq.AttentionWrapperState):
        init_state = init_state.clone(
            cell_state=inner_state.clone(cell_state=enc_state))
      # GNMT RNN cell returns a tuple state.
      elif isinstance(init_state.cell_state, tuple):
        init_state = init_state.clone(
            cell_state=tuple(
                zs.clone(cell_state=es) if isinstance(
                    zs, contrib_seq2seq.AttentionWrapperState) else es
                for zs, es in zip(inner_state, enc_state)))
      else:
        # Bug fix: the ValueError was previously constructed but never
        # raised, so unknown state types silently skipped the encoder-state
        # pass-through.
        raise ValueError("RNN cell returns zero states of unknown type: %s"
                         % str(type(init_state)))
    return init_state
def get(decoder_type):
  """Returns a decoder instance of the specified type."""
  if decoder_type == U.DEC_BASIC:
    return BasicRNNDecoder()
  if decoder_type == U.DEC_ATTENTIVE:
    return AttentiveRNNDecoder()
  raise ValueError("Unknown decoder type: %s. The type must be one of %s."
                   % (decoder_type, str(U.DEC_TYPES)))
| {
"content_hash": "e6e89e9bb720861e74cace21592d27ec",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 79,
"avg_line_length": 36.276595744680854,
"alnum_prop": 0.6687194525904203,
"repo_name": "google-research/language",
"id": "18d63ac88c965690ba4e5b8ce7c2e4e3189aced2",
"size": "10845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "language/labs/consistent_zero_shot_nmt/modules/decoders.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "9834"
},
{
"name": "CSS",
"bytes": "602"
},
{
"name": "HTML",
"bytes": "25162"
},
{
"name": "JavaScript",
"bytes": "8857"
},
{
"name": "Jupyter Notebook",
"bytes": "1505066"
},
{
"name": "Python",
"bytes": "7139472"
},
{
"name": "Shell",
"bytes": "183709"
}
],
"symlink_target": ""
} |
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class EnvelopesInformation(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'continuation_token': 'str',
        'end_position': 'str',
        'envelopes': 'list[Envelope]',
        'envelope_transaction_statuses': 'list[EnvelopeTransactionStatus]',
        'folders': 'list[Folder]',
        'last_queried_date_time': 'str',
        'next_uri': 'str',
        'previous_uri': 'str',
        'result_set_size': 'str',
        'start_position': 'str',
        'total_set_size': 'str'
    }

    attribute_map = {
        'continuation_token': 'continuationToken',
        'end_position': 'endPosition',
        'envelopes': 'envelopes',
        'envelope_transaction_statuses': 'envelopeTransactionStatuses',
        'folders': 'folders',
        'last_queried_date_time': 'lastQueriedDateTime',
        'next_uri': 'nextUri',
        'previous_uri': 'previousUri',
        'result_set_size': 'resultSetSize',
        'start_position': 'startPosition',
        'total_set_size': 'totalSetSize'
    }

    def __init__(self, _configuration=None, **kwargs):  # noqa: E501
        """EnvelopesInformation - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration

        self._continuation_token = None
        self._end_position = None
        self._envelopes = None
        self._envelope_transaction_statuses = None
        self._folders = None
        self._last_queried_date_time = None
        self._next_uri = None
        self._previous_uri = None
        self._result_set_size = None
        self._start_position = None
        self._total_set_size = None
        self.discriminator = None

        # Populate private attributes directly from keyword arguments,
        # bypassing the property setters (codegen convention).
        setattr(self, "_{}".format('continuation_token'), kwargs.get('continuation_token', None))
        setattr(self, "_{}".format('end_position'), kwargs.get('end_position', None))
        setattr(self, "_{}".format('envelopes'), kwargs.get('envelopes', None))
        setattr(self, "_{}".format('envelope_transaction_statuses'), kwargs.get('envelope_transaction_statuses', None))
        setattr(self, "_{}".format('folders'), kwargs.get('folders', None))
        setattr(self, "_{}".format('last_queried_date_time'), kwargs.get('last_queried_date_time', None))
        setattr(self, "_{}".format('next_uri'), kwargs.get('next_uri', None))
        setattr(self, "_{}".format('previous_uri'), kwargs.get('previous_uri', None))
        setattr(self, "_{}".format('result_set_size'), kwargs.get('result_set_size', None))
        setattr(self, "_{}".format('start_position'), kwargs.get('start_position', None))
        setattr(self, "_{}".format('total_set_size'), kwargs.get('total_set_size', None))

    @property
    def continuation_token(self):
        """Gets the continuation_token of this EnvelopesInformation.  # noqa: E501

        :return: The continuation_token of this EnvelopesInformation.  # noqa: E501
        :rtype: str
        """
        return self._continuation_token

    @continuation_token.setter
    def continuation_token(self, continuation_token):
        """Sets the continuation_token of this EnvelopesInformation.

        :param continuation_token: The continuation_token of this EnvelopesInformation.  # noqa: E501
        :type: str
        """
        self._continuation_token = continuation_token

    @property
    def end_position(self):
        """Gets the end_position of this EnvelopesInformation.  # noqa: E501

        The last position in the result set.  # noqa: E501

        :return: The end_position of this EnvelopesInformation.  # noqa: E501
        :rtype: str
        """
        return self._end_position

    @end_position.setter
    def end_position(self, end_position):
        """Sets the end_position of this EnvelopesInformation.

        The last position in the result set.  # noqa: E501

        :param end_position: The end_position of this EnvelopesInformation.  # noqa: E501
        :type: str
        """
        self._end_position = end_position

    @property
    def envelopes(self):
        """Gets the envelopes of this EnvelopesInformation.  # noqa: E501

        :return: The envelopes of this EnvelopesInformation.  # noqa: E501
        :rtype: list[Envelope]
        """
        return self._envelopes

    @envelopes.setter
    def envelopes(self, envelopes):
        """Sets the envelopes of this EnvelopesInformation.

        :param envelopes: The envelopes of this EnvelopesInformation.  # noqa: E501
        :type: list[Envelope]
        """
        self._envelopes = envelopes

    @property
    def envelope_transaction_statuses(self):
        """Gets the envelope_transaction_statuses of this EnvelopesInformation.  # noqa: E501

        :return: The envelope_transaction_statuses of this EnvelopesInformation.  # noqa: E501
        :rtype: list[EnvelopeTransactionStatus]
        """
        return self._envelope_transaction_statuses

    @envelope_transaction_statuses.setter
    def envelope_transaction_statuses(self, envelope_transaction_statuses):
        """Sets the envelope_transaction_statuses of this EnvelopesInformation.

        :param envelope_transaction_statuses: The envelope_transaction_statuses of this EnvelopesInformation.  # noqa: E501
        :type: list[EnvelopeTransactionStatus]
        """
        self._envelope_transaction_statuses = envelope_transaction_statuses

    @property
    def folders(self):
        """Gets the folders of this EnvelopesInformation.  # noqa: E501

        :return: The folders of this EnvelopesInformation.  # noqa: E501
        :rtype: list[Folder]
        """
        return self._folders

    @folders.setter
    def folders(self, folders):
        """Sets the folders of this EnvelopesInformation.

        :param folders: The folders of this EnvelopesInformation.  # noqa: E501
        :type: list[Folder]
        """
        self._folders = folders

    @property
    def last_queried_date_time(self):
        """Gets the last_queried_date_time of this EnvelopesInformation.  # noqa: E501

        :return: The last_queried_date_time of this EnvelopesInformation.  # noqa: E501
        :rtype: str
        """
        return self._last_queried_date_time

    @last_queried_date_time.setter
    def last_queried_date_time(self, last_queried_date_time):
        """Sets the last_queried_date_time of this EnvelopesInformation.

        :param last_queried_date_time: The last_queried_date_time of this EnvelopesInformation.  # noqa: E501
        :type: str
        """
        self._last_queried_date_time = last_queried_date_time

    @property
    def next_uri(self):
        """Gets the next_uri of this EnvelopesInformation.  # noqa: E501

        The URI to the next chunk of records based on the search request. If the endPosition is the entire results of the search, this is null.  # noqa: E501

        :return: The next_uri of this EnvelopesInformation.  # noqa: E501
        :rtype: str
        """
        return self._next_uri

    @next_uri.setter
    def next_uri(self, next_uri):
        """Sets the next_uri of this EnvelopesInformation.

        The URI to the next chunk of records based on the search request. If the endPosition is the entire results of the search, this is null.  # noqa: E501

        :param next_uri: The next_uri of this EnvelopesInformation.  # noqa: E501
        :type: str
        """
        self._next_uri = next_uri

    # NOTE(review): the generated description for previous_uri read "The
    # postal code for the billing address." -- a codegen copy/paste error.
    # Corrected below to describe the pagination URI (mirrors next_uri).
    @property
    def previous_uri(self):
        """Gets the previous_uri of this EnvelopesInformation.  # noqa: E501

        The URI to the previous chunk of records based on the search request.  # noqa: E501

        :return: The previous_uri of this EnvelopesInformation.  # noqa: E501
        :rtype: str
        """
        return self._previous_uri

    @previous_uri.setter
    def previous_uri(self, previous_uri):
        """Sets the previous_uri of this EnvelopesInformation.

        The URI to the previous chunk of records based on the search request.  # noqa: E501

        :param previous_uri: The previous_uri of this EnvelopesInformation.  # noqa: E501
        :type: str
        """
        self._previous_uri = previous_uri

    @property
    def result_set_size(self):
        """Gets the result_set_size of this EnvelopesInformation.  # noqa: E501

        The number of results returned in this response.  # noqa: E501

        :return: The result_set_size of this EnvelopesInformation.  # noqa: E501
        :rtype: str
        """
        return self._result_set_size

    @result_set_size.setter
    def result_set_size(self, result_set_size):
        """Sets the result_set_size of this EnvelopesInformation.

        The number of results returned in this response.  # noqa: E501

        :param result_set_size: The result_set_size of this EnvelopesInformation.  # noqa: E501
        :type: str
        """
        self._result_set_size = result_set_size

    @property
    def start_position(self):
        """Gets the start_position of this EnvelopesInformation.  # noqa: E501

        Starting position of the current result set.  # noqa: E501

        :return: The start_position of this EnvelopesInformation.  # noqa: E501
        :rtype: str
        """
        return self._start_position

    @start_position.setter
    def start_position(self, start_position):
        """Sets the start_position of this EnvelopesInformation.

        Starting position of the current result set.  # noqa: E501

        :param start_position: The start_position of this EnvelopesInformation.  # noqa: E501
        :type: str
        """
        self._start_position = start_position

    @property
    def total_set_size(self):
        """Gets the total_set_size of this EnvelopesInformation.  # noqa: E501

        The total number of items available in the result set. This will always be greater than or equal to the value of the property returning the results in the in the response.  # noqa: E501

        :return: The total_set_size of this EnvelopesInformation.  # noqa: E501
        :rtype: str
        """
        return self._total_set_size

    @total_set_size.setter
    def total_set_size(self, total_set_size):
        """Sets the total_set_size of this EnvelopesInformation.

        The total number of items available in the result set. This will always be greater than or equal to the value of the property returning the results in the in the response.  # noqa: E501

        :param total_set_size: The total_set_size of this EnvelopesInformation.  # noqa: E501
        :type: str
        """
        self._total_set_size = total_set_size

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(EnvelopesInformation, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, EnvelopesInformation):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, EnvelopesInformation):
            return True

        return self.to_dict() != other.to_dict()
| {
"content_hash": "6a1a16ba0fab164a71ef47093dd87325",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 193,
"avg_line_length": 33.72959183673469,
"alnum_prop": 0.614354863106943,
"repo_name": "docusign/docusign-python-client",
"id": "34ff8a172b6cc0020a490327e1c49e218b041a45",
"size": "13239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docusign_esign/models/envelopes_information.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9687716"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema migration: drop ``News.description`` and add ``News.edit_date``."""

    dependencies = [
        # Must be applied after the initial news schema is created.
        ('news', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='news',
            name='description',
        ),
        migrations.AddField(
            model_name='news',
            name='edit_date',
            # auto_now=True: the field is refreshed on every save.
            # verbose_name is a user-facing label in Ukrainian (roughly
            # "modification date") -- do not translate the literal.
            field=models.DateField(auto_now=True, verbose_name='Дата зміни'),
        ),
    ]
| {
"content_hash": "2d64ab27872f9e08e4f4ae5bfd5e3d36",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 77,
"avg_line_length": 22.545454545454547,
"alnum_prop": 0.5604838709677419,
"repo_name": "aodarc/tennis_club",
"id": "573431d196b3448ef5f16e7d6da8435d626cd4c8",
"size": "578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "news/migrations/0002_auto_20161023_1920.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "478738"
},
{
"name": "HTML",
"bytes": "119163"
},
{
"name": "JavaScript",
"bytes": "343787"
},
{
"name": "Python",
"bytes": "33741"
}
],
"symlink_target": ""
} |
"""Websocket API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import errno
import fnmatch
import glob
import heapq
import io
import json
import logging
import os
import re
import sqlite3
import threading
import time
import uuid
import tornado.websocket
import six
from six.moves import urllib_parse
from treadmill import dirwatch
from treadmill import utils
# Module-level logger shared by the handler factory and DirWatchPubSub.
_LOGGER = logging.getLogger(__name__)
def make_handler(pubsub):
    """Make websocket handler factory.

    Returns a `tornado.websocket.WebSocketHandler` subclass bound to the
    given `pubsub` (a `DirWatchPubSub` instance) that manages per-connection
    event subscriptions.
    """

    class _WS(tornado.websocket.WebSocketHandler):
        """Websocket handler managing pubsub event subscriptions."""

        def __init__(self, application, request, **kwargs):
            """Default constructor for tornado.websocket.WebSocketHandler"""
            tornado.websocket.WebSocketHandler.__init__(
                self, application, request, **kwargs
            )
            # Unique id correlating all log lines for this connection.
            self._request_id = str(uuid.uuid4())
            # Ids of subscriptions currently active on this connection.
            self._subscriptions = set()

        def active(self, sub_id=None):
            """Return true if connection (and optional subscription) is active,
            false otherwise.

            If connection is not active, so are all of its subscriptions.
            """
            if not self.ws_connection:
                return False
            return sub_id is None or sub_id in self._subscriptions

        def open(self, *args, **kwargs):
            """Called when connection is opened.

            Override if you want to do something else besides log the action.
            """
            _LOGGER.info('[%s] Connection opened, remote ip: %s',
                         self._request_id, self.request.remote_ip)

        def send_msg(self, msg):
            """Send message, logging (not raising) any delivery error."""
            _LOGGER.info('[%s] Sending message: %r', self._request_id, msg)
            try:
                self.write_message(msg)
            except Exception:  # pylint: disable=W0703
                # Best effort: the connection may already be gone.
                _LOGGER.exception('[%s] Error sending message: %r',
                                  self._request_id, msg)

        def send_error_msg(self, error_str, sub_id=None, close_conn=True):
            """Convenience method for logging and returning errors.

            If sub_id is provided, it will be included in the error message
            and the subscription will be removed.

            Note: this method will close the connection after sending back
            the error, unless close_conn=False.
            """
            error_msg = {'_error': error_str,
                         'when': time.time()}
            if sub_id is not None:
                error_msg['sub-id'] = sub_id
                _LOGGER.info('[%s] Removing subscription %s',
                             self._request_id, sub_id)
                try:
                    self._subscriptions.remove(sub_id)
                except KeyError:
                    pass
            self.send_msg(error_msg)
            if close_conn:
                _LOGGER.info('[%s] Closing connection.', self._request_id)
                self.close()

        def on_close(self):
            """Called when connection is closed.

            Override if you want to do something else besides log the action.
            """
            _LOGGER.info('[%s] Connection closed.', self._request_id)

        def check_origin(self, origin):
            """Overriding check_origin method from base class.

            This method returns true all the time (cross-origin allowed).
            """
            parsed_origin = urllib_parse.urlparse(origin)
            _LOGGER.debug('parsed_origin: %r', parsed_origin)
            return True

        def on_message(self, message):
            """Manage event subscriptions."""
            if not pubsub:
                _LOGGER.fatal('pubsub is not configured, ignore.')
                self.send_error_msg('Fatal: unexpected error', close_conn=True)
                # BUG FIX: must stop here. The original fell through and
                # dereferenced the missing pubsub below, raising a second,
                # spurious error after the connection was already closed.
                return
            _LOGGER.info('[%s] Received message: %s',
                         self._request_id, message)
            sub_id = None
            close_conn = True
            try:
                sub_msg = json.loads(message)
                sub_id = sub_msg.get('sub-id')
                # Errors on an identified subscription keep the connection
                # open; anonymous requests close it on error.
                close_conn = sub_id is None
                if sub_msg.get('unsubscribe') is True:
                    _LOGGER.info('[%s] Unsubscribing %s',
                                 self._request_id, sub_id)
                    try:
                        self._subscriptions.remove(sub_id)
                    except KeyError:
                        self.send_error_msg(
                            'Invalid subscription: %s' % sub_id,
                            close_conn=False
                        )
                    return
                if sub_id and sub_id in self._subscriptions:
                    self.send_error_msg(
                        'Subscription already exists: %s' % sub_id,
                        close_conn=False
                    )
                    return
                topic = sub_msg.get('topic')
                impl = pubsub.impl.get(topic)
                if not impl:
                    self.send_error_msg(
                        'Invalid topic: %s' % topic,
                        sub_id=sub_id, close_conn=close_conn
                    )
                    return
                subscription = impl.subscribe(sub_msg)
                since = sub_msg.get('since', 0)
                snapshot = sub_msg.get('snapshot', False)
                # Snapshot requests are one-shot: no persistent subscription.
                if sub_id and not snapshot:
                    _LOGGER.info('[%s] Adding subscription %s',
                                 self._request_id, sub_id)
                    self._subscriptions.add(sub_id)
                for watch, pattern in subscription:
                    pubsub.register(watch, pattern, self, impl, since, sub_id)
                if snapshot and close_conn:
                    _LOGGER.info('[%s] Closing connection.', self._request_id)
                    self.close()
            except Exception as err:  # pylint: disable=W0703
                self.send_error_msg(str(err),
                                    sub_id=sub_id, close_conn=close_conn)

        def data_received(self, chunk):
            """Passthrough of abstract method data_received"""
            pass

        def on_event(self, filename, operation, _content):
            """Default event handler."""
            _LOGGER.debug('%s %s', filename, operation)
            return {'time': time.time(),
                    'filename': filename,
                    'op': operation}

    return _WS
class DirWatchPubSub(object):
    """Pubsub dirwatch events.

    Bridges a `dirwatch.DirWatcher` to websocket handlers: filesystem
    create/modify/delete events under `root` are matched against registered
    (directory, pattern) subscriptions and forwarded as messages.
    """
    def __init__(self, root, impl=None, watches=None):
        # impl: mapping topic -> implementation object (provides subscribe /
        # on_event, optionally sow attributes).  watches: glob patterns of
        # permanently watched directories.
        self.root = os.path.realpath(root)
        self.impl = impl or {}
        self.watches = watches or []
        self.watcher = dirwatch.DirWatcher()
        self.watcher.on_created = self._on_created
        self.watcher.on_deleted = self._on_deleted
        self.watcher.on_modified = self._on_modified
        # Permanent watch dirs - never removed by _gc().
        self.watch_dirs = set()
        for watch in self.watches:
            watch_dirs = self._get_watch_dirs(watch)
            self.watch_dirs.update(watch_dirs)
        for directory in self.watch_dirs:
            _LOGGER.info('Added permanent dir watcher: %s', directory)
            self.watcher.add_dir(directory)
        # Websocket handler class bound to this pubsub instance.
        self.ws = make_handler(self)
        # directory -> list of (pattern_re, ws_handler, impl, sub_id).
        self.handlers = collections.defaultdict(list)
    def register(self, watch, pattern, ws_handler, impl, since, sub_id=None):
        """Register handler with pattern.

        Adds dir watches for every directory matching the `watch` glob,
        records the subscription, then replays state-of-the-world events
        newer than `since` to the handler.
        """
        watch_dirs = self._get_watch_dirs(watch)
        for directory in watch_dirs:
            if ((not self.handlers[directory] and
                 directory not in self.watch_dirs)):
                _LOGGER.info('Added dir watcher: %s', directory)
                self.watcher.add_dir(directory)
            # Store pattern as precompiled regex.
            pattern_re = re.compile(
                fnmatch.translate(pattern)
            )
            self.handlers[directory].append(
                (pattern_re, ws_handler, impl, sub_id)
            )
        self._sow(watch, pattern, since, ws_handler, impl, sub_id=sub_id)
    def _get_watch_dirs(self, watch):
        # Expand the watch glob (relative to root) to existing directories.
        pathname = os.path.realpath(os.path.join(self.root, watch.lstrip('/')))
        return [path for path in glob.glob(pathname) if os.path.isdir(path)]
    @utils.exit_on_unhandled
    def _on_created(self, path):
        """On file created callback."""
        _LOGGER.debug('created: %s', path)
        self._handle('c', path)
    @utils.exit_on_unhandled
    def _on_modified(self, path):
        """On file modified callback."""
        _LOGGER.debug('modified: %s', path)
        self._handle('m', path)
    @utils.exit_on_unhandled
    def _on_deleted(self, path):
        """On file deleted callback."""
        _LOGGER.debug('deleted: %s', path)
        self._handle('d', path)
    def _handle(self, operation, path):
        """Get event data and notify interested handlers of the change.

        `operation` is 'c' (created), 'm' (modified) or 'd' (deleted).
        """
        directory, filename = os.path.split(path)
        # Ignore (.) files, as they are temporary or "system".
        if filename[0] == '.':
            return
        directory_handlers = self.handlers.get(directory, [])
        # Only live handlers whose pattern matches this file are notified.
        handlers = [
            (handler, impl, sub_id)
            for pattern_re, handler, impl, sub_id in directory_handlers
            if (handler.active(sub_id=sub_id) and
                pattern_re.match(filename))
        ]
        if not handlers:
            return
        if operation == 'd':
            when = time.time()
            content = None
        else:
            if '/trace/' in path:
                # Specialized handling of trace files (no need to stat/read).
                # If file was already deleted (trace cleanup), don't ignore it.
                _, timestamp, _ = filename.split(',', 2)
                when, content = float(timestamp), ''
            else:
                try:
                    when = os.stat(path).st_mtime
                    with io.open(path) as f:
                        content = f.read()
                except (IOError, OSError) as err:
                    if err.errno == errno.ENOENT:
                        # If file was already deleted, ignore.
                        # It will be handled as 'd'.
                        return
                    raise
        self._notify(handlers, path, operation, content, when)
    def _notify(self, handlers, path, operation, content, when):
        """Notify interested handlers of the change."""
        root_len = len(self.root)
        for handler, impl, sub_id in handlers:
            try:
                # The impl turns the raw event into a message payload;
                # None means "not interesting, skip".
                payload = impl.on_event(path[root_len:],
                                        operation,
                                        content)
                if payload is not None:
                    payload['when'] = when
                    if sub_id is not None:
                        payload['sub-id'] = sub_id
                    handler.send_msg(payload)
            except Exception as err:  # pylint: disable=broad-except
                _LOGGER.exception('Error handling event: %s, %s, %s, %s, %s',
                                  path, operation, content, when, sub_id)
                handler.send_error_msg(
                    '{cls}: {err}'.format(
                        cls=type(err).__name__,
                        err=str(err)
                    ),
                    sub_id=sub_id,
                    close_conn=sub_id is None
                )
    def _db_records(self, db_path, sow_table, watch, pattern, since):
        """Get matching records from db.

        Returns (connection, cursor) or (None, None) when the db file is
        missing or unusable; the caller is responsible for closing the
        returned connection.
        """
        # if file does not exist, do not try to open it. Opening connection
        # will create the file, there is no way to prevent this from
        # happening until py3.
        #
        if not os.path.exists(db_path):
            _LOGGER.info('Ignore deleted db: %s', db_path)
            return (None, None)
        # There is rare condition that the db file is deleted HERE. In this
        # case connection will be open, but the tables will not be there.
        conn = sqlite3.connect(db_path)
        # Before Python 3.7 GLOB pattern must not be parametrized to use index.
        select_stmt = """
            SELECT timestamp, path, data FROM %s
            WHERE directory GLOB ? AND name GLOB '%s' AND timestamp >= ?
            ORDER BY timestamp
        """ % (sow_table, pattern)
        # Return open connection, as conn.execute is cursor iterator, not
        # materialized list.
        try:
            return conn, conn.execute(select_stmt, (watch, since,))
        except sqlite3.OperationalError as db_err:
            # Not sure if the file needs to be deleted at this point. As
            # sow_table is a parameter, passing non-existing table can cause
            # legit file to be deleted.
            _LOGGER.info('Unable to execute: select from %s:%s ..., %s',
                         db_path, sow_table, str(db_err))
            conn.close()
            return (None, None)
    def _sow(self, watch, pattern, since, handler, impl, sub_id=None):
        """Publish state of the world.

        Merges records from the impl's sow databases (if any) with a
        filesystem scan and replays them to `handler` in timestamp order,
        de-duplicating by path.
        """
        if since is None:
            since = 0
        def _publish(item):
            # item is a (when, path, content) tuple.
            when, path, content = item
            try:
                payload = impl.on_event(str(path), None, content)
                if payload is not None:
                    payload['when'] = when
                    if sub_id is not None:
                        payload['sub-id'] = sub_id
                    handler.send_msg(payload)
            except Exception as err:  # pylint: disable=W0703
                _LOGGER.exception('Error handling sow event: %s, %s, %s, %s',
                                  path, content, when, sub_id)
                handler.send_error_msg(str(err), sub_id=sub_id)
        db_connections = []
        fs_records = self._get_fs_sow(watch, pattern, since)
        sow = getattr(impl, 'sow', None)
        sow_table = getattr(impl, 'sow_table', 'sow')
        try:
            records = []
            if sow:
                dbs = sorted(glob.glob(os.path.join(self.root, sow, '*')))
                for db in dbs:
                    # Skip dot (temporary/system) db files.
                    if os.path.basename(db).startswith('.'):
                        continue
                    conn, db_cursor = self._db_records(
                        db, sow_table, watch, pattern, since
                    )
                    if db_cursor:
                        records.append(db_cursor)
                    # FIXME: Figure out pylint use before assign
                    #
                    # pylint: disable=E0601
                    if conn:
                        db_connections.append(conn)
            records.append(fs_records)
            # Merge db and fs records, removing duplicates.
            prev_path = None
            for item in heapq.merge(*records):
                _when, path, _content = item
                if path == prev_path:
                    continue
                prev_path = path
                _publish(item)
        finally:
            # Cursors iterate lazily, so connections stay open until the
            # merge above is fully consumed.
            for conn in db_connections:
                if conn:
                    conn.close()
    def _get_fs_sow(self, watch, pattern, since):
        """Get state of the world from filesystem."""
        root_len = len(self.root)
        fs_glob = os.path.join(self.root, watch.lstrip('/'), pattern)
        files = glob.glob(fs_glob)
        items = []
        for filename in files:
            try:
                stat = os.stat(filename)
                with io.open(filename) as f:
                    content = f.read()
                if stat.st_mtime >= since:
                    path, when = filename[root_len:], stat.st_mtime
                    items.append((when, path, content))
            except (IOError, OSError) as err:
                # Ignore deleted files.
                if err.errno != errno.ENOENT:
                    raise
        return sorted(items)
    def _gc(self):
        """Remove disconnected websocket handlers."""
        for directory in list(six.viewkeys(self.handlers)):
            # Keep only handlers whose connection/subscription is live.
            handlers = [
                (pattern, handler, impl, sub_id)
                for pattern, handler, impl, sub_id in self.handlers[directory]
                if handler.active(sub_id=sub_id)
            ]
            _LOGGER.info('Number of active handlers for %s: %s',
                         directory, len(handlers))
            if not handlers:
                _LOGGER.info('No active handlers for %s', directory)
                self.handlers.pop(directory, None)
                if directory not in self.watch_dirs:
                    # Watch is not permanent, remove dir from watcher.
                    self.watcher.remove_dir(directory)
            else:
                self.handlers[directory] = handlers
    @utils.exit_on_unhandled
    def run(self, once=False):
        """Run event loop.

        With once=True, process at most one batch of events and return.
        """
        last_gc = time.time()
        while True:
            wait_interval = 10
            if once:
                wait_interval = 0
            if self.watcher.wait_for_events(wait_interval):
                self.watcher.process_events()
            # Periodically drop dead handlers and their dir watches.
            if (time.time() - last_gc) >= wait_interval:
                self._gc()
                last_gc = time.time()
            if once:
                break
    @utils.exit_on_unhandled
    def run_detached(self):
        """Run event loop in separate thread."""
        event_thread = threading.Thread(target=self.run)
        # Daemon thread: does not block interpreter exit.
        event_thread.daemon = True
        event_thread.start()
| {
"content_hash": "0b513ac3bf075f4c821d24bab1f3aed4",
"timestamp": "",
"source": "github",
"line_count": 488,
"max_line_length": 79,
"avg_line_length": 36.209016393442624,
"alnum_prop": 0.509564233163554,
"repo_name": "bretttegart/treadmill",
"id": "c272cd8c57c0e6d848e83a50d2c6921df01f2582",
"size": "17670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python/treadmill/websocket/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3656"
},
{
"name": "Python",
"bytes": "2975485"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "56911"
}
],
"symlink_target": ""
} |
from prancercise import prancercise
dash = "-"
print dash*10 + prancercise("say_hi_to", "-") + dash*10 | {
"content_hash": "3b2973db11d68060dfe014714889161a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 55,
"avg_line_length": 21.8,
"alnum_prop": 0.6513761467889908,
"repo_name": "chrisortman/CIS-121",
"id": "7bc13dd34ddec0899785d9b382025316e26cc428",
"size": "109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "k0776243/assignment06.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "413801"
}
],
"symlink_target": ""
} |
"""Utilities for generting GRR keys as part of config_updater run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from grr_response_core import config as grr_config
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_server import key_utils
class Error(Exception):
  """Base class for module-specific errors raised during key generation."""
class OpenSourceKeyUtilsRequiredError(Error):
  """Raised when the open-source key_utils implementation is not found."""
class KeysAlreadyExistError(Error):
  """Raised when keys we're about to generate are already present."""
def _GenerateCSRFKey(config):
"""Update a config with a random csrf key."""
secret_key = config.Get("AdminUI.csrf_secret_key", None)
if not secret_key:
key = utils.GeneratePassphrase(length=100)
config.Set("AdminUI.csrf_secret_key", key)
else:
print("Not updating csrf key as it is already set.")
def GenerateKeys(config, overwrite_keys=False):
  """Generate the keys we need for a GRR server.

  Creates the executable-signing key, the CA key/cert, the frontend server
  key/cert, and a CSRF secret, writing all of them into `config`.

  Args:
    config: writable GRR config object.
    overwrite_keys: when True, regenerate even if a server key exists.

  Raises:
    OpenSourceKeyUtilsRequiredError: key_utils lacks MakeCACert.
    KeysAlreadyExistError: keys present and overwrite_keys is False.
  """
  if not hasattr(key_utils, "MakeCACert"):
    raise OpenSourceKeyUtilsRequiredError(
        "Generate keys can only run with open source key_utils.")
  if (config.Get("PrivateKeys.server_key", default=None) and
      not overwrite_keys):
    print(config.Get("PrivateKeys.server_key"))
    raise KeysAlreadyExistError(
        "Config %s already has keys, use --overwrite_keys to "
        "override." % config.parser)
  key_bits = grr_config.CONFIG["Server.rsa_key_length"]
  print("All keys will have a bit length of %d." % key_bits)
  print("Generating executable signing key")
  signing_key = rdf_crypto.RSAPrivateKey.GenerateKey(bits=key_bits)
  config.Set("PrivateKeys.executable_signing_private_key",
             signing_key.AsPEM().decode("ascii"))
  config.Set("Client.executable_signing_public_key",
             signing_key.GetPublicKey().AsPEM().decode("ascii"))
  print("Generating CA keys")
  ca_private_key = rdf_crypto.RSAPrivateKey.GenerateKey(bits=key_bits)
  ca_certificate = key_utils.MakeCACert(ca_private_key)
  config.Set("CA.certificate", ca_certificate.AsPEM().decode("ascii"))
  config.Set("PrivateKeys.ca_key", ca_private_key.AsPEM().decode("ascii"))
  print("Generating Server keys")
  frontend_key = rdf_crypto.RSAPrivateKey.GenerateKey(bits=key_bits)
  frontend_cert = key_utils.MakeCASignedCert(
      u"grr", frontend_key, ca_certificate, ca_private_key)
  config.Set("Frontend.certificate", frontend_cert.AsPEM().decode("ascii"))
  config.Set("PrivateKeys.server_key", frontend_key.AsPEM().decode("ascii"))
  print("Generating secret key for csrf protection.")
  _GenerateCSRFKey(config)
| {
"content_hash": "e473dc4ca39ebdccb0fbf4b4fcc5e07b",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 38.94202898550725,
"alnum_prop": 0.7260885746185337,
"repo_name": "dunkhong/grr",
"id": "8c6502c8eff6d38242648746128b760fb030c9e7",
"size": "2709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/server/grr_response_server/bin/config_updater_keys_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "882"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "36745"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "193751"
},
{
"name": "JavaScript",
"bytes": "12795"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7430923"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "49155"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "10560"
},
{
"name": "TypeScript",
"bytes": "56756"
}
],
"symlink_target": ""
} |
import unittest
from unittest.mock import patch
from ..exceptions import PapermillRateLimitException
from ..iorw import GCSHandler, fallback_gs_is_retriable
# NOTE(review): the nested try blocks track renames of the gcsfs error
# class across releases, newest location first; the outermost fallback
# handles gcsfs not being installed at all.
try:
    try:
        try:
            from gcsfs.retry import HttpError as GCSHttpError
        except ImportError:
            from gcsfs.utils import HttpError as GCSHttpError
    except ImportError:
        from gcsfs.utils import HtmlError as GCSHttpError
except ImportError:
    # Fall back to a sane import if gcsfs is missing
    GCSHttpError = Exception
# RateLimitException only exists in newer gcsfs releases.
try:
    from gcsfs.utils import RateLimitException as GCSRateLimitException
except ImportError:
    # Fall back to GCSHttpError when using older library
    GCSRateLimitException = GCSHttpError
def mock_gcs_fs_wrapper(exception=None, max_raises=1):
    """Return a fake GCSFileSystem class.

    Every open() on an instance yields the same shared MockGCSFile, so
    read/write call counts persist across open() calls; the file raises
    `exception` for its first `max_raises` operations.
    """
    class MockGCSFileSystem(object):
        def __init__(self):
            # A single shared file object so counters survive re-opens.
            self._shared_file = MockGCSFile(exception, max_raises)

        def ls(self, *args, **kwargs):
            return []

        def open(self, *args, **kwargs):
            return self._shared_file

    return MockGCSFileSystem
class MockGCSFile(object):
    """Fake GCS file object that counts operations.

    Raises `exception` for the first `max_raises` calls of each of read()
    and write(), then succeeds, returning the running call count.
    """
    def __init__(self, exception=None, max_raises=1):
        self.read_count = 0
        self.write_count = 0
        self.exception = exception
        self.max_raises = max_raises

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        pass

    def _count_or_raise(self, attr):
        # Shared fail-then-succeed bookkeeping for read/write.
        count = getattr(self, attr) + 1
        setattr(self, attr, count)
        if self.exception and count <= self.max_raises:
            raise self.exception
        return count

    def read(self):
        return self._count_or_raise('read_count')

    def write(self, buf):
        return self._count_or_raise('write_count')
class GCSTest(unittest.TestCase):
    """Tests for `GCS`."""
    def setUp(self):
        # Fresh handler per test; its gcsfs client is created lazily.
        self.gcs_handler = GCSHandler()
    @patch('papermill.iorw.GCSFileSystem', side_effect=mock_gcs_fs_wrapper())
    def test_gcs_read(self, mock_gcs_filesystem):
        client = self.gcs_handler._get_client()
        self.assertEqual(self.gcs_handler.read('gs://bucket/test.ipynb'), 1)
        # Check that client is only generated once
        self.assertIs(client, self.gcs_handler._get_client())
    @patch('papermill.iorw.GCSFileSystem', side_effect=mock_gcs_fs_wrapper())
    def test_gcs_write(self, mock_gcs_filesystem):
        client = self.gcs_handler._get_client()
        self.assertEqual(self.gcs_handler.write('new value', 'gs://bucket/test.ipynb'), 1)
        # Check that client is only generated once
        self.assertIs(client, self.gcs_handler._get_client())
    @patch('papermill.iorw.GCSFileSystem', side_effect=mock_gcs_fs_wrapper())
    def test_gcs_listdir(self, mock_gcs_filesystem):
        client = self.gcs_handler._get_client()
        self.gcs_handler.listdir('testdir')
        # Check that client is only generated once
        self.assertIs(client, self.gcs_handler._get_client())
    # The mock raises the rate-limit error 10 times, so the handler must
    # exhaust its retries and surface PapermillRateLimitException.
    @patch(
        'papermill.iorw.GCSFileSystem',
        side_effect=mock_gcs_fs_wrapper(
            GCSRateLimitException({"message": "test", "code": 429}), 10
        ),
    )
    def test_gcs_handle_exception(self, mock_gcs_filesystem):
        # Zero all retry delays so the test runs instantly.
        with patch.object(GCSHandler, 'RETRY_DELAY', 0):
            with patch.object(GCSHandler, 'RETRY_MULTIPLIER', 0):
                with patch.object(GCSHandler, 'RETRY_MAX_DELAY', 0):
                    with self.assertRaises(PapermillRateLimitException):
                        self.gcs_handler.write('raise_limit_exception', 'gs://bucket/test.ipynb')
    # One failure then success: the write succeeds on the second call
    # (MockGCSFile returns its write_count, hence 2).
    @patch(
        'papermill.iorw.GCSFileSystem',
        side_effect=mock_gcs_fs_wrapper(GCSRateLimitException({"message": "test", "code": 429}), 1),
    )
    def test_gcs_retry(self, mock_gcs_filesystem):
        with patch.object(GCSHandler, 'RETRY_DELAY', 0):
            with patch.object(GCSHandler, 'RETRY_MULTIPLIER', 0):
                with patch.object(GCSHandler, 'RETRY_MAX_DELAY', 0):
                    self.assertEqual(
                        self.gcs_handler.write('raise_limit_exception', 'gs://bucket/test.ipynb'), 2
                    )
    # Same retry behavior when the older GCSHttpError type is raised.
    @patch(
        'papermill.iorw.GCSFileSystem',
        side_effect=mock_gcs_fs_wrapper(GCSHttpError({"message": "test", "code": 429}), 1),
    )
    def test_gcs_retry_older_exception(self, mock_gcs_filesystem):
        with patch.object(GCSHandler, 'RETRY_DELAY', 0):
            with patch.object(GCSHandler, 'RETRY_MULTIPLIER', 0):
                with patch.object(GCSHandler, 'RETRY_MAX_DELAY', 0):
                    self.assertEqual(
                        self.gcs_handler.write('raise_limit_exception', 'gs://bucket/test.ipynb'), 2
                    )
    # With the fallback retriability check, an error without a code is
    # still retried once.
    @patch('papermill.iorw.gs_is_retriable', side_effect=fallback_gs_is_retriable)
    @patch(
        'papermill.iorw.GCSFileSystem',
        side_effect=mock_gcs_fs_wrapper(
            GCSRateLimitException({"message": "test", "code": None}), 1
        ),
    )
    def test_gcs_fallback_retry_unknown_failure_code(self, mock_gcs_filesystem, mock_gcs_retriable):
        with patch.object(GCSHandler, 'RETRY_DELAY', 0):
            with patch.object(GCSHandler, 'RETRY_MULTIPLIER', 0):
                with patch.object(GCSHandler, 'RETRY_MAX_DELAY', 0):
                    self.assertEqual(
                        self.gcs_handler.write('raise_limit_exception', 'gs://bucket/test.ipynb'), 2
                    )
    # A non-retriable code propagates the original exception.
    @patch('papermill.iorw.gs_is_retriable', return_value=False)
    @patch(
        'papermill.iorw.GCSFileSystem',
        side_effect=mock_gcs_fs_wrapper(GCSRateLimitException({"message": "test", "code": 500}), 1),
    )
    def test_gcs_invalid_code(self, mock_gcs_filesystem, mock_gcs_retriable):
        with self.assertRaises(GCSRateLimitException):
            self.gcs_handler.write('fatal_exception', 'gs://bucket/test.ipynb')
    @patch('papermill.iorw.gs_is_retriable', side_effect=fallback_gs_is_retriable)
    @patch(
        'papermill.iorw.GCSFileSystem',
        side_effect=mock_gcs_fs_wrapper(GCSRateLimitException({"message": "test", "code": 500}), 1),
    )
    def test_fallback_gcs_invalid_code(self, mock_gcs_filesystem, mock_gcs_retriable):
        with self.assertRaises(GCSRateLimitException):
            self.gcs_handler.write('fatal_exception', 'gs://bucket/test.ipynb')
    # Exceptions unrelated to rate limiting are never retried.
    @patch(
        'papermill.iorw.GCSFileSystem',
        side_effect=mock_gcs_fs_wrapper(ValueError("not-a-retry"), 1),
    )
    def test_gcs_unretryable(self, mock_gcs_filesystem):
        with self.assertRaises(ValueError):
            self.gcs_handler.write('no_a_rate_limit', 'gs://bucket/test.ipynb')
| {
"content_hash": "3a85e17d4bc2ff13accb23dd82ac19bd",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 100,
"avg_line_length": 39.25443786982248,
"alnum_prop": 0.6296352125414532,
"repo_name": "nteract/papermill",
"id": "d999059903436f367849055e2fa9f820d5b2c12c",
"size": "6634",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "papermill/tests/test_gcs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "46018"
},
{
"name": "Python",
"bytes": "248159"
},
{
"name": "Shell",
"bytes": "135"
}
],
"symlink_target": ""
} |
import os
import sys
import shutil
import datetime
import numpy as np
import cPickle
import gzip
#from profilehooks import profile
from keras.layers.recurrent_xd import RLSTM, ReducedLSTM, ReducedLSTMA, ReducedLSTMB
from keras.optimizers import RMSprop
from keras.utils.train_utils import *
from data import load_data, load_data2, load_data4, segment_data
from errors import *
from dataset import *
from config import model_savedir
try:
from collections import OrderedDict
except:
from ordereddict import OrderedDict
from constants import *
def get_train_result(model):
datetime_str = datetime.datetime.today().strftime('%Y-%m-%d %H:%M')
return '%s %2d %7.1f %7.1f %4.2f %4.2f %4.2f %4.2f %4.2f %4.2f %4.2f %4.2f %4.2f %4.2f %4.2f %4.2f %4.1f %4.1f' % \
((datetime_str, model.epoch, model.train_result[0]) + tuple(model.valid_result))
# model.valid_result[0], # mse
# model.valid_result[1], model.valid_result[2], model.valid_result[3],
# model.valid_result[4], model.valid_result[5], model.valid_result[6],
# model.valid_result[7], model.valid_result[8], model.valid_result[9],
def estimate_early_stop_epoch(name, mov_avg_len=3):
    """Estimate an early-stopping epoch for model `name` from its log.

    Averages the epoch column over the last `mov_avg_len` logged runs
    (or over all runs when fewer exist) and rounds to the nearest int.
    """
    log_file = name + '.log'
    with open(model_savedir + log_file) as f:
        # Field 2 of each line is the epoch (the log format is
        # "date time epoch ..."; see get_train_result).
        epochs = [int(line.split()[2]) for line in f.readlines()]
    print 'epochs =', epochs
    if len(epochs) < mov_avg_len:
        epoch = sum(epochs) * 1. / len(epochs)
    else:
        epoch = sum(epochs[-mov_avg_len:]) * 1. / mov_avg_len
    print 'epoch =', epoch
    return int(round(epoch))
def train_model(name, is_city=False, latest=True):
    """Train and log an RLSTM pm2.5 model for one city or region.

    :param name: city name (key into city2stations) when is_city is True,
        otherwise a region name (key into area2lonlat).
    :param is_city: selects station-list vs lon/lat-box data loading and
        slightly different training hyperparameters.
    :param latest: NOTE(review): unused in this function.
    """
    # heating=False only for shanghai and the southern/eastern regions -
    # presumably tied to the central-heating season; confirm in load_data4.
    if is_city:
        if name == 'shanghai':
            train_data, valid_data = load_data4(stations=city2stations[name],
                                        heating=False,
                                        filter=True)
        else:
            train_data, valid_data = load_data4(stations=city2stations[name],
                                        heating=True,
                                        filter=True)
    else:
        if name == 'huabei' or name == 'dongbei':
            train_data, valid_data = load_data4(lon_range=area2lonlat[name][0], lat_range=area2lonlat[name][1],
                                        heating=True,
                                        filter=True)
        else:
            train_data, valid_data = load_data4(lon_range=area2lonlat[name][0], lat_range=area2lonlat[name][1],
                                        heating=False,
                                        filter=True)
    X_train, y_train, X_valid, y_valid = build_lstm_dataset(train_data, valid_data, pred_range=pred_range, hist_len=3)
    print 'X_train[0].shape =', X_train[0].shape
    rlstm = build_rlstm(X_train[0].shape[-1], h0_dim=20, h1_dim=20,
                rec_layer_init='zero', fix_b_f=is_city, base_name=name,
                add_input_noise=is_city, add_target_noise=False)
    rlstm.name = name + '_valid'
    rlstm.data = [train_data, valid_data]
    # Mask out the last input feature (pm25 mean) for city models.
    rlstm.X_mask = np.ones((X_train[0].shape[-1],), dtype='int')
    rlstm.X_mask[-1:] = not is_city  # pm25 mean
    print '\ntraining', rlstm.name
    X_train[0], X_valid[0] = normalize(X_train[0], X_valid[0], rlstm)
    rlstm.save_normalization_info(model_savedir + rlstm.name + '_norm_info.pkl')
    # Regions train with batch 128 / patience 10; cities with 64 / 20.
    batch_size = (1 + int(not is_city)) * 64
    patience = 10 + int(is_city) * 10
    train(X_train, y_train, X_valid, y_valid, rlstm, batch_size=batch_size, patience=patience, nb_epoch=300)
    # Append the one-line result summary to the per-model log.
    result_str = get_train_result(rlstm)
    print 'result_str =', result_str
    with open(model_savedir + name + '.log', 'a') as f:
        f.write(result_str + '\n')
    # epoch = estimate_early_stop_epoch(name)
    #
    # X_train, y_train, X_valid, y_valid = build_lstm_dataset(train_data2, valid_data, pred_range=pred_range, hist_len=3)
    # print 'X_train[0].shape =', X_train[0].shape
    # rlstm = build_rlstm(X_train[0].shape[-1], h0_dim=20, h1_dim=20,
    #             rec_layer_init='zero', fix_b_f=is_city, base_name=name,
    #             add_input_noise=is_city, add_target_noise=False)
    # rlstm.name = name
    # rlstm.data = [train_data, valid_data]
    #
    # rlstm.X_mask = np.ones((X_train[0].shape[-1],), dtype='int')
    # rlstm.X_mask[-1:] = not is_city  # pm25 mean
    #
    # print '\ntraining', rlstm.name
    # X_train[0], X_valid[0] = normalize(X_train[0], X_valid[0], rlstm)
    # rlstm.save_normalization_info(model_savedir + rlstm.name + '_norm_info.pkl')
    # batch_size = (1 + int(not is_city)) * 64
    # patience = 10 + int(is_city) * 10
    # train(X_train, y_train, X_valid, y_valid, rlstm, batch_size=batch_size, patience=patience, nb_epoch=epoch)
if __name__ == '__main__':
    # Train city-level models (station lists) and region-level models
    # (lon/lat bounding boxes).
    train_model('beijing', is_city=True)
    train_model('dongbei')
    train_model('huabei')
    train_model('xibei')
    train_model('huadong')
    train_model('huaxi')
    train_model('huanan')
    train_model('tianjin', is_city=True)
    train_model('shanghai', is_city=True)
    #rsync
    # Distribute the freshly trained model files to the serving machines.
    machine_list = [
        "10.144.246.254",
        "inner.wrapper2.api.caiyunapp.com",
        "10.174.213.150", "10.251.17.17",
        "10.165.41.213"
    ]
    print "rsync start"
    for machine in machine_list :
        os.system('rsync -av /ldata/pm25data/pm25model/rlstm/* caiyun@'+machine+':/ldata/pm25data/pm25model/rlstm/')
    print "rsync finished"
| {
"content_hash": "bccf85825091272fb774895e1ae66675",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 132,
"avg_line_length": 43.31297709923664,
"alnum_prop": 0.5653859710962285,
"repo_name": "xiaoda99/keras",
"id": "fb5c5f951f1f423afa6cd41021514eced3a52943",
"size": "5696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/pm25/train.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "453514"
}
],
"symlink_target": ""
} |
# Public API of this module (a scipy.optimize-style optimizer collection).
__all__ = ['fmin', 'fmin_powell','fmin_bfgs', 'fmin_ncg', 'fmin_cg',
           'fminbound','brent', 'golden','bracket','rosen','rosen_der',
           'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
           'line_search', 'check_grad']
# Docstrings in this module use reStructuredText markup.
__docformat__ = "restructuredtext en"
import numpy
from numpy import atleast_1d, eye, mgrid, argmin, zeros, shape, empty, \
squeeze, vectorize, asarray, absolute, sqrt, Inf, asfarray, isinf
try:
import linesearch # from SciPy
except ImportError:
linesearch = None
# These have been copied from Numeric's MLab.py
# I don't think they made the transition to scipy_core
def max(m,axis=0):
    """max(m,axis=0) returns the maximum of m along dimension axis.

    Intentionally shadows the builtin ``max`` (Numeric MLab compatibility).
    """
    return numpy.amax(asarray(m), axis)
def min(m,axis=0):
    """min(m,axis=0) returns the minimum of m along dimension axis.

    Intentionally shadows the builtin ``min`` (Numeric MLab compatibility).
    """
    return numpy.amin(asarray(m), axis)
def is_array_scalar(x):
    """Test whether `x` is either a scalar or an array scalar.
    """
    # BUG FIX: the original returned ``len(atleast_1d(x) == 1)`` -- the
    # length of a boolean comparison array, which is truthy for ANY
    # non-empty input.  The intended check is that x holds exactly one
    # element (scipy's fix for this same bug uses numpy.size).
    return numpy.size(x) == 1
# Rebind ``abs`` to numpy's elementwise absolute so it accepts arrays.
abs = absolute
import __builtin__
# ``max``/``min`` above shadow the builtins; keep handles to the real ones.
pymin = __builtin__.min
pymax = __builtin__.max
__version__ = "1.5.3"
# Default step for finite-difference derivatives: sqrt(machine epsilon).
_epsilon = sqrt(numpy.finfo(float).eps)
__maintainer__ = "Peter Maxwell"
__email__ = "pm67nz@gmail.com"
__status__ = "Production"
def vecnorm(x, ord=2):
    """Return the `ord`-norm of vector x; ord may be +/-Inf for the
    max/min absolute-value norms."""
    magnitudes = abs(x)
    if ord == Inf:
        return numpy.amax(magnitudes)
    if ord == -Inf:
        return numpy.amin(magnitudes)
    return numpy.sum(magnitudes ** ord, axis=0) ** (1.0 / ord)
def rosen(x):  # The Rosenbrock function
    """Evaluate the Rosenbrock function at x (summed over adjacent pairs)."""
    x = asarray(x)
    leading, trailing = x[:-1], x[1:]
    return numpy.sum(100.0*(trailing - leading**2.0)**2.0 + (1 - leading)**2.0,
                     axis=0)
def rosen_der(x):
    """Return the gradient of the Rosenbrock function at x."""
    x = asarray(x)
    grad = numpy.zeros_like(x)
    mid, left, right = x[1:-1], x[:-2], x[2:]
    # Interior components combine contributions from both adjacent terms.
    grad[1:-1] = (200*(mid - left**2)
                  - 400*(right - mid**2)*mid
                  - 2*(1 - mid))
    grad[0] = -400*x[0]*(x[1] - x[0]**2) - 2*(1 - x[0])
    grad[-1] = 200*(x[-1] - x[-2]**2)
    return grad
def rosen_hess(x):
    """Return the Hessian matrix of the Rosenbrock function at x.

    BUG FIX: the first diagonal entry was computed as ``1200*x[0]`` instead
    of ``1200*x[0]**2``; the analytic second derivative of
    ``100*(x1 - x0**2)**2 + (1 - x0)**2`` w.r.t. x0 is
    ``1200*x0**2 - 400*x1 + 2``, which is also the form used by the
    matching term in ``rosen_hess_prod``.
    """
    x = atleast_1d(x)
    # Off-diagonal (coupling) terms.
    H = numpy.diag(-400*x[:-1],1) - numpy.diag(400*x[:-1],-1)
    diagonal = numpy.zeros(len(x), dtype=x.dtype)
    diagonal[0] = 1200*x[0]**2-400*x[1]+2
    diagonal[-1] = 200
    diagonal[1:-1] = 202 + 1200*x[1:-1]**2 - 400*x[2:]
    H = H + numpy.diag(diagonal)
    return H
def rosen_hess_prod(x,p):
    """Return the product of the Rosenbrock Hessian at x with vector p,
    without forming the full matrix."""
    x = atleast_1d(x)
    result = numpy.zeros(len(x), dtype=x.dtype)
    result[0] = (1200*x[0]**2 - 400*x[1] + 2)*p[0] - 400*x[0]*p[1]
    result[1:-1] = (-400*x[:-2]*p[:-2]
                    + (202 + 1200*x[1:-1]**2 - 400*x[2:])*p[1:-1]
                    - 400*x[1:-1]*p[2:])
    result[-1] = -400*x[-2]*p[-2] + 200*p[-1]
    return result
def wrap_function(function, args):
    """Wrap `function` so that `args` are appended to every call and the
    number of calls is counted.

    Returns (ncalls, wrapper): ncalls is a one-element list holding the
    running call count (a mutable cell, so the caller observes updates).
    """
    counter = [0]
    def wrapper(x):
        counter[0] += 1
        return function(x, *args)
    return counter, wrapper
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
         full_output=0, disp=1, retall=0, callback=None):
    """Minimize a function using the downhill simplex algorithm.

    :Parameters:
      func : callable func(x,*args)
          The objective function to be minimized.
      x0 : ndarray
          Initial guess.
      args : tuple
          Extra arguments passed to func, i.e. ``f(x,*args)``.
      callback : callable
          Called after each iteration.  NOTE: in this variant it is invoked
          as callback(nfcalls, xk, fval) -- the function-evaluation count,
          the current best vertex, and its function value.

    :Returns: (xopt, {fopt, iter, funcalls, warnflag})
      xopt : ndarray
          Parameter that minimizes function.
      fopt : float
          Value of function at minimum: ``fopt = func(xopt)``.
      iter : int
          Number of iterations performed.
      funcalls : int
          Number of function calls made.
      warnflag : int
          1 : Maximum number of function evaluations made.
          2 : Maximum number of iterations reached.
      allvecs : list
          Solution at each iteration (only if retall is true).

    *Other Parameters*:
      xtol : float
          Relative error in xopt acceptable for convergence.
      ftol : number
          Relative error in func(xopt) acceptable for convergence.
      maxiter : int
          Maximum number of iterations to perform.
      maxfun : number
          Maximum number of function evaluations to make.
      full_output : bool
          Set to True if fval and warnflag outputs are desired.
      disp : bool
          Set to True to print convergence messages.
      retall : bool
          Set to True to return list of solutions at each iteration.

    :Notes:
      Uses a Nelder-Mead simplex algorithm to find the minimum of
      function of one or more variables.
    """
    fcalls, func = wrap_function(func, args)
    x0 = asfarray(x0).flatten()
    N = len(x0)
    rank = len(x0.shape)
    if not -1 < rank < 2:
        raise ValueError, "Initial guess must be a scalar or rank-1 sequence."
    if maxiter is None:
        maxiter = N * 200
    if maxfun is None:
        maxfun = N * 200
    # Standard Nelder-Mead coefficients: reflection, expansion,
    # contraction and shrink.
    rho = 1; chi = 2; psi = 0.5; sigma = 0.5;
    one2np1 = range(1,N+1)
    if rank == 0:
        sim = numpy.zeros((N+1,), dtype=x0.dtype)
    else:
        sim = numpy.zeros((N+1,N), dtype=x0.dtype)
    fsim = numpy.zeros((N+1,), float)
    sim[0] = x0
    if retall:
        allvecs = [sim[0]]
    fsim[0] = func(x0)
    # Build the initial simplex by perturbing each coordinate by 5%
    # (or by a small absolute step when the coordinate is zero).
    nonzdelt = 0.05
    zdelt = 0.00025
    for k in range(0,N):
        y = numpy.array(x0,copy=True)
        if y[k] != 0:
            y[k] = (1+nonzdelt)*y[k]
        else:
            y[k] = zdelt
        sim[k+1] = y
        f = func(y)
        fsim[k+1] = f
    ind = numpy.argsort(fsim)
    fsim = numpy.take(fsim,ind,0)
    # sort so sim[0,:] has the lowest function value
    sim = numpy.take(sim,ind,0)
    iterations = 1
    while (fcalls[0] < maxfun and iterations < maxiter):
        # Converged when both the simplex diameter and the spread of
        # function values are small.
        if (max(numpy.ravel(abs(sim[1:]-sim[0]))) <= xtol \
            and max(abs(fsim[0]-fsim[1:])) <= ftol):
            break
        # Centroid of all vertices except the worst one.
        xbar = numpy.add.reduce(sim[:-1],0) / N
        xr = (1+rho)*xbar - rho*sim[-1]
        fxr = func(xr)
        doshrink = 0
        if fxr < fsim[0]:
            # Reflection beat the best vertex -- try expanding further.
            xe = (1+rho*chi)*xbar - rho*chi*sim[-1]
            fxe = func(xe)
            if fxe < fxr:
                sim[-1] = xe
                fsim[-1] = fxe
            else:
                sim[-1] = xr
                fsim[-1] = fxr
        else: # fsim[0] <= fxr
            if fxr < fsim[-2]:
                sim[-1] = xr
                fsim[-1] = fxr
            else: # fxr >= fsim[-2]
                # Perform contraction
                if fxr < fsim[-1]:
                    xc = (1+psi*rho)*xbar - psi*rho*sim[-1]
                    fxc = func(xc)
                    if fxc <= fxr:
                        sim[-1] = xc
                        fsim[-1] = fxc
                    else:
                        doshrink=1
                else:
                    # Perform an inside contraction
                    xcc = (1-psi)*xbar + psi*sim[-1]
                    fxcc = func(xcc)
                    if fxcc < fsim[-1]:
                        sim[-1] = xcc
                        fsim[-1] = fxcc
                    else:
                        doshrink = 1
                if doshrink:
                    # Shrink every vertex toward the current best.
                    for j in one2np1:
                        sim[j] = sim[0] + sigma*(sim[j] - sim[0])
                        fsim[j] = func(sim[j])
        ind = numpy.argsort(fsim)
        sim = numpy.take(sim,ind,0)
        fsim = numpy.take(fsim,ind,0)
        if callback is not None:
            # Non-standard callback signature (see docstring).
            callback(fcalls[0], sim[0], min(fsim))
        iterations += 1
        if retall:
            allvecs.append(sim[0])
    x = sim[0]
    fval = min(fsim)
    warnflag = 0
    if fcalls[0] >= maxfun:
        warnflag = 1
        if disp:
            print "Warning: Maximum number of function evaluations has "\
                  "been exceeded."
    elif iterations >= maxiter:
        warnflag = 2
        if disp:
            print "Warning: Maximum number of iterations has been exceeded"
    else:
        if disp:
            print "Optimization terminated successfully."
            print " Current function value: %f" % fval
            print " Iterations: %d" % iterations
            print " Function evaluations: %d" % fcalls[0]
    if full_output:
        retlist = x, fval, iterations, fcalls[0], warnflag
        if retall:
            retlist += (allvecs,)
    else:
        retlist = x
        if retall:
            retlist = (x, allvecs)
    return retlist
def _cubicmin(a,fa,fpa,b,fb,c,fc):
# finds the minimizer for a cubic polynomial that goes through the
# points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.
#
# if no minimizer can be found return None
#
# f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D
C = fpa
D = fa
db = b-a
dc = c-a
if (db == 0) or (dc == 0) or (b==c): return None
denom = (db*dc)**2 * (db-dc)
d1 = empty((2,2))
d1[0,0] = dc**2
d1[0,1] = -db**2
d1[1,0] = -dc**3
d1[1,1] = db**3
[A,B] = numpy.dot(d1,asarray([fb-fa-C*db,fc-fa-C*dc]).flatten())
A /= denom
B /= denom
radical = B*B-3*A*C
if radical < 0: return None
if (A == 0): return None
xmin = a + (-B + sqrt(radical))/(3*A)
return xmin
def _quadmin(a,fa,fpa,b,fb):
# finds the minimizer for a quadratic polynomial that goes through
# the points (a,fa), (b,fb) with derivative at a of fpa
# f(x) = B*(x-a)^2 + C*(x-a) + D
D = fa
C = fpa
db = b-a*1.0
if (db==0): return None
B = (fb-D-C*db)/(db*db)
if (B <= 0): return None
xmin = a - C / (2.0*B)
return xmin
def zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo,
         phi, derphi, phi0, derphi0, c1, c2):
    # Zoom stage of the strong-Wolfe line search: maintains a bracket
    # [a_lo, a_hi] known to contain acceptable step lengths and shrinks it
    # by interpolation until a step satisfying both Wolfe conditions is
    # found (or maxiter trial points have been examined).
    # Returns (a_star, phi(a_star), derphi(a_star) or None).
    maxiter = 10
    i = 0
    delta1 = 0.2 # cubic interpolant check
    delta2 = 0.1 # quadratic interpolant check
    phi_rec = phi0   # most recently rejected (phi value, step) pair
    a_rec = 0
    while 1:
        # interpolate to find a trial step length between a_lo and a_hi
        # Need to choose interpolation here. Use cubic interpolation and then if the
        # result is within delta * dalpha or outside of the interval bounded by a_lo or a_hi
        # then use quadratic interpolation, if the result is still too close, then use bisection
        dalpha = a_hi-a_lo;
        if dalpha < 0: a,b = a_hi,a_lo
        else: a,b = a_lo, a_hi
        # minimizer of cubic interpolant
        # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi)
        # if the result is too close to the end points (or out of the interval)
        # then use quadratic interpolation with phi_lo, derphi_lo and phi_hi
        # if the result is stil too close to the end points (or out of the interval)
        # then use bisection
        if (i > 0):
            cchk = delta1*dalpha
            a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi, a_rec, phi_rec)
        # On the first pass (i == 0) the short-circuit below skips straight
        # to quadratic interpolation, so cchk/a_j need not be defined yet.
        if (i==0) or (a_j is None) or (a_j > b-cchk) or (a_j < a+cchk):
            qchk = delta2*dalpha
            a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
            if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk):
                a_j = a_lo + 0.5*dalpha
                # print "Using bisection."
            # else: print "Using quadratic."
        # else: print "Using cubic."
        # Check new value of a_j
        phi_aj = phi(a_j)
        if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo):
            # Sufficient-decrease failed (or no improvement over a_lo):
            # shrink the bracket from the high side.
            phi_rec = phi_hi
            a_rec = a_hi
            a_hi = a_j
            phi_hi = phi_aj
        else:
            derphi_aj = derphi(a_j)
            # Strong Wolfe curvature condition satisfied -- done.
            if abs(derphi_aj) <= -c2*derphi0:
                a_star = a_j
                val_star = phi_aj
                valprime_star = derphi_aj
                break
            if derphi_aj*(a_hi - a_lo) >= 0:
                # Slope says the minimum lies on the a_lo side; flip ends.
                phi_rec = phi_hi
                a_rec = a_hi
                a_hi = a_lo
                phi_hi = phi_lo
            else:
                phi_rec = phi_lo
                a_rec = a_lo
            a_lo = a_j
            phi_lo = phi_aj
            derphi_lo = derphi_aj
        i += 1
        if (i > maxiter):
            # Give up: return the last trial point with no derivative info.
            a_star = a_j
            val_star = phi_aj
            valprime_star = None
            break
    return a_star, val_star, valprime_star
def line_search(f, myfprime, xk, pk, gfk, old_fval, old_old_fval,
                args=(), c1=1e-4, c2=0.9, amax=50):
    """Find alpha that satisfies strong Wolfe conditions.

    :Parameters:
      f : callable f(x,*args)
          Objective function.
      myfprime : callable f'(x,*args)
          Objective function gradient.  May also be a tuple
          (approx_fprime, epsilon) for a finite-difference gradient --
          that is what the isinstance(.., type(())) check below detects.
      xk : ndarray
          Starting point.
      pk : ndarray
          Search direction.
      gfk : ndarray
          Gradient value for x=xk (xk being the current parameter
          estimate).
      old_fval, old_old_fval : float
          Function values at xk and at the previous iterate (used to make
          the initial step guess).
      args : tuple
          Additional arguments passed to objective function.
      c1 : float
          Parameter for Armijo condition rule.
      c2 : float
          Parameter for curvature condition rule.

    :Returns:
      (alpha_star, fc, gc, fval_star, old_fval, fprime_star) -- the step
      length (or None on failure), function/gradient evaluation counts,
      the function value at the new point, the previous function value,
      and the gradient at the new point (or None if not computed).

    :Notes:
      Uses the line search algorithm to enforce strong Wolfe
      conditions. See Wright and Nocedal, 'Numerical Optimization',
      1999, pg. 59-60.
    """
    # Evaluation counters and the last gradient are kept in module globals
    # so the nested closures can update them (Python 2 has no 'nonlocal').
    global _ls_fc, _ls_gc, _ls_ingfk
    _ls_fc = 0
    _ls_gc = 0
    _ls_ingfk = None
    def phi(alpha):
        # 1-d restriction of f along pk.
        global _ls_fc
        _ls_fc += 1
        return f(xk+alpha*pk,*args)
    if isinstance(myfprime,type(())):
        # Finite-difference gradient: each call costs len(xk)+1 f-evals.
        def phiprime(alpha):
            global _ls_fc, _ls_ingfk
            _ls_fc += len(xk)+1
            eps = myfprime[1]
            fprime = myfprime[0]
            newargs = (f,eps) + args
            _ls_ingfk = fprime(xk+alpha*pk,*newargs) # store for later use
            return numpy.dot(_ls_ingfk,pk)
    else:
        fprime = myfprime
        def phiprime(alpha):
            global _ls_gc, _ls_ingfk
            _ls_gc += 1
            _ls_ingfk = fprime(xk+alpha*pk,*args) # store for later use
            return numpy.dot(_ls_ingfk,pk)
    alpha0 = 0
    phi0 = old_fval
    derphi0 = numpy.dot(gfk,pk)
    # Initial step guess from the previous function decrease.
    alpha1 = pymin(1.0,1.01*2*(phi0-old_old_fval)/derphi0)
    if alpha1 == 0:
        # This shouldn't happen. Perhaps the increment has slipped below
        # machine precision? For now, set the return variables skip the
        # useless while loop, and raise warnflag=2 due to possible imprecision.
        alpha_star = None
        fval_star = old_fval
        old_fval = old_old_fval
        fprime_star = None
    phi_a1 = phi(alpha1)
    #derphi_a1 = phiprime(alpha1) evaluated below
    phi_a0 = phi0
    derphi_a0 = derphi0
    i = 1
    maxiter = 10
    while 1: # bracketing phase
        if alpha1 == 0:
            break
        if (phi_a1 > phi0 + c1*alpha1*derphi0) or \
           ((phi_a1 >= phi_a0) and (i > 1)):
            # Sufficient decrease failed: a Wolfe point lies in
            # (alpha0, alpha1) -- refine with zoom.
            alpha_star, fval_star, fprime_star = \
                        zoom(alpha0, alpha1, phi_a0,
                             phi_a1, derphi_a0, phi, phiprime,
                             phi0, derphi0, c1, c2)
            break
        derphi_a1 = phiprime(alpha1)
        if (abs(derphi_a1) <= -c2*derphi0):
            # Both Wolfe conditions hold at alpha1.
            alpha_star = alpha1
            fval_star = phi_a1
            fprime_star = derphi_a1
            break
        if (derphi_a1 >= 0):
            # Positive slope: the interval (alpha1, alpha0) brackets a
            # Wolfe point -- zoom with the ends swapped.
            alpha_star, fval_star, fprime_star = \
                        zoom(alpha1, alpha0, phi_a1,
                             phi_a0, derphi_a1, phi, phiprime,
                             phi0, derphi0, c1, c2)
            break
        alpha2 = 2 * alpha1 # increase by factor of two on each iteration
        i = i + 1
        alpha0 = alpha1
        alpha1 = alpha2
        phi_a0 = phi_a1
        phi_a1 = phi(alpha1)
        derphi_a0 = derphi_a1
        # stopping test if lower function not found
        if (i > maxiter):
            alpha_star = alpha1
            fval_star = phi_a1
            fprime_star = None
            break
    if fprime_star is not None:
        # fprime_star is a number (derphi) -- so use the most recently
        # calculated gradient used in computing it derphi = gfk*pk
        # this is the gradient at the next step no need to compute it
        # again in the outer loop.
        fprime_star = _ls_ingfk
    return alpha_star, _ls_fc, _ls_gc, fval_star, old_fval, fprime_star
def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
    """Minimize over alpha, the function ``f(xk+alpha pk)``.

    Uses the interpolation algorithm (Armiijo backtracking) as suggested by
    Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57.

    Only the sufficient-decrease (Armijo) condition is enforced; the
    backtracking assumes pk is a descent direction.

    :Returns: (alpha, fc, gc, phi_at_alpha) -- gc is always 0 here since
        no gradient evaluations are made.
    """
    xk = atleast_1d(xk)
    fc = 0
    phi0 = old_fval # compute f(xk) -- done in past loop
    phi_a0 = f(*((xk+alpha0*pk,)+args))
    fc = fc + 1
    derphi0 = numpy.dot(gfk,pk)
    # Accept the full step if it already satisfies the Armijo condition.
    if (phi_a0 <= phi0 + c1*alpha0*derphi0):
        return alpha0, fc, 0, phi_a0
    # Otherwise compute the minimizer of a quadratic interpolant:
    alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
    phi_a1 = f(*((xk+alpha1*pk,)+args))
    fc = fc + 1
    if (phi_a1 <= phi0 + c1*alpha1*derphi0):
        return alpha1, fc, 0, phi_a1
    # Otherwise loop with cubic interpolation until we find an alpha which
    # satifies the first Wolfe condition (since we are backtracking, we will
    # assume that the value of alpha is not too small and satisfies the second
    # condition.
    while 1: # we are assuming pk is a descent direction
        # Coefficients a, b of the cubic fitted to (phi0, derphi0,
        # phi_a0, phi_a1); see Nocedal & Wright eq. (3.58).
        factor = alpha0**2 * alpha1**2 * (alpha1-alpha0)
        a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \
            alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0)
        a = a / factor
        b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \
            alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0)
        b = b / factor
        alpha2 = (-b + numpy.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a)
        phi_a2 = f(*((xk+alpha2*pk,)+args))
        fc = fc + 1
        if (phi_a2 <= phi0 + c1*alpha2*derphi0):
            return alpha2, fc, 0, phi_a2
        # Guard against steps that are too small or too close to the
        # previous one: fall back to halving.
        if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96:
            alpha2 = alpha1 / 2.0
        alpha0 = alpha1
        alpha1 = alpha2
        phi_a0 = phi_a1
        phi_a1 = phi_a2
def approx_fprime(xk,f,epsilon,*args):
    """Forward-difference approximation of the gradient of f at xk.

    Component k is (f(xk + epsilon*e_k, *args) - f(xk, *args)) / epsilon
    with e_k the k-th unit vector; f is evaluated len(xk)+1 times total.

    :Returns: ndarray of shape (len(xk),).
    """
    n = len(xk)
    baseline = f(*((xk,)+args))
    gradient = numpy.zeros((n,), float)
    perturb = numpy.zeros((n,), float)
    for idx in range(n):
        perturb[idx] = epsilon
        gradient[idx] = (f(*((xk+perturb,)+args)) - baseline)/epsilon
        perturb[idx] = 0.0  # reset so only one coordinate is perturbed at a time
    return gradient
def check_grad(func, grad, x0, *args):
    """Return the 2-norm of the difference between grad(x0, *args) and a
    finite-difference approximation of func's gradient at x0 (step
    size: module-level _epsilon)."""
    err = grad(x0,*args) - approx_fprime(x0,func,_epsilon,*args)
    return sqrt(sum(err**2))
def approx_fhess_p(x0,p,fprime,epsilon,*args):
    """Finite-difference approximation of the Hessian of f times vector p.

    Uses two gradient evaluations: (f'(x0 + epsilon*p) - f'(x0)) / epsilon.
    """
    grad_shifted = fprime(*((x0+epsilon*p,)+args))
    grad_base = fprime(*((x0,)+args))
    return (grad_shifted - grad_base)/epsilon
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
              epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
              retall=0, callback=None):
    """Minimize a function using the BFGS algorithm.

    :Parameters:
      f : callable f(x,*args)
          Objective function to be minimized.
      x0 : ndarray
          Initial guess.
      fprime : callable f'(x,*args)
          Gradient of f.  If None, the gradient is approximated by
          forward differences with step `epsilon`.
      args : tuple
          Extra arguments passed to f and fprime.
      gtol : float
          Gradient norm must be less than gtol before succesful termination.
      norm : float
          Order of norm (Inf is max, -Inf is min)
      epsilon : int or ndarray
          If fprime is approximated, use this value for the step size.
      callback : callable
          Called after each iteration.  NOTE: in this variant it is
          invoked as callback(nfcalls, xk, fval).

    :Returns: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>)
      xopt : ndarray
          Parameters which minimize f, i.e. f(xopt) == fopt.
      fopt : float
          Minimum value.
      gopt : ndarray
          Value of gradient at minimum, f'(xopt), which should be near 0.
      Bopt : ndarray
          Value of 1/f''(xopt), i.e. the inverse hessian matrix.
      func_calls : int
          Number of function_calls made.
      grad_calls : int
          Number of gradient calls made.
      warnflag : integer
          1 : Maximum number of iterations exceeded.
          2 : Gradient and/or function calls not changing.
      allvecs : list
          Results at each iteration. Only returned if retall is True.

    *Other Parameters*:
      maxiter : int
          Maximum number of iterations to perform.
      full_output : bool
          If True,return fopt, func_calls, grad_calls, and warnflag
          in addition to xopt.
      disp : bool
          Print convergence message if True.
      retall : bool
          Return a list of results at each iteration if True.

    :Notes:
      Optimize the function, f, whose gradient is given by fprime
      using the quasi-Newton method of Broyden, Fletcher, Goldfarb,
      and Shanno (BFGS) See Wright, and Nocedal 'Numerical
      Optimization', 1999, pg. 198.
    """
    x0 = asarray(x0).squeeze()
    if x0.ndim == 0:
        x0.shape = (1,)
    if maxiter is None:
        maxiter = len(x0)*200
    func_calls, f = wrap_function(f, args)
    if fprime is None:
        # Finite-difference gradient; counter counts gradient calls.
        grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
    else:
        grad_calls, myfprime = wrap_function(fprime, args)
    gfk = myfprime(x0)
    k = 0
    N = len(x0)
    I = numpy.eye(N,dtype=int)
    Hk = I          # initial inverse-Hessian approximation: identity
    old_fval = f(x0)
    old_old_fval = old_fval + 5000   # fake "previous" value for the first line search
    xk = x0
    if retall:
        allvecs = [x0]
    sk = [2*gtol]
    warnflag = 0
    gnorm = vecnorm(gfk,ord=norm)
    while (gnorm > gtol) and (k < maxiter):
        # Quasi-Newton search direction.
        pk = -numpy.dot(Hk,gfk)
        alpha_k = None
        if linesearch is not None:
            # Prefer the compiled line search when available.
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
               linesearch.line_search(f,myfprime,xk,pk,gfk,
                                      old_fval,old_old_fval)
        if alpha_k is None: # line search failed try different one.
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
                     line_search(f,myfprime,xk,pk,gfk,
                                 old_fval,old_old_fval)
            if alpha_k is None:
                # line search(es) failed to find a better solution.
                warnflag = 2
                break
        xkp1 = xk + alpha_k * pk
        if retall:
            allvecs.append(xkp1)
        sk = xkp1 - xk
        xk = xkp1
        if gfkp1 is None:
            gfkp1 = myfprime(xkp1)
        yk = gfkp1 - gfk
        gfk = gfkp1
        if callback is not None:
            # Non-standard callback signature (see docstring).
            callback(func_calls[0], xk, old_fval)
        k += 1
        gnorm = vecnorm(gfk,ord=norm)
        if (gnorm <= gtol):
            break
        try: # this was handled in numeric, let it remaines for more safety
            rhok = 1.0 / (numpy.dot(yk,sk))
        except ZeroDivisionError:
            rhok = 1000.0
            print "Divide-by-zero encountered: rhok assumed large"
        if isinf(rhok): # this is patch for numpy
            rhok = 1000.0
            print "Divide-by-zero encountered: rhok assumed large"
        # BFGS inverse-Hessian update:
        # Hk+1 = (I - rho s y^T) Hk (I - rho y s^T) + rho s s^T
        A1 = I - sk[:,numpy.newaxis] * yk[numpy.newaxis,:] * rhok
        A2 = I - yk[:,numpy.newaxis] * sk[numpy.newaxis,:] * rhok
        Hk = numpy.dot(A1,numpy.dot(Hk,A2)) + rhok * sk[:,numpy.newaxis] \
             * sk[numpy.newaxis,:]
    if disp or full_output:
        fval = old_fval
    if warnflag == 2:
        if disp:
            print "Warning: Desired error not necessarily achieved" \
                  "due to precision loss"
            print " Current function value: %f" % fval
            print " Iterations: %d" % k
            print " Function evaluations: %d" % func_calls[0]
            print " Gradient evaluations: %d" % grad_calls[0]
    elif k >= maxiter:
        warnflag = 1
        if disp:
            print "Warning: Maximum number of iterations has been exceeded"
            print " Current function value: %f" % fval
            print " Iterations: %d" % k
            print " Function evaluations: %d" % func_calls[0]
            print " Gradient evaluations: %d" % grad_calls[0]
    else:
        if disp:
            print "Optimization terminated successfully."
            print " Current function value: %f" % fval
            print " Iterations: %d" % k
            print " Function evaluations: %d" % func_calls[0]
            print " Gradient evaluations: %d" % grad_calls[0]
    if full_output:
        retlist = xk, fval, gfk, Hk, func_calls[0], grad_calls[0], warnflag
        if retall:
            retlist += (allvecs,)
    else:
        retlist = xk
        if retall:
            retlist = (xk, allvecs)
    return retlist
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
            maxiter=None, full_output=0, disp=1, retall=0, callback=None):
    """Minimize a function using a nonlinear conjugate gradient algorithm.

    :Parameters:
      f : callable f(x,*args)
          Objective function to be minimized.
      x0 : ndarray
          Initial guess.
      fprime : callable f'(x,*args)
          Function which computes the gradient of f.  If None, a
          forward-difference approximation with step `epsilon` is used.
      args : tuple
          Extra arguments passed to f and fprime.
      gtol : float
          Stop when norm of gradient is less than gtol.
      norm : float
          Order of vector norm to use.  -Inf is min, Inf is max.
      epsilon : float or ndarray
          If fprime is approximated, use this value for the step
          size (can be scalar or vector).
      callback : callable
          Called after each iteration.  NOTE: in this variant it is
          invoked as callback(nfcalls, xk, fval).

    :Returns: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs})
      xopt : ndarray
          Parameters which minimize f, i.e. f(xopt) == fopt.
      fopt : float
          Minimum value found, f(xopt).
      func_calls : int
          The number of function_calls made.
      grad_calls : int
          The number of gradient calls made.
      warnflag : int
          1 : Maximum number of iterations exceeded.
          2 : Gradient and/or function calls not changing.
      allvecs : ndarray
          If retall is True (see other parameters below), then this
          vector containing the result at each iteration is returned.

    *Other Parameters*:
      maxiter : int
          Maximum number of iterations to perform.
      full_output : bool
          If True then return fopt, func_calls, grad_calls, and
          warnflag in addition to xopt.
      disp : bool
          Print convergence message if True.
      retall : bool
          return a list of results at each iteration if True.

    :Notes:
      Optimize the function, f, whose gradient is given by fprime
      using the nonlinear conjugate gradient algorithm of Polak and
      Ribiere See Wright, and Nocedal 'Numerical Optimization',
      1999, pg. 120-122.
    """
    x0 = asarray(x0).flatten()
    if maxiter is None:
        maxiter = len(x0)*200
    func_calls, f = wrap_function(f, args)
    if fprime is None:
        grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
    else:
        grad_calls, myfprime = wrap_function(fprime, args)
    gfk = myfprime(x0)
    k = 0
    N = len(x0)
    xk = x0
    old_fval = f(xk)
    old_old_fval = old_fval + 5000   # fake "previous" value for the first line search
    if retall:
        allvecs = [xk]
    sk = [2*gtol]
    warnflag = 0
    pk = -gfk          # first search direction: steepest descent
    gnorm = vecnorm(gfk,ord=norm)
    while (gnorm > gtol) and (k < maxiter):
        deltak = numpy.dot(gfk,gfk)
        # These values are modified by the line search, even if it fails
        old_fval_backup = old_fval
        old_old_fval_backup = old_old_fval
        alpha_k = None
        if linesearch is not None:
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
                     linesearch.line_search(f,myfprime,xk,pk,gfk,old_fval,
                                            old_old_fval,c2=0.4)
        if alpha_k is None: # line search failed -- use different one.
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
                     line_search(f,myfprime,xk,pk,gfk,
                                 old_fval_backup,old_old_fval_backup)
            if alpha_k is None or alpha_k == 0:
                # line search(es) failed to find a better solution.
                warnflag = 2
                break
        xk = xk + alpha_k*pk
        if retall:
            allvecs.append(xk)
        if gfkp1 is None:
            gfkp1 = myfprime(xk)
        yk = gfkp1 - gfk
        # Polak-Ribiere beta, clipped at zero (automatic restart).
        beta_k = pymax(0,numpy.dot(yk,gfkp1)/deltak)
        pk = -gfkp1 + beta_k * pk
        gfk = gfkp1
        gnorm = vecnorm(gfk,ord=norm)
        if callback is not None:
            # Non-standard callback signature (see docstring).
            callback(func_calls[0], xk, old_fval)
        k += 1
    if disp or full_output:
        fval = old_fval
    if warnflag == 2:
        if disp:
            print "Warning: Desired error not necessarily achieved due to precision loss"
            print " Current function value: %f" % fval
            print " Iterations: %d" % k
            print " Function evaluations: %d" % func_calls[0]
            print " Gradient evaluations: %d" % grad_calls[0]
    elif k >= maxiter:
        warnflag = 1
        if disp:
            print "Warning: Maximum number of iterations has been exceeded"
            print " Current function value: %f" % fval
            print " Iterations: %d" % k
            print " Function evaluations: %d" % func_calls[0]
            print " Gradient evaluations: %d" % grad_calls[0]
    else:
        if disp:
            print "Optimization terminated successfully."
            print " Current function value: %f" % fval
            print " Iterations: %d" % k
            print " Function evaluations: %d" % func_calls[0]
            print " Gradient evaluations: %d" % grad_calls[0]
    if full_output:
        retlist = xk, fval, func_calls[0], grad_calls[0], warnflag
        if retall:
            retlist += (allvecs,)
    else:
        retlist = xk
        if retall:
            retlist = (xk, allvecs)
    return retlist
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
             epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
             callback=None):
    """Minimize a function using the Newton-CG method.

    :Parameters:
      f : callable f(x,*args)
          Objective function to be minimized.
      x0 : ndarray
          Initial guess.
      fprime : callable f'(x,*args)
          Gradient of f.
      fhess_p : callable fhess_p(x,p,*args)
          Function which computes the Hessian of f times an
          arbitrary vector, p.
      fhess : callable fhess(x,*args)
          Function to compute the Hessian matrix of f.
      args : tuple
          Extra arguments passed to f, fprime, fhess_p, and fhess
          (the same set of extra arguments is supplied to all of
          these functions).
      epsilon : float or ndarray
          If fhess is approximated, use this value for the step size.
      callback : callable
          An optional user-supplied function which is called after
          each iteration.  Called as callback(n,xk,f), where n is the
          function-call count and xk the current parameter vector.

    :Returns: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs})
      xopt : ndarray
          Parameters which minimizer f, i.e. ``f(xopt) == fopt``.
      fopt : float
          Value of the function at xopt, i.e. ``fopt = f(xopt)``.
      fcalls : int
          Number of function calls made.
      gcalls : int
          Number of gradient calls made.
      hcalls : int
          Number of hessian calls made.
      warnflag : int
          Warnings generated by the algorithm.
          1 : Maximum number of iterations exceeded.
      allvecs : list
          The result at each iteration, if retall is True (see below).

    *Other Parameters*:
      avextol : float
          Convergence is assumed when the average relative error in
          the minimizer falls below this amount.
      maxiter : int
          Maximum number of iterations to perform.
      full_output : bool
          If True, return the optional outputs.
      disp : bool
          If True, print convergence message.
      retall : bool
          If True, return a list of results at each iteration.

    :Notes:
      Only one of `fhess_p` or `fhess` need to be given.  If `fhess`
      is provided, then `fhess_p` will be ignored.  If neither `fhess`
      nor `fhess_p` is provided, then the hessian product will be
      approximated using finite differences on `fprime`.  See Wright,
      and Nocedal 'Numerical Optimization', 1999, pg. 140.
    """
    x0 = asarray(x0).flatten()
    fcalls, f = wrap_function(f, args)
    gcalls, fprime = wrap_function(fprime, args)
    hcalls = 0
    if maxiter is None:
        maxiter = len(x0)*200
    xtol = len(x0)*avextol
    update = [2*xtol]   # forces at least one outer iteration
    xk = x0
    if retall:
        allvecs = [xk]
    k = 0
    old_fval = f(x0)
    while (numpy.add.reduce(abs(update)) > xtol) and (k < maxiter):
        # Compute a search direction pk by applying the CG method to
        # del2 f(xk) p = - grad f(xk) starting from 0.
        b = -fprime(xk)
        maggrad = numpy.add.reduce(abs(b))
        # Inexact-Newton forcing term: solve the system only to a
        # tolerance proportional to the gradient magnitude.
        eta = min([0.5,numpy.sqrt(maggrad)])
        termcond = eta * maggrad
        xsupi = zeros(len(x0), dtype=x0.dtype)
        ri = -b
        psupi = -ri
        i = 0
        dri0 = numpy.dot(ri,ri)
        if fhess is not None: # you want to compute hessian once.
            A = fhess(*(xk,)+args)
            hcalls = hcalls + 1
        while numpy.add.reduce(abs(ri)) > termcond:
            if fhess is None:
                if fhess_p is None:
                    Ap = approx_fhess_p(xk,psupi,fprime,epsilon)
                else:
                    Ap = fhess_p(xk,psupi, *args)
                    hcalls = hcalls + 1
            else:
                Ap = numpy.dot(A,psupi)
            # check curvature
            Ap = asarray(Ap).squeeze() # get rid of matrices...
            curv = numpy.dot(psupi,Ap)
            if curv == 0.0:
                break
            elif curv < 0:
                # Negative curvature: stop the CG iteration; on the very
                # first step still take a scaled step along psupi.
                if (i > 0):
                    break
                else:
                    xsupi = xsupi + dri0/curv * psupi
                    break
            alphai = dri0 / curv
            xsupi = xsupi + alphai * psupi
            ri = ri + alphai * Ap
            dri1 = numpy.dot(ri,ri)
            betai = dri1 / dri0
            psupi = -ri + betai * psupi
            i = i + 1
            dri0 = dri1 # update numpy.dot(ri,ri) for next time.
        pk = xsupi # search direction is solution to system.
        gfk = -b # gradient at xk
        alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval)
        update = alphak * pk
        xk = xk + update # upcast if necessary
        if callback is not None:
            callback(fcalls[0], xk, old_fval)
        if retall:
            allvecs.append(xk)
        k += 1
    if disp or full_output:
        fval = old_fval
    if k >= maxiter:
        warnflag = 1
        if disp:
            print "Warning: Maximum number of iterations has been exceeded"
            print " Current function value: %f" % fval
            print " Iterations: %d" % k
            print " Function evaluations: %d" % fcalls[0]
            print " Gradient evaluations: %d" % gcalls[0]
            print " Hessian evaluations: %d" % hcalls
    else:
        warnflag = 0
        if disp:
            print "Optimization terminated successfully."
            print " Current function value: %f" % fval
            print " Iterations: %d" % k
            print " Function evaluations: %d" % fcalls[0]
            print " Gradient evaluations: %d" % gcalls[0]
            print " Hessian evaluations: %d" % hcalls
    if full_output:
        retlist = xk, fval, fcalls[0], gcalls[0], hcalls, warnflag
        if retall:
            retlist += (allvecs,)
    else:
        retlist = xk
        if retall:
            retlist = (xk, allvecs)
    return retlist
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
              full_output=0, disp=1):
    """Bounded minimization for scalar functions.

    :Parameters:
      func : callable f(x,*args)
          Objective function to be minimized (must accept and return scalars).
      x1, x2 : float or array scalar
          The optimization bounds.
      args : tuple
          Extra arguments passed to function.
      xtol : float
          The convergence tolerance.
      maxfun : int
          Maximum number of function evaluations allowed.
      full_output : bool
          If True, return optional outputs.
      disp : int
          If non-zero, print messages.
          0 : no message printing.
          1 : non-convergence notification messages only.
          2 : print a message on convergence too.
          3 : print iteration results.

    :Returns: (xopt, {fval, ierr, numfunc})
      xopt : ndarray
          Parameters (over given interval) which minimize the
          objective function.
      fval : number
          The function value at the minimum point.
      ierr : int
          An error flag (0 if converged, 1 if maximum number of
          function calls reached).
      numfunc : int
          The number of function calls made.

    :Notes:
      Finds a local minimizer of the scalar function `func` in the
      interval x1 < xopt < x2 using Brent's method.  (See `brent`
      for auto-bracketing).
    """
    # Test bounds are of correct form
    if not (is_array_scalar(x1) and is_array_scalar(x2)):
        raise ValueError("Optimisation bounds must be scalars"
                         " or array scalars.")
    if x1 > x2:
        raise ValueError("The lower bound exceeds the upper bound.")
    flag = 0
    header = ' Func-count x f(x) Procedure'
    step=' initial'
    sqrt_eps = sqrt(2.2e-16)
    golden_mean = 0.5*(3.0-sqrt(5.0))
    a, b = x1, x2
    # Initial point: golden-section fraction into the interval.
    fulc = a + golden_mean*(b-a)
    nfc, xf = fulc, fulc
    rat = e = 0.0
    x = xf
    fx = func(x,*args)
    num = 1
    fmin_data = (1, xf, fx)
    ffulc = fnfc = fx
    xm = 0.5*(a+b)
    tol1 = sqrt_eps*abs(xf) + xtol / 3.0
    tol2 = 2.0*tol1
    if disp > 2:
        print (" ")
        print (header)
        print "%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))
    while ( abs(xf-xm) > (tol2 - 0.5*(b-a)) ):
        golden = 1
        # Check for parabolic fit
        if abs(e) > tol1:
            golden = 0
            r = (xf-nfc)*(fx-ffulc)
            q = (xf-fulc)*(fx-fnfc)
            p = (xf-fulc)*q - (xf-nfc)*r
            q = 2.0*(q-r)
            if q > 0.0: p = -p
            q = abs(q)
            r = e
            e = rat
            # Check for acceptability of parabola
            if ( (abs(p) < abs(0.5*q*r)) and (p > q*(a-xf)) and \
                 (p < q*(b-xf))):
                rat = (p+0.0) / q;
                x = xf + rat
                step = ' parabolic'
                # Keep the trial point at least tol2 away from the bounds.
                if ((x-a) < tol2) or ((b-x) < tol2):
                    si = numpy.sign(xm-xf) + ((xm-xf)==0)
                    rat = tol1*si
            else: # do a golden section step
                golden = 1
        if golden: # Do a golden-section step
            if xf >= xm:
                e=a-xf
            else:
                e=b-xf
            rat = golden_mean*e
            step = ' golden'
        # Move by at least tol1 in the direction of rat.
        si = numpy.sign(rat) + (rat == 0)
        x = xf + si*max([abs(rat), tol1])
        fu = func(x,*args)
        num += 1
        fmin_data = (num, x, fu)
        if disp > 2:
            print "%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))
        if fu <= fx:
            # New best point: shrink the interval and shift the history
            # points (fulc = second-previous best, nfc = previous best).
            if x >= xf:
                a = xf
            else:
                b = xf
            fulc, ffulc = nfc, fnfc
            nfc, fnfc = xf, fx
            xf, fx = x, fu
        else:
            if x < xf:
                a = x
            else:
                b = x
            if (fu <= fnfc) or (nfc == xf):
                fulc, ffulc = nfc, fnfc
                nfc, fnfc = x, fu
            elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):
                fulc, ffulc = x, fu
        xm = 0.5*(a+b)
        tol1 = sqrt_eps*abs(xf) + xtol/3.0
        tol2 = 2.0*tol1
        if num >= maxfun:
            # Budget exhausted: report and return from inside the loop.
            flag = 1
            fval = fx
            if disp > 0:
                _endprint(x, flag, fval, maxfun, xtol, disp)
            if full_output:
                return xf, fval, flag, num
            else:
                return xf
    fval = fx
    if disp > 0:
        _endprint(x, flag, fval, maxfun, xtol, disp)
    if full_output:
        return xf, fval, flag, num
    else:
        return xf
class Brent:
    # Stateful implementation of Brent's method for 1-d minimization:
    # combines golden-section steps with inverse parabolic interpolation.
    # Usage: construct, optionally set_bracket(), then optimize() and
    # read the result via get_result().
    #need to rethink design of __init__
    def __init__(self, func, tol=1.48e-8, maxiter=500):
        self.func = func          # objective, called as func(x)
        self.tol = tol            # relative x tolerance
        self.maxiter = maxiter    # iteration cap for optimize()
        self._mintol = 1.0e-11    # absolute floor on the tolerance
        self._cg = 0.3819660      # golden-section ratio (2 - golden ratio)
        self.xmin = None          # result: minimizer (set by optimize)
        self.fval = None          # result: minimum value
        self.iter = 0             # result: iterations used
        self.funcalls = 0         # result: function evaluations used
        self.brack = None         # user-supplied bracket (2- or 3-tuple)
        self._brack_info = None   # expanded bracket (xa,xb,xc,fa,fb,fc)
    #need to rethink design of set_bracket (new options, etc)
    def set_bracket(self, brack = None):
        self.brack = brack
        self._brack_info = self.get_bracket_info()
    def get_bracket_info(self):
        # Expand self.brack into a full bracketing triple with function
        # values, auto-bracketing when fewer than 3 points are given.
        #set up
        func = self.func
        brack = self.brack
        ### BEGIN core bracket_info code ###
        ### carefully DOCUMENT any CHANGES in core ##
        if brack is None:
            xa,xb,xc,fa,fb,fc,funcalls = bracket(func)
        elif len(brack) == 2:
            xa,xb,xc,fa,fb,fc,funcalls = bracket(func, xa=brack[0], xb=brack[1])
        elif len(brack) == 3:
            xa,xb,xc = brack
            if (xa > xc): # swap so xa < xc can be assumed
                dum = xa; xa=xc; xc=dum
            assert ((xa < xb) and (xb < xc)), "Not a bracketing interval."
            fa = func(xa)
            fb = func(xb)
            fc = func(xc)
            assert ((fb<fa) and (fb < fc)), "Not a bracketing interval."
            funcalls = 3
        else:
            raise ValueError, "Bracketing interval must be length 2 or 3 sequence."
        ### END core bracket_info code ###
        self.funcalls += funcalls
        return xa,xb,xc,fa,fb,fc
    def optimize(self):
        # Run Brent's method; results are stored on self (xmin, fval,
        # iter, funcalls) rather than returned.
        #set up for optimization
        func = self.func
        if self._brack_info is None:
            self.set_bracket(None)
        xa,xb,xc,fa,fb,fc = self._brack_info
        _mintol = self._mintol
        _cg = self._cg
        #################################
        #BEGIN CORE ALGORITHM
        #we are making NO CHANGES in this
        #################################
        # x: current best point; w: second best; v: previous w.
        x=w=v=xb
        fw=fv=fx=func(x)
        if (xa < xc):
            a = xa; b = xc
        else:
            a = xc; b = xa
        deltax= 0.0
        funcalls = 1
        iter = 0
        while (iter < self.maxiter):
            tol1 = self.tol*abs(x) + _mintol
            tol2 = 2.0*tol1
            xmid = 0.5*(a+b)
            if abs(x-xmid) < (tol2-0.5*(b-a)): # check for convergence
                xmin=x; fval=fx
                break
            # Local modification vs. stock Brent: force a golden-section
            # step whenever any tracked value is +inf (a parabolic fit
            # through an infinity would be meaningless).
            infinities_present = [f for f in [fw, fv, fx] if numpy.isposinf(f)]
            if infinities_present or (abs(deltax) <= tol1):
                if (x>=xmid): deltax=a-x # do a golden section step
                else: deltax=b-x
                rat = _cg*deltax
            else: # do a parabolic step
                tmp1 = (x-w)*(fx-fv)
                tmp2 = (x-v)*(fx-fw)
                p = (x-v)*tmp2 - (x-w)*tmp1;
                tmp2 = 2.0*(tmp2-tmp1)
                if (tmp2 > 0.0): p = -p
                tmp2 = abs(tmp2)
                dx_temp = deltax
                deltax= rat
                # check parabolic fit
                if ((p > tmp2*(a-x)) and (p < tmp2*(b-x)) and (abs(p) < abs(0.5*tmp2*dx_temp))):
                    rat = p*1.0/tmp2 # if parabolic step is useful.
                    u = x + rat
                    if ((u-a) < tol2 or (b-u) < tol2):
                        if xmid-x >= 0: rat = tol1
                        else: rat = -tol1
                else:
                    if (x>=xmid): deltax=a-x # if it's not do a golden section step
                    else: deltax=b-x
                    rat = _cg*deltax
            if (abs(rat) < tol1): # update by at least tol1
                if rat >= 0: u = x + tol1
                else: u = x - tol1
            else:
                u = x + rat
            fu = func(u) # calculate new output value
            funcalls += 1
            if (fu > fx): # if it's bigger than current
                if (u<x): a=u
                else: b=u
                if (fu<=fw) or (w==x):
                    v=w; w=u; fv=fw; fw=fu
                elif (fu<=fv) or (v==x) or (v==w):
                    v=u; fv=fu
            else:
                if (u >= x): a = x
                else: b = x
                v=w; w=x; x=u
                fv=fw; fw=fx; fx=fu
            iter += 1
        #################################
        #END CORE ALGORITHM
        #################################
        self.xmin = x
        self.fval = fx
        self.iter = iter
        self.funcalls = funcalls
    def get_result(self, full_output=False):
        # Return xmin only, or the full (xmin, fval, iter, funcalls) tuple.
        if full_output:
            return self.xmin, self.fval, self.iter, self.funcalls
        else:
            return self.xmin
def brent(func, brack=None, tol=1.48e-8, full_output=0, maxiter=500):
    """Given a function of one-variable and a possible bracketing interval,
    return the minimum of the function isolated to a fractional precision
    of tol.

    :Parameters:
      func : callable f(x)
          Objective function.
      brack : tuple
          Triple (a,b,c) where (a<b<c) and func(b) < func(a),func(c).
          If bracket consists of two numbers (a,c) then they are assumed
          to be a starting interval for a downhill bracket search (see
          `bracket`); it doesn't always mean that the obtained solution
          will satisfy a<=x<=c.
      full_output : bool
          If True, return all output args (xmin, fval, iter, funcalls).

    :Returns:
      xmin : ndarray
          Optimum point.
      fval : float
          Optimum value.
      iter : int
          Number of iterations.
      funcalls : int
          Number of objective function evaluations made.

    Notes
    -----
    Uses inverse parabolic interpolation when possible to speed up
    convergence of golden section method.  Thin functional wrapper
    around the Brent class.
    """
    solver = Brent(func=func, tol=tol, maxiter=maxiter)
    solver.set_bracket(brack)
    solver.optimize()
    return solver.get_result(full_output=full_output)
def golden(func, args=(), brack=None, tol=_epsilon, full_output=0):
    """ Given a function of one-variable and a possible bracketing interval,
    return the minimum of the function isolated to a fractional precision of
    tol.
    :Parameters:
        func : callable func(x,*args)
            Objective function to minimize.
        args : tuple
            Additional arguments (if present), passed to func.
        brack : tuple
            Triple (a,b,c), where (a<b<c) and func(b) <
            func(a),func(c). If bracket consists of two numbers (a,
            c), then they are assumed to be a starting interval for a
            downhill bracket search (see `bracket`); it doesn't always
            mean that obtained solution will satisfy a<=x<=c.
        tol : float
            x tolerance stop criterion
        full_output : bool
            If True, return optional outputs.
    :Notes:
        Uses analog of bisection method to decrease the bracketed
        interval.
    """
    # Obtain a valid bracket xa < xb < xc with func(xb) below both ends,
    # searching downhill for one when it is not fully specified.
    if brack is None:
        xa,xb,xc,fa,fb,fc,funcalls = bracket(func, args=args)
    elif len(brack) == 2:
        xa,xb,xc,fa,fb,fc,funcalls = bracket(func, xa=brack[0], xb=brack[1], args=args)
    elif len(brack) == 3:
        xa,xb,xc = brack
        if (xa > xc):  # swap so xa < xc can be assumed
            dum = xa; xa=xc; xc=dum
        assert ((xa < xb) and (xb < xc)), "Not a bracketing interval."
        fa = func(*((xa,)+args))
        fb = func(*((xb,)+args))
        fc = func(*((xc,)+args))
        assert ((fb<fa) and (fb < fc)), "Not a bracketing interval."
        funcalls = 3
    else:
        raise ValueError, "Bracketing interval must be length 2 or 3 sequence."
    # Golden-section constants: _gR approximates (sqrt(5)-1)/2.
    _gR = 0.61803399
    _gC = 1.0-_gR
    x3 = xc
    x0 = xa
    # Place the two interior probes so that the larger sub-interval is
    # split at the golden ratio.
    if (abs(xc-xb) > abs(xb-xa)):
        x1 = xb
        x2 = xb + _gC*(xc-xb)
    else:
        x2 = xb
        x1 = xb - _gC*(xb-xa)
    f1 = func(*((x1,)+args))
    f2 = func(*((x2,)+args))
    funcalls += 2
    # Shrink [x0, x3] around the lower of the two interior values until
    # the interval is tight relative to the probe magnitudes.
    while (abs(x3-x0) > tol*(abs(x1)+abs(x2))):
        if (f2 < f1):
            x0 = x1; x1 = x2; x2 = _gR*x1 + _gC*x3
            f1 = f2; f2 = func(*((x2,)+args))
        else:
            x3 = x2; x2 = x1; x1 = _gR*x2 + _gC*x0
            f2 = f1; f1 = func(*((x1,)+args))
        funcalls += 1
    # Report whichever interior probe finished lower.
    if (f1 < f2):
        xmin = x1
        fval = f1
    else:
        xmin = x2
        fval = f2
    if full_output:
        return xmin, fval, funcalls
    else:
        return xmin
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
    """Given a function and distinct initial points, search in the
    downhill direction (as defined by the initial points) and return
    new points xa, xb, xc that bracket the minimum of the function
    f(xa) > f(xb) < f(xc). It doesn't always mean that obtained
    solution will satisfy xa<=x<=xb
    :Parameters:
        func : callable f(x,*args)
            Objective function to minimize.
        xa, xb : float
            Bracketing interval.
        args : tuple
            Additional arguments (if present), passed to `func`.
        grow_limit : float
            Maximum grow limit.
        maxiter : int
            Maximum number of iterations to perform.
    :Returns: xa, xb, xc, fa, fb, fc, funcalls
        xa, xb, xc : float
            Bracket.
        fa, fb, fc : float
            Objective function values in bracket.
        funcalls : int
            Number of function evaluations made.
    """
    # _gold is the golden ratio; each default step enlarges by it.
    _gold = 1.618034
    _verysmall_num = 1e-21
    fa = func(*(xa,)+args)
    fb = func(*(xb,)+args)
    if (fa < fb):                      # Switch so fa > fb
        dum = xa; xa = xb; xb = dum
        dum = fa; fa = fb; fb = dum
    xc = xb + _gold*(xb-xa)
    fc = func(*((xc,)+args))
    funcalls = 3
    iter = 0
    # Keep stepping downhill until fc rises above fb, i.e. a bracket.
    while (fc < fb):
        # Parabolic extrapolation through (xa,fa), (xb,fb), (xc,fc).
        tmp1 = (xb - xa)*(fb-fc)
        tmp2 = (xb - xc)*(fb-fa)
        val = tmp2-tmp1
        # Guard the denominator against degenerate/collinear geometry.
        if abs(val) < _verysmall_num:
            denom = 2.0*_verysmall_num
        else:
            denom = 2.0*val
        w = xb - ((xb-xc)*tmp2-(xb-xa)*tmp1)/denom
        wlim = xb + grow_limit*(xc-xb)
        if iter > maxiter:
            raise RuntimeError, "Too many iterations."
        iter += 1
        if (w-xc)*(xb-w) > 0.0:
            # Parabolic w lies between xb and xc: probe it directly.
            fw = func(*((w,)+args))
            funcalls += 1
            if (fw < fc):
                # Minimum bracketed between xb and xc.
                xa = xb; xb=w; fa=fb; fb=fw
                return xa, xb, xc, fa, fb, fc, funcalls
            elif (fw > fb):
                # Minimum bracketed between xa and w.
                xc = w; fc=fw
                return xa, xb, xc, fa, fb, fc, funcalls
            # Parabolic fit did not help; fall back to a golden step.
            w = xc + _gold*(xc-xb)
            fw = func(*((w,)+args))
            funcalls += 1
        elif (w-wlim)*(wlim-xc) >= 0.0:
            # Parabolic w overshot the growth limit: clamp to wlim.
            w = wlim
            fw = func(*((w,)+args))
            funcalls += 1
        elif (w-wlim)*(xc-w) > 0.0:
            # w lies between xc and the limit: probe it.
            fw = func(*((w,)+args))
            funcalls += 1
            if (fw < fc):
                # Still descending: shift forward and take another step.
                xb=xc; xc=w; w=xc+_gold*(xc-xb)
                fb=fc; fc=fw; fw=func(*((w,)+args))
                funcalls += 1
        else:
            # Reject the parabolic point: plain golden-ratio step.
            w = xc + _gold*(xc-xb)
            fw = func(*((w,)+args))
            funcalls += 1
        # Slide the triple downhill and iterate.
        xa=xb; xb=xc; xc=w
        fa=fb; fb=fc; fc=fw
    return xa, xb, xc, fa, fb, fc, funcalls
def _linesearch_powell(linesearch, func, p, xi, tol):
"""Line-search algorithm using fminbound.
Find the minimium of the function ``func(x0+ alpha*direc)``.
"""
def myfunc(alpha):
return func(p + alpha * xi)
alpha_min, fret, iter, num = linesearch(myfunc, full_output=1, tol=tol)
xi = alpha_min*xi
return squeeze(fret), p+xi, xi
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
                maxfun=None, full_output=0, disp=1, retall=0, callback=None,
                direc=None, linesearch=brent):
    """Minimize a function using modified Powell's method.
    :Parameters:
      func : callable f(x,*args)
          Objective function to be minimized.
      x0 : ndarray
          Initial guess.
      args : tuple
          Extra arguments passed to func.
      callback : callable
          An optional user-supplied function, called after each
          iteration.  Called as ``callback(funcalls, xk, fval, delta)``,
          where ``xk`` is the current parameter vector.
      direc : ndarray
          Initial direction set.
      linesearch : callable
          Scalar minimizer used for the line searches (default `brent`).
          Must accept ``(func, full_output=1, tol=...)`` and return
          ``(alpha_min, fret, iter, funcalls)``.
    :Returns: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs})
      xopt : ndarray
          Parameter which minimizes `func`.
      fopt : number
          Value of function at minimum: ``fopt = func(xopt)``.
      direc : ndarray
          Current direction set.
      iter : int
          Number of iterations.
      funcalls : int
          Number of function calls made.
      warnflag : int
          Integer warning flag:
              1 : Maximum number of function evaluations.
              2 : Maximum number of iterations.
      allvecs : list
          List of solutions at each iteration.
    *Other Parameters*:
      xtol : float
          Line-search error tolerance.
      ftol : float
          Absolute error in ``func(xopt)`` acceptable for convergence.
      maxiter : int
          Maximum number of iterations to perform.
      maxfun : int
          Maximum number of function evaluations to make.
      full_output : bool
          If True, fopt, xi, direc, iter, funcalls, and
          warnflag are returned.
      disp : bool
          If True, print convergence messages.
      retall : bool
          If True, return a list of the solution at each iteration.
    :Notes:
        Uses a modification of Powell's method to find the minimum of
        a function of N variables.
    """
    # we need to use a mutable object here that we can update in the
    # wrapper function
    fcalls, func = wrap_function(func, args)
    x = asarray(x0).flatten()
    if retall:
        allvecs = [x]
    N = len(x)
    rank = len(x.shape)
    if not -1 < rank < 2:
        raise ValueError, "Initial guess must be a scalar or rank-1 sequence."
    # Default budgets scale with the problem dimension.
    if maxiter is None:
        maxiter = N * 1000
    if maxfun is None:
        maxfun = N * 1000
    # Default direction set: the coordinate axes.
    if direc is None:
        direc = eye(N, dtype=float)
    else:
        direc = asarray(direc, dtype=float)
    fval = squeeze(func(x))
    x1 = x.copy()
    iter = 0;
    ilist = range(N)
    while True:
        fx = fval
        bigind = 0
        delta = 0.0
        # One sweep: line-minimize along every direction, remembering
        # which direction produced the largest single decrease.
        for i in ilist:
            direc1 = direc[i]
            fx2 = fval
            fval, x, direc1 = _linesearch_powell(linesearch,
                func, x, direc1, xtol*100)
            if (fx2 - fval) > delta:
                delta = fx2 - fval
                bigind = i
        iter += 1
        if callback is not None:
            callback(fcalls[0], x, fval, delta)
        if retall:
            allvecs.append(x)
        # Convergence and budget checks.
        if abs(fx - fval) < ftol: break
        if fcalls[0] >= maxfun: break
        if iter >= maxiter: break
        # Construct the extrapolated point
        direc1 = x - x1
        x2 = 2*x - x1
        x1 = x.copy()
        fx2 = squeeze(func(x2))
        if (fx > fx2):
            # Powell's acceptance test: only replace the direction of
            # largest decrease when the extrapolated step pays off.
            t = 2.0*(fx+fx2-2.0*fval)
            temp = (fx-fval-delta)
            t *= temp*temp
            temp = fx-fx2
            t -= delta*temp*temp
            if t < 0.0:
                fval, x, direc1 = _linesearch_powell(linesearch,
                    func, x, direc1, xtol*100)
                direc[bigind] = direc[-1]
                direc[-1] = direc1
    warnflag = 0
    if fcalls[0] >= maxfun:
        warnflag = 1
        if disp:
            print "Warning: Maximum number of function evaluations has "\
                  "been exceeded."
    elif iter >= maxiter:
        warnflag = 2
        if disp:
            print "Warning: Maximum number of iterations has been exceeded"
    else:
        if disp:
            print "Optimization terminated successfully."
            print "         Current function value: %f" % fval
            print "         Iterations: %d" % iter
            print "         Function evaluations: %d" % fcalls[0]
    x = squeeze(x)
    # Assemble the requested combination of outputs.
    if full_output:
        retlist = x, fval, direc, iter, fcalls[0], warnflag
        if retall:
            retlist += (allvecs,)
    else:
        retlist = x
        if retall:
            retlist = (x, allvecs)
    return retlist
def _endprint(x, flag, fval, maxfun, xtol, disp):
    # Print a termination message for the scalar minimizers.
    # flag == 0: converged; only reported at verbosity disp > 1.
    if flag == 0:
        if disp > 1:
            print "\nOptimization terminated successfully;\n" \
                  "The returned value satisfies the termination criteria\n" \
                  "(using xtol = ", xtol, ")"
    # flag == 1: function-evaluation budget exhausted.
    # NOTE(review): this branch prints regardless of `disp`, unlike the
    # flag == 0 branch — confirm the asymmetry is intended.
    if flag == 1:
        print "\nMaximum number of function evaluations exceeded --- " \
              "increase maxfun argument.\n"
    return
def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin):
    """Minimize a function over a given range by brute force.
    :Parameters:
      func : callable ``f(x,*args)``
          Objective function to be minimized.
      ranges : tuple
          Each element is a tuple of parameters or a slice object to
          be handed to ``numpy.mgrid``.
      args : tuple
          Extra arguments passed to function.
      Ns : int
          Default number of samples, if those are not provided.
      full_output : bool
          If True, return the evaluation grid.
    :Returns: (x0, fval, {grid, Jout})
      x0 : ndarray
          Value of arguments to `func`, giving minimum over the grid.
      fval : int
          Function value at minimum.
      grid : tuple
          Representation of the evaluation grid.  It has the same
          length as x0.
      Jout : ndarray
          Function values over grid:  ``Jout = func(*grid)``.
    :Notes:
      Find the minimum of a function evaluated on a grid given by
      the tuple ranges.
    """
    N = len(ranges)
    if N > 40:
        raise ValueError, "Brute Force not possible with more " \
              "than 40 variables."
    # Normalize each range spec into a slice; a short (lo, hi) pair gets
    # a complex step so mgrid produces Ns evenly spaced samples.
    lrange = list(ranges)
    for k in range(N):
        if type(lrange[k]) is not type(slice(None)):
            if len(lrange[k]) < 3:
                lrange[k] = tuple(lrange[k]) + (complex(Ns),)
            lrange[k] = slice(*lrange[k])
    if (N==1):
        lrange = lrange[0]
    # Evaluate func over the whole lattice via a vectorized wrapper.
    def _scalarfunc(*params):
        params = squeeze(asarray(params))
        return func(params,*args)
    vecfunc = vectorize(_scalarfunc)
    grid = mgrid[lrange]
    if (N==1):
        grid = (grid,)
    Jout = vecfunc(*grid)
    # Unravel the flat argmin back into one index per grid axis
    # (manual row-major unraveling, last axis fastest).
    Nshape = shape(Jout)
    indx = argmin(Jout.ravel(),axis=-1)
    Nindx = zeros(N,int)
    xmin = zeros(N,float)
    for k in range(N-1,-1,-1):
        thisN = Nshape[k]
        Nindx[k] = indx % Nshape[k]
        indx = indx / thisN
    for k in range(N):
        xmin[k] = grid[k][tuple(Nindx)]
    Jmin = Jout[tuple(Nindx)]
    if (N==1):
        grid = grid[0]
        xmin = xmin[0]
    # Optionally polish the grid minimum with a local optimizer.
    if callable(finish):
        vals = finish(func,xmin,args=args,full_output=1, disp=0)
        xmin = vals[0]
        Jmin = vals[1]
        if vals[-1] > 0:
            print "Warning: Final optimization did not succeed"
    if full_output:
        return xmin, Jmin, grid, Jout
    else:
        return xmin
def main():
    """Benchmark every optimizer in this module on the order-3
    Rosenbrock function and print a timing summary table."""
    import time
    times = []
    algor = []
    x0 = [0.8,1.2,0.7]
    print "Nelder-Mead Simplex"
    print "==================="
    start = time.time()
    x = fmin(rosen,x0)
    print x
    times.append(time.time() - start)
    algor.append('Nelder-Mead Simplex\t')
    print
    print "Powell Direction Set Method"
    print "==========================="
    start = time.time()
    x = fmin_powell(rosen,x0)
    print x
    times.append(time.time() - start)
    algor.append('Powell Direction Set Method.')
    print
    print "Nonlinear CG"
    print "============"
    start = time.time()
    x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)
    print x
    times.append(time.time() - start)
    algor.append('Nonlinear CG     \t')
    print
    print "BFGS Quasi-Newton"
    print "================="
    start = time.time()
    x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80)
    print x
    times.append(time.time() - start)
    algor.append('BFGS Quasi-Newton\t')
    print
    print "BFGS approximate gradient"
    print "========================="
    start = time.time()
    x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)
    print x
    times.append(time.time() - start)
    algor.append('BFGS without gradient\t')
    print
    print "Newton-CG with Hessian product"
    print "=============================="
    start = time.time()
    x = fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod, maxiter=80)
    print x
    times.append(time.time() - start)
    algor.append('Newton-CG with hessian product')
    print
    print "Newton-CG with full Hessian"
    print "==========================="
    start = time.time()
    x = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80)
    print x
    times.append(time.time() - start)
    algor.append('Newton-CG with full hessian')
    print
    print "\nMinimizing the Rosenbrock function of order 3\n"
    print " Algorithm \t\t\t       Seconds"
    print "===========\t\t\t ========="
    for k in range(len(algor)):
        print algor[k], "\t -- ", times[k]
# Run the optimizer benchmark when executed as a script.
if __name__ == "__main__":
    main()
| {
"content_hash": "ff90155a4620fa6b555025a4ca852b62",
"timestamp": "",
"source": "github",
"line_count": 1995,
"max_line_length": 97,
"avg_line_length": 31.90877192982456,
"alnum_prop": 0.518646517326966,
"repo_name": "sauloal/cnidaria",
"id": "7dccce59d557ceebb281405ed589b1833adb2a5d",
"size": "64370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/venv/lib/python2.7/site-packages/cogent/maths/scipy_optimize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1696790"
},
{
"name": "C++",
"bytes": "3035466"
},
{
"name": "CSS",
"bytes": "20306"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "Groff",
"bytes": "32478"
},
{
"name": "HTML",
"bytes": "19658"
},
{
"name": "JavaScript",
"bytes": "250616"
},
{
"name": "Jupyter Notebook",
"bytes": "8401292"
},
{
"name": "M4",
"bytes": "3905"
},
{
"name": "Makefile",
"bytes": "177650"
},
{
"name": "Objective-C",
"bytes": "1701"
},
{
"name": "Python",
"bytes": "28122291"
},
{
"name": "R",
"bytes": "86108"
},
{
"name": "Shell",
"bytes": "676123"
}
],
"symlink_target": ""
} |
"""
Setup
Automation of MitM Attack on WiFi Networks
Bachelor's Thesis UIFS FIT VUT
Martin Vondracek
2016
"""
from setuptools import setup
# Package author metadata, passed to setup() below as author/author_email.
__author__ = 'Martin Vondracek'
__email__ = 'xvondr20@stud.fit.vutbr.cz'
def readme():
    """Return the contents of README.md, used as the long description.

    The file is decoded explicitly as UTF-8 so the build does not
    depend on the machine's locale default encoding.
    """
    with open('README.md', encoding='utf-8') as f:
        return f.read()
# Packaging metadata.  long_description is read from README.md at build
# time via readme() above; the console entry point installs `wifimitmcli`.
setup(
    name='wifimitm',
    description="Automation of MitM Attack on WiFi Networks, Bachelor's Thesis, UIFS FIT VUT, 2016",
    long_description=readme(),
    author=__author__,
    author_email=__email__,
    url='http://mvondracek.github.io/wifimitm/',
    version='0.6.0',
    packages=['wifimitm', 'wifimitm.tests'],
    setup_requires=['setuptools_git >= 0.3'],
    install_requires=['netifaces', 'coloredlogs'],
    test_suite='wifimitm.tests',
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'wifimitmcli = wifimitm.wifimitmcli:main',
        ]
    }
)
| {
"content_hash": "fb3e0966e25d1a18d5e6c45b3061c1a5",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 100,
"avg_line_length": 20.522727272727273,
"alnum_prop": 0.6389811738648948,
"repo_name": "mvondracek/wifimitm",
"id": "e6b5fd773eed52ef082058ebadef40680b98eabc",
"size": "926",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5653"
},
{
"name": "Makefile",
"bytes": "641"
},
{
"name": "Python",
"bytes": "135542"
},
{
"name": "Roff",
"bytes": "3162"
},
{
"name": "Shell",
"bytes": "14903"
}
],
"symlink_target": ""
} |
import random
import secrets
import string
from binascii import hexlify
from hashlib import pbkdf2_hmac
from http import HTTPStatus

from globibot.lib.transaction import Transaction
from globibot.lib.web.constants import USER_COOKIE_NAME
from globibot.lib.web.decorators import with_body_arguments, async_handler
from globibot.lib.web.handlers import ContextHandler

from . import queries as q
from . import constants as c
tokenCache = dict()
def make_token():
    """Return a random 32-character token of uppercase letters and digits.

    These tokens gate account registration, so they are drawn from the
    cryptographically secure ``secrets`` module rather than ``random``,
    whose Mersenne Twister output is predictable to an attacker.
    """
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(
        secrets.choice(alphabet)
        for _ in range(32)
    )
make_salt = make_token
def hash_password(password, salt):
    """Derive the hex PBKDF2-HMAC digest of ``password`` with ``salt``.

    Hash algorithm and iteration count come from the module constants.
    """
    digest = pbkdf2_hmac(
        hash_name=c.PKCS_HASH_NAME,
        password=password.encode(),
        salt=salt.encode(),
        iterations=c.PKCS_ITERATION_COUNT,
    )
    return hexlify(digest).decode('ascii')
def check_credentials_for(user, password, db):
    """Return True iff ``password`` matches the stored hash for ``user``."""
    with Transaction(db) as trans:
        trans.execute(q.get_credentials, dict(id=user.id))
        credentials = trans.fetchone()
        # No credentials row: the user never registered.
        if not credentials:
            return False
        expected_hashed_password, password_salt = credentials
        # Re-derive with the stored per-user salt and compare.
        hashed_password = hash_password(password, password_salt)
        return (expected_hashed_password == hashed_password)
    # NOTE(review): only reachable if Transaction.__exit__ suppresses an
    # exception raised inside the block — confirm that is the intent;
    # otherwise this line is dead code.
    return False
def register_user(user_id, password, db):
    """Create a credentials row for ``user_id``.

    Returns False when the user already has an entry, True once the new
    salted password hash has been stored.
    """
    with Transaction(db) as trans:
        trans.execute(q.get_person, dict(id=user_id))
        if trans.fetchone():
            return False
        salt = make_salt()
        trans.execute(q.create_user, dict(
            id            = user_id,
            password      = hash_password(password, salt),
            password_salt = salt
        ))
        return True
class LoginHandler(ContextHandler):
    """POST handler that checks a name/password pair and starts a session.

    On success it sets the signed user-id session cookie; any failure
    answers 400 with no body.
    """
    @with_body_arguments('user', 'password')
    def post(self, user, password):
        # Several users may share a display name; accept the first one
        # whose stored credentials match the supplied password.
        users = self.bot.find_users_by_name(user)
        if users:
            # NOTE: the loop variable shadows the `user` name argument
            # from here on.
            for user in users:
                if check_credentials_for(user, password, self.bot.db):
                    self.set_secure_cookie(
                        USER_COOKIE_NAME,
                        user.id,
                        expires_days=c.SESSION_COOKIE_DURATION
                    )
                    return
        # Unknown name or wrong password: same response either way.
        self.set_status(HTTPStatus.BAD_REQUEST)
class RegistrationHandler(ContextHandler):
    """POST handler that creates credentials for a verified user.

    The caller must present the one-time token previously delivered by
    RegistrationTokenHandler; each failure mode answers 400 with a short
    reason string in the body.
    """
    @with_body_arguments('user', 'token', 'password')
    def post(self, user, token, password):
        # The token must match the one cached for this user.
        # assumes `user` carries the same id used when the token was
        # requested — TODO confirm against the client.
        if tokenCache.get(user) != token:
            self.set_status(HTTPStatus.BAD_REQUEST)
            self.write('Invalid token')
            return
        # Only empty passwords are rejected; no further strength policy.
        if len(password) == 0:
            self.set_status(HTTPStatus.BAD_REQUEST)
            self.write('Password too short')
            return
        # register_user returns False when credentials already exist.
        if not register_user(user, password, self.bot.db):
            self.set_status(HTTPStatus.BAD_REQUEST)
            self.write('Already registered')
            return
class RegistrationTokenHandler(ContextHandler):
    """POST handler that direct-messages a one-time registration token.

    The token is remembered in the module-level ``tokenCache`` so a
    later RegistrationHandler request can verify it.
    """
    @async_handler
    async def post(self, user_id):
        user = self.bot.find_user(user_id)
        # Issue at most one token per user id; unknown users and
        # repeat requests both answer 400.
        if user and user_id not in tokenCache:
            token = make_token()
            await self.bot.send_message(
                user,
                'Here is your registration token: `{}`'.format(token)
            )
            # Cached only after the message send succeeds, so a failed
            # send does not consume the single token slot.
            # NOTE(review): tokens never expire and live for the whole
            # process lifetime — confirm this is acceptable.
            tokenCache[user_id] = token
        else:
            self.set_status(HTTPStatus.BAD_REQUEST)
| {
"content_hash": "c6414daf217b2a2998273307fb5d8310",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 74,
"avg_line_length": 27.677419354838708,
"alnum_prop": 0.6046037296037297,
"repo_name": "best-coloc-ever/globibot",
"id": "5c72992e87fa1a67c00032a540c56a85ad94a971",
"size": "3432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/src/globibot/api/auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2285"
},
{
"name": "HTML",
"bytes": "804"
},
{
"name": "JavaScript",
"bytes": "5583"
},
{
"name": "Python",
"bytes": "232680"
},
{
"name": "Shell",
"bytes": "330"
}
],
"symlink_target": ""
} |
"""
This module contains Schedule-related CLI commands
"""
from ..models import Schedule
def create_schedule(session, logger, name, short_retention, long_retention, long_interval):
    """
    Adds a new Schedule row to the session and logs the creation
    """
    session.add(Schedule(
        name=name,
        short_retention=short_retention,
        long_retention=long_retention,
        long_interval=long_interval,
    ))
    logger.info("Created schedule {}".format(name))
def delete_schedule(session, logger, name):
    """
    Deletes the schedule named `name` from the database
    """
    # Query.filter() takes SQL expressions, not keyword criteria —
    # filter(name=name) raises TypeError at runtime.  Keyword-style
    # equality filtering is filter_by().
    sched = session.query(Schedule).filter_by(name=name).one()
    session.delete(sched)
    logger.info("Deleted schedule {}".format(name))
def list_schedules(session, logger):
    """
    Logs every schedule stored in the database, one line per entry
    """
    for schedule in session.query(Schedule).all():
        logger.info("- {}".format(schedule))
| {
"content_hash": "992c3e86d16c603a27a751ed8bf202b2",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 91,
"avg_line_length": 24.07894736842105,
"alnum_prop": 0.6327868852459017,
"repo_name": "quanta-computing/suprabackup",
"id": "73e81d5d9235f33cbfc6f52736ce199fd8d33558",
"size": "915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "suprabackup/cli/schedule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35770"
}
],
"symlink_target": ""
} |
"""
=====================================================
Distance computations (:mod:`scipy.spatial.distance`)
=====================================================
.. sectionauthor:: Damian Eads
Function Reference
------------------
Distance matrix computation from a collection of raw observation vectors
stored in a rectangular array.
.. autosummary::
:toctree: generated/
pdist -- pairwise distances between observation vectors.
cdist -- distances between two collections of observation vectors
squareform -- convert distance matrix to a condensed one and vice versa
Predicates for checking the validity of distance matrices, both
condensed and redundant. Also contained in this module are functions
for computing the number of observations in a distance matrix.
.. autosummary::
:toctree: generated/
is_valid_dm -- checks for a valid distance matrix
is_valid_y -- checks for a valid condensed distance matrix
num_obs_dm -- # of observations in a distance matrix
num_obs_y -- # of observations in a condensed distance matrix
Distance functions between two numeric vectors ``u`` and ``v``. Computing
distances over a large collection of vectors is inefficient for these
functions. Use ``pdist`` for this purpose.
.. autosummary::
:toctree: generated/
braycurtis -- the Bray-Curtis distance.
canberra -- the Canberra distance.
chebyshev -- the Chebyshev distance.
cityblock -- the Manhattan distance.
correlation -- the Correlation distance.
cosine -- the Cosine distance.
euclidean -- the Euclidean distance.
mahalanobis -- the Mahalanobis distance.
minkowski -- the Minkowski distance.
seuclidean -- the normalized Euclidean distance.
sqeuclidean -- the squared Euclidean distance.
wminkowski -- the weighted Minkowski distance.
Distance functions between two boolean vectors (representing sets) ``u`` and
``v``. As in the case of numerical vectors, ``pdist`` is more efficient for
computing the distances between all pairs.
.. autosummary::
:toctree: generated/
dice -- the Dice dissimilarity.
hamming -- the Hamming distance.
jaccard -- the Jaccard distance.
kulsinski -- the Kulsinski distance.
matching -- the matching dissimilarity.
rogerstanimoto -- the Rogers-Tanimoto dissimilarity.
russellrao -- the Russell-Rao dissimilarity.
sokalmichener -- the Sokal-Michener dissimilarity.
sokalsneath -- the Sokal-Sneath dissimilarity.
yule -- the Yule dissimilarity.
:func:`hamming` also operates over discrete numerical vectors.
"""
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
from __future__ import division, print_function, absolute_import
__all__ = [
'braycurtis',
'canberra',
'cdist',
'chebyshev',
'cityblock',
'correlation',
'cosine',
'dice',
'euclidean',
'hamming',
'is_valid_dm',
'is_valid_y',
'jaccard',
'kulsinski',
'mahalanobis',
'matching',
'minkowski',
'num_obs_dm',
'num_obs_y',
'pdist',
'rogerstanimoto',
'russellrao',
'seuclidean',
'sokalmichener',
'sokalsneath',
'sqeuclidean',
'squareform',
'wminkowski',
'yule'
]
import warnings
import numpy as np
from scipy._lib.six import callable, string_types
from scipy._lib.six import xrange
from . import _distance_wrap
from ..linalg import norm
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
return a
def _convert_to_bool(X):
return np.ascontiguousarray(X, dtype=bool)
def _convert_to_double(X):
return np.ascontiguousarray(X, dtype=np.double)
def _validate_vector(u, dtype=None):
# XXX Is order='c' really necessary?
u = np.asarray(u, dtype=dtype, order='c').squeeze()
# Ensure values such as u=1 and u=[1] still return 1-D arrays.
u = np.atleast_1d(u)
if u.ndim > 1:
raise ValueError("Input vector should be 1-D.")
return u
def minkowski(u, v, p):
    """Minkowski distance of order ``p`` between two 1-D arrays.

    Computes ``(sum(|u_i - v_i| ** p)) ** (1/p)``.

    Parameters
    ----------
    u, v : (N,) array_like
        Input vectors.
    p : int
        Order of the norm; must be at least 1.

    Returns
    -------
    d : double
        The Minkowski distance between `u` and `v`.
    """
    diff = _validate_vector(u) - _validate_vector(v)
    if p < 1:
        raise ValueError("p must be at least 1")
    return norm(diff, ord=p)
def wminkowski(u, v, p, w):
    """Weighted Minkowski distance of order ``p`` between 1-D arrays.

    Computes ``(sum(|w_i * (u_i - v_i)| ** p)) ** (1/p)``.

    Parameters
    ----------
    u, v : (N,) array_like
        Input vectors.
    p : int
        Order of the norm; must be at least 1.
    w : (N,) array_like
        Per-component weight vector.

    Returns
    -------
    wminkowski : double
        The weighted Minkowski distance between `u` and `v`.
    """
    diff = _validate_vector(u) - _validate_vector(v)
    weights = _validate_vector(w)
    if p < 1:
        raise ValueError("p must be at least 1")
    return norm(weights * diff, ord=p)
def euclidean(u, v):
    """Euclidean (L2) distance ``||u - v||_2`` between two 1-D arrays.

    Parameters
    ----------
    u, v : (N,) array_like
        Input vectors.

    Returns
    -------
    euclidean : double
        The Euclidean distance between `u` and `v`.
    """
    return norm(_validate_vector(u) - _validate_vector(v))
def sqeuclidean(u, v):
    """Squared Euclidean distance ``||u - v||_2^2`` between 1-D arrays.

    Parameters
    ----------
    u, v : (N,) array_like
        Input vectors.

    Returns
    -------
    sqeuclidean : double
        The squared Euclidean distance between `u` and `v`.
    """
    # Keep inexact dtypes as given; promote everything else to float64
    # for numerical stability of the subtraction/accumulation.
    utype = None
    vtype = None
    if not (hasattr(u, "dtype") and np.issubdtype(u.dtype, np.inexact)):
        utype = np.float64
    if not (hasattr(v, "dtype") and np.issubdtype(v.dtype, np.inexact)):
        vtype = np.float64
    diff = _validate_vector(u, dtype=utype) - _validate_vector(v, dtype=vtype)
    return np.dot(diff, diff)
def cosine(u, v):
    """Cosine distance ``1 - (u . v) / (||u|| ||v||)`` between 1-D arrays.

    Parameters
    ----------
    u, v : (N,) array_like
        Input vectors.

    Returns
    -------
    cosine : double
        The cosine distance between `u` and `v`.
    """
    a = _validate_vector(u)
    b = _validate_vector(v)
    return 1.0 - np.dot(a, b) / (norm(a) * norm(b))
def correlation(u, v):
    """Correlation distance between two 1-D arrays.

    Equals the cosine distance of the mean-centered inputs:
    ``1 - (u - mean(u)) . (v - mean(v)) / (||u - mean(u)|| ||v - mean(v)||)``.

    Parameters
    ----------
    u, v : (N,) array_like
        Input vectors.

    Returns
    -------
    correlation : double
        The correlation distance between `u` and `v`.
    """
    centered_u = _validate_vector(u)
    centered_v = _validate_vector(v)
    centered_u = centered_u - centered_u.mean()
    centered_v = centered_v - centered_v.mean()
    return 1.0 - np.dot(centered_u, centered_v) / (norm(centered_u) *
                                                   norm(centered_v))
def hamming(u, v):
    """Hamming distance: the fraction of positions where `u` and `v`
    disagree.

    Works for boolean as well as discrete numerical vectors.

    Parameters
    ----------
    u, v : (N,) array_like
        Input vectors; must have equal length.

    Returns
    -------
    hamming : double
        Proportion of disagreeing components.
    """
    a = _validate_vector(u)
    b = _validate_vector(v)
    if a.shape != b.shape:
        raise ValueError('The 1d arrays must have equal lengths.')
    return np.mean(a != b)
def jaccard(u, v):
    """Jaccard-Needham dissimilarity between two boolean 1-D arrays.

    Ratio of the number of positions where exactly one vector is
    nonzero to the number of positions where at least one is nonzero.

    Parameters
    ----------
    u, v : (N,) array_like, bool
        Input vectors.

    Returns
    -------
    jaccard : double
        The Jaccard distance between `u` and `v`.
    """
    a = _validate_vector(u)
    b = _validate_vector(v)
    either_nonzero = np.bitwise_or(a != 0, b != 0)
    differing = np.bitwise_and(a != b, either_nonzero)
    return np.double(differing.sum()) / np.double(either_nonzero.sum())
def kulsinski(u, v):
    """Kulsinski dissimilarity between two boolean 1-D arrays.

    Computed as ``(c_TF + c_FT - c_TT + n) / (c_FT + c_TF + n)`` from
    the pairwise agreement counts over the ``n`` components.

    Parameters
    ----------
    u, v : (N,) array_like, bool
        Input vectors.

    Returns
    -------
    kulsinski : double
        The Kulsinski distance between `u` and `v`.
    """
    a = _validate_vector(u)
    b = _validate_vector(v)
    n = float(len(a))
    _, nft, ntf, ntt = _nbool_correspond_all(a, b)
    return (ntf + nft - ntt + n) / (ntf + nft + n)
def seuclidean(u, v, V):
    """Standardized Euclidean distance between two 1-D arrays.

    Each squared component difference is divided by the corresponding
    entry of the variance vector ``V`` before summation.

    Parameters
    ----------
    u, v : (N,) array_like
        Input vectors.
    V : (N,) array_like
        1-D array of component variances, usually estimated from a
        larger collection of vectors.

    Returns
    -------
    seuclidean : double
        The standardized Euclidean distance between `u` and `v`.
    """
    a = _validate_vector(u)
    b = _validate_vector(v)
    variances = _validate_vector(V, dtype=np.float64)
    if variances.shape[0] != a.shape[0] or a.shape[0] != b.shape[0]:
        raise TypeError('V must be a 1-D array of the same dimension '
                        'as u and v.')
    return np.sqrt((((a - b) ** 2) / variances).sum())
def cityblock(u, v):
    """City Block (Manhattan) distance ``sum(|u_i - v_i|)``.

    Parameters
    ----------
    u, v : (N,) array_like
        Input vectors.

    Returns
    -------
    cityblock : double
        The Manhattan distance between `u` and `v`.
    """
    return abs(_validate_vector(u) - _validate_vector(v)).sum()
def mahalanobis(u, v, VI):
    """Mahalanobis distance ``sqrt((u-v) VI (u-v)^T)`` between 1-D arrays.

    Parameters
    ----------
    u, v : (N,) array_like
        Input vectors.
    VI : ndarray
        The *inverse* of the covariance matrix.

    Returns
    -------
    mahalanobis : double
        The Mahalanobis distance between `u` and `v`.
    """
    delta = _validate_vector(u) - _validate_vector(v)
    VI = np.atleast_2d(VI)
    return np.sqrt(np.dot(np.dot(delta, VI), delta))
def chebyshev(u, v):
    """Chebyshev distance ``max(|u_i - v_i|)`` between two 1-D arrays.

    Parameters
    ----------
    u, v : (N,) array_like
        Input vectors.

    Returns
    -------
    chebyshev : double
        The Chebyshev distance between `u` and `v`.
    """
    return max(abs(_validate_vector(u) - _validate_vector(v)))
def braycurtis(u, v):
    """Bray-Curtis distance ``sum(|u_i - v_i|) / sum(|u_i + v_i|)``.

    Lies in [0, 1] when all coordinates are positive; undefined for
    zero-length input.

    Parameters
    ----------
    u, v : (N,) array_like
        Input vectors.

    Returns
    -------
    braycurtis : double
        The Bray-Curtis distance between `u` and `v`.
    """
    a = _validate_vector(u)
    b = _validate_vector(v, dtype=np.float64)
    return abs(a - b).sum() / abs(a + b).sum()
def canberra(u, v):
    """Canberra distance ``sum(|u_i - v_i| / (|u_i| + |v_i|))``.

    Parameters
    ----------
    u, v : (N,) array_like
        Input vectors.

    Returns
    -------
    canberra : double
        The Canberra distance between `u` and `v`.

    Notes
    -----
    Components where both `u[i]` and `v[i]` are zero contribute 0 to
    the sum (the 0/0 term is treated as 0 via ``nansum``).
    """
    a = _validate_vector(u)
    b = _validate_vector(v, dtype=np.float64)
    olderr = np.seterr(invalid='ignore')
    try:
        result = np.nansum(abs(a - b) / (abs(a) + abs(b)))
    finally:
        np.seterr(**olderr)
    return result
def _nbool_correspond_all(u, v):
if u.dtype != v.dtype:
raise TypeError("Arrays being compared must be of the same data type.")
if u.dtype == int or u.dtype == np.float_ or u.dtype == np.double:
not_u = 1.0 - u
not_v = 1.0 - v
nff = (not_u * not_v).sum()
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
ntt = (u * v).sum()
elif u.dtype == bool:
not_u = ~u
not_v = ~v
nff = (not_u & not_v).sum()
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
ntt = (u & v).sum()
else:
raise TypeError("Arrays being compared have unknown type.")
return (nff, nft, ntf, ntt)
def _nbool_correspond_ft_tf(u, v):
if u.dtype == int or u.dtype == np.float_ or u.dtype == np.double:
not_u = 1.0 - u
not_v = 1.0 - v
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
else:
not_u = ~u
not_v = ~v
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
return (nft, ntf)
def yule(u, v):
    """
    Computes the Yule dissimilarity between two boolean 1-D arrays.

    The Yule dissimilarity is defined as

    .. math::

         \\frac{R}{c_{TT} * c_{FF} + \\frac{R}{2}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2.0 * c_{TF} * c_{FT}`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.

    Returns
    -------
    yule : double
        The Yule dissimilarity between vectors `u` and `v`.
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    nff, nft, ntf, ntt = _nbool_correspond_all(u, v)
    # Numerator is R = 2 * c_TF * c_FT; the denominator folds in R/2.
    return float(2.0 * ntf * nft) / float(ntt * nff + ntf * nft)
def matching(u, v):
    """
    Computes the Hamming distance between two boolean 1-D arrays.

    Deprecated synonym that simply delegates to :func:`hamming`.
    """
    return hamming(u, v)
def dice(u, v):
    """
    Computes the Dice dissimilarity between two boolean 1-D arrays.

    The Dice dissimilarity between `u` and `v`, is

    .. math::

         \\frac{c_{TF} + c_{FT}}
              {2c_{TT} + c_{FT} + c_{TF}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) ndarray, bool
        Input 1-D array.
    v : (N,) ndarray, bool
        Input 1-D array.

    Returns
    -------
    dice : double
        The Dice dissimilarity between 1-D arrays `u` and `v`.
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    # Bitwise AND for genuine booleans; elementwise product otherwise.
    ntt = (u & v).sum() if u.dtype == bool else (u * v).sum()
    nft, ntf = _nbool_correspond_ft_tf(u, v)
    return float(ntf + nft) / float(2.0 * ntt + ntf + nft)
def rogerstanimoto(u, v):
    """
    Computes the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays.

    The Rogers-Tanimoto dissimilarity between two boolean 1-D arrays
    `u` and `v`, is defined as

    .. math::
       \\frac{R}
            {c_{TT} + c_{FF} + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.

    Returns
    -------
    rogerstanimoto : double
        The Rogers-Tanimoto dissimilarity between vectors
        `u` and `v`.
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    nff, nft, ntf, ntt = _nbool_correspond_all(u, v)
    # R weights the disagreements doubly in both numerator and denominator.
    R = 2.0 * (ntf + nft)
    return float(R) / float(ntt + nff + R)
def russellrao(u, v):
    """
    Computes the Russell-Rao dissimilarity between two boolean 1-D arrays.

    The Russell-Rao dissimilarity between two boolean 1-D arrays, `u` and
    `v`, is defined as

    .. math::

      \\frac{n - c_{TT}}
           {n}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.

    Returns
    -------
    russellrao : double
        The Russell-Rao dissimilarity between vectors `u` and `v`.
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    # Bitwise AND for genuine booleans; elementwise product otherwise.
    both_true = (u & v).sum() if u.dtype == bool else (u * v).sum()
    n = len(u)
    return float(n - both_true) / float(n)
def sokalmichener(u, v):
    """
    Computes the Sokal-Michener dissimilarity between two boolean 1-D arrays.

    The Sokal-Michener dissimilarity between boolean 1-D arrays `u` and `v`,
    is defined as

    .. math::

       \\frac{R}
            {S + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and
    :math:`S = c_{FF} + c_{TT}`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.

    Returns
    -------
    sokalmichener : double
        The Sokal-Michener dissimilarity between vectors `u` and `v`.
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if u.dtype == bool:
        agree_true = (u & v).sum()
        agree_false = (~u & ~v).sum()
    else:
        agree_true = (u * v).sum()
        agree_false = ((1.0 - u) * (1.0 - v)).sum()
    nft, ntf = _nbool_correspond_ft_tf(u, v)
    # S counts agreements, R doubly-counts disagreements.
    R = 2.0 * (ntf + nft)
    return float(R) / float(agree_true + agree_false + R)
def sokalsneath(u, v):
    """
    Computes the Sokal-Sneath dissimilarity between two boolean 1-D arrays.

    The Sokal-Sneath dissimilarity between `u` and `v`,

    .. math::

       \\frac{R}
            {c_{TT} + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.

    Returns
    -------
    sokalsneath : double
        The Sokal-Sneath dissimilarity between vectors `u` and `v`.

    Raises
    ------
    ValueError
        If both vectors contain no True entries (the measure is undefined).
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    # Bitwise AND for genuine booleans; elementwise product otherwise.
    ntt = (u & v).sum() if u.dtype == bool else (u * v).sum()
    nft, ntf = _nbool_correspond_ft_tf(u, v)
    R = 2.0 * (ntf + nft)
    denom = ntt + R
    if denom == 0:
        raise ValueError('Sokal-Sneath dissimilarity is not defined for '
                         'vectors that are entirely false.')
    return float(R) / denom
# Registry of "simple" distance metrics' pdist and cdist implementations,
# meaning the ones that accept one dtype and have no additional arguments.
# Each entry maps a metric alias to a (dtype-converter, C wrapper) pair.
_SIMPLE_CDIST = {}
_SIMPLE_PDIST = {}
# Metrics whose C kernels consume double input; every accepted alias of a
# metric shares the same converter and wrapper.
for names, wrap_name in [
    (['braycurtis'], "bray_curtis"),
    (['canberra'], "canberra"),
    (['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch'], "chebyshev"),
    (["cityblock", "cblock", "cb", "c"], "city_block"),
    (["euclidean", "euclid", "eu", "e"], "euclidean"),
    (["sqeuclidean", "sqe", "sqeuclid"], "sqeuclidean"),
]:
    cdist_fn = getattr(_distance_wrap, "cdist_%s_wrap" % wrap_name)
    pdist_fn = getattr(_distance_wrap, "pdist_%s_wrap" % wrap_name)
    for name in names:
        _SIMPLE_CDIST[name] = _convert_to_double, cdist_fn
        _SIMPLE_PDIST[name] = _convert_to_double, pdist_fn
# Metrics whose C kernels consume boolean input; 'matching' is a synonym
# that reuses the hamming kernel.
for name in ["dice", "kulsinski", "matching", "rogerstanimoto", "russellrao",
             "sokalmichener", "sokalsneath", "yule"]:
    wrap_name = "hamming" if name == "matching" else name
    cdist_fn = getattr(_distance_wrap, "cdist_%s_bool_wrap" % wrap_name)
    _SIMPLE_CDIST[name] = _convert_to_bool, cdist_fn
    pdist_fn = getattr(_distance_wrap, "pdist_%s_bool_wrap" % wrap_name)
    _SIMPLE_PDIST[name] = _convert_to_bool, pdist_fn
def pdist(X, metric='euclidean', p=2, w=None, V=None, VI=None):
    """
    Pairwise distances between observations in n-dimensional space.
    The following are common calling conventions.
    1. ``Y = pdist(X, 'euclidean')``
       Computes the distance between m points using Euclidean distance
       (2-norm) as the distance metric between the points. The points
       are arranged as m n-dimensional row vectors in the matrix X.
    2. ``Y = pdist(X, 'minkowski', p)``
       Computes the distances using the Minkowski distance
       :math:`||u-v||_p` (p-norm) where :math:`p \\geq 1`.
    3. ``Y = pdist(X, 'cityblock')``
       Computes the city block or Manhattan distance between the
       points.
    4. ``Y = pdist(X, 'seuclidean', V=None)``
       Computes the standardized Euclidean distance. The standardized
       Euclidean distance between two n-vectors ``u`` and ``v`` is
       .. math::
          \\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}
       V is the variance vector; V[i] is the variance computed over all
       the i'th components of the points. If not passed, it is
       automatically computed.
    5. ``Y = pdist(X, 'sqeuclidean')``
       Computes the squared Euclidean distance :math:`||u-v||_2^2` between
       the vectors.
    6. ``Y = pdist(X, 'cosine')``
       Computes the cosine distance between vectors u and v,
       .. math::
          1 - \\frac{u \\cdot v}
                   {{||u||}_2 {||v||}_2}
       where :math:`||*||_2` is the 2-norm of its argument ``*``, and
       :math:`u \\cdot v` is the dot product of ``u`` and ``v``.
    7. ``Y = pdist(X, 'correlation')``
       Computes the correlation distance between vectors u and v. This is
       .. math::
          1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
                   {{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
       where :math:`\\bar{v}` is the mean of the elements of vector v,
       and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
    8. ``Y = pdist(X, 'hamming')``
       Computes the normalized Hamming distance, or the proportion of
       those vector elements between two n-vectors ``u`` and ``v``
       which disagree. To save memory, the matrix ``X`` can be of type
       boolean.
    9. ``Y = pdist(X, 'jaccard')``
       Computes the Jaccard distance between the points. Given two
       vectors, ``u`` and ``v``, the Jaccard distance is the
       proportion of those elements ``u[i]`` and ``v[i]`` that
       disagree.
    10. ``Y = pdist(X, 'chebyshev')``
        Computes the Chebyshev distance between the points. The
        Chebyshev distance between two n-vectors ``u`` and ``v`` is the
        maximum norm-1 distance between their respective elements. More
        precisely, the distance is given by
        .. math::
           d(u,v) = \\max_i {|u_i-v_i|}
    11. ``Y = pdist(X, 'canberra')``
        Computes the Canberra distance between the points. The
        Canberra distance between two points ``u`` and ``v`` is
        .. math::
          d(u,v) = \\sum_i \\frac{|u_i-v_i|}
                               {|u_i|+|v_i|}
    12. ``Y = pdist(X, 'braycurtis')``
        Computes the Bray-Curtis distance between the points. The
        Bray-Curtis distance between two points ``u`` and ``v`` is
        .. math::
             d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
                           {\\sum_i {|u_i+v_i|}}
    13. ``Y = pdist(X, 'mahalanobis', VI=None)``
        Computes the Mahalanobis distance between the points. The
        Mahalanobis distance between two points ``u`` and ``v`` is
        :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
        variable) is the inverse covariance. If ``VI`` is not None,
        ``VI`` will be used as the inverse covariance matrix.
    14. ``Y = pdist(X, 'yule')``
        Computes the Yule distance between each pair of boolean
        vectors. (see yule function documentation)
    15. ``Y = pdist(X, 'matching')``
        Synonym for 'hamming'.
    16. ``Y = pdist(X, 'dice')``
        Computes the Dice distance between each pair of boolean
        vectors. (see dice function documentation)
    17. ``Y = pdist(X, 'kulsinski')``
        Computes the Kulsinski distance between each pair of
        boolean vectors. (see kulsinski function documentation)
    18. ``Y = pdist(X, 'rogerstanimoto')``
        Computes the Rogers-Tanimoto distance between each pair of
        boolean vectors. (see rogerstanimoto function documentation)
    19. ``Y = pdist(X, 'russellrao')``
        Computes the Russell-Rao distance between each pair of
        boolean vectors. (see russellrao function documentation)
    20. ``Y = pdist(X, 'sokalmichener')``
        Computes the Sokal-Michener distance between each pair of
        boolean vectors. (see sokalmichener function documentation)
    21. ``Y = pdist(X, 'sokalsneath')``
        Computes the Sokal-Sneath distance between each pair of
        boolean vectors. (see sokalsneath function documentation)
    22. ``Y = pdist(X, 'wminkowski')``
        Computes the weighted Minkowski distance between each pair of
        vectors. (see wminkowski function documentation)
    23. ``Y = pdist(X, f)``
        Computes the distance between all pairs of vectors in X
        using the user supplied 2-arity function f. For example,
        Euclidean distance between the vectors could be computed
        as follows::
          dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))
        Note that you should avoid passing a reference to one of
        the distance functions defined in this library. For example,::
          dm = pdist(X, sokalsneath)
        would calculate the pair-wise distances between the vectors in
        X using the Python function sokalsneath. This would result in
        sokalsneath being called :math:`{n \\choose 2}` times, which
        is inefficient. Instead, the optimized C version is more
        efficient, and we call it using the following syntax.::
          dm = pdist(X, 'sokalsneath')
    Parameters
    ----------
    X : ndarray
        An m by n array of m original observations in an
        n-dimensional space.
    metric : str or function, optional
        The distance metric to use. The distance function can
        be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
        'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
        'jaccard', 'kulsinski', 'mahalanobis', 'matching',
        'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
        'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'.
    w : ndarray, optional
        The weight vector (for weighted Minkowski).
    p : double, optional
        The p-norm to apply (for Minkowski, weighted and unweighted)
    V : ndarray, optional
        The variance vector (for standardized Euclidean).
    VI : ndarray, optional
        The inverse of the covariance matrix (for Mahalanobis).
    Returns
    -------
    Y : ndarray
        Returns a condensed distance matrix Y. For
        each :math:`i` and :math:`j` (where :math:`i<j<n`), the
        metric ``dist(u=X[i], v=X[j])`` is computed and stored in entry ``ij``.
    See Also
    --------
    squareform : converts between condensed distance matrices and
                 square distance matrices.
    Notes
    -----
    See ``squareform`` for information on how to calculate the index of
    this entry or to convert the condensed distance matrix to a
    redundant square matrix.
    """
    # You can also call this as:
    #     Y = pdist(X, 'test_abc')
    # where 'abc' is the metric being tested. This computes the distance
    # between all pairs of vectors in X using the distance metric 'abc' but with
    # a more succinct, verifiable, but less efficient implementation.
    X = np.asarray(X, order='c')
    # The C code doesn't do striding.
    X = _copy_array_if_base_present(X)
    s = X.shape
    if len(s) != 2:
        raise ValueError('A 2-dimensional array must be passed.')
    m, n = s
    # Condensed output: one entry per unordered pair of observations.
    dm = np.zeros((m * (m - 1)) // 2, dtype=np.double)
    wmink_names = ['wminkowski', 'wmi', 'wm', 'wpnorm']
    if w is None and (metric == wminkowski or metric in wmink_names):
        raise ValueError('weighted minkowski requires a weight '
                         'vector `w` to be given.')
    if callable(metric):
        # For library-provided metrics with extra parameters, close over
        # the relevant keyword argument of pdist.
        if metric == minkowski:
            def dfun(u, v):
                return minkowski(u, v, p)
        elif metric == wminkowski:
            def dfun(u, v):
                return wminkowski(u, v, p, w)
        elif metric == seuclidean:
            def dfun(u, v):
                return seuclidean(u, v, V)
        elif metric == mahalanobis:
            def dfun(u, v):
                # BUG FIX: mahalanobis takes the inverse covariance `VI`,
                # not the seuclidean variance vector `V`.
                return mahalanobis(u, v, VI)
        else:
            dfun = metric
        X = _convert_to_double(X)
        k = 0
        for i in xrange(0, m - 1):
            for j in xrange(i + 1, m):
                dm[k] = dfun(X[i], X[j])
                k = k + 1
    elif isinstance(metric, string_types):
        mstr = metric.lower()
        # Fast path: metrics with a single dtype and no extra arguments.
        try:
            validate, pdist_fn = _SIMPLE_PDIST[mstr]
            X = validate(X)
            pdist_fn(X, dm)
            return dm
        except KeyError:
            pass
        if mstr in ['hamming', 'hamm', 'ha', 'h']:
            if X.dtype == bool:
                X = _convert_to_bool(X)
                _distance_wrap.pdist_hamming_bool_wrap(X, dm)
            else:
                X = _convert_to_double(X)
                _distance_wrap.pdist_hamming_wrap(X, dm)
        elif mstr in ['jaccard', 'jacc', 'ja', 'j']:
            if X.dtype == bool:
                X = _convert_to_bool(X)
                _distance_wrap.pdist_jaccard_bool_wrap(X, dm)
            else:
                X = _convert_to_double(X)
                _distance_wrap.pdist_jaccard_wrap(X, dm)
        elif mstr in ['minkowski', 'mi', 'm']:
            X = _convert_to_double(X)
            _distance_wrap.pdist_minkowski_wrap(X, dm, p)
        elif mstr in wmink_names:
            X = _convert_to_double(X)
            w = _convert_to_double(np.asarray(w))
            _distance_wrap.pdist_weighted_minkowski_wrap(X, dm, p, w)
        elif mstr in ['seuclidean', 'se', 's']:
            X = _convert_to_double(X)
            if V is not None:
                V = np.asarray(V, order='c')
                if V.dtype != np.double:
                    raise TypeError('Variance vector V must contain doubles.')
                if len(V.shape) != 1:
                    raise ValueError('Variance vector V must '
                                     'be one-dimensional.')
                if V.shape[0] != n:
                    raise ValueError('Variance vector V must be of the same '
                            'dimension as the vectors on which the distances '
                            'are computed.')
                # The C code doesn't do striding.
                VV = _copy_array_if_base_present(_convert_to_double(V))
            else:
                VV = np.var(X, axis=0, ddof=1)
            _distance_wrap.pdist_seuclidean_wrap(X, VV, dm)
        elif mstr in ['cosine', 'cos']:
            X = _convert_to_double(X)
            norms = _row_norms(X)
            _distance_wrap.pdist_cosine_wrap(X, dm, norms)
        elif mstr in ['old_cosine', 'old_cos']:
            # Legacy pure-numpy cosine kept for comparison/testing.
            X = _convert_to_double(X)
            norms = _row_norms(X)
            nV = norms.reshape(m, 1)
            # The numerator u * v
            nm = np.dot(X, X.T)
            # The denom. ||u||*||v||
            de = np.dot(nV, nV.T)
            dm = 1.0 - (nm / de)
            dm[xrange(0, m), xrange(0, m)] = 0.0
            dm = squareform(dm)
        elif mstr in ['correlation', 'co']:
            # Correlation distance is cosine distance on mean-centered rows.
            X = _convert_to_double(X)
            X2 = X - X.mean(1)[:, np.newaxis]
            norms = _row_norms(X2)
            _distance_wrap.pdist_cosine_wrap(X2, dm, norms)
        elif mstr in ['mahalanobis', 'mahal', 'mah']:
            X = _convert_to_double(X)
            if VI is not None:
                VI = _convert_to_double(np.asarray(VI, order='c'))
                VI = _copy_array_if_base_present(VI)
            else:
                if m <= n:
                    # There are fewer observations than the dimension of
                    # the observations.
                    raise ValueError("The number of observations (%d) is too "
                                     "small; the covariance matrix is "
                                     "singular. For observations with %d "
                                     "dimensions, at least %d observations "
                                     "are required." % (m, n, n + 1))
                V = np.atleast_2d(np.cov(X.T))
                VI = _convert_to_double(np.linalg.inv(V).T.copy())
            # sqrt((u-v)V^(-1)(u-v)^T)
            _distance_wrap.pdist_mahalanobis_wrap(X, VI, dm)
        elif metric == 'test_euclidean':
            dm = pdist(X, euclidean)
        elif metric == 'test_sqeuclidean':
            # NOTE(review): this branch computes the *standardized* Euclidean
            # distance (seuclidean with variance V), not the squared Euclidean
            # its name suggests — looks mislabeled; confirm against callers.
            if V is None:
                V = np.var(X, axis=0, ddof=1)
            else:
                V = np.asarray(V, order='c')
            dm = pdist(X, lambda u, v: seuclidean(u, v, V))
        elif metric == 'test_braycurtis':
            dm = pdist(X, braycurtis)
        elif metric == 'test_mahalanobis':
            if VI is None:
                V = np.cov(X.T)
                VI = np.linalg.inv(V)
            else:
                VI = np.asarray(VI, order='c')
            VI = _copy_array_if_base_present(VI)
            # sqrt((u-v)V^(-1)(u-v)^T)
            dm = pdist(X, (lambda u, v: mahalanobis(u, v, VI)))
        elif metric == 'test_canberra':
            dm = pdist(X, canberra)
        elif metric == 'test_cityblock':
            dm = pdist(X, cityblock)
        elif metric == 'test_minkowski':
            dm = pdist(X, minkowski, p=p)
        elif metric == 'test_wminkowski':
            dm = pdist(X, wminkowski, p=p, w=w)
        elif metric == 'test_cosine':
            dm = pdist(X, cosine)
        elif metric == 'test_correlation':
            dm = pdist(X, correlation)
        elif metric == 'test_hamming':
            dm = pdist(X, hamming)
        elif metric == 'test_jaccard':
            dm = pdist(X, jaccard)
        elif metric == 'test_chebyshev' or metric == 'test_chebychev':
            dm = pdist(X, chebyshev)
        elif metric == 'test_yule':
            dm = pdist(X, yule)
        elif metric == 'test_matching':
            dm = pdist(X, matching)
        elif metric == 'test_dice':
            dm = pdist(X, dice)
        elif metric == 'test_kulsinski':
            dm = pdist(X, kulsinski)
        elif metric == 'test_rogerstanimoto':
            dm = pdist(X, rogerstanimoto)
        elif metric == 'test_russellrao':
            dm = pdist(X, russellrao)
        elif metric == 'test_sokalsneath':
            dm = pdist(X, sokalsneath)
        elif metric == 'test_sokalmichener':
            dm = pdist(X, sokalmichener)
        else:
            raise ValueError('Unknown Distance Metric: %s' % mstr)
    else:
        raise TypeError('2nd argument metric must be a string identifier '
                        'or a function.')
    return dm
def squareform(X, force="no", checks=True):
    """
    Converts a vector-form distance vector to a square-form distance
    matrix, and vice-versa.
    Parameters
    ----------
    X : ndarray
        Either a condensed or redundant distance matrix.
    force : str, optional
        As with MATLAB(TM), if force is equal to ``'tovector'`` or
        ``'tomatrix'``, the input will be treated as a distance matrix or
        distance vector respectively.
    checks : bool, optional
        If set to False, no checks will be made for matrix
        symmetry nor zero diagonals. This is useful if it is known that
        ``X - X.T`` is small and ``diag(X)`` is close to zero.
        These values are ignored any way so they do not disrupt the
        squareform transformation.
    Returns
    -------
    Y : ndarray
        If a condensed distance matrix is passed, a redundant one is
        returned, or if a redundant one is passed, a condensed distance
        matrix is returned.
    Notes
    -----
    1. v = squareform(X)
       Given a square d-by-d symmetric distance matrix X,
       ``v = squareform(X)`` returns a ``d * (d-1) / 2`` (or
       :math:`{n \\choose 2}`) sized vector v.
      :math:`v[{n \\choose 2}-{n-i \\choose 2} + (j-i-1)]` is the distance
      between points i and j. If X is non-square or asymmetric, an error
      is returned.
    2. X = squareform(v)
      Given a ``d*(d-1)/2`` sized v for some integer ``d >= 2`` encoding
      distances as described, ``X = squareform(v)`` returns a d by d distance
      matrix X. The ``X[i, j]`` and ``X[j, i]`` values are set to
      :math:`v[{n \\choose 2}-{n-i \\choose 2} + (j-i-1)]` and all
      diagonal elements are zero.
    In Scipy 0.19.0, ``squareform`` stopped casting all input types to
    float64, and started returning arrays of the same dtype as the input.
    """
    X = np.ascontiguousarray(X)
    s = X.shape
    # 'force' only validates the dimensionality; the conversion direction
    # below is still chosen from the actual shape.
    if force.lower() == 'tomatrix':
        if len(s) != 1:
            raise ValueError("Forcing 'tomatrix' but input X is not a "
                             "distance vector.")
    elif force.lower() == 'tovector':
        if len(s) != 2:
            raise ValueError("Forcing 'tovector' but input X is not a "
                             "distance matrix.")
    # X = squareform(v)
    if len(s) == 1:
        if s[0] == 0:
            return np.zeros((1, 1), dtype=X.dtype)
        # Grab the closest value to the square root of the number
        # of elements times 2 to see if the number of elements
        # is indeed a binomial coefficient.
        d = int(np.ceil(np.sqrt(s[0] * 2)))
        # Check that v is of valid dimensions.
        if d * (d - 1) != s[0] * 2:
            raise ValueError('Incompatible vector size. It must be a binomial '
                             'coefficient n choose 2 for some integer n >= 2.')
        # Allocate memory for the distance matrix.
        M = np.zeros((d, d), dtype=X.dtype)
        # Since the C code does not support striding using strides.
        # The dimensions are used instead.
        X = _copy_array_if_base_present(X)
        # Fill in the values of the distance matrix.
        _distance_wrap.to_squareform_from_vector_wrap(M, X)
        # Return the distance matrix.
        return M
    elif len(s) == 2:
        if s[0] != s[1]:
            raise ValueError('The matrix argument must be square.')
        if checks:
            is_valid_dm(X, throw=True, name='X')
        # One-side of the dimensions is set here.
        d = s[0]
        if d <= 1:
            # A 0x0 or 1x1 matrix has no off-diagonal entries to condense.
            return np.array([], dtype=X.dtype)
        # Create a vector.
        v = np.zeros((d * (d - 1)) // 2, dtype=X.dtype)
        # Since the C code does not support striding using strides.
        # The dimensions are used instead.
        X = _copy_array_if_base_present(X)
        # Convert the vector to squareform.
        _distance_wrap.to_vector_from_squareform_wrap(X, v)
        return v
    else:
        raise ValueError(('The first argument must be one or two dimensional '
                          'array. A %d-dimensional array is not '
                          'permitted') % len(s))
def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
    """
    Returns True if input array is a valid distance matrix.
    Distance matrices must be 2-dimensional numpy arrays.
    They must have a zero-diagonal, and they must be symmetric.
    Parameters
    ----------
    D : ndarray
        The candidate object to test for validity.
    tol : float, optional
        The distance matrix should be symmetric. `tol` is the maximum
        difference between entries ``ij`` and ``ji`` for the distance
        metric to be considered symmetric.
    throw : bool, optional
        An exception is thrown if the distance matrix passed is not valid.
    name : str, optional
        The name of the variable to checked. This is useful if
        throw is set to True so the offending variable can be identified
        in the exception message when an exception is thrown.
    warning : bool, optional
        Instead of throwing an exception, a warning message is
        raised.
    Returns
    -------
    valid : bool
        True if the variable `D` passed is a valid distance matrix.
    Notes
    -----
    Small numerical differences in `D` and `D.T` and non-zeroness of
    the diagonal are ignored if they are within the tolerance specified
    by `tol`.
    """
    D = np.asarray(D, order='c')
    valid = True
    # Every validity failure raises inside the try; the except clause then
    # converts it to throw/warn/False according to the caller's preference.
    try:
        s = D.shape
        if len(D.shape) != 2:
            if name:
                raise ValueError(('Distance matrix \'%s\' must have shape=2 '
                                  '(i.e. be two-dimensional).') % name)
            else:
                raise ValueError('Distance matrix must have shape=2 (i.e. '
                                 'be two-dimensional).')
        if tol == 0.0:
            # Exact checks when no tolerance is given.
            if not (D == D.T).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' must be '
                                      'symmetric.') % name)
                else:
                    raise ValueError('Distance matrix must be symmetric.')
            if not (D[xrange(0, s[0]), xrange(0, s[0])] == 0).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' diagonal must '
                                      'be zero.') % name)
                else:
                    raise ValueError('Distance matrix diagonal must be zero.')
        else:
            if not (D - D.T <= tol).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' must be '
                                      'symmetric within tolerance %5.5f.')
                                     % (name, tol))
                else:
                    raise ValueError('Distance matrix must be symmetric within'
                                     ' tolerance %5.5f.' % tol)
            if not (D[xrange(0, s[0]), xrange(0, s[0])] <= tol).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' diagonal must be'
                                      ' close to zero within tolerance %5.5f.')
                                     % (name, tol))
                else:
                    # BUG FIX: the old message kept the '%s' placeholder but
                    # supplied only `tol`, raising TypeError instead of the
                    # intended ValueError.
                    raise ValueError(('Distance matrix diagonal must be close '
                                      'to zero within tolerance %5.5f.') % tol)
    except Exception as e:
        if throw:
            raise
        if warning:
            warnings.warn(str(e))
        valid = False
    return valid
def is_valid_y(y, warning=False, throw=False, name=None):
    """
    Returns True if the input array is a valid condensed distance matrix.
    Condensed distance matrices must be 1-dimensional numpy arrays.
    Their length must be a binomial coefficient :math:`{n \\choose 2}`
    for some positive integer n.
    Parameters
    ----------
    y : ndarray
        The condensed distance matrix.
    warning : bool, optional
        Invokes a warning if the variable passed is not a valid
        condensed distance matrix. The warning message explains why
        the distance matrix is not valid. `name` is used when
        referencing the offending variable.
    throw : bool, optional
        Throws an exception if the variable passed is not a valid
        condensed distance matrix.
    name : str, optional
        Used when referencing the offending variable in the
        warning or exception message.
    Returns
    -------
    valid : bool
        True if the variable `y` passed is a valid condensed distance
        matrix.
    """
    y = np.asarray(y, order='c')
    valid = True
    # Validity failures raise inside the try; the except clause converts
    # them to throw/warn/False according to the caller's preference.
    try:
        if len(y.shape) != 1:
            if name:
                raise ValueError(('Condensed distance matrix \'%s\' must '
                                  'have shape=1 (i.e. be one-dimensional).')
                                 % name)
            else:
                raise ValueError('Condensed distance matrix must have shape=1 '
                                 '(i.e. be one-dimensional).')
        n = y.shape[0]
        # Invert n = d*(d-1)/2: d is the nearest integer to sqrt(2n).
        d = int(np.ceil(np.sqrt(n * 2)))
        if (d * (d - 1) / 2) != n:
            # NOTE: '\\choose' (previously written '\choose') keeps the
            # runtime message byte-identical while avoiding the invalid
            # escape sequence warning.
            if name:
                raise ValueError(('Length n of condensed distance matrix '
                                  '\'%s\' must be a binomial coefficient, i.e.'
                                  'there must be a k such that '
                                  '(k \\choose 2)=n)!') % name)
            else:
                raise ValueError('Length n of condensed distance matrix must '
                                 'be a binomial coefficient, i.e. there must '
                                 'be a k such that (k \\choose 2)=n)!')
    except Exception as e:
        if throw:
            raise
        if warning:
            warnings.warn(str(e))
        valid = False
    return valid
def num_obs_dm(d):
    """
    Returns the number of original observations that correspond to a
    square, redundant distance matrix.

    Parameters
    ----------
    d : ndarray
        The target distance matrix.

    Returns
    -------
    num_obs_dm : int
        The number of observations in the redundant distance matrix.
    """
    dmat = np.asarray(d, order='c')
    # tol=np.inf skips the symmetry/diagonal value checks; only the
    # 2-D square shape is enforced before reading the row count.
    is_valid_dm(dmat, tol=np.inf, throw=True, name='d')
    return dmat.shape[0]
def num_obs_y(Y):
    """
    Returns the number of original observations that correspond to a
    condensed distance matrix.

    Parameters
    ----------
    Y : ndarray
        Condensed distance matrix.

    Returns
    -------
    n : int
        The number of observations in the condensed distance matrix `Y`.
    """
    Y = np.asarray(Y, order='c')
    is_valid_y(Y, throw=True, name='Y')
    num_entries = Y.shape[0]
    if num_entries == 0:
        raise ValueError("The number of observations cannot be determined on "
                         "an empty distance matrix.")
    # Invert num_entries = d*(d-1)/2: d is the nearest integer to sqrt(2k).
    d = int(np.ceil(np.sqrt(num_entries * 2)))
    if (d * (d - 1) / 2) != num_entries:
        raise ValueError("Invalid condensed distance matrix passed. Must be "
                         "some k where k=(n choose 2) for some n >= 2.")
    return d
def _row_norms(X):
norms = np.einsum('ij,ij->i', X, X, dtype=np.double)
return np.sqrt(norms, out=norms)
def _cosine_cdist(XA, XB, dm):
    """Fill `dm` in place with cosine distances between rows of XA and XB."""
    a = _convert_to_double(XA)
    b = _convert_to_double(XB)
    # dm <- a @ b.T, then scale down by the row norms of each operand.
    np.dot(a, b.T, out=dm)
    norms_a = _row_norms(a)
    norms_b = _row_norms(b)
    dm /= norms_a.reshape(-1, 1)
    dm /= norms_b
    # Map cosine similarity s to cosine distance 1 - s, still in place.
    dm *= -1
    dm += 1
def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
"""
Computes distance between each pair of the two collections of inputs.
The following are common calling conventions:
1. ``Y = cdist(XA, XB, 'euclidean')``
Computes the distance between :math:`m` points using
Euclidean distance (2-norm) as the distance metric between the
points. The points are arranged as :math:`m`
:math:`n`-dimensional row vectors in the matrix X.
2. ``Y = cdist(XA, XB, 'minkowski', p)``
Computes the distances using the Minkowski distance
:math:`||u-v||_p` (:math:`p`-norm) where :math:`p \\geq 1`.
3. ``Y = cdist(XA, XB, 'cityblock')``
Computes the city block or Manhattan distance between the
points.
4. ``Y = cdist(XA, XB, 'seuclidean', V=None)``
Computes the standardized Euclidean distance. The standardized
Euclidean distance between two n-vectors ``u`` and ``v`` is
.. math::
\\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}.
V is the variance vector; V[i] is the variance computed over all
the i'th components of the points. If not passed, it is
automatically computed.
5. ``Y = cdist(XA, XB, 'sqeuclidean')``
Computes the squared Euclidean distance :math:`||u-v||_2^2` between
the vectors.
6. ``Y = cdist(XA, XB, 'cosine')``
Computes the cosine distance between vectors u and v,
.. math::
1 - \\frac{u \\cdot v}
{{||u||}_2 {||v||}_2}
where :math:`||*||_2` is the 2-norm of its argument ``*``, and
:math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`.
7. ``Y = cdist(XA, XB, 'correlation')``
Computes the correlation distance between vectors u and v. This is
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{v}` is the mean of the elements of vector v,
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
8. ``Y = cdist(XA, XB, 'hamming')``
Computes the normalized Hamming distance, or the proportion of
those vector elements between two n-vectors ``u`` and ``v``
which disagree. To save memory, the matrix ``X`` can be of type
boolean.
9. ``Y = cdist(XA, XB, 'jaccard')``
Computes the Jaccard distance between the points. Given two
vectors, ``u`` and ``v``, the Jaccard distance is the
proportion of those elements ``u[i]`` and ``v[i]`` that
disagree where at least one of them is non-zero.
10. ``Y = cdist(XA, XB, 'chebyshev')``
Computes the Chebyshev distance between the points. The
Chebyshev distance between two n-vectors ``u`` and ``v`` is the
maximum norm-1 distance between their respective elements. More
precisely, the distance is given by
.. math::
d(u,v) = \\max_i {|u_i-v_i|}.
11. ``Y = cdist(XA, XB, 'canberra')``
Computes the Canberra distance between the points. The
Canberra distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}.
12. ``Y = cdist(XA, XB, 'braycurtis')``
Computes the Bray-Curtis distance between the points. The
Bray-Curtis distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\frac{\\sum_i (|u_i-v_i|)}
{\\sum_i (|u_i+v_i|)}
13. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)``
Computes the Mahalanobis distance between the points. The
Mahalanobis distance between two points ``u`` and ``v`` is
:math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
variable) is the inverse covariance. If ``VI`` is not None,
``VI`` will be used as the inverse covariance matrix.
14. ``Y = cdist(XA, XB, 'yule')``
Computes the Yule distance between the boolean
vectors. (see `yule` function documentation)
15. ``Y = cdist(XA, XB, 'matching')``
Synonym for 'hamming'.
16. ``Y = cdist(XA, XB, 'dice')``
Computes the Dice distance between the boolean vectors. (see
`dice` function documentation)
17. ``Y = cdist(XA, XB, 'kulsinski')``
Computes the Kulsinski distance between the boolean
vectors. (see `kulsinski` function documentation)
18. ``Y = cdist(XA, XB, 'rogerstanimoto')``
Computes the Rogers-Tanimoto distance between the boolean
vectors. (see `rogerstanimoto` function documentation)
19. ``Y = cdist(XA, XB, 'russellrao')``
Computes the Russell-Rao distance between the boolean
vectors. (see `russellrao` function documentation)
20. ``Y = cdist(XA, XB, 'sokalmichener')``
Computes the Sokal-Michener distance between the boolean
vectors. (see `sokalmichener` function documentation)
21. ``Y = cdist(XA, XB, 'sokalsneath')``
Computes the Sokal-Sneath distance between the vectors. (see
`sokalsneath` function documentation)
22. ``Y = cdist(XA, XB, 'wminkowski')``
Computes the weighted Minkowski distance between the
vectors. (see `wminkowski` function documentation)
23. ``Y = cdist(XA, XB, f)``
Computes the distance between all pairs of vectors in X
using the user supplied 2-arity function f. For example,
Euclidean distance between the vectors could be computed
as follows::
dm = cdist(XA, XB, lambda u, v: np.sqrt(((u-v)**2).sum()))
Note that you should avoid passing a reference to one of
the distance functions defined in this library. For example,::
dm = cdist(XA, XB, sokalsneath)
would calculate the pair-wise distances between the vectors in
X using the Python function `sokalsneath`. This would result in
sokalsneath being called :math:`{n \\choose 2}` times, which
is inefficient. Instead, the optimized C version is more
efficient, and we call it using the following syntax::
dm = cdist(XA, XB, 'sokalsneath')
Parameters
----------
XA : ndarray
An :math:`m_A` by :math:`n` array of :math:`m_A`
original observations in an :math:`n`-dimensional space.
Inputs are converted to float type.
XB : ndarray
An :math:`m_B` by :math:`n` array of :math:`m_B`
original observations in an :math:`n`-dimensional space.
Inputs are converted to float type.
metric : str or callable, optional
The distance metric to use. If a string, the distance function can be
'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation',
'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'wminkowski', 'yule'.
w : ndarray, optional
The weight vector (for weighted Minkowski).
p : scalar, optional
The p-norm to apply (for Minkowski, weighted and unweighted)
V : ndarray, optional
The variance vector (for standardized Euclidean).
VI : ndarray, optional
The inverse of the covariance matrix (for Mahalanobis).
Returns
-------
Y : ndarray
A :math:`m_A` by :math:`m_B` distance matrix is returned.
For each :math:`i` and :math:`j`, the metric
``dist(u=XA[i], v=XB[j])`` is computed and stored in the
:math:`ij` th entry.
Raises
------
ValueError
An exception is thrown if `XA` and `XB` do not have
the same number of columns.
Examples
--------
Find the Euclidean distances between four 2-D coordinates:
>>> from scipy.spatial import distance
>>> coords = [(35.0456, -85.2672),
... (35.1174, -89.9711),
... (35.9728, -83.9422),
... (36.1667, -86.7833)]
>>> distance.cdist(coords, coords, 'euclidean')
array([[ 0. , 4.7044, 1.6172, 1.8856],
[ 4.7044, 0. , 6.0893, 3.3561],
[ 1.6172, 6.0893, 0. , 2.8477],
[ 1.8856, 3.3561, 2.8477, 0. ]])
Find the Manhattan distance from a 3-D point to the corners of the unit
cube:
>>> a = np.array([[0, 0, 0],
... [0, 0, 1],
... [0, 1, 0],
... [0, 1, 1],
... [1, 0, 0],
... [1, 0, 1],
... [1, 1, 0],
... [1, 1, 1]])
>>> b = np.array([[ 0.1, 0.2, 0.4]])
>>> distance.cdist(a, b, 'cityblock')
array([[ 0.7],
[ 0.9],
[ 1.3],
[ 1.5],
[ 1.5],
[ 1.7],
[ 2.1],
[ 2.3]])
"""
# You can also call this as:
# Y = cdist(XA, XB, 'test_abc')
# where 'abc' is the metric being tested. This computes the distance
# between all pairs of vectors in XA and XB using the distance metric 'abc'
# but with a more succinct, verifiable, but less efficient implementation.
XA = np.asarray(XA, order='c')
XB = np.asarray(XB, order='c')
# The C code doesn't do striding.
XA = _copy_array_if_base_present(_convert_to_double(XA))
XB = _copy_array_if_base_present(_convert_to_double(XB))
s = XA.shape
sB = XB.shape
if len(s) != 2:
raise ValueError('XA must be a 2-dimensional array.')
if len(sB) != 2:
raise ValueError('XB must be a 2-dimensional array.')
if s[1] != sB[1]:
raise ValueError('XA and XB must have the same number of columns '
'(i.e. feature dimension.)')
mA = s[0]
mB = sB[0]
n = s[1]
dm = np.zeros((mA, mB), dtype=np.double)
if callable(metric):
if metric == minkowski:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = minkowski(XA[i, :], XB[j, :], p)
elif metric == wminkowski:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = wminkowski(XA[i, :], XB[j, :], p, w)
elif metric == seuclidean:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = seuclidean(XA[i, :], XB[j, :], V)
elif metric == mahalanobis:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = mahalanobis(XA[i, :], XB[j, :], V)
else:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = metric(XA[i, :], XB[j, :])
elif isinstance(metric, string_types):
mstr = metric.lower()
try:
validate, cdist_fn = _SIMPLE_CDIST[mstr]
XA = validate(XA)
XB = validate(XB)
cdist_fn(XA, XB, dm)
return dm
except KeyError:
pass
if mstr in ['hamming', 'hamm', 'ha', 'h']:
if XA.dtype == bool:
XA = _convert_to_bool(XA)
XB = _convert_to_bool(XB)
_distance_wrap.cdist_hamming_bool_wrap(XA, XB, dm)
else:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
_distance_wrap.cdist_hamming_wrap(XA, XB, dm)
elif mstr in ['jaccard', 'jacc', 'ja', 'j']:
if XA.dtype == bool:
XA = _convert_to_bool(XA)
XB = _convert_to_bool(XB)
_distance_wrap.cdist_jaccard_bool_wrap(XA, XB, dm)
else:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
_distance_wrap.cdist_jaccard_wrap(XA, XB, dm)
elif mstr in ['minkowski', 'mi', 'm', 'pnorm']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
_distance_wrap.cdist_minkowski_wrap(XA, XB, dm, p)
elif mstr in ['wminkowski', 'wmi', 'wm', 'wpnorm']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
w = _convert_to_double(w)
_distance_wrap.cdist_weighted_minkowski_wrap(XA, XB, dm, p, w)
elif mstr in ['seuclidean', 'se', 's']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
if V is not None:
V = np.asarray(V, order='c')
if V.dtype != np.double:
raise TypeError('Variance vector V must contain doubles.')
if len(V.shape) != 1:
raise ValueError('Variance vector V must be '
'one-dimensional.')
if V.shape[0] != n:
raise ValueError('Variance vector V must be of the same '
'dimension as the vectors on which the '
'distances are computed.')
# The C code doesn't do striding.
VV = _copy_array_if_base_present(_convert_to_double(V))
else:
VV = np.var(np.vstack([XA, XB]), axis=0, ddof=1)
_distance_wrap.cdist_seuclidean_wrap(XA, XB, VV, dm)
elif mstr in ['cosine', 'cos']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
_cosine_cdist(XA, XB, dm)
elif mstr in ['correlation', 'co']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
XA -= XA.mean(axis=1)[:, np.newaxis]
XB -= XB.mean(axis=1)[:, np.newaxis]
_cosine_cdist(XA, XB, dm)
elif mstr in ['mahalanobis', 'mahal', 'mah']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
if VI is not None:
VI = _convert_to_double(np.asarray(VI, order='c'))
VI = _copy_array_if_base_present(VI)
else:
m = mA + mB
if m <= n:
# There are fewer observations than the dimension of
# the observations.
raise ValueError("The number of observations (%d) is too "
"small; the covariance matrix is "
"singular. For observations with %d "
"dimensions, at least %d observations "
"are required." % (m, n, n + 1))
X = np.vstack([XA, XB])
V = np.atleast_2d(np.cov(X.T))
del X
VI = np.linalg.inv(V).T.copy()
# sqrt((u-v)V^(-1)(u-v)^T)
_distance_wrap.cdist_mahalanobis_wrap(XA, XB, VI, dm)
elif metric == 'test_euclidean':
dm = cdist(XA, XB, euclidean)
elif metric == 'test_seuclidean':
if V is None:
V = np.var(np.vstack([XA, XB]), axis=0, ddof=1)
else:
V = np.asarray(V, order='c')
dm = cdist(XA, XB, lambda u, v: seuclidean(u, v, V))
elif metric == 'test_sqeuclidean':
dm = cdist(XA, XB, lambda u, v: sqeuclidean(u, v))
elif metric == 'test_braycurtis':
dm = cdist(XA, XB, braycurtis)
elif metric == 'test_mahalanobis':
if VI is None:
X = np.vstack([XA, XB])
V = np.cov(X.T)
VI = np.linalg.inv(V)
X = None
del X
else:
VI = np.asarray(VI, order='c')
VI = _copy_array_if_base_present(VI)
# sqrt((u-v)V^(-1)(u-v)^T)
dm = cdist(XA, XB, (lambda u, v: mahalanobis(u, v, VI)))
elif metric == 'test_canberra':
dm = cdist(XA, XB, canberra)
elif metric == 'test_cityblock':
dm = cdist(XA, XB, cityblock)
elif metric == 'test_minkowski':
dm = cdist(XA, XB, minkowski, p=p)
elif metric == 'test_wminkowski':
dm = cdist(XA, XB, wminkowski, p=p, w=w)
elif metric == 'test_correlation':
dm = cdist(XA, XB, correlation)
elif metric == 'test_hamming':
dm = cdist(XA, XB, hamming)
elif metric == 'test_jaccard':
dm = cdist(XA, XB, jaccard)
elif metric == 'test_chebyshev' or metric == 'test_chebychev':
dm = cdist(XA, XB, chebyshev)
elif metric == 'test_yule':
dm = cdist(XA, XB, yule)
elif metric == 'test_matching':
dm = cdist(XA, XB, matching)
elif metric == 'test_dice':
dm = cdist(XA, XB, dice)
elif metric == 'test_kulsinski':
dm = cdist(XA, XB, kulsinski)
elif metric == 'test_rogerstanimoto':
dm = cdist(XA, XB, rogerstanimoto)
elif metric == 'test_russellrao':
dm = cdist(XA, XB, russellrao)
elif metric == 'test_sokalsneath':
dm = cdist(XA, XB, sokalsneath)
elif metric == 'test_sokalmichener':
dm = cdist(XA, XB, sokalmichener)
else:
raise ValueError('Unknown Distance Metric: %s' % mstr)
else:
raise TypeError('2nd argument metric must be a string identifier '
'or a function.')
return dm
| {
"content_hash": "c080f8b1ce6d0eb050d362de2f49a403",
"timestamp": "",
"source": "github",
"line_count": 2187,
"max_line_length": 87,
"avg_line_length": 31.10562414266118,
"alnum_prop": 0.538822249661904,
"repo_name": "DailyActie/Surrogate-Model",
"id": "85f260110191fef2a14f8dce6510c14927b52c97",
"size": "68028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01-codes/scipy-master/scipy/spatial/distance.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from pyensembl import Genome
from nose.tools import eq_, ok_, assert_raises
from .data import data_path
# Partial mouse Ensembl release-81 fixtures, restricted to gene
# ENSMUSG00000017167 (the filenames say "partial") so test data stays small.
MOUSE_ENSMUSG00000017167_PATH = data_path(
    "mouse.ensembl.81.partial.ENSMUSG00000017167.gtf")
# cDNA (transcript) sequences for the same gene
MOUSE_ENSMUSG00000017167_TRANSCRIPT_FASTA_PATH = data_path(
    "mouse.ensembl.81.partial.ENSMUSG00000017167.fa")
# MOUSE_ENSMUSG00000088969_NCRNA_FASTA_PATH = data_path(
#     "mouse.ensembl.81.partial.ncrna.ENSMUSG00000017167.fa")
# Protein (peptide) sequences for the same gene
MOUSE_ENSMUSG00000017167_PROTEIN_FASTA_PATH = data_path(
    "mouse.ensembl.81.partial.ENSMUSG00000017167.pep")
def no_gtf_(cm):
    """Assert that the captured exception message mentions a missing GTF."""
    exc = cm.exception
    print("Testing for 'GTF' in %s : %s" % (type(exc), exc))
    ok_("GTF" in str(exc))
def no_transcript_(cm):
    """Assert that the captured exception message mentions transcripts."""
    exc = cm.exception
    print("Testing for 'transcript' in %s : %s" % (type(exc), exc))
    ok_("transcript" in str(exc))
def no_protein_(cm):
    """Assert that the captured exception message mentions proteins."""
    exc = cm.exception
    print("Testing for 'protein' in %s : %s" % (type(exc), exc))
    ok_("protein" in str(exc))
def test_transcript_fasta_only():
    """A genome built from only a cDNA FASTA serves sequences but raises on
    every accessor that needs a GTF or a protein FASTA."""
    genome = Genome(
        reference_name="GRCm38",
        annotation_name="_test_mouse_ensembl81_subset",
        transcript_fasta_paths_or_urls=[MOUSE_ENSMUSG00000017167_TRANSCRIPT_FASTA_PATH])
    genome.index()
    eq_(2, len(genome.transcript_sequences.fasta_dictionary))
    # Every annotation accessor depends on a GTF and must fail without one;
    # check them in the same order the original assertions used.
    gtf_dependent = (
        genome.genes,
        genome.gene_ids,
        lambda: genome.gene_ids_of_gene_name("test"),
        genome.transcript_names,
    )
    for accessor in gtf_dependent:
        with assert_raises(ValueError) as ctx:
            accessor()
        no_gtf_(ctx)
    with assert_raises(ValueError) as ctx:
        genome.protein_sequence("test")
    no_protein_(ctx)
def test_protein_fasta_only():
    """A genome built from only a protein FASTA serves protein sequences but
    raises for GTF-based and transcript-based lookups."""
    genome = Genome(
        reference_name="GRCm38",
        annotation_name="_test_mouse_ensembl81_subset",
        protein_fasta_paths_or_urls=[MOUSE_ENSMUSG00000017167_PROTEIN_FASTA_PATH])
    genome.index()
    eq_(4, len(genome.protein_sequences.fasta_dictionary))
    with assert_raises(ValueError) as ctx:
        genome.genes()
    no_gtf_(ctx)
    with assert_raises(ValueError) as ctx:
        genome.transcript_sequence("DOES_NOT_EXIST")
    no_transcript_(ctx)
def test_gtf_only():
    """A genome built from only a GTF serves annotation data but raises for
    both transcript-sequence and protein-sequence lookups."""
    genome = Genome(
        reference_name="GRCm38",
        annotation_name="_test_mouse_ensembl81_subset",
        gtf_path_or_url=MOUSE_ENSMUSG00000017167_PATH)
    genome.index()
    eq_(1, len(genome.genes()))
    with assert_raises(ValueError) as ctx:
        genome.transcript_sequence("DOES_NOT_EXIST")
    no_transcript_(ctx)
    with assert_raises(ValueError) as ctx:
        genome.protein_sequence("genome_only_gtf")
    no_protein_(ctx)
def test_gtf_transcript_only():
    """GTF plus cDNA FASTA: transcripts carry sequence, but protein lookups
    still raise."""
    genome = Genome(
        reference_name="GRCm38",
        annotation_name="_test_mouse_ensembl81_subset",
        gtf_path_or_url=MOUSE_ENSMUSG00000017167_PATH,
        transcript_fasta_paths_or_urls=[MOUSE_ENSMUSG00000017167_TRANSCRIPT_FASTA_PATH])
    genome.index()
    eq_(1, len(genome.genes()))
    first_transcript = genome.transcripts()[0]
    ok_(first_transcript.sequence)
    with assert_raises(ValueError) as ctx:
        first_transcript.protein_sequence
    no_protein_(ctx)
def test_gtf_protein_only():
    """GTF plus protein FASTA: transcripts carry protein sequence, but cDNA
    sequence lookups still raise."""
    genome = Genome(
        reference_name="GRCm38",
        annotation_name="_test_mouse_ensembl81_subset",
        gtf_path_or_url=MOUSE_ENSMUSG00000017167_PATH,
        protein_fasta_paths_or_urls=[MOUSE_ENSMUSG00000017167_PROTEIN_FASTA_PATH])
    genome.index()
    eq_(1, len(genome.genes()))
    first_transcript = genome.transcripts()[0]
    ok_(first_transcript.protein_sequence)
    with assert_raises(ValueError) as ctx:
        first_transcript.sequence
    no_transcript_(ctx)
| {
"content_hash": "c04434239827976794af2707811eda84",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 88,
"avg_line_length": 30.96969696969697,
"alnum_prop": 0.673679060665362,
"repo_name": "hammerlab/pyensembl",
"id": "6236e9b54ca30785bbc397b80fb70ffe460569a6",
"size": "4088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_missing_genome_sources.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Pep8",
"bytes": "3514"
},
{
"name": "Python",
"bytes": "234062"
},
{
"name": "Shell",
"bytes": "433"
}
],
"symlink_target": ""
} |
from portality.api.current.crud.common import CrudApi
from portality.api.common import Api404Error, Api400Error, Api403Error
from portality.api.current.crud import ApplicationsCrudApi
from copy import deepcopy
class ApplicationsBulkApi(CrudApi):
    """Bulk create/delete operations over applications for API users."""

    API_KEY_OPTIONAL = False

    # ~~->Swagger:Feature~~
    # ~~->API:Documentation~~
    SWAG_TAG = 'Bulk API'

    SWAG_DELETE_PARAM = {
        "description": "<div class=\"search-query-docs\">List of DOAJ application IDs to be deleted. You must own all of the ids, and they must all not have entered the DOAJ workflow yet, or none of them will be processed.e.g. [4cf8b72139a749c88d043129f00e1b07, 8e896b60-35f1-4cd3-b3f9-07f7f29d8a98].</div>",
        "required": True,
        "schema": {"type": "string"},
        "name": "application_ids",
        "in": "body"
    }

    SWAG_APPLICATION_BODY_PARAM = {
        "description": "<div class=\"search-query-docs\">List of Application JSON objects that you would like to create. Each element of the list should comply with the schema displayed in the <a href=\"/api/docs#CRUD_Applications_get_api_application_application_id\"> GET (Retrieve) an application route</a>.</div>",
        "required": True,
        "schema": {"type": "string"},
        "name": "application_json",
        "in": "body"
    }

    @classmethod
    def create_swag(cls):
        """Assemble the swagger spec fragment for the bulk-create endpoint."""
        spec = deepcopy(cls.SWAG_TEMPLATE)
        spec['parameters'].append(cls.SWAG_APPLICATION_BODY_PARAM)
        spec['responses']['201'] = cls.R201_BULK
        spec['responses']['400'] = cls.R400
        return cls._build_swag_response(spec)

    @classmethod
    def create(cls, applications, account):
        """Create every supplied application for the account; return new ids.

        A first dry-run pass validates all records so that either every
        record is accepted or nothing is created at all.
        """
        # ~~->APICrudApplications:Feature~~
        # pass 1: dry run — raises before anything is persisted
        for application in applications:
            ApplicationsCrudApi.create(application, account, dry_run=True)
        # pass 2: the real creation; collect the ids of the new records
        return [ApplicationsCrudApi.create(application, account).id
                for application in applications]

    @classmethod
    def delete_swag(cls):
        """Assemble the swagger spec fragment for the bulk-delete endpoint."""
        spec = deepcopy(cls.SWAG_TEMPLATE)
        spec['parameters'].append(cls.SWAG_DELETE_PARAM)
        spec['responses']['204'] = cls.R204
        spec['responses']['400'] = cls.R400
        return cls._build_swag_response(spec)

    @classmethod
    def delete(cls, application_ids, account):
        """Delete every identified application owned by the account.

        A first dry-run pass checks all ids so that either every deletion is
        permitted or none is carried out.
        """
        # ~~->APICrudApplications:Feature~~
        # pass 1: dry run — translate ownership/state problems into 400s
        for application_id in application_ids:
            try:
                ApplicationsCrudApi.delete(application_id, account, dry_run=True)
            except Api404Error:
                raise Api400Error("Id {x} does not exist or does not belong to this user account".format(x=application_id))
            except Api403Error:
                raise Api400Error("Id {x} is not in a state which allows it to be deleted".format(x=application_id))
        # pass 2: the real deletion
        for application_id in application_ids:
            ApplicationsCrudApi.delete(application_id, account)
| {
"content_hash": "d480a375310baa60f9de3e6e9680ce85",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 317,
"avg_line_length": 42.32876712328767,
"alnum_prop": 0.6411003236245955,
"repo_name": "DOAJ/doaj",
"id": "9665035bac91a9a62d25c71b32e02a37ff11111a",
"size": "3141",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "portality/api/current/bulk/applications.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2399"
},
{
"name": "Dockerfile",
"bytes": "59"
},
{
"name": "HTML",
"bytes": "483733"
},
{
"name": "JavaScript",
"bytes": "952971"
},
{
"name": "Jinja",
"bytes": "15292"
},
{
"name": "Python",
"bytes": "3195030"
},
{
"name": "SCSS",
"bytes": "75276"
},
{
"name": "Shell",
"bytes": "28415"
}
],
"symlink_target": ""
} |
import glob
import numpy as np
from nmrglue.analysis.leastsqbound import leastsqbound
# exponential function to fit data to.
def fit_func(p, x):
    """Exponential decay model ``A * exp(-x * R2 / 1e6)``.

    ``p`` is the parameter pair ``(A, R2)``; ``x`` may be a scalar or a
    sequence (it is converted to an ndarray internally).
    """
    amplitude, decay_rate = p
    times = np.array(x)
    return amplitude * np.exp(-times * decay_rate / 1.0e6)
# residuals between fit and experimental data.
def residuals(p, y, x):
    """Difference between experimental data ``y`` and the model evaluated
    at ``x`` with parameters ``p``."""
    return y - fit_func(p, x)
# Prepare fitting parameters.
relaxation_times = np.loadtxt("relaxation_times.in")
x0 = [1.0, 0.10]  # initial guess for the parameters (A, R2)
bounds = [(0.98, 1.02), (None, None)]  # constrain A near 1.0; R2 unconstrained

# Record the fitting results; the with-block guarantees the output file is
# closed even if a fit raises (the original only closed it on success).
with open('fits.txt', 'w') as output:
    output.write("#Peak\tA\t\tR2\t\tier\n")
    # Loop over the trajectory files.
    for filename in glob.glob('*.dat'):
        peak = filename[:3]
        # print() call works under both Python 2 and 3; the original
        # "print a, b" statement is a SyntaxError on Python 3.
        print("Fitting Peak: %s" % peak)
        # Fit the trajectory using constrained least-squares optimization.
        trajectory = np.loadtxt(filename)
        x, ier = leastsqbound(residuals, x0, bounds=bounds,
                              args=(trajectory, relaxation_times))
        # Write fitting results: peak label, amplitude, rate, exit flag.
        output.write('%s\t%.6f\t%.6f\t%i\n' % (peak, x[0], x[1], ier))
| {
"content_hash": "a369a9935d17176d50581c8444ae68e7",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 69,
"avg_line_length": 29.846153846153847,
"alnum_prop": 0.6606529209621993,
"repo_name": "atomman/nmrglue",
"id": "8f2b84cce72b1c2556bdbe74c0fc87856cbdb4f7",
"size": "1164",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/jbnmr_examples/s12-s15_relaxation_analysis/fit_exp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "25386"
},
{
"name": "Python",
"bytes": "837166"
},
{
"name": "Shell",
"bytes": "10372"
}
],
"symlink_target": ""
} |
class Dialog(object):
    """
    Describes one interaction with the user: which components are displayed,
    why the information is needed, and where the answers are stored.
    """

    #: Components to display, in the given order, in the dialog
    components = ()

    #: Title of the dialog
    title = None

    #: An explanation for what the data in this dialog is needed for
    reason = None

    #: Unique scope identifier for the data stored in the answer files.
    #: Scope + component key is used to address values.
    scope = None

    def __init__(self, scope, reason, title=None, components=None):
        """
        :param scope: Unique scope identifier for the data to be stored in the
                      answer files. Scope + component key is used to address values.
        :type scope: str
        :param reason: An explanation for what the data in this dialog is needed for.
        :type reason: str
        :param title: Title of the dialog
        :type title: str
        :param components: Components to display in the given order in the dialog
        :type components: tuple(leapp.dialogs.components.Component)
        """
        self.scope = scope
        self.reason = reason
        self.title = title
        self.components = components or self.components
        self._store = None
        self._min_label_width = None

    @property
    def min_label_width(self):
        """
        :return: Length of the longest component label, useful as the minimum
                 width labels should be rendered with. Computed lazily and
                 cached on first access.
        """
        if self._min_label_width is None:
            self._min_label_width = max(
                len(component.label) for component in self.components if component.label)
        return self._min_label_width

    def answer(self, component, value):
        """
        Record the user's answer for a single component.

        :param component: Component for which the answer is set
        :param value: The answer value
        :return: None
        """
        component.value = value
        self._store.answer(self.scope, component.key, value)

    def component_by_key(self, key):
        """
        Look up a component by its key.

        :param key: Key of the component to return
        :type key: str
        :return: The matching component, or None when no component has that key
        """
        return next(
            (component for component in self.components if component.key == key),
            None)

    def request_answers(self, store, renderer):
        """
        Render the dialog when any component is still unanswered, then return
        the stored answers for this dialog's scope.

        :param store: AnswerStore instance
        :param renderer: Target renderer instance
        :return: Dictionary with answers once retrieved
        """
        if any(component.value is None for component in self.components):
            self._store = store
            renderer.render(self)
            self._store = None
        return dict(store.get(self.scope))
| {
"content_hash": "ef927e1c0341e46d54fae2d2375fb933",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 120,
"avg_line_length": 34.2,
"alnum_prop": 0.6081871345029239,
"repo_name": "vinzenz/prototype",
"id": "4ab269c02648569a81484650330ef3995c7c2235",
"size": "2907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leapp/dialogs/dialog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1688"
},
{
"name": "HTML",
"bytes": "35793"
},
{
"name": "Makefile",
"bytes": "927"
},
{
"name": "PLpgSQL",
"bytes": "4262"
},
{
"name": "Python",
"bytes": "290041"
},
{
"name": "Ruby",
"bytes": "1363"
},
{
"name": "Shell",
"bytes": "1416"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from typing import Any
from flask import Response, request
from itsdangerous.exc import BadSignature
from itsdangerous.url_safe import URLSafeSerializer
from sqlalchemy.orm.session import Session
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import BadRequest, NotFound
from airflow.api_connexion.schemas.log_schema import LogResponseObject, logs_schema
from airflow.api_connexion.types import APIResponse
from airflow.exceptions import TaskNotFound
from airflow.models import TaskInstance
from airflow.security import permissions
from airflow.utils.airflow_flask_app import get_airflow_app
from airflow.utils.log.log_reader import TaskLogReader
from airflow.utils.session import NEW_SESSION, provide_session
@security.requires_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
    ],
)
@provide_session
def get_log(
    *,
    dag_id: str,
    dag_run_id: str,
    task_id: str,
    task_try_number: int,
    full_content: bool = False,
    map_index: int = -1,
    token: str | None = None,
    session: Session = NEW_SESSION,
) -> APIResponse:
    """Get logs for specific task instance.

    Responds with JSON (log content plus a signed continuation token) or a
    plain-text stream, depending on the request's Accept header.  ``token``
    is a continuation token produced by a previous call; ``session`` is
    injected by ``@provide_session``.

    Raises BadRequest for a forged/invalid token or a log handler that
    cannot read, and NotFound when the task instance does not exist.
    """
    # Pagination metadata round-trips to the client inside a token signed
    # with the webserver's secret key, so clients cannot tamper with it.
    key = get_airflow_app().config["SECRET_KEY"]
    if not token:
        metadata = {}
    else:
        try:
            metadata = URLSafeSerializer(key).loads(token)
        except BadSignature:
            raise BadRequest("Bad Signature. Please use only the tokens provided by the API.")
    # A token carrying download_logs=True forces a full (non-chunked) read.
    if metadata.get("download_logs") and metadata["download_logs"]:
        full_content = True
    if full_content:
        metadata["download_logs"] = True
    else:
        metadata["download_logs"] = False
    task_log_reader = TaskLogReader()
    if not task_log_reader.supports_read:
        # The configured task log handler may not implement reading at all.
        raise BadRequest("Task log handler does not support read logs.")
    # Locate the exact task instance, including the mapped-task index
    # (-1 denotes an unmapped task per the parameter default above).
    ti = (
        session.query(TaskInstance)
        .filter(
            TaskInstance.task_id == task_id,
            TaskInstance.dag_id == dag_id,
            TaskInstance.run_id == dag_run_id,
            TaskInstance.map_index == map_index,
        )
        .join(TaskInstance.dag_run)
        .one_or_none()
    )
    if ti is None:
        metadata["end_of_log"] = True
        raise NotFound(title="TaskInstance not found")
    dag = get_airflow_app().dag_bag.get_dag(dag_id)
    if dag:
        try:
            # Best effort: attach the task definition when the DAG still has
            # it; a missing task is tolerated rather than treated as an error.
            ti.task = dag.get_task(ti.task_id)
        except TaskNotFound:
            pass
    # Content negotiation between the two supported representations.
    return_type = request.accept_mimetypes.best_match(["text/plain", "application/json"])
    # return_type would be either the above two or None
    logs: Any
    if return_type == "application/json" or return_type is None:  # default
        logs, metadata = task_log_reader.read_log_chunks(ti, task_try_number, metadata)
        logs = logs[0] if task_try_number is not None else logs
        # we must have token here, so we can safely ignore it
        token = URLSafeSerializer(key).dumps(metadata)  # type: ignore[assignment]
        return logs_schema.dump(LogResponseObject(continuation_token=token, content=logs))
    # text/plain. Stream
    logs = task_log_reader.read_log_stream(ti, task_try_number, metadata)
    return Response(logs, headers={"Content-Type": return_type})
| {
"content_hash": "010bb132a13dcc297bcd00d29d94ee88",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 94,
"avg_line_length": 35.05154639175258,
"alnum_prop": 0.6755882352941176,
"repo_name": "apache/airflow",
"id": "388b164727e90d440880fb5fa244f3fbe885e3c2",
"size": "4185",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/api_connexion/endpoints/log_endpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These literals are substituted by "git archive" (export-subst); in a
    # plain checkout they stay as unexpanded "$Format...$" markers.  They
    # must each live on their own line because setup.py/versioneer.py greps
    # for the variable names.
    refnames = "$Format:%d$"
    full_revision = "$Format:%H$"
    commit_date = "$Format:%ci$"
    return {"refnames": refnames, "full": full_revision, "date": commit_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Instances carry plain attributes (VCS, style, tag_prefix,
    parentdir_prefix, versionfile_source, verbose) assigned by
    get_config(); the class itself defines no behavior.
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "",
        "parentdir_prefix": "statsmodels-",
        "versionfile_source": "statsmodels/_version.py",
        "verbose": False,
    }
    for attribute, value in settings.items():
        setattr(cfg, attribute, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    The version-lookup helpers in this module raise it when their strategy
    cannot be applied (e.g. unexpanded git keywords, or a parent directory
    that does not start with the expected prefix).
    """
# NOTE(review): not populated anywhere in this chunk; presumably holds the
# _version.py template text keyed by VCS — confirm against full versioneer.
LONG_VERSION_PY = {}
# Maps VCS name -> {method name -> handler function}; filled in by the
# @register_vcs_handler decorator below.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Record f in HANDLERS[vcs][method] and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Each name in ``commands`` is tried in turn (e.g. ["git", "git.cmd"]);
    the first one that can be spawned wins.  Returns ``(stdout, returncode)``:
    ``(None, None)`` if no command could be started at all, and
    ``(None, code)`` if the process exited with a non-zero status.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)  # printable form used in messages below
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # executable not found: fall through to the next candidate
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # the loop never hit "break": none of the candidate names exist
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        # Popen yields bytes on Python 3; normalize to str
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory.
    """
    tried = []
    # Walk up at most two levels from the starting directory.
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir does not start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans ``versionfile_abs`` for the ``git_refnames`` / ``git_full`` /
    ``git_date`` assignments and returns whatever subset was found as a
    dict.  An OS-level error (missing/unreadable file) yields whatever was
    collected so far — typically an empty dict — instead of raising.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we do not want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # "with" guarantees the handle is closed even if parsing raises;
        # the original open()/close() pair leaked the handle in that case.
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py has not already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there is not one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' does not start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' does not start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
    """Return a + if we do not already have one, else return a .

    The local-version separator is "+" unless the closest tag already
    contains one, in which case further segments are joined with ".".
    """
    # `closest-tag` may be present but None (the no-tag case); the bare
    # `pieces.get("closest-tag", "")` would then raise TypeError on `in`.
    if "+" in (pieces.get("closest-tag", "") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        version = "0+untagged.%d.g%s" % (pieces["distance"],
                                         pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return "%s.post.dev%d" % (tag, pieces["distance"])
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you should not be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: always carry the hex suffix here
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    # a clean, exactly-tagged checkout renders as the bare tag
    if tag and not (pieces["distance"] or pieces["dirty"]):
        return tag
    base = tag if tag else "0"
    version = "%s.post%d" % (base, pieces["distance"])
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version = "%s-%d-g%s" % (tag, pieces["distance"],
                                     pieces["short"])
    else:
        # exception #1: fall back to the bare short hex
        version = pieces["short"]
    return version + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: no tag, just the short hex
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so.

    Tries, in order: (1) expanded git-archive keywords, (2) 'git describe'
    on the source tree, (3) the parent directory name. Each strategy signals
    failure by raising NotThisMethod; the final fallback is a "0+unknown"
    dict with an explanatory "error" field.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations do not do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # strategy 1: keywords substituted by 'git archive'
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # no __file__ (frozen interpreter): only the keyword strategy works
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # strategy 2: ask git directly from inside a checkout
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # strategy 3: parse the version out of the unpacked-tarball dirname
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| {
"content_hash": "ee1872bd85dd9a79a0b5af6c54bb4fdc",
"timestamp": "",
"source": "github",
"line_count": 510,
"max_line_length": 79,
"avg_line_length": 35.28235294117647,
"alnum_prop": 0.5701344892742025,
"repo_name": "jseabold/statsmodels",
"id": "0f646a2c723b2d037c1af1dbef95d2eafa826868",
"size": "18469",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "statsmodels/_version.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "Batchfile",
"bytes": "351"
},
{
"name": "C",
"bytes": "12088"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "Matlab",
"bytes": "1383"
},
{
"name": "Python",
"bytes": "8609450"
},
{
"name": "R",
"bytes": "34228"
},
{
"name": "Stata",
"bytes": "41179"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from scimath.units.unit_system import *
| {
"content_hash": "181d6a0f37931a77fe3897f1e80dfe4c",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 39,
"avg_line_length": 39.5,
"alnum_prop": 0.7848101265822784,
"repo_name": "enthought/etsproxy",
"id": "2196936433eb9c1811655c06331e4b1350f1aa19",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/units/unit_system.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.