text stringlengths 4 1.02M | meta dict |
|---|---|
"""
.. _example-movement-comp:
==============================================
Maxwell filter data with movement compensation
==============================================
Demonstrate movement compensation on simulated data. The simulated data
contains bilateral activation of auditory cortices, repeated over 14
different head rotations (head center held fixed). See the following for
details:
https://github.com/mne-tools/mne-misc-data/blob/master/movement/simulate.py
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
from os import path as op
import mne
from mne.preprocessing import maxwell_filter
print(__doc__)
data_path = op.join(mne.datasets.misc.data_path(verbose=True), 'movement')
head_pos = mne.chpi.read_head_pos(op.join(data_path, 'simulated_quats.pos'))
raw = mne.io.read_raw_fif(op.join(data_path, 'simulated_movement_raw.fif'))
raw_stat = mne.io.read_raw_fif(op.join(data_path,
'simulated_stationary_raw.fif'))
###############################################################################
# Visualize the "subject" head movements. By providing the measurement
# information, the distance to the nearest sensor in each direction
# (e.g., left/right for the X direction, forward/backward for Y) can
# be shown in blue, and the destination (if given) shown in red.
mne.viz.plot_head_positions(
head_pos, mode='traces', destination=raw.info['dev_head_t'], info=raw.info)
###############################################################################
# This can also be visualized using a quiver.
mne.viz.plot_head_positions(
head_pos, mode='field', destination=raw.info['dev_head_t'], info=raw.info)
###############################################################################
# Process our simulated raw data (taking into account head movements).
# extract our resulting events
events = mne.find_events(raw, stim_channel='STI 014')
events[:, 2] = 1
raw.plot(events=events)
topo_kwargs = dict(times=[0, 0.1, 0.2], ch_type='mag', vmin=-500, vmax=500,
time_unit='s')
###############################################################################
# First, take the average of stationary data (bilateral auditory patterns).
evoked_stat = mne.Epochs(raw_stat, events, 1, -0.2, 0.8).average()
evoked_stat.plot_topomap(title='Stationary', **topo_kwargs)
###############################################################################
# Second, take a naive average, which averages across epochs that have been
# simulated to have different head positions and orientations, thereby
# spatially smearing the activity.
epochs = mne.Epochs(raw, events, 1, -0.2, 0.8)
evoked = epochs.average()
evoked.plot_topomap(title='Moving: naive average', **topo_kwargs)
###############################################################################
# Third, use raw movement compensation (restores pattern).
raw_sss = maxwell_filter(raw, head_pos=head_pos)
evoked_raw_mc = mne.Epochs(raw_sss, events, 1, -0.2, 0.8).average()
evoked_raw_mc.plot_topomap(title='Moving: movement compensated (raw)',
**topo_kwargs)
###############################################################################
# Fourth, use evoked movement compensation. For these data, which contain
# very large rotations, it does not as cleanly restore the pattern.
evoked_evo_mc = mne.epochs.average_movements(epochs, head_pos=head_pos)
evoked_evo_mc.plot_topomap(title='Moving: movement compensated (evoked)',
**topo_kwargs)
| {
"content_hash": "f89538b2113018c62f1084227b97f753",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 41.98823529411764,
"alnum_prop": 0.5844774446623704,
"repo_name": "kambysese/mne-python",
"id": "9a37f9e2ef358e5bb607b1bcff3821e14c37b1a9",
"size": "3569",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "examples/preprocessing/plot_movement_compensation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3912"
},
{
"name": "Python",
"bytes": "5978369"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
# URL routing for the markdown demo app.
# NOTE(review): ``django.conf.urls.url`` is deprecated since Django 2.0 and
# removed in 4.0 (use ``re_path``/``path``) — confirm the Django version pinned
# by this project before upgrading.
from django.conf.urls import url
from . import views

urlpatterns = [
    # The root URL and /example both render the basic example view.
    url(r'^$', views.example, name='index'),
    url(r'^example$', views.example, name='example'),
    # Entire response body is rendered from markdown.
    url(r'^wholepgmd$', views.whole_page_is_markdown, name='whole_page_is_markdown'),
    # HTML page that embeds a markdown-rendered fragment.
    url(r'^pgwithmd$', views.page_including_markdown, name='page_including_markdown'),
]
| {
"content_hash": "3373331d9d3180a65f58729b3c789ec1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 86,
"avg_line_length": 28.916666666666668,
"alnum_prop": 0.6772334293948127,
"repo_name": "oditorium/mdown",
"id": "5eda601065469680494ccc37b22c9c0e931d8e04",
"size": "347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2378"
},
{
"name": "Python",
"bytes": "4972"
}
],
"symlink_target": ""
} |
import pytest
import responses
from flask import Flask
from urlobject import URLObject
from flask_dance.consumer import OAuth2ConsumerBlueprint
from flask_dance.consumer.storage import MemoryStorage
from flask_dance.contrib.strava import make_strava_blueprint, strava
@pytest.fixture
def make_app():
    """Fixture yielding a factory that builds a Flask app wired to Strava."""

    def factory(*args, **kwargs):
        application = Flask(__name__)
        application.secret_key = "whatever"
        application.register_blueprint(make_strava_blueprint(*args, **kwargs))
        return application

    return factory
def test_blueprint_factory():
    """The factory should produce a fully configured OAuth2 blueprint."""
    bp = make_strava_blueprint(
        client_id="foo", client_secret="bar", scope="identity", redirect_to="index"
    )
    assert isinstance(bp, OAuth2ConsumerBlueprint)

    session = bp.session
    assert session.scope == "identity"
    assert session.base_url == "https://www.strava.com/api/v3"
    assert session.client_id == "foo"

    assert bp.client_secret == "bar"
    expected_authorize = "https://www.strava.com/api/v3/oauth/authorize"
    assert bp.authorization_url == expected_authorize
    assert bp.token_url == "https://www.strava.com/api/v3/oauth/token"
def test_load_from_config(make_app):
    """OAuth client credentials should be picked up from ``app.config``."""
    app = make_app()
    app.config.update(
        STRAVA_OAUTH_CLIENT_ID="foo",
        STRAVA_OAUTH_CLIENT_SECRET="bar",
    )

    # Hitting the login view redirects to Strava; the redirect URL must carry
    # the configured client id.
    location = app.test_client().get("/strava").headers["Location"]
    assert URLObject(location).query.dict.get("client_id") == "foo"
@responses.activate
def test_context_local(make_app):
    """``strava`` proxies to the blueprint of whichever app handled the request."""
    responses.add(responses.GET, "https://google.com")

    # Two independent apps, each with its own stored token.
    apps = [
        make_app(
            "foo1",
            "bar1",
            redirect_to="url1",
            storage=MemoryStorage({"access_token": "app1"}),
        ),
        make_app(
            "foo2",
            "bar2",
            redirect_to="url2",
            storage=MemoryStorage({"access_token": "app2"}),
        ),
    ]

    # Outside any request context the proxy has nothing to resolve to.
    with pytest.raises(RuntimeError):
        strava.get("https://google.com")

    # Inside each app's request context the proxy must use that app's token.
    expected = ["Bearer app1", "Bearer app2"]
    for index, app in enumerate(apps):
        with app.test_request_context("/"):
            app.preprocess_request()
            strava.get("https://google.com")
        sent = responses.calls[index].request
        assert sent.headers["Authorization"] == expected[index]
| {
"content_hash": "92e4486f4332f45e0b38d288e417f924",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 86,
"avg_line_length": 32.174418604651166,
"alnum_prop": 0.6559450668594146,
"repo_name": "singingwolfboy/flask-dance",
"id": "25cec90e0d20b5fc736854accd872fd7c44a7f68",
"size": "2767",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/contrib/test_strava.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "329946"
}
],
"symlink_target": ""
} |
"""Test harness for diff_match_patch.py
Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# The compiled (Cython) diff_match_patch is mandatory for this harness: the
# module is built on import via pyximport, so fail fast with a clear message
# when Cython is not installed.
try:
    import pyximport
    pyximport.install()
except ImportError:
    raise Exception('Running tests expects Cython!')

import sys
import time
import unittest

# Module under test; compiled through pyximport above.
import diff_match_patch as dmp_module
class DiffMatchPatchTest(unittest.TestCase):
    """Shared fixture: provides a fresh diff_match_patch instance per test."""

    def setUp(self):
        "Test harness for dmp_module."
        self.dmp = dmp_module.diff_match_patch()

    def diff_rebuildtexts(self, diffs):
        """Reconstruct the two texts which made up *diffs* originally.

        Returns ``(text1, text2)``: text1 skips insertions, text2 skips
        deletions, mirroring diff_text1/diff_text2 semantics.
        """
        text1 = ""
        text2 = ""
        # Iterate the (op, data) pairs directly instead of indexing by
        # range(len(...)); use the instance's DIFF_* constants for
        # consistency with every other test in this file.
        for op, data in diffs:
            if op != self.dmp.DIFF_INSERT:
                text1 += data
            if op != self.dmp.DIFF_DELETE:
                text2 += data
        return (text1, text2)
class DiffTest(DiffMatchPatchTest):
    """DIFF TEST FUNCTIONS

    Exercises the diff half of diff_match_patch: common prefix/suffix/overlap
    detection, half-match heuristics, line/char encoding, the cleanup passes,
    delta serialization, and diff_main itself.

    NOTE(review): this file is Python 2 only (``unichr``, ``sys.maxint``) and
    uses the deprecated ``assertEquals`` alias of ``assertEqual``.
    """

    def testDiffCommonPrefix(self):
        # Detect any common prefix.
        # Null case.
        self.assertEquals(0, self.dmp.diff_commonPrefix("abc", "xyz"))
        # Non-null case.
        self.assertEquals(4, self.dmp.diff_commonPrefix("1234abcdef", "1234xyz"))
        # Whole case.
        self.assertEquals(4, self.dmp.diff_commonPrefix("1234", "1234xyz"))

    def testDiffCommonSuffix(self):
        # Detect any common suffix.
        # Null case.
        self.assertEquals(0, self.dmp.diff_commonSuffix("abc", "xyz"))
        # Non-null case.
        self.assertEquals(4, self.dmp.diff_commonSuffix("abcdef1234", "xyz1234"))
        # Whole case.
        self.assertEquals(4, self.dmp.diff_commonSuffix("1234", "xyz1234"))

    def testDiffCommonOverlap(self):
        # Detect an overlap where the end of one text matches the start of the other.
        # Null case.
        self.assertEquals(0, self.dmp.diff_commonOverlap("", "abcd"))
        # Whole case.
        self.assertEquals(3, self.dmp.diff_commonOverlap("abc", "abcd"))
        # No overlap.
        self.assertEquals(0, self.dmp.diff_commonOverlap("123456", "abcd"))
        # Overlap.
        self.assertEquals(3, self.dmp.diff_commonOverlap("123456xxx", "xxxabcd"))
        # Unicode.
        # Some overly clever languages (C#) may treat ligatures as equal to their
        # component letters. E.g. U+FB01 == 'fi'
        self.assertEquals(0, self.dmp.diff_commonOverlap("fi", u"\ufb01i"))

    def testDiffHalfMatch(self):
        # Detect a halfmatch.
        self.dmp.Diff_Timeout = 1
        # No match.
        self.assertEquals(None, self.dmp.diff_halfMatch("1234567890", "abcdef"))
        self.assertEquals(None, self.dmp.diff_halfMatch("12345", "23"))
        # Single Match.
        self.assertEquals(("12", "90", "a", "z", "345678"), self.dmp.diff_halfMatch("1234567890", "a345678z"))
        self.assertEquals(("a", "z", "12", "90", "345678"), self.dmp.diff_halfMatch("a345678z", "1234567890"))
        self.assertEquals(("abc", "z", "1234", "0", "56789"), self.dmp.diff_halfMatch("abc56789z", "1234567890"))
        self.assertEquals(("a", "xyz", "1", "7890", "23456"), self.dmp.diff_halfMatch("a23456xyz", "1234567890"))
        # Multiple Matches.
        self.assertEquals(("12123", "123121", "a", "z", "1234123451234"), self.dmp.diff_halfMatch("121231234123451234123121", "a1234123451234z"))
        self.assertEquals(("", "-=-=-=-=-=", "x", "", "x-=-=-=-=-=-=-="), self.dmp.diff_halfMatch("x-=-=-=-=-=-=-=-=-=-=-=-=", "xx-=-=-=-=-=-=-="))
        self.assertEquals(("-=-=-=-=-=", "", "", "y", "-=-=-=-=-=-=-=y"), self.dmp.diff_halfMatch("-=-=-=-=-=-=-=-=-=-=-=-=y", "-=-=-=-=-=-=-=yy"))
        # Non-optimal halfmatch.
        # Optimal diff would be -q+x=H-i+e=lloHe+Hu=llo-Hew+y not -qHillo+x=HelloHe-w+Hulloy
        self.assertEquals(("qHillo", "w", "x", "Hulloy", "HelloHe"), self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
        # Optimal no halfmatch.
        self.dmp.Diff_Timeout = 0
        self.assertEquals(None, self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))

    def testDiffLinesToChars(self):
        # Convert lines down to characters.
        self.assertEquals(("\x01\x02\x01", "\x02\x01\x02", ["", "alpha\n", "beta\n"]), self.dmp.diff_linesToChars("alpha\nbeta\nalpha\n", "beta\nalpha\nbeta\n"))
        self.assertEquals(("", "\x01\x02\x03\x03", ["", "alpha\r\n", "beta\r\n", "\r\n"]), self.dmp.diff_linesToChars("", "alpha\r\nbeta\r\n\r\n\r\n"))
        self.assertEquals(("\x01", "\x02", ["", "a", "b"]), self.dmp.diff_linesToChars("a", "b"))
        # More than 256 to reveal any 8-bit limitations.
        n = 300
        lineList = []
        charList = []
        for x in range(1, n + 1):
            lineList.append(str(x) + "\n")
            # unichr is Python 2 only (chr in Python 3).
            charList.append(unichr(x))
        self.assertEquals(n, len(lineList))
        lines = "".join(lineList)
        chars = "".join(charList)
        self.assertEquals(n, len(chars))
        # Index 0 is reserved, so line numbering starts at 1.
        lineList.insert(0, "")
        self.assertEquals((chars, "", lineList), self.dmp.diff_linesToChars(lines, ""))

    def testDiffCharsToLines(self):
        # Convert chars up to lines.
        diffs = [(self.dmp.DIFF_EQUAL, "\x01\x02\x01"), (self.dmp.DIFF_INSERT, "\x02\x01\x02")]
        self.dmp.diff_charsToLines(diffs, ["", "alpha\n", "beta\n"])
        self.assertEquals([(self.dmp.DIFF_EQUAL, "alpha\nbeta\nalpha\n"), (self.dmp.DIFF_INSERT, "beta\nalpha\nbeta\n")], diffs)
        # More than 256 to reveal any 8-bit limitations.
        n = 300
        lineList = []
        charList = []
        for x in range(1, n + 1):
            lineList.append(str(x) + "\n")
            charList.append(unichr(x))
        self.assertEquals(n, len(lineList))
        lines = "".join(lineList)
        chars = "".join(charList)
        self.assertEquals(n, len(chars))
        lineList.insert(0, "")
        diffs = [(self.dmp.DIFF_DELETE, chars)]
        self.dmp.diff_charsToLines(diffs, lineList)
        self.assertEquals([(self.dmp.DIFF_DELETE, lines)], diffs)

    def testDiffCleanupMerge(self):
        # Cleanup a messy diff.
        # Null case.
        diffs = []
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([], diffs)
        # No change case.
        diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")], diffs)
        # Merge equalities.
        diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_EQUAL, "c")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], diffs)
        # Merge deletions.
        diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_DELETE, "c")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abc")], diffs)
        # Merge insertions.
        diffs = [(self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_INSERT, "c")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_INSERT, "abc")], diffs)
        # Merge interweave.
        diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "d"), (self.dmp.DIFF_EQUAL, "e"), (self.dmp.DIFF_EQUAL, "f")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_INSERT, "bd"), (self.dmp.DIFF_EQUAL, "ef")], diffs)
        # Prefix and suffix detection.
        diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "c")], diffs)
        # Prefix and suffix detection with equalities.
        diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc"), (self.dmp.DIFF_EQUAL, "y")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "cy")], diffs)
        # Slide edit left.
        diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "ba"), (self.dmp.DIFF_EQUAL, "c")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "ac")], diffs)
        # Slide edit right.
        diffs = [(self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "a")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "ca"), (self.dmp.DIFF_INSERT, "ba")], diffs)
        # Slide edit left recursive.
        diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_EQUAL, "x")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "acx")], diffs)
        # Slide edit right recursive.
        diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "ca"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "a")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "xca"), (self.dmp.DIFF_DELETE, "cba")], diffs)

    def testDiffCleanupSemanticLossless(self):
        # Slide diffs to match logical boundaries.
        # Null case.
        diffs = []
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([], diffs)
        # Blank lines.
        diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\n\r\nBBB"), (self.dmp.DIFF_INSERT, "\r\nDDD\r\n\r\nBBB"), (self.dmp.DIFF_EQUAL, "\r\nEEE")]
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n\r\n"), (self.dmp.DIFF_INSERT, "BBB\r\nDDD\r\n\r\n"), (self.dmp.DIFF_EQUAL, "BBB\r\nEEE")], diffs)
        # Line boundaries.
        diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\nBBB"), (self.dmp.DIFF_INSERT, " DDD\r\nBBB"), (self.dmp.DIFF_EQUAL, " EEE")]
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n"), (self.dmp.DIFF_INSERT, "BBB DDD\r\n"), (self.dmp.DIFF_EQUAL, "BBB EEE")], diffs)
        # Word boundaries.
        diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_INSERT, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_INSERT, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
        # Alphanumeric boundaries.
        diffs = [(self.dmp.DIFF_EQUAL, "The-c"), (self.dmp.DIFF_INSERT, "ow-and-the-c"), (self.dmp.DIFF_EQUAL, "at.")]
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "The-"), (self.dmp.DIFF_INSERT, "cow-and-the-"), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
        # Hitting the start.
        diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "ax")]
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "aax")], diffs)
        # Hitting the end.
        diffs = [(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "a")]
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "xaa"), (self.dmp.DIFF_DELETE, "a")], diffs)
        # Sentence boundaries.
        diffs = [(self.dmp.DIFF_EQUAL, "The xxx. The "), (self.dmp.DIFF_INSERT, "zzz. The "), (self.dmp.DIFF_EQUAL, "yyy.")]
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "The xxx."), (self.dmp.DIFF_INSERT, " The zzz."), (self.dmp.DIFF_EQUAL, " The yyy.")], diffs)

    def testDiffCleanupSemantic(self):
        # Cleanup semantically trivial equalities.
        # Null case.
        diffs = []
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([], diffs)
        # No elimination #1.
        diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")], diffs)
        # No elimination #2.
        diffs = [(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")], diffs)
        # Simple elimination.
        diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "c")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "b")], diffs)
        # Backpass elimination.
        diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_EQUAL, "cd"), (self.dmp.DIFF_DELETE, "e"), (self.dmp.DIFF_EQUAL, "f"), (self.dmp.DIFF_INSERT, "g")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abcdef"), (self.dmp.DIFF_INSERT, "cdfg")], diffs)
        # Multiple eliminations.
        diffs = [(self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2"), (self.dmp.DIFF_EQUAL, "_"), (self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "AB_AB"), (self.dmp.DIFF_INSERT, "1A2_1A2")], diffs)
        # Word boundaries.
        diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_DELETE, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_DELETE, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
        # No overlap elimination.
        diffs = [(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")], diffs)
        # Overlap elimination.
        diffs = [(self.dmp.DIFF_DELETE, "abcxxx"), (self.dmp.DIFF_INSERT, "xxxdef")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_INSERT, "def")], diffs)
        # Reverse overlap elimination.
        diffs = [(self.dmp.DIFF_DELETE, "xxxabc"), (self.dmp.DIFF_INSERT, "defxxx")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_INSERT, "def"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_DELETE, "abc")], diffs)
        # Two overlap eliminations.
        diffs = [(self.dmp.DIFF_DELETE, "abcd1212"), (self.dmp.DIFF_INSERT, "1212efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A3"), (self.dmp.DIFF_INSERT, "3BC")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abcd"), (self.dmp.DIFF_EQUAL, "1212"), (self.dmp.DIFF_INSERT, "efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A"), (self.dmp.DIFF_EQUAL, "3"), (self.dmp.DIFF_INSERT, "BC")], diffs)

    def testDiffCleanupEfficiency(self):
        # Cleanup operationally trivial equalities.
        self.dmp.Diff_EditCost = 4
        # Null case.
        diffs = []
        self.dmp.diff_cleanupEfficiency(diffs)
        self.assertEquals([], diffs)
        # No elimination.
        diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
        self.dmp.diff_cleanupEfficiency(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")], diffs)
        # Four-edit elimination.
        diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
        self.dmp.diff_cleanupEfficiency(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xyz34")], diffs)
        # Three-edit elimination.
        diffs = [(self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
        self.dmp.diff_cleanupEfficiency(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "xcd"), (self.dmp.DIFF_INSERT, "12x34")], diffs)
        # Backpass elimination.
        diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xy"), (self.dmp.DIFF_INSERT, "34"), (self.dmp.DIFF_EQUAL, "z"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "56")]
        self.dmp.diff_cleanupEfficiency(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xy34z56")], diffs)
        # High cost elimination.
        self.dmp.Diff_EditCost = 5
        diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
        self.dmp.diff_cleanupEfficiency(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abwxyzcd"), (self.dmp.DIFF_INSERT, "12wxyz34")], diffs)
        # Restore the default edit cost for later tests.
        self.dmp.Diff_EditCost = 4

    def testDiffPrettyHtml(self):
        # Pretty print.
        diffs = [(self.dmp.DIFF_EQUAL, "a\n"), (self.dmp.DIFF_DELETE, "<B>b</B>"), (self.dmp.DIFF_INSERT, "c&d")]
        self.assertEquals("<span>a¶<br></span><del style=\"background:#ffe6e6;\"><B>b</B></del><ins style=\"background:#e6ffe6;\">c&d</ins>", self.dmp.diff_prettyHtml(diffs))

    def testDiffText(self):
        # Compute the source and destination texts.
        diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy")]
        self.assertEquals("jumps over the lazy", self.dmp.diff_text1(diffs))
        self.assertEquals("jumped over a lazy", self.dmp.diff_text2(diffs))

    def testDiffDelta(self):
        # Convert a diff into delta string.
        diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy"), (self.dmp.DIFF_INSERT, "old dog")]
        text1 = self.dmp.diff_text1(diffs)
        self.assertEquals("jumps over the lazy", text1)
        delta = self.dmp.diff_toDelta(diffs)
        self.assertEquals("=4\t-1\t+ed\t=6\t-3\t+a\t=5\t+old dog", delta)
        # Convert delta string into a diff.
        self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
        # Generates error (19 != 20).
        try:
            self.dmp.diff_fromDelta(text1 + "x", delta)
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass
        # Generates error (19 != 18).
        try:
            self.dmp.diff_fromDelta(text1[1:], delta)
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass
        # Generates error (%c3%xy invalid Unicode).
        try:
            self.dmp.diff_fromDelta("", "+%c3xy")
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass
        # Test deltas with special characters.
        diffs = [(self.dmp.DIFF_EQUAL, u"\u0680 \x00 \t %"), (self.dmp.DIFF_DELETE, u"\u0681 \x01 \n ^"), (self.dmp.DIFF_INSERT, u"\u0682 \x02 \\ |")]
        text1 = self.dmp.diff_text1(diffs)
        self.assertEquals(u"\u0680 \x00 \t %\u0681 \x01 \n ^", text1)
        delta = self.dmp.diff_toDelta(diffs)
        self.assertEquals("=7\t-7\t+%DA%82 %02 %5C %7C", delta)
        # Convert delta string into a diff.
        self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
        # Verify pool of unchanged characters.
        diffs = [(self.dmp.DIFF_INSERT, "A-Z a-z 0-9 - _ . ! ~ * ' ( ) ; / ? : @ & = + $ , # ")]
        text2 = self.dmp.diff_text2(diffs)
        self.assertEquals("A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", text2)
        delta = self.dmp.diff_toDelta(diffs)
        self.assertEquals("+A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", delta)
        # Convert delta string into a diff.
        self.assertEquals(diffs, self.dmp.diff_fromDelta("", delta))

    def testDiffXIndex(self):
        # Translate a location in text1 to text2.
        self.assertEquals(5, self.dmp.diff_xIndex([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 2))
        # Translation on deletion.
        self.assertEquals(1, self.dmp.diff_xIndex([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 3))

    def testDiffLevenshtein(self):
        # Levenshtein with trailing equality.
        self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")]))
        # Levenshtein with leading equality.
        self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234")]))
        # Levenshtein with middle equality.
        self.assertEquals(7, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_INSERT, "1234")]))

    def testDiffBisect(self):
        # Normal.
        a = "cat"
        b = "map"
        # Since the resulting diff hasn't been normalized, it would be ok if
        # the insertion and deletion pairs are swapped.
        # If the order changes, tweak this test as required.
        # sys.maxint is Python 2 only (removed in Python 3).
        self.assertEquals([(self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "m"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "t"), (self.dmp.DIFF_INSERT, "p")], self.dmp.diff_bisect(a, b, sys.maxint))
        # Timeout.
        self.assertEquals([(self.dmp.DIFF_DELETE, "cat"), (self.dmp.DIFF_INSERT, "map")], self.dmp.diff_bisect(a, b, 0))

    def testDiffMain(self):
        # Perform a trivial diff.
        # Null case.
        self.assertEquals([], self.dmp.diff_main("", "", False))
        # Equality.
        self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], self.dmp.diff_main("abc", "abc", False))
        # Simple insertion.
        self.assertEquals([(self.dmp.DIFF_EQUAL, "ab"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "ab123c", False))
        # Simple deletion.
        self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "bc")], self.dmp.diff_main("a123bc", "abc", False))
        # Two insertions.
        self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_INSERT, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "a123b456c", False))
        # Two deletions.
        self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("a123b456c", "abc", False))
        # Perform a real diff.
        # Switch off the timeout.
        self.dmp.Diff_Timeout = 0
        # Simple cases.
        self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b")], self.dmp.diff_main("a", "b", False))
        self.assertEquals([(self.dmp.DIFF_DELETE, "Apple"), (self.dmp.DIFF_INSERT, "Banana"), (self.dmp.DIFF_EQUAL, "s are a"), (self.dmp.DIFF_INSERT, "lso"), (self.dmp.DIFF_EQUAL, " fruit.")], self.dmp.diff_main("Apples are a fruit.", "Bananas are also fruit.", False))
        self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, u"\u0680"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "\t"), (self.dmp.DIFF_INSERT, "\x00")], self.dmp.diff_main("ax\t", u"\u0680x\x00", False))
        # Overlaps.
        self.assertEquals([(self.dmp.DIFF_DELETE, "1"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "y"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "2"), (self.dmp.DIFF_INSERT, "xab")], self.dmp.diff_main("1ayb2", "abxab", False))
        self.assertEquals([(self.dmp.DIFF_INSERT, "xaxcx"), (self.dmp.DIFF_EQUAL, "abc"), (self.dmp.DIFF_DELETE, "y")], self.dmp.diff_main("abcy", "xaxcxabc", False))
        self.assertEquals([(self.dmp.DIFF_DELETE, "ABCD"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "bcd"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "efghijklmnopqrs"), (self.dmp.DIFF_DELETE, "EFGHIJKLMNOefg")], self.dmp.diff_main("ABCDa=bcd=efghijklmnopqrsEFGHIJKLMNOefg", "a-bcd-efghijklmnopqrs", False))
        # Large equality.
        self.assertEquals([(self.dmp.DIFF_INSERT, " "), (self.dmp.DIFF_EQUAL,"a"), (self.dmp.DIFF_INSERT,"nd"), (self.dmp.DIFF_EQUAL," [[Pennsylvania]]"), (self.dmp.DIFF_DELETE," and [[New")], self.dmp.diff_main("a [[Pennsylvania]] and [[New", " and [[Pennsylvania]]", False))
        # Timeout.
        self.dmp.Diff_Timeout = 0.1  # 100ms
        a = "`Twas brillig, and the slithy toves\nDid gyre and gimble in the wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n"
        b = "I am the very model of a modern major general,\nI've information vegetable, animal, and mineral,\nI know the kings of England, and I quote the fights historical,\nFrom Marathon to Waterloo, in order categorical.\n"
        # Increase the text lengths by 1024 times to ensure a timeout.
        for x in range(10):
            a = a + a
            b = b + b
        startTime = time.time()
        self.dmp.diff_main(a, b)
        endTime = time.time()
        # Test that we took at least the timeout period.
        self.assertTrue(self.dmp.Diff_Timeout <= endTime - startTime)
        # Test that we didn't take forever (be forgiving).
        # Theoretically this test could fail very occasionally if the
        # OS task swaps or locks up for a second at the wrong moment.
        self.assertTrue(self.dmp.Diff_Timeout * 2 > endTime - startTime)
        self.dmp.Diff_Timeout = 0
        # Test the linemode speedup.
        # Must be long to pass the 100 char cutoff.
        # Simple line-mode.
        a = "1234567890\n" * 13
        b = "abcdefghij\n" * 13
        self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
        # Single line-mode.
        a = "1234567890" * 13
        b = "abcdefghij" * 13
        self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
        # Overlap line-mode.
        a = "1234567890\n" * 13
        b = "abcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n"
        texts_linemode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, True))
        texts_textmode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, False))
        self.assertEquals(texts_textmode, texts_linemode)
        # Test null inputs.
        try:
            self.dmp.diff_main(None, None)
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass
class MatchTest(DiffMatchPatchTest):
    """MATCH TEST FUNCTIONS

    Exercises the Bitap fuzzy-matching half of diff-match-patch:
    alphabet bitmask construction, the core bitap search, and the
    public match_main entry point.
    """

    def testMatchAlphabet(self):
        """match_alphabet builds the per-character bitmasks used by Bitap."""
        # Initialise the bitmasks for Bitap.
        # Unique characters: each letter maps to a single distinct bit.
        self.assertEquals({"a":4, "b":2, "c":1}, self.dmp.match_alphabet("abc"))
        # Duplicate characters: masks accumulate one bit per occurrence position.
        self.assertEquals({"a":37, "b":18, "c":8}, self.dmp.match_alphabet("abcaba"))

    def testMatchBitap(self):
        """match_bitap(text, pattern, loc) fuzzy-locates pattern near loc.

        Returns the best match index or -1; behaviour is governed by
        Match_Threshold (fuzziness) and Match_Distance (locality).
        """
        self.dmp.Match_Distance = 100
        self.dmp.Match_Threshold = 0.5
        # Exact matches.
        self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 5))
        self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 0))
        # Fuzzy matches.
        self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxhi", 0))
        self.assertEquals(2, self.dmp.match_bitap("abcdefghijk", "cdefxyhijk", 5))
        self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "bxy", 1))
        # Overflow.
        self.assertEquals(2, self.dmp.match_bitap("123456789xx0", "3456789x0", 2))
        self.assertEquals(0, self.dmp.match_bitap("abcdef", "xxabc", 4))
        self.assertEquals(3, self.dmp.match_bitap("abcdef", "defyy", 4))
        self.assertEquals(0, self.dmp.match_bitap("abcdef", "xabcdefy", 0))
        # Threshold test: the same query succeeds or fails as the threshold tightens.
        self.dmp.Match_Threshold = 0.4
        self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
        self.dmp.Match_Threshold = 0.3
        self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
        self.dmp.Match_Threshold = 0.0
        self.assertEquals(1, self.dmp.match_bitap("abcdefghijk", "bcdef", 1))
        self.dmp.Match_Threshold = 0.5
        # Multiple select: the candidate closest to loc wins.
        self.assertEquals(0, self.dmp.match_bitap("abcdexyzabcde", "abccde", 3))
        self.assertEquals(8, self.dmp.match_bitap("abcdexyzabcde", "abccde", 5))
        # Distance test.
        self.dmp.Match_Distance = 10  # Strict location.
        self.assertEquals(-1, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
        self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdxxefg", 1))
        self.dmp.Match_Distance = 1000  # Loose location.
        self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))

    def testMatchMain(self):
        """match_main: shortcut paths (empty/equal inputs) and the bitap fallback."""
        # Full match.
        # Shortcut matches.
        self.assertEquals(0, self.dmp.match_main("abcdef", "abcdef", 1000))
        self.assertEquals(-1, self.dmp.match_main("", "abcdef", 1))
        self.assertEquals(3, self.dmp.match_main("abcdef", "", 3))
        self.assertEquals(3, self.dmp.match_main("abcdef", "de", 3))
        self.assertEquals(3, self.dmp.match_main("abcdef", "defy", 4))
        self.assertEquals(0, self.dmp.match_main("abcdef", "abcdefy", 0))
        # Complex match.
        self.dmp.Match_Threshold = 0.7
        self.assertEquals(4, self.dmp.match_main("I am the very model of a modern major general.", " that berry ", 5))
        self.dmp.Match_Threshold = 0.5
        # Test null inputs: None arguments must raise ValueError.
        try:
            self.dmp.match_main(None, None, 0)
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass
class PatchTest(DiffMatchPatchTest):
    """PATCH TEST FUNCTIONS

    Exercises patch creation, (de)serialization, context expansion,
    splitting, padding and application in diff-match-patch.
    """

    def testPatchObj(self):
        """patch_obj renders itself in GNU-unidiff-like text via str()."""
        # Patch Object.
        p = dmp_module.patch_obj()
        p.start1 = 20
        p.start2 = 21
        p.length1 = 18
        p.length2 = 17
        p.diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, "\nlaz")]
        strp = str(p)
        # Coordinates are 1-based in the text form; "\n" is %-encoded as %0A.
        self.assertEquals("@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n", strp)

    def testPatchFromText(self):
        """patch_fromText parses the textual patch format back into objects."""
        self.assertEquals([], self.dmp.patch_fromText(""))
        strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n"
        self.assertEquals(strp, str(self.dmp.patch_fromText(strp)[0]))
        self.assertEquals("@@ -1 +1 @@\n-a\n+b\n", str(self.dmp.patch_fromText("@@ -1 +1 @@\n-a\n+b\n")[0]))
        self.assertEquals("@@ -1,3 +0,0 @@\n-abc\n", str(self.dmp.patch_fromText("@@ -1,3 +0,0 @@\n-abc\n")[0]))
        self.assertEquals("@@ -0,0 +1,3 @@\n+abc\n", str(self.dmp.patch_fromText("@@ -0,0 +1,3 @@\n+abc\n")[0]))
        # Generates error: malformed input must raise ValueError.
        try:
            self.dmp.patch_fromText("Bad\nPatch\n")
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass

    def testPatchToText(self):
        """patch_toText round-trips patches parsed by patch_fromText."""
        strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
        p = self.dmp.patch_fromText(strp)
        self.assertEquals(strp, self.dmp.patch_toText(p))
        # Multiple hunks round-trip as well.
        strp = "@@ -1,9 +1,9 @@\n-f\n+F\n oo+fooba\n@@ -7,9 +7,9 @@\n obar\n-,\n+.\n tes\n"
        p = self.dmp.patch_fromText(strp)
        self.assertEquals(strp, self.dmp.patch_toText(p))

    def testPatchAddContext(self):
        """patch_addContext grows a patch's context up to Patch_Margin chars."""
        self.dmp.Patch_Margin = 4
        p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
        self.dmp.patch_addContext(p, "The quick brown fox jumps over the lazy dog.")
        self.assertEquals("@@ -17,12 +17,18 @@\n fox \n-jump\n+somersault\n s ov\n", str(p))
        # Same, but not enough trailing context.
        p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
        self.dmp.patch_addContext(p, "The quick brown fox jumps.")
        self.assertEquals("@@ -17,10 +17,16 @@\n fox \n-jump\n+somersault\n s.\n", str(p))
        # Same, but not enough leading context.
        p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
        self.dmp.patch_addContext(p, "The quick brown fox jumps.")
        self.assertEquals("@@ -1,7 +1,8 @@\n Th\n-e\n+at\n qui\n", str(p))
        # Same, but with ambiguity: context must grow until the match is unique.
        p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
        self.dmp.patch_addContext(p, "The quick brown fox jumps. The quick brown fox crashes.")
        self.assertEquals("@@ -1,27 +1,28 @@\n Th\n-e\n+at\n quick brown fox jumps. \n", str(p))

    def testPatchMake(self):
        """patch_make accepts text pairs, diffs, or both, producing equal patches."""
        # Null case.
        patches = self.dmp.patch_make("", "")
        self.assertEquals("", self.dmp.patch_toText(patches))
        text1 = "The quick brown fox jumps over the lazy dog."
        text2 = "That quick brown fox jumped over a lazy dog."
        # Text2+Text1 inputs.
        expectedPatch = "@@ -1,8 +1,7 @@\n Th\n-at\n+e\n qui\n@@ -21,17 +21,18 @@\n jump\n-ed\n+s\n over \n-a\n+the\n laz\n"
        # The second patch must be "-21,17 +21,18", not "-22,17 +21,18" due to rolling context.
        patches = self.dmp.patch_make(text2, text1)
        self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
        # Text1+Text2 inputs.
        expectedPatch = "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
        patches = self.dmp.patch_make(text1, text2)
        self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
        # Diff input.
        diffs = self.dmp.diff_main(text1, text2, False)
        patches = self.dmp.patch_make(diffs)
        self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
        # Text1+Diff inputs.
        patches = self.dmp.patch_make(text1, diffs)
        self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
        # Text1+Text2+Diff inputs (deprecated).
        patches = self.dmp.patch_make(text1, text2, diffs)
        self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
        # Character encoding.
        patches = self.dmp.patch_make("`1234567890-=[]\\;',./", "~!@#$%^&*()_+{}|:\"<>?")
        self.assertEquals("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n", self.dmp.patch_toText(patches))
        # Character decoding.
        diffs = [(self.dmp.DIFF_DELETE, "`1234567890-=[]\\;',./"), (self.dmp.DIFF_INSERT, "~!@#$%^&*()_+{}|:\"<>?")]
        self.assertEquals(diffs, self.dmp.patch_fromText("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n")[0].diffs)
        # Long string with repeats.
        text1 = ""
        for x in range(100):
            text1 += "abcdef"
        text2 = text1 + "123"
        expectedPatch = "@@ -573,28 +573,31 @@\n cdefabcdefabcdefabcdefabcdef\n+123\n"
        patches = self.dmp.patch_make(text1, text2)
        self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
        # Test null inputs: None arguments must raise ValueError.
        try:
            self.dmp.patch_make(None, None)
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass

    def testPatchSplitMax(self):
        """patch_splitMax splits oversized patches to fit Match_MaxBits."""
        # Assumes that Match_MaxBits is 32.
        patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz01234567890", "XabXcdXefXghXijXklXmnXopXqrXstXuvXwxXyzX01X23X45X67X89X0")
        self.dmp.patch_splitMax(patches)
        self.assertEquals("@@ -1,32 +1,46 @@\n+X\n ab\n+X\n cd\n+X\n ef\n+X\n gh\n+X\n ij\n+X\n kl\n+X\n mn\n+X\n op\n+X\n qr\n+X\n st\n+X\n uv\n+X\n wx\n+X\n yz\n+X\n 012345\n@@ -25,13 +39,18 @@\n zX01\n+X\n 23\n+X\n 45\n+X\n 67\n+X\n 89\n+X\n 0\n", self.dmp.patch_toText(patches))
        # A patch that already fits is left untouched.
        patches = self.dmp.patch_make("abcdef1234567890123456789012345678901234567890123456789012345678901234567890uvwxyz", "abcdefuvwxyz")
        oldToText = self.dmp.patch_toText(patches)
        self.dmp.patch_splitMax(patches)
        self.assertEquals(oldToText, self.dmp.patch_toText(patches))
        patches = self.dmp.patch_make("1234567890123456789012345678901234567890123456789012345678901234567890", "abc")
        self.dmp.patch_splitMax(patches)
        self.assertEquals("@@ -1,32 +1,4 @@\n-1234567890123456789012345678\n 9012\n@@ -29,32 +1,4 @@\n-9012345678901234567890123456\n 7890\n@@ -57,14 +1,3 @@\n-78901234567890\n+abc\n", self.dmp.patch_toText(patches))
        patches = self.dmp.patch_make("abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1", "abcdefghij , h : 1 , t : 1 abcdefghij , h : 1 , t : 1 abcdefghij , h : 0 , t : 1")
        self.dmp.patch_splitMax(patches)
        self.assertEquals("@@ -2,32 +2,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n@@ -29,32 +29,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n", self.dmp.patch_toText(patches))

    def testPatchAddPadding(self):
        """patch_addPadding pads patches touching text edges with \\x01..\\x04."""
        # Both edges full.
        patches = self.dmp.patch_make("", "test")
        self.assertEquals("@@ -0,0 +1,4 @@\n+test\n", self.dmp.patch_toText(patches))
        self.dmp.patch_addPadding(patches)
        self.assertEquals("@@ -1,8 +1,12 @@\n %01%02%03%04\n+test\n %01%02%03%04\n", self.dmp.patch_toText(patches))
        # Both edges partial.
        patches = self.dmp.patch_make("XY", "XtestY")
        self.assertEquals("@@ -1,2 +1,6 @@\n X\n+test\n Y\n", self.dmp.patch_toText(patches))
        self.dmp.patch_addPadding(patches)
        self.assertEquals("@@ -2,8 +2,12 @@\n %02%03%04X\n+test\n Y%01%02%03\n", self.dmp.patch_toText(patches))
        # Both edges none.
        patches = self.dmp.patch_make("XXXXYYYY", "XXXXtestYYYY")
        self.assertEquals("@@ -1,8 +1,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
        self.dmp.patch_addPadding(patches)
        self.assertEquals("@@ -5,8 +5,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))

    def testPatchApply(self):
        """patch_apply returns (new_text, [per-patch success flags])."""
        self.dmp.Match_Distance = 1000
        self.dmp.Match_Threshold = 0.5
        self.dmp.Patch_DeleteThreshold = 0.5
        # Null case.
        patches = self.dmp.patch_make("", "")
        results = self.dmp.patch_apply(patches, "Hello world.")
        self.assertEquals(("Hello world.", []), results)
        # Exact match.
        patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped over a lazy dog.")
        results = self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
        self.assertEquals(("That quick brown fox jumped over a lazy dog.", [True, True]), results)
        # Partial match.
        results = self.dmp.patch_apply(patches, "The quick red rabbit jumps over the tired tiger.")
        self.assertEquals(("That quick red rabbit jumped over a tired tiger.", [True, True]), results)
        # Failed match.
        results = self.dmp.patch_apply(patches, "I am the very model of a modern major general.")
        self.assertEquals(("I am the very model of a modern major general.", [False, False]), results)
        # Big delete, small change.
        patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
        results = self.dmp.patch_apply(patches, "x123456789012345678901234567890-----++++++++++-----123456789012345678901234567890y")
        self.assertEquals(("xabcy", [True, True]), results)
        # Big delete, big change 1.
        patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
        results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
        self.assertEquals(("xabc12345678901234567890---------------++++++++++---------------12345678901234567890y", [False, True]), results)
        # Big delete, big change 2.
        self.dmp.Patch_DeleteThreshold = 0.6
        patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
        results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
        self.assertEquals(("xabcy", [True, True]), results)
        self.dmp.Patch_DeleteThreshold = 0.5
        # Compensate for failed patch.
        self.dmp.Match_Threshold = 0.0
        self.dmp.Match_Distance = 0
        patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz--------------------1234567890", "abcXXXXXXXXXXdefghijklmnopqrstuvwxyz--------------------1234567YYYYYYYYYY890")
        results = self.dmp.patch_apply(patches, "ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567890")
        self.assertEquals(("ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567YYYYYYYYYY890", [False, True]), results)
        self.dmp.Match_Threshold = 0.5
        self.dmp.Match_Distance = 1000
        # No side effects.
        patches = self.dmp.patch_make("", "test")
        patchstr = self.dmp.patch_toText(patches)
        results = self.dmp.patch_apply(patches, "")
        self.assertEquals(patchstr, self.dmp.patch_toText(patches))
        # No side effects with major delete.
        patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "Woof")
        patchstr = self.dmp.patch_toText(patches)
        self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
        self.assertEquals(patchstr, self.dmp.patch_toText(patches))
        # Edge exact match.
        patches = self.dmp.patch_make("", "test")
        # BUG FIX: the original discarded this call's return value and then
        # asserted against the stale ``results`` left over from the
        # "No side effects" case above (which only passed by coincidence).
        results = self.dmp.patch_apply(patches, "")
        self.assertEquals(("test", [True]), results)
        # Near edge exact match.
        patches = self.dmp.patch_make("XY", "XtestY")
        results = self.dmp.patch_apply(patches, "XY")
        self.assertEquals(("XtestY", [True]), results)
        # Edge partial match.
        patches = self.dmp.patch_make("y", "y123")
        results = self.dmp.patch_apply(patches, "x")
        self.assertEquals(("x123", [True]), results)
if __name__ == "__main__":
    # Discover and run all diff/match/patch test cases when executed directly.
    unittest.main()
| {
"content_hash": "91bf216f3234330e7303859a42c24eb1",
"timestamp": "",
"source": "github",
"line_count": 870,
"max_line_length": 408,
"avg_line_length": 47.9367816091954,
"alnum_prop": 0.645366263038005,
"repo_name": "zapier/diff-match-patch-cython",
"id": "b670c153567115429699617adbc3d2451d22f0a8",
"size": "41727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python2/diff_match_patch/diff_match_patch_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "220731"
}
],
"symlink_target": ""
} |
import json
from .stage01_isotopomer_spectrumAccuracy_query import stage01_isotopomer_spectrumAccuracy_query
from SBaaS_base.sbaas_template_io import sbaas_template_io
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
class stage01_isotopomer_spectrumAccuracy_io(stage01_isotopomer_spectrumAccuracy_query,sbaas_template_io):
    """I/O layer for stage01 isotopomer spectrum-accuracy data.

    Combines the query mixin (database access to the dataStage01Averages /
    dataStage01AveragesNormSum tables) with the generic SBaaS template I/O
    helpers, and exports measured-vs-theoretical spectrum comparisons to CSV.
    """

    def export_compareAveragesSpectrumToTheoretical(self, experiment_id_I, filename, sample_name_abbreviations_I=None,scan_types_I=None,met_ids_I = None):
        '''export a comparison of calculated spectrum to theoretical spectrum

        Iterates time points -> sample abbreviations -> scan types -> metabolites
        from the dataStage01Averages tables, collecting precursor and product
        fragment rows, then writes one CSV with a two-row header
        (column groups: theoretical / measured / measured_cv / abs_difference,
        each spanning 50 mass-isotopomer columns a0..a49).

        Optional filters: sample_name_abbreviations_I, scan_types_I, met_ids_I
        restrict the export to the given subsets.
        '''
        # query the data
        data = [];
        # get time points
        time_points = self.get_timePoint_experimentID_dataStage01Averages(experiment_id_I);
        for tp in time_points:
            print('Reporting average precursor and product spectrum from isotopomer normalized for time-point ' + str(tp));
            if sample_name_abbreviations_I:
                sample_abbreviations = sample_name_abbreviations_I;
                # query sample types from sample name abbreviations and time-point from _dataStage01Averages
                # NOTE(review): on this branch sample_types_lst is never populated,
                # so the sample_types_lst[sna_cnt] lookups below will raise
                # NameError/IndexError -- confirm intended usage of
                # sample_name_abbreviations_I.
            else:
                # get sample names and sample name abbreviations
                sample_abbreviations = [];
                sample_types = ['Unknown','QC'];
                sample_types_lst = [];
                for st in sample_types:
                    sample_abbreviations_tmp = [];
                    sample_abbreviations_tmp = self.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_dataStage01Averages(experiment_id_I,st,tp);
                    sample_abbreviations.extend(sample_abbreviations_tmp);
                    # Keep sample_types_lst parallel to sample_abbreviations.
                    sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
            for sna_cnt,sna in enumerate(sample_abbreviations):
                print('Reporting average precursor and product spectrum from isotopomer normalized for sample name abbreviation ' + sna);
                # get the scan_types
                if scan_types_I:
                    # Intersect the available scan types with the requested ones.
                    scan_types = [];
                    scan_types_tmp = [];
                    scan_types_tmp = self.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Averages(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
                    scan_types = [st for st in scan_types_tmp if st in scan_types_I];
                else:
                    scan_types = [];
                    scan_types = self.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Averages(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
                for scan_type in scan_types:
                    print('Reporting average precursor and product spectrum for scan type ' + scan_type)
                    # met_ids
                    if not met_ids_I:
                        met_ids = [];
                        met_ids = self.get_metIDs_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanType_dataStage01Averages( \
                            experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type);
                    else:
                        met_ids = met_ids_I;
                    if not(met_ids): continue #no component information was found
                    for met in met_ids:
                        print('Reporting average precursor and product spectrum for metabolite ' + met);
                        # Collect precursor rows, then product rows, for this metabolite.
                        data_tmp = [];
                        data_tmp = self.get_dataPrecursorFragment_experimentIDAndTimePointSampleAbbreviationAndSampleTypeAndScanTypeAndMetID_dataStage01Averages(\
                            experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type,met);
                        data.extend(data_tmp);
                        data_tmp = [];
                        data_tmp = self.get_dataProductFragment_experimentIDAndTimePointSampleAbbreviationAndSampleTypeAndScanTypeAndMetID_dataStage01Averages(\
                            experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type,met);
                        data.extend(data_tmp);
        # write the comparison to file
        # Header row 1: group labels; each group spans 50 columns (1 label + 49 blanks).
        headerL1 = ['sample_name_abbreviation','time_point','met_id','fragment_formula','C_pos','scan_type','theoretical'] + ['' for i in range(49)]\
            + ['measured'] + ['' for i in range(49)]\
            + ['measured_cv'] + ['' for i in range(49)]\
            + ['abs_difference'] + ['' for i in range(49)];
        # Header row 2: mass-isotopomer labels a0..a49 under each group.
        headerL2 = ['' for i in range(6)] + ['a' + str(i) for i in range(50)]\
            + ['a' + str(i) for i in range(50)]\
            + ['a' + str(i) for i in range(50)]\
            + ['a' + str(i) for i in range(50)];
        header = [];
        header.append(headerL1);
        header.append(headerL2);
        export = base_exportData(data);
        export.write_headersAndElements2csv(header,filename);

    def export_compareAveragesNormSumSpectrumToTheoretical(self, experiment_id_I, filename, sample_name_abbreviations_I=None,scan_types_I=None,met_ids_I = None):
        '''export a comparison of calculated spectrum to theoretical spectrum

        Same traversal as export_compareAveragesSpectrumToTheoretical but reads
        the normalized-sum tables (dataStage01AveragesNormSum) and appends an
        extra ``average_accuracy`` column group to the CSV header.
        '''
        # query the data
        data = [];
        # get time points
        time_points = self.get_timePoint_experimentID_dataStage01AveragesNormSum(experiment_id_I);
        for tp in time_points:
            print('Reporting average precursor and product spectrum from isotopomer normalized for time-point ' + str(tp));
            if sample_name_abbreviations_I:
                sample_abbreviations = sample_name_abbreviations_I;
                # query sample types from sample name abbreviations and time-point from data_stage01_isotopomer_normalized
                # NOTE(review): same caveat as the method above -- sample_types_lst
                # is undefined on this branch.
            else:
                # get sample names and sample name abbreviations
                sample_abbreviations = [];
                sample_types = ['Unknown','QC'];
                sample_types_lst = [];
                for st in sample_types:
                    sample_abbreviations_tmp = [];
                    sample_abbreviations_tmp = self.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_dataStage01AveragesNormSum(experiment_id_I,st,tp);
                    sample_abbreviations.extend(sample_abbreviations_tmp);
                    # Keep sample_types_lst parallel to sample_abbreviations.
                    sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
            for sna_cnt,sna in enumerate(sample_abbreviations):
                print('Reporting average precursor and product spectrum from isotopomer normalized for sample name abbreviation ' + sna);
                # get the scan_types
                if scan_types_I:
                    # Intersect the available scan types with the requested ones.
                    scan_types = [];
                    scan_types_tmp = [];
                    scan_types_tmp = self.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01AveragesNormSum(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
                    scan_types = [st for st in scan_types_tmp if st in scan_types_I];
                else:
                    scan_types = [];
                    scan_types = self.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01AveragesNormSum(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
                for scan_type in scan_types:
                    print('Reporting average precursor and product spectrum for scan type ' + scan_type)
                    # met_ids
                    if not met_ids_I:
                        met_ids = [];
                        met_ids = self.get_metIDs_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanType_dataStage01AveragesNormSum( \
                            experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type);
                    else:
                        met_ids = met_ids_I;
                    if not(met_ids): continue #no component information was found
                    for met in met_ids:
                        print('Reporting average precursor and product spectrum for metabolite ' + met);
                        # Collect precursor rows, then product rows, for this metabolite.
                        data_tmp = [];
                        data_tmp = self.get_dataPrecursorFragment_experimentIDAndTimePointSampleAbbreviationAndSampleTypeAndScanTypeAndMetID_dataStage01AveragesNormSum(\
                            experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type,met);
                        data.extend(data_tmp);
                        data_tmp = [];
                        data_tmp = self.get_dataProductFragment_experimentIDAndTimePointSampleAbbreviationAndSampleTypeAndScanTypeAndMetID_dataStage01AveragesNormSum(\
                            experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type,met);
                        data.extend(data_tmp);
        # write the comparison to file
        # Header row 1: group labels; 50 columns per group plus a trailing
        # average_accuracy column.
        headerL1 = ['sample_name_abbreviation','time_point','met_id','fragment_formula','C_pos','scan_type','theoretical'] + ['' for i in range(49)]\
            + ['measured'] + ['' for i in range(49)]\
            + ['measured_cv'] + ['' for i in range(49)]\
            + ['abs_difference'] + ['' for i in range(49)]\
            + ['average_accuracy'];
        # Header row 2: mass-isotopomer labels a0..a49 under each group.
        headerL2 = ['' for i in range(6)] + ['a' + str(i) for i in range(50)]\
            + ['a' + str(i) for i in range(50)]\
            + ['a' + str(i) for i in range(50)]\
            + ['a' + str(i) for i in range(50)]\
            + [''];
        header = [];
        header.append(headerL1);
        header.append(headerL2);
        export = base_exportData(data);
        export.write_headersAndElements2csv(header,filename);
| {
"content_hash": "4be134e9cccfcc186f2a0c5b469caa04",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 193,
"avg_line_length": 67.1118881118881,
"alnum_prop": 0.5951859956236324,
"repo_name": "dmccloskey/SBaaS_isotopomer",
"id": "fa8c29e531f1c70ce625c1cfd088001f1dfe697f",
"size": "9606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SBaaS_isotopomer/stage01_isotopomer_spectrumAccuracy_io.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "544550"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import xml.etree.ElementTree as ET
import six
from leather.data_types import Text
from leather.series import CategorySeries
from leather.shapes.base import Shape
from leather import theme
from leather.utils import DummySeries, X, Y
class Dots(Shape):
    """
    Render a series of data as circular dot markers.

    :param fill_color:
        The color used to fill each dot, or a :func:`.style_function`
        computing a color per datum. If omitted, default chart palette
        colors are used.
    :param radius:
        The radius of each rendered dot, or a :func:`.style_function`.
        Defaults to :data:`.theme.default_dot_radius`.
    """
    def __init__(self, fill_color=None, radius=None):
        self._fill_color = fill_color
        # A falsy radius (None) falls back to the theme default.
        if radius:
            self._radius = radius
        else:
            self._radius = theme.default_dot_radius

    def validate_series(self, series):
        """
        Verify this shape can be used to render a given series.

        Dots require plottable (non-text) values on both axes.
        """
        for dimension in (X, Y):
            if series.data_type(dimension) is Text:
                raise ValueError('Dots do not support Text values.')

        return True

    def to_svg(self, width, height, x_scale, y_scale, series, palette):
        """
        Render the series as a ``<g>`` element containing one ``<circle>``
        per datum. Data with a missing x or y value are skipped.
        """
        container = ET.Element('g')
        container.set('class', 'series dots')

        # Draw one palette color per z-category, lazily, only when needed.
        category_colors = defaultdict(lambda: next(palette))

        for datum in series.data():
            if datum.x is None or datum.y is None:
                continue

            px = x_scale.project(datum.x, 0, width)
            py = y_scale.project(datum.y, height, 0)

            if callable(self._fill_color):
                fill = self._fill_color(datum)
            else:
                fill = self._fill_color or category_colors[datum.z]

            radius = self._radius(datum) if callable(self._radius) else self._radius

            container.append(ET.Element('circle',
                cx=six.text_type(px),
                cy=six.text_type(py),
                r=six.text_type(radius),
                fill=fill
            ))

        return container

    def legend_to_svg(self, series, palette):
        """
        Render the legend entries for these shapes.
        """
        if isinstance(series, CategorySeries):
            entries = []

            for category in series.categories():
                entries.extend(Shape.legend_to_svg(self, DummySeries(category), palette))

            return entries

        return list(Shape.legend_to_svg(self, series, palette))
| {
"content_hash": "df056448878e9066041329136f7ad77f",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 87,
"avg_line_length": 30.15909090909091,
"alnum_prop": 0.5715900527505652,
"repo_name": "onyxfish/leather",
"id": "0a5ebfab738c750b2bc9940cb989f294cb356cd8",
"size": "2677",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "leather/shapes/dots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82139"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
} |
from rpy2 import robjects as ro
import numpy as np
import os
# Load the R 'survival' package into the embedded R session up front.
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
# BASE_DIR: four directory levels above this script (the repository root
# that contains the tcga_data/ tree).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were three clinical files with nonredundant data. V4.0 is in general the most uptodate, but it is possible
## for data in the other files to be more uptodate. As a result, clinical data will be merged.
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
# Skip the two sub-header rows that follow the column names in TCGA files.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    # Same patient as the previous row: overwrite with the newer record.
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            # Neither column holds a numeric day count; keep the older record.
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical1=clinical1[1:]
# Parse the v2.1 follow-up file with the same dedup-by-last-listing logic
# as the v4.0 file above.
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v2.1_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
# Skip the two sub-header rows.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
    # Same patient as the previous row: overwrite with the newer record.
    if clinical2[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
##removing the empty value
clinical2=clinical2[1:]
##merging the data
## Merge the v2.1 records (clinical2) with the v4.0 records (clinical1),
## keeping whichever record has the longer (i.e. more recent) follow-up time;
## ties go to clinical1, as in the original code.
# PERF: the original rebuilt [j[0] for j in clinical1] on every loop
# iteration, making the merge O(n^2); hoist the ID list once instead.
clinical1_ids = [j[0] for j in clinical1]
new_clinical = []
for i in clinical2:
    if i[0] not in clinical1_ids:
        # Patient only appears in the v2.1 file.
        new_clinical.append(i)
    else:
        match = clinical1[clinical1_ids.index(i[0])]
        if i[1] <= match[1]:
            new_clinical.append(match)
        else:
            new_clinical.append(i)
## Add patients that only appear in the v4.0 file.
# Track already-merged IDs in a set, updated as entries are appended,
# instead of rebuilding a membership list per iteration.
merged_ids = set(j[0] for j in new_clinical)
for i in clinical1:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
# Parse the v1.5 follow-up file with the same dedup-by-last-listing logic.
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v1.5_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
# Skip the two sub-header rows.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical3=[['','','']]
for i in data:
    # Same patient as the previous row: overwrite with the newer record.
    if clinical3[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical3[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical3[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical3.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical3.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
##removing the empty value
clinical3=clinical3[1:]
##merging the data
## Merge the v1.5 records (clinical3) into the already-merged set
## (new_clinical), again keeping the record with the longer follow-up time
## (ties go to new_clinical).
# PERF: hoist the ID list out of the loop (originally rebuilt every
# iteration, O(n^2)).
new_clinical_ids = [j[0] for j in new_clinical]
newer_clinical = []
for i in clinical3:
    if i[0] not in new_clinical_ids:
        newer_clinical.append(i)
    else:
        match = new_clinical[new_clinical_ids.index(i[0])]
        if i[1] <= match[1]:
            newer_clinical.append(match)
        else:
            newer_clinical.append(i)
## Add patients that only appear in the previously merged data.
merged_ids = set(j[0] for j in newer_clinical)
for i in new_clinical:
    if i[0] not in merged_ids:
        newer_clinical.append(i)
        merged_ids.add(i[0])
## Grade, sex, and age information were taken from the "clinical_patient" file.
## A dictionary was created for sex and grade.
# more_clinical maps Patient ID -> [grade code, sex code, age at diagnosis].
more_clinical = {}
# Histological type -> integer grade code used downstream.
grade_dict = {
    'Infiltrating Ductal Carcinoma': 1,
    'Metaplastic Carcinoma': 3,
    'Mucinous Carcinoma': 4,
    'Medullary Carcinoma': 5,
    'Infiltrating Lobular Carcinoma': 6,
}
sex_dict = {
    'MALE': 0,
    'FEMALE': 1,
}
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f = open(os.path.join(BASE_DIR, 'tcga_data', 'BRCA', 'clinical', 'nationwidechildrens.org_clinical_patient_brca.txt'))
columns = f.readline().split('\t')
grade_column = columns.index('histological_type')
sex_column = columns.index('gender')
age_column = columns.index('age_at_diagnosis')
patient_column = columns.index('bcr_patient_barcode')
alive_column = columns.index('last_contact_days_to')
death_column = columns.index('death_days_to')
# Skip the two sub-header rows.
f.readline()
f.readline()
data = [i.split('\t') for i in f]
clinical4 = []
for i in data:
    try:
        more_clinical[i[patient_column]] = [grade_dict[i[grade_column]], sex_dict[i[sex_column]], int(i[age_column])]
        if re.search('^[0-9]+$', i[alive_column]):
            clinical4.append([i[patient_column], int(i[alive_column]), 'Alive'])
        elif re.search('^[0-9]+$', i[death_column]):
            clinical4.append([i[patient_column], int(i[death_column]), 'Dead'])
        else:
            pass
    except (KeyError, ValueError, IndexError):
        # Skip patients with a histological type or sex not in the dicts
        # (KeyError), a non-integer age (ValueError), or a short row
        # (IndexError).  The original bare ``except`` also hid genuine bugs.
        pass
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
# PERF: hoist the ID list out of the loop (originally rebuilt every
# iteration, O(n^2)); keep the record with the longer follow-up time,
# ties going to newer_clinical.
newer_clinical_ids = [j[0] for j in newer_clinical]
newest_clinical = []
for i in clinical4:
    if i[0] not in newer_clinical_ids:
        newest_clinical.append(i)
    else:
        match = newer_clinical[newer_clinical_ids.index(i[0])]
        if i[1] <= match[1]:
            newest_clinical.append(match)
        else:
            newest_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
merged_ids = set(j[0] for j in newest_clinical)
for i in newer_clinical:
    if i[0] not in merged_ids:
        newest_clinical.append(i)
        merged_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in newest_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        ## The sample-type code is the 4th barcode field (last char dropped).
        if i[1].split('-')[3][:-1]=='01':
            ## Patient ID = first three barcode fields joined by '-'.
            ## Simplified from the original ''.join(zip(['','-','-'],...))
            ## construction, which produced exactly this string.
            x='-'.join(i[1].split('-')[:3])
            ## setdefault(...).append(...) replaces get(x,[])+[...]: same
            ## resulting dict, without rebuilding the list on every file.
            TCGA_to_mrna.setdefault(x,[]).append(i[0])
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() was removed in Python 3; the `in` operator is the
    ## portable equivalent and behaves identically under Python 2.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    ## i[-1] is the list of mRNA file names attached to this patient.
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','mrna',j))
        f.readline()
        ## Each parsed row: [gene symbol (second part of the 'id|symbol'-style
        ## first column), normalized expression value (last column)].
        ## NOTE(review): this comprehension reuses `i` as its loop variable;
        ## under Python 2 the comprehension variable LEAKS and clobbers the
        ## outer patient record `i`.  The code still works only because the
        ## `for j in i[-1]` iterator was already created, and `i` is rebound
        ## at the top of the next outer iteration.  Under Python 3 the leak
        ## disappears but this script is Python 2 elsewhere (has_key usage).
        temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## Pair each symbol (taken from the first file) with the element-wise
        ## mean across files.  NOTE(review): zip() returns a list in Python 2;
        ## in Python 3 this would store a one-shot zip iterator instead.
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## NOTE(review): [[]]*n aliases ONE shared empty list n times.  This is only
## safe here because the update below rebinds each slot with `+` (which
## builds a new list) rather than mutating in place with append().
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    ## temp collects the i-th gene's [symbol, value] pair for every patient.
    temp=[]
    for j in genes:
        temp.append(j[i])
    ## count = number of patients with zero expression for this gene.
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## Keep the gene only if <25% of patients have zero expression AND the
    ## median normalized value exceeds 1 RSEM.
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
with open(os.path.join(BASE_DIR,'mrna','cox','BRCA','final_genes.txt'),'w') as gene_file:
    for row in final_genes:
        gene_file.write(str(row)+'\n')
##Performing Cox regression on all of the genes in final_genes
## Vital status is encoded for the Surv() event indicator: 0=censored, 1=event.
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    ## kaplan rows: [time, vital status, grade code, sex, age, expression].
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## The five loops below dummy-code the histological type (kaplan row
    ## index 2) into 0/1 indicator covariates for the Cox model.
    ## NOTE(review): codes 1/3/4/5/6 presumably map to the grade_dict defined
    ## earlier in the file -- confirm code 2 is intentionally the baseline.
    ##ductal
    ductal=[]
    for ii in kaplan:
        if ii[2]==1:
            ductal.append(1)
        else:
            ductal.append(0)
    ##metaplastic
    metaplastic=[]
    for ii in kaplan:
        if ii[2]==3:
            metaplastic.append(1)
        else:
            metaplastic.append(0)
    ##mucinous
    mucinous=[]
    for ii in kaplan:
        if ii[2]==4:
            mucinous.append(1)
        else:
            mucinous.append(0)
    ##medullary
    medullary=[]
    for ii in kaplan:
        if ii[2]==5:
            medullary.append(1)
        else:
            medullary.append(0)
    ##lobular
    lobular=[]
    for ii in kaplan:
        if ii[2]==6:
            lobular.append(1)
        else:
            lobular.append(0)
    ro.globalenv['ductal']=ro.IntVector(ductal)
    ro.globalenv['metaplastic']=ro.IntVector(metaplastic)
    ro.globalenv['mucinous']=ro.IntVector(mucinous)
    ro.globalenv['medullary']=ro.IntVector(medullary)
    ro.globalenv['lobular']=ro.IntVector(lobular)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + ductal + metaplastic + mucinous + medullary + lobular + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if no output line starts with 'gene', coeff/pvalue keep
    ## their values from the PREVIOUS iteration (or are undefined on the very
    ## first one) -- confirm coxph output always contains the 'gene' row.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
with open(os.path.join(BASE_DIR,'mrna','cox','BRCA','coeffs_pvalues.txt'),'w') as out_file:
    for gene_name,coeff_value,p_value in zip(genes,coeffs,pvalues):
        out_file.write('\t'.join([gene_name,coeff_value,p_value])+'\n')
| {
"content_hash": "0f4d2ac37f0a810be5d29501988855b7",
"timestamp": "",
"source": "github",
"line_count": 413,
"max_line_length": 142,
"avg_line_length": 33.70944309927361,
"alnum_prop": 0.634822582962218,
"repo_name": "OmnesRes/onco_lnc",
"id": "87e8cbe0decd6b953b7b3459d31c65261f926c31",
"size": "14067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mrna/cox/BRCA/cox_regression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1141183"
}
],
"symlink_target": ""
} |
import gtk # Linux DBus Service
import dbus
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
from time import strftime # use for clock simulation - shows time!
from time import sleep # use for delay in loops - wait for n sec.!
from threading import Thread # use to create a single threat for time
import sys # for unicode_to_kos0006u
import types # for unicode_to_kos0006u
import socket # check if tinker is available
from tinkerforge.ip_connection import IPConnection # Tinker bindings
from tinkerforge.brick_master import Master
from tinkerforge.bricklet_io16 import IO16
from tinkerforge.bricklet_rotary_poti import RotaryPoti
from tinkerforge.bricklet_lcd_20x4 import LCD20x4
from tinkerforge.bricklet_joystick import Joystick
from tinkerforge.bricklet_industrial_quad_relay import IndustrialQuadRelay
try: # import myown Board and Menu Library!
    from Board import Board as B
    from Menu import Menu as M
except ImportError as err:
    # Python 2 print statement: the failed import is only logged and the
    # module keeps loading, so B/M may be undefined later if these local
    # libraries are missing.
    print err
def isOpen(ip,port): # check socket # used to check if tinker is online
    """Return True if a TCP connection to (ip, port) can be established.

    Fixes over the original:
      * the socket is now always closed (it leaked on both the success and
        the failure path);
      * a connect timeout bounds the wait instead of blocking on the OS
        default when the host silently drops packets;
      * the bare ``except:`` is narrowed to the errors this probe is meant
        to swallow (connection failures and a non-numeric port).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(5)  # bound the probe; brickd on a LAN answers well within this
    try:
        s.connect((ip, int(port)))
        s.shutdown(socket.SHUT_RDWR)  # SHUT_RDWR == 2, as in the original
        return True
    except (socket.error, ValueError):
        return False
    finally:
        s.close()
class MyDBUSService(dbus.service.Object):
    """Session-bus D-Bus service that starts, reports on, and shuts down the
    two Tinkerforge stacks: the relay 'Board' and the LCD/joystick 'Menu'.

    NOTE(review): the hardware bring-up code in __init__ is duplicated
    verbatim in startBoard()/startMenu(); consider extracting shared helpers.
    """
    def __init__(self):
        # Claim the well-known bus name and export this object at the
        # matching object path on the user's session bus.
        bus_name = dbus.service.BusName('org.limeblack.roomcontroll', bus=dbus.SessionBus())
        dbus.service.Object.__init__(self, bus_name, '/org/limeblack/roomcontroll')
        try:
            print 'Start initalisation'
            self.BoardIsRunning = False
            self.MenuIsRunning = False
            # Standard brickd port shared by both stacks.
            self.PORT = 4223
            ### Connection for Menu
            self.MENU_HOST = "192.168.0.150" # Manually Set IP of Controller Board
            self.MENU_lcdUID = "gFt" # LCD Screen
            self.MENU_jskUID = "hAP" # Joystick
            ### END MENU CONNECTION
            ### Connection for Board
            self.BOARD_HOST = "192.168.0.111"
            self.BOARD_mstUID = "62eUEf" # master brick
            self.BOARD_io1UID = "ghh" # io16
            self.BOARD_lcdUID = "9ew" # lcd screen 20x4
            self.BOARD_iqrUID = "eRN" # industrial quad relay
            #### END BOARD CONNECTION
            # Only attempt the TCP connection when the probe succeeds, so a
            # powered-off stack does not abort service start-up.
            if isOpen(self.BOARD_HOST, self.PORT):
                self.BoardIsRunning = True
                self.BOARD_ipcon = IPConnection() # Create IP connection
                self.mst = Master(self.BOARD_mstUID, self.BOARD_ipcon) # Master Brick
                self.io1 = IO16(self.BOARD_io1UID, self.BOARD_ipcon) # io16
                self.lcd1 = LCD20x4(self.BOARD_lcdUID, self.BOARD_ipcon) # lcd20x4
                self.iqr = IndustrialQuadRelay(self.BOARD_iqrUID, self.BOARD_ipcon) # Create device object
                self.BOARD_ipcon.connect(self.BOARD_HOST, self.PORT) # Connect to brickd
                # create Board instance
                self.BB = B(self.mst, self.io1, self.lcd1, self.iqr, self.BOARD_ipcon)
            else:
                print 'board offline'
            if isOpen(self.MENU_HOST, self.PORT):
                self.MenuIsRunning = True
                # Connect to WLAN Controller
                self.MENU_ipcon = IPConnection() # Create IP connection
                self.lcd = LCD20x4(self.MENU_lcdUID, self.MENU_ipcon) # Create device object LCD
                self.jsk = Joystick(self.MENU_jskUID, self.MENU_ipcon) # Create device object JOYSTICK
                # Don't use device before ipcon is connected
                self.MENU_ipcon.connect(self.MENU_HOST, self.PORT) # Connect to brickd
                # create Menu instance with the nessesary Hardware # IPCON to close Tinker Connection
                self.MM = M(self.jsk, self.lcd, self.MENU_ipcon)
            else:
                print 'menu is offline'
            print 'Initialisation ready!'
        except Exception as errtxt:
            # Initialization failures are only printed; the service object
            # still exports its methods on the bus.
            print errtxt
    @dbus.service.method('org.limeblack.roomcontroll')
    def status(self):
        # Report the running flags of both stacks as a human-readable string.
        return 'Board: '+str(self.BoardIsRunning)+'\nMenu: '+str(self.MenuIsRunning)
    @dbus.service.method('org.limeblack.roomcontroll')
    def startBoard(self):
        # (Re)connect the relay Board stack; mirrors the __init__ bring-up.
        if self.BoardIsRunning: return 'Board already running!'
        if isOpen(self.BOARD_HOST, self.PORT):
            self.BoardIsRunning = True
            self.BOARD_ipcon = IPConnection() # Create IP connection
            self.mst = Master(self.BOARD_mstUID, self.BOARD_ipcon) # Master Brick
            self.io1 = IO16(self.BOARD_io1UID, self.BOARD_ipcon) # io16
            self.lcd1 = LCD20x4(self.BOARD_lcdUID, self.BOARD_ipcon) # lcd20x4
            self.iqr = IndustrialQuadRelay(self.BOARD_iqrUID, self.BOARD_ipcon) # Create device object
            self.BOARD_ipcon.connect(self.BOARD_HOST, self.PORT) # Connect to brickd
            # create Board instance
            self.BB = B(self.mst, self.io1, self.lcd1, self.iqr, self.BOARD_ipcon)
        else:
            return 'Board is offline'
        return "Hello, Board successfully started!"
    @dbus.service.method('org.limeblack.roomcontroll')
    def startMenu(self):
        # (Re)connect the LCD/joystick Menu stack; mirrors the __init__ bring-up.
        if self.MenuIsRunning: return 'Menu already running!'
        if isOpen(self.MENU_HOST, self.PORT):
            self.MenuIsRunning = True
            # Connect to WLAN Controller
            self.MENU_ipcon = IPConnection() # Create IP connection
            self.lcd = LCD20x4(self.MENU_lcdUID, self.MENU_ipcon) # Create device object LCD
            self.jsk = Joystick(self.MENU_jskUID, self.MENU_ipcon) # Create device object JOYSTICK
            # Don't use device before ipcon is connected
            self.MENU_ipcon.connect(self.MENU_HOST, self.PORT) # Connect to brickd
            # create Menu instance with the nessesary Hardware # IPCON to close Tinker Connection
            self.MM = M(self.jsk, self.lcd, self.MENU_ipcon)
        else:
            return 'Menu is offline'
        return "Hello, Menu successfully started!"
    @dbus.service.method('org.limeblack.roomcontroll')
    def bye(self):
        # Tear down whichever stacks are running; quit() presumably closes
        # the Tinkerforge IP connections -- defined in Board/Menu libraries.
        if self.BoardIsRunning:
            self.BoardIsRunning = False
            self.BB.quit()
        if self.MenuIsRunning:
            self.MenuIsRunning = False
            self.MM.quit()
        return "Shutdown successfully!"
# Install the GLib main loop as dbus-python's default BEFORE creating the
# service object, then block in the GTK main loop to serve incoming calls.
DBusGMainLoop(set_as_default=True)
myservice = MyDBUSService()
gtk.main()
| {
"content_hash": "b5154532b6bba412b1e5f0cf04d23c87",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 106,
"avg_line_length": 38.132183908045974,
"alnum_prop": 0.6088922381311228,
"repo_name": "DeathPoison/roomControll",
"id": "d5acd6a6215e25a6539dddb305beaeab1285b89e",
"size": "6763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DBusSocket/OLD_dbusService.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "81722"
}
],
"symlink_target": ""
} |
import click
import fiona
from shapely.geometry import shape
import networkx as nx
import matplotlib.pyplot as plt
import surficial as srf
@click.command()
@click.argument('alignment', nargs=1, type=click.Path(exists=True))
@click.pass_context
def network(ctx, alignment):
    """Plots the network graph
    \b
    Example:
    surficial network stream_ln.shp
    """
    # Materialize the stream-line geometries, build the alignment graph,
    # then hand it to networkx for drawing.
    with fiona.open(alignment) as source:
        geometries = [shape(record['geometry']) for record in source]
        graph = srf.Alignment(geometries)
    # draw labeled white nodes on a single full-figure axes
    axes = plt.figure().add_subplot(111)
    nx.draw(graph, ax=axes, with_labels=True, node_color='w')
    plt.show()
| {
"content_hash": "93b24bb7d50919b6883b3ef669650314",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 67,
"avg_line_length": 21.451612903225808,
"alnum_prop": 0.6827067669172933,
"repo_name": "mrahnis/surficial",
"id": "373f860c38cc575e243ff10b55f6238a6357dde4",
"size": "665",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "surficial/cli/network.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PowerShell",
"bytes": "518"
},
{
"name": "Python",
"bytes": "155952"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as pylab
import numpy as np
def load_data(filename='julyTemps.txt'):
    """Parse a whitespace-delimited temperature table into (lows, highs).

    The first 6 lines are header and are skipped; each remaining row is
    expected to hold at least three columns: day, high, low.

    :param filename: path to the data file; defaults to 'julyTemps.txt'
        so existing zero-argument callers are unchanged (backward-compatible
        generalization of the original hard-coded name).
    :returns: tuple ``(low, high)`` of parallel lists of ints.
    """
    with open(filename, 'r') as f:
        raw_data = f.readlines()[6:]
    high = []
    low = []
    for line in raw_data:
        fields = line.strip().split()
        high.append(int(fields[1]))
        low.append(int(fields[2]))
    return (low, high)
def produce_plot(low, high):
    """Plot the day-by-day temperature range (high minus low) and show it."""
    # Element-wise high - low for each day; np.subtract accepts plain lists.
    temp_ranges = np.subtract(high, low)
    pylab.plot(temp_ranges)
    pylab.title('Day by Day Ranges in Temperature in Boston in July 2012')
    pylab.xlabel('Days')
    pylab.ylabel('Temperature Ranges')
    pylab.show()
# Script entry point: load the temperature table and render the plot.
(low, high) = load_data()
produce_plot(low, high)
| {
"content_hash": "03a633cc9d26ee2a87d7176089398701",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 74,
"avg_line_length": 23.79310344827586,
"alnum_prop": 0.5942028985507246,
"repo_name": "NicholasAsimov/courses",
"id": "fb682b30b860574966cab4e138b3266e5e4765c7",
"size": "690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "6.00.2x/week1/lecture1/problem3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "25637"
},
{
"name": "Python",
"bytes": "52700"
}
],
"symlink_target": ""
} |
import os, subprocess, time, signal
import gym
from gym import error, spaces
from gym import utils
from gym.utils import seeding
try:
import hfo_py
except ImportError as e:
raise error.DependencyNotInstalled("{}. (HINT: you can install HFO dependencies with 'pip install gym[soccer].)'".format(e))
import logging
logger = logging.getLogger(__name__)
class SoccerEnv(gym.Env, utils.EzPickle):
    """Gym environment wrapping the Half-Field-Offense (HFO) soccer server.

    Starts an HFO server subprocess, connects a single agent to it through
    hfo_py, and exposes parameterized low-level actions (dash/turn/kick).
    """
    metadata = {'render.modes': ['human']}
    def __init__(self):
        self.viewer = None
        self.server_process = None
        self.server_port = None
        self.hfo_path = hfo_py.get_hfo_path()
        # Launch the server before connecting the client below.
        self._configure_environment()
        self.env = hfo_py.HFOEnvironment()
        self.env.connectToServer(config_dir=hfo_py.get_config_path())
        # NOTE(review): (self.env.getStateSize()) is NOT a 1-tuple -- the
        # parentheses are redundant, so a bare int is passed as `shape`.
        # This only works with the old gym spaces.Box API; confirm before
        # upgrading gym.
        self.observation_space = spaces.Box(low=-1, high=1,
                                            shape=(self.env.getStateSize()))
        # Action space omits the Tackle/Catch actions, which are useful on defense
        # Layout: (discrete action id, dash power, dash degrees, turn degrees,
        # kick power, kick degrees) -- see _take_action for which slots apply.
        self.action_space = spaces.Tuple((spaces.Discrete(3),
                                          spaces.Box(low=0, high=100, shape=1),
                                          spaces.Box(low=-180, high=180, shape=1),
                                          spaces.Box(low=-180, high=180, shape=1),
                                          spaces.Box(low=0, high=100, shape=1),
                                          spaces.Box(low=-180, high=180, shape=1)))
        self.status = hfo_py.IN_GAME
    def __del__(self):
        # Tell the server we are leaving, then kill the server (and viewer)
        # subprocesses.  NOTE(review): if __init__ failed before
        # _start_hfo_server ran, server_process is None and this raises.
        self.env.act(hfo_py.QUIT)
        self.env.step()
        os.kill(self.server_process.pid, signal.SIGINT)
        if self.viewer is not None:
            os.kill(self.viewer.pid, signal.SIGKILL)
    def _configure_environment(self):
        """
        Provides a chance for subclasses to override this method and supply
        a different server configuration. By default, we initialize one
        offense agent against no defenders.
        """
        self._start_hfo_server()
    def _start_hfo_server(self, frames_per_trial=500,
                          untouched_time=100, offense_agents=1,
                          defense_agents=0, offense_npcs=0,
                          defense_npcs=0, sync_mode=True, port=6000,
                          offense_on_ball=0, fullstate=True, seed=-1,
                          ball_x_min=0.0, ball_x_max=0.2,
                          verbose=False, log_game=False,
                          log_dir="log"):
        """
        Starts the Half-Field-Offense server.
        frames_per_trial: Episodes end after this many steps.
        untouched_time: Episodes end if the ball is untouched for this many steps.
        offense_agents: Number of user-controlled offensive players.
        defense_agents: Number of user-controlled defenders.
        offense_npcs: Number of offensive bots.
        defense_npcs: Number of defense bots.
        sync_mode: Disabling sync mode runs server in real time (SLOW!).
        port: Port to start the server on.
        offense_on_ball: Player to give the ball to at beginning of episode.
        fullstate: Enable noise-free perception.
        seed: Seed the starting positions of the players and ball.
        ball_x_[min/max]: Initialize the ball this far downfield: [0,1]
        verbose: Verbose server messages.
        log_game: Enable game logging. Logs can be used for replay + visualization.
        log_dir: Directory to place game logs (*.rcg).
        """
        self.server_port = port
        cmd = self.hfo_path + \
              " --headless --frames-per-trial %i --untouched-time %i --offense-agents %i"\
              " --defense-agents %i --offense-npcs %i --defense-npcs %i"\
              " --port %i --offense-on-ball %i --seed %i --ball-x-min %f"\
              " --ball-x-max %f --log-dir %s"\
              % (frames_per_trial, untouched_time, offense_agents,
                 defense_agents, offense_npcs, defense_npcs, port,
                 offense_on_ball, seed, ball_x_min, ball_x_max,
                 log_dir)
        if not sync_mode: cmd += " --no-sync"
        if fullstate: cmd += " --fullstate"
        if verbose: cmd += " --verbose"
        if not log_game: cmd += " --no-logging"
        print('Starting server with command: %s' % cmd)
        self.server_process = subprocess.Popen(cmd.split(' '), shell=False)
        time.sleep(10) # Wait for server to startup before connecting a player
    def _start_viewer(self):
        """
        Starts the SoccerWindow visualizer. Note the viewer may also be
        used with a *.rcg logfile to replay a game. See details at
        https://github.com/LARG/HFO/blob/master/doc/manual.pdf.
        """
        cmd = hfo_py.get_viewer_path() +\
              " --connect --port %d" % (self.server_port)
        self.viewer = subprocess.Popen(cmd.split(' '), shell=False)
    def _step(self, action):
        # Standard gym transition: act, advance the server, compute reward.
        self._take_action(action)
        self.status = self.env.step()
        reward = self._get_reward()
        ob = self.env.getState()
        episode_over = self.status != hfo_py.IN_GAME
        return ob, reward, episode_over, {}
    def _take_action(self, action):
        """ Converts the action space into an HFO action. """
        # action[0] selects the primitive; the remaining slots carry its
        # parameters (see the Tuple layout built in __init__).
        action_type = ACTION_LOOKUP[action[0]]
        if action_type == hfo_py.DASH:
            self.env.act(action_type, action[1], action[2])
        elif action_type == hfo_py.TURN:
            self.env.act(action_type, action[3])
        elif action_type == hfo_py.KICK:
            self.env.act(action_type, action[4], action[5])
        else:
            print('Unrecognized action %d' % action_type)
            self.env.act(hfo_py.NOOP)
    def _get_reward(self):
        """ Reward is given for scoring a goal. """
        if self.status == hfo_py.GOAL:
            return 1
        else:
            return 0
    def _reset(self):
        """ Repeats NO-OP action until a new episode begins. """
        # First drain the current episode, then no-op through the
        # between-episode states until play resumes.
        while self.status == hfo_py.IN_GAME:
            self.env.act(hfo_py.NOOP)
            self.status = self.env.step()
        while self.status != hfo_py.IN_GAME:
            self.env.act(hfo_py.NOOP)
            self.status = self.env.step()
        return self.env.getState()
    def _render(self, mode='human', close=False):
        """ Viewer only supports human mode currently. """
        if close:
            if self.viewer is not None:
                os.kill(self.viewer.pid, signal.SIGKILL)
        else:
            if self.viewer is None:
                self._start_viewer()
# Maps the discrete action index (first slot of the action tuple) to the
# corresponding low-level HFO primitive; indices follow list order 0..4.
ACTION_LOOKUP = dict(enumerate([
    hfo_py.DASH,
    hfo_py.TURN,
    hfo_py.KICK,
    hfo_py.TACKLE,  # Used on defense to slide tackle the ball
    hfo_py.CATCH,   # Used only by goalie to catch the ball
]))
| {
"content_hash": "aca6fc5e918272870736288fc609b5f7",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 128,
"avg_line_length": 42.57232704402516,
"alnum_prop": 0.5664056729206678,
"repo_name": "machinaut/gym",
"id": "9c58c245a115973919e19709c3672fb816b33e94",
"size": "6769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gym/envs/soccer/soccer_env.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "461"
},
{
"name": "Python",
"bytes": "448298"
},
{
"name": "Shell",
"bytes": "711"
}
],
"symlink_target": ""
} |
import functools
import itertools
import time
import eventlet
import greenlet
from oslo.config import cfg
from ceilometer.openstack.common import excutils
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import importutils
from ceilometer.openstack.common import jsonutils
from ceilometer.openstack.common import log as logging
from ceilometer.openstack.common.rpc import amqp as rpc_amqp
from ceilometer.openstack.common.rpc import common as rpc_common
# The qpid libraries are optional at import time; each name resolves to None
# when the package is unavailable (Connection.__init__ checks qpid_messaging
# before use).
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
LOG = logging.getLogger(__name__)
# Configuration options for the qpid RPC backend; registered on the global
# cfg.CONF object below.
qpid_opts = [
    cfg.StrOpt('qpid_hostname',
               default='localhost',
               help='Qpid broker hostname'),
    cfg.IntOpt('qpid_port',
               default=5672,
               help='Qpid broker port'),
    cfg.ListOpt('qpid_hosts',
                default=['$qpid_hostname:$qpid_port'],
                help='Qpid HA cluster host:port pairs'),
    cfg.StrOpt('qpid_username',
               default='',
               help='Username for qpid connection'),
    cfg.StrOpt('qpid_password',
               default='',
               help='Password for qpid connection',
               secret=True),
    cfg.StrOpt('qpid_sasl_mechanisms',
               default='',
               help='Space separated list of SASL mechanisms to use for auth'),
    cfg.IntOpt('qpid_heartbeat',
               default=60,
               help='Seconds between connection keepalive heartbeats'),
    cfg.StrOpt('qpid_protocol',
               default='tcp',
               help="Transport to use, either 'tcp' or 'ssl'"),
    cfg.BoolOpt('qpid_tcp_nodelay',
                default=True,
                help='Disable Nagle algorithm'),
    # NOTE(russellb) If any additional versions are added (beyond 1 and 2),
    # this file could probably use some additional refactoring so that the
    # differences between each version are split into different classes.
    cfg.IntOpt('qpid_topology_version',
               default=1,
               help="The qpid topology version to use. Version 1 is what "
                    "was originally used by impl_qpid. Version 2 includes "
                    "some backwards-incompatible changes that allow broker "
                    "federation to work. Users should update to version 2 "
                    "when they are able to take everything down, as it "
                    "requires a clean break."),
]
cfg.CONF.register_opts(qpid_opts)
# Content type marking message bodies that were JSON-encoded by
# Publisher._pack_json_msg and must be decoded again on receipt.
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
def raise_invalid_topology_version(conf=None):
    """Log and raise for an unsupported qpid_topology_version value.

    Bug fix: every call site in this module invokes this helper with NO
    arguments, so the formerly required ``conf`` parameter made the error
    path itself raise TypeError instead of the intended message.  ``conf``
    now defaults to the global cfg.CONF (where qpid_opts are registered),
    while callers that do pass a config object remain supported.

    :param conf: optional oslo.config object; defaults to cfg.CONF.
    :raises Exception: always, with the offending version in the message.
    """
    if conf is None:
        conf = cfg.CONF
    msg = (_("Invalid value for qpid_topology_version: %d") %
           conf.qpid_topology_version)
    LOG.error(msg)
    raise Exception(msg)
class ConsumerBase(object):
    """Consumer base class."""
    def __init__(self, conf, session, callback, node_name, node_opts,
                 link_name, link_opts):
        """Declare a queue on an amqp session.
        'session' is the amqp session to use
        'callback' is the callback to call when messages are received
        'node_name' is the first part of the Qpid address string, before ';'
        'node_opts' will be applied to the "x-declare" section of "node"
        in the address string.
        'link_name' goes into the "name" field of the "link" in the address
        string
        'link_opts' will be applied to the "x-declare" section of "link"
        in the address string.
        """
        self.callback = callback
        self.receiver = None
        self.session = None
        # Topology v1 declares a durable topic exchange plus a queue binding;
        # v2 relies on pre-existing amq.* exchanges and only describes the link.
        if conf.qpid_topology_version == 1:
            addr_opts = {
                "create": "always",
                "node": {
                    "type": "topic",
                    "x-declare": {
                        "durable": True,
                        "auto-delete": True,
                    },
                },
                "link": {
                    "durable": True,
                    "x-declare": {
                        "durable": False,
                        "auto-delete": True,
                        "exclusive": False,
                    },
                },
            }
            addr_opts["node"]["x-declare"].update(node_opts)
        elif conf.qpid_topology_version == 2:
            addr_opts = {
                "link": {
                    "x-declare": {
                        "auto-delete": True,
                        "exclusive": False,
                    },
                },
            }
        else:
            # NOTE(review): called without the required conf argument -- this
            # raises TypeError rather than the intended message (latent bug in
            # every call site of this module).
            raise_invalid_topology_version()
        addr_opts["link"]["x-declare"].update(link_opts)
        if link_name:
            addr_opts["link"]["name"] = link_name
        # Qpid address string: "<node> ; <json options>".
        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
        self.connect(session)
    def connect(self, session):
        """Declare the receiver on connect."""
        self._declare_receiver(session)
    def reconnect(self, session):
        """Re-declare the receiver after a qpid reconnect."""
        self._declare_receiver(session)
    def _declare_receiver(self, session):
        # capacity=1 limits prefetch to a single unacknowledged message.
        self.session = session
        self.receiver = session.receiver(self.address)
        self.receiver.capacity = 1
    def _unpack_json_msg(self, msg):
        """Load the JSON data in msg if msg.content_type indicates that it
        is necessary.  Put the loaded data back into msg.content and
        update msg.content_type appropriately.
        A Qpid Message containing a dict will have a content_type of
        'amqp/map', whereas one containing a string that needs to be converted
        back from JSON will have a content_type of JSON_CONTENT_TYPE.
        :param msg: a Qpid Message object
        :returns: None
        """
        if msg.content_type == JSON_CONTENT_TYPE:
            msg.content = jsonutils.loads(msg.content)
            msg.content_type = 'amqp/map'
    def consume(self):
        """Fetch the message and pass it to the callback object."""
        message = self.receiver.fetch()
        try:
            self._unpack_json_msg(message)
            msg = rpc_common.deserialize_msg(message.content)
            self.callback(msg)
        except Exception:
            LOG.exception(_("Failed to process message... skipping it."))
        finally:
            # TODO(sandy): Need support for optional ack_on_error.
            # The message is acknowledged even when processing failed, so a
            # poison message is dropped rather than redelivered forever.
            self.session.acknowledge(message)
    def get_receiver(self):
        return self.receiver
    def get_node_name(self):
        # The node portion is everything before the ';' options separator.
        return self.address.split(';')[0]
class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'."""
    def __init__(self, conf, session, msg_id, callback):
        """Init a 'direct' queue.
        'session' is the amqp session to use
        'msg_id' is the msg_id to listen on
        'callback' is the callback to call when messages are received
        """
        version = conf.qpid_topology_version
        if version == 1:
            addressing = ("%s/%s" % (msg_id, msg_id), {"type": "direct"}, msg_id)
        elif version == 2:
            addressing = ("amq.direct/%s" % msg_id, {}, None)
        else:
            raise_invalid_topology_version()
        node_name, node_opts, link_name = addressing
        super(DirectConsumer, self).__init__(conf, session, callback,
                                             node_name, node_opts, link_name,
                                             {"exclusive": True,
                                              "durable": conf.amqp_durable_queues,
                                              "auto-delete": conf.amqp_auto_delete})
class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'."""
    def __init__(self, conf, session, topic, callback, name=None,
                 exchange_name=None):
        """Init a 'topic' queue.
        :param session: the amqp session to use
        :param topic: is the topic to listen on
        :paramtype topic: str
        :param callback: the callback to call when messages are received
        :param name: optional queue name, defaults to topic
        """
        if not exchange_name:
            exchange_name = rpc_amqp.get_control_exchange(conf)
        version = conf.qpid_topology_version
        if version == 1:
            node_name = "%s/%s" % (exchange_name, topic)
        elif version == 2:
            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            raise_invalid_topology_version()
        queue_opts = {
            "durable": conf.amqp_durable_queues,
            "auto-delete": conf.amqp_auto_delete,
        }
        super(TopicConsumer, self).__init__(conf, session, callback, node_name,
                                            {}, name or topic, queue_opts)
class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'."""
    def __init__(self, conf, session, topic, callback):
        """Init a 'fanout' queue.
        'session' is the amqp session to use
        'topic' is the topic to listen on
        'callback' is the callback to call when messages are received
        """
        self.conf = conf
        version = conf.qpid_topology_version
        if version == 1:
            addressing = ("%s_fanout" % topic,
                          {"durable": False, "type": "fanout"})
        elif version == 2:
            addressing = ("amq.topic/fanout/%s" % topic, {})
        else:
            raise_invalid_topology_version()
        node_name, node_opts = addressing
        super(FanoutConsumer, self).__init__(conf, session, callback,
                                             node_name, node_opts, None,
                                             {"exclusive": True})
class Publisher(object):
    """Base Publisher class."""
    def __init__(self, conf, session, node_name, node_opts=None):
        """Init the Publisher class with the exchange_name, routing_key,
        and other options
        """
        self.sender = None
        self.session = session
        # Topology v1 embeds exchange declaration options into the address
        # string; v2 addresses a pre-existing amq.* exchange directly.
        if conf.qpid_topology_version == 1:
            addr_opts = {
                "create": "always",
                "node": {
                    "type": "topic",
                    "x-declare": {
                        "durable": False,
                        # auto-delete isn't implemented for exchanges in qpid,
                        # but put in here anyway
                        "auto-delete": True,
                    },
                },
            }
            if node_opts:
                addr_opts["node"]["x-declare"].update(node_opts)
            self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
        elif conf.qpid_topology_version == 2:
            self.address = node_name
        else:
            # NOTE(review): called without the required conf argument -- this
            # raises TypeError rather than the intended message (latent bug in
            # every call site of this module).
            raise_invalid_topology_version()
        self.reconnect(session)
    def reconnect(self, session):
        """Re-establish the Sender after a reconnection."""
        self.sender = session.sender(self.address)
    def _pack_json_msg(self, msg):
        """Qpid cannot serialize dicts containing strings longer than 65535
        characters.  This function dumps the message content to a JSON
        string, which Qpid is able to handle.
        :param msg: May be either a Qpid Message object or a bare dict.
        :returns: A Qpid Message with its content field JSON encoded.
        """
        try:
            msg.content = jsonutils.dumps(msg.content)
        except AttributeError:
            # Need to have a Qpid message so we can set the content_type.
            msg = qpid_messaging.Message(jsonutils.dumps(msg))
        msg.content_type = JSON_CONTENT_TYPE
        return msg
    def send(self, msg):
        """Send a message."""
        try:
            # Check if Qpid can encode the message
            # (probe the codec first so oversized/unserializable payloads are
            # detected BEFORE handing the message to the sender).
            check_msg = msg
            if not hasattr(check_msg, 'content_type'):
                check_msg = qpid_messaging.Message(msg)
            content_type = check_msg.content_type
            enc, dec = qpid_messaging.message.get_codec(content_type)
            enc(check_msg.content)
        except qpid_codec.CodecException:
            # This means the message couldn't be serialized as a dict.
            # Fall back to shipping the payload as a JSON string.
            msg = self._pack_json_msg(msg)
        self.sender.send(msg)
class DirectPublisher(Publisher):
    """Publisher class for 'direct'."""
    def __init__(self, conf, session, msg_id):
        """Init a 'direct' publisher."""
        version = conf.qpid_topology_version
        if version == 1:
            addressing = (msg_id, {"type": "direct"})
        elif version == 2:
            addressing = ("amq.direct/%s" % msg_id, {})
        else:
            raise_invalid_topology_version()
        node_name, node_opts = addressing
        super(DirectPublisher, self).__init__(conf, session, node_name,
                                              node_opts)
class TopicPublisher(Publisher):
    """Publisher class for 'topic'."""
    def __init__(self, conf, session, topic):
        """init a 'topic' publisher.
        """
        exchange_name = rpc_amqp.get_control_exchange(conf)
        # Per-topology node-name templates; unknown versions raise.
        templates = {1: "%s/%s", 2: "amq.topic/topic/%s/%s"}
        version = conf.qpid_topology_version
        if version not in templates:
            raise_invalid_topology_version()
        node_name = templates[version] % (exchange_name, topic)
        super(TopicPublisher, self).__init__(conf, session, node_name)
class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'."""
    def __init__(self, conf, session, topic):
        """init a 'fanout' publisher.
        """
        version = conf.qpid_topology_version
        if version == 1:
            addressing = ("%s_fanout" % topic, {"type": "fanout"})
        elif version == 2:
            addressing = ("amq.topic/fanout/%s" % topic, {})
        else:
            raise_invalid_topology_version()
        node_name, node_opts = addressing
        super(FanoutPublisher, self).__init__(conf, session, node_name,
                                              node_opts)
class NotifyPublisher(Publisher):
    """Publisher class for notifications."""
    def __init__(self, conf, session, topic):
        """init a 'topic' publisher.
        """
        exchange_name = rpc_amqp.get_control_exchange(conf)
        # Notifications declare a durable node, unlike TopicPublisher.
        templates = {1: "%s/%s", 2: "amq.topic/topic/%s/%s"}
        version = conf.qpid_topology_version
        if version not in templates:
            raise_invalid_topology_version()
        node_name = templates[version] % (exchange_name, topic)
        super(NotifyPublisher, self).__init__(conf, session, node_name,
                                              {"durable": True})
class Connection(object):
    """Connection object.

    Owns one qpid.messaging connection/session pair, the consumers
    registered on it, and the broker-failover (reconnect) logic.
    """

    # Shared connection pool; filled in by rpc_amqp.get_connection_pool().
    pool = None

    def __init__(self, conf, server_params=None):
        if not qpid_messaging:
            raise ImportError("Failed to import qpid.messaging")

        self.session = None
        self.consumers = {}
        self.consumer_thread = None
        self.proxy_callbacks = []
        self.conf = conf

        if server_params and 'hostname' in server_params:
            # NOTE(russellb) This enables support for cast_to_server.
            server_params['qpid_hosts'] = [
                '%s:%d' % (server_params['hostname'],
                           server_params.get('port', 5672))
            ]

        params = {
            'qpid_hosts': self.conf.qpid_hosts,
            'username': self.conf.qpid_username,
            'password': self.conf.qpid_password,
        }
        # Explicit server_params override the configured defaults.
        params.update(server_params or {})

        self.brokers = params['qpid_hosts']
        self.username = params['username']
        self.password = params['password']
        # Start with the first broker; reconnect() round-robins the rest.
        self.connection_create(self.brokers[0])
        self.reconnect()

    def connection_create(self, broker):
        # Create the connection - this does not open the connection
        self.connection = qpid_messaging.Connection(broker)

        # Check if flags are set and if so set them for the connection
        # before we call open
        self.connection.username = self.username
        self.connection.password = self.password

        self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
        # Reconnection is done by self.reconnect()
        self.connection.reconnect = False
        self.connection.heartbeat = self.conf.qpid_heartbeat
        self.connection.transport = self.conf.qpid_protocol
        self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay

    def _register_consumer(self, consumer):
        # Keyed by the str() of the qpid receiver so _lookup_consumer() can
        # resolve the receiver returned by session.next_receiver().
        self.consumers[str(consumer.get_receiver())] = consumer

    def _lookup_consumer(self, receiver):
        return self.consumers[str(receiver)]

    def reconnect(self):
        """Handles reconnecting and re-establishing sessions and queues."""
        attempt = 0
        delay = 1
        while True:
            # Close the session if necessary
            if self.connection.opened():
                try:
                    self.connection.close()
                except qpid_exceptions.ConnectionError:
                    pass

            # Round-robin over the configured brokers.
            broker = self.brokers[attempt % len(self.brokers)]
            attempt += 1

            try:
                self.connection_create(broker)
                self.connection.open()
            except qpid_exceptions.ConnectionError as e:
                msg_dict = dict(e=e, delay=delay)
                msg = _("Unable to connect to AMQP server: %(e)s. "
                        "Sleeping %(delay)s seconds") % msg_dict
                LOG.error(msg)
                time.sleep(delay)
                # Linear back-off, capped at 5 seconds.
                delay = min(delay + 1, 5)
            else:
                LOG.info(_('Connected to AMQP server on %s'), broker)
                break

        self.session = self.connection.session()

        if self.consumers:
            # Re-attach every previously registered consumer to the new
            # session, rebuilding the receiver-keyed map as we go.
            consumers = self.consumers
            self.consumers = {}

            for consumer in consumers.itervalues():  # Python 2 dict API
                consumer.reconnect(self.session)
                self._register_consumer(consumer)

            LOG.debug(_("Re-established AMQP queues"))

    def ensure(self, error_callback, method, *args, **kwargs):
        # Retry `method` forever, reconnecting whenever qpid reports a
        # connection error or an empty-receiver timeout.
        while True:
            try:
                return method(*args, **kwargs)
            except (qpid_exceptions.Empty,
                    qpid_exceptions.ConnectionError) as e:
                if error_callback:
                    error_callback(e)
                self.reconnect()

    def close(self):
        """Close/release this connection."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        try:
            self.connection.close()
        except Exception:
            # NOTE(dripton) Logging exceptions that happen during cleanup just
            # causes confusion; there's really nothing useful we can do with
            # them.
            pass
        self.connection = None

    def reset(self):
        """Reset a connection so it can be used again."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.session.close()
        self.session = self.connection.session()
        self.consumers = {}

    def declare_consumer(self, consumer_cls, topic, callback):
        """Create a Consumer using the class that was passed in and
        add it to our list of consumers
        """
        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
                      "%(err_str)s") % log_info)

        def _declare_consumer():
            consumer = consumer_cls(self.conf, self.session, topic, callback)
            self._register_consumer(consumer)
            return consumer

        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers."""
        def _error_callback(exc):
            if isinstance(exc, qpid_exceptions.Empty):
                LOG.debug(_('Timed out waiting for RPC response: %s') %
                          str(exc))
                raise rpc_common.Timeout()
            else:
                LOG.exception(_('Failed to consume message from queue: %s') %
                              str(exc))

        def _consume():
            # Dispatch the next ready receiver to its registered consumer;
            # a bad message must not kill the consume loop.
            nxt_receiver = self.session.next_receiver(timeout=timeout)
            try:
                self._lookup_consumer(nxt_receiver).consume()
            except Exception:
                LOG.exception(_("Error processing message. Skipping it."))

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                raise StopIteration
            yield self.ensure(_error_callback, _consume)

    def cancel_consumer_thread(self):
        """Cancel a consumer thread."""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
                self.consumer_thread.wait()
            except greenlet.GreenletExit:
                pass
            self.consumer_thread = None

    def wait_on_proxy_callbacks(self):
        """Wait for all proxy callback threads to exit."""
        for proxy_cb in self.proxy_callbacks:
            proxy_cb.wait()

    def publisher_send(self, cls, topic, msg):
        """Send to a publisher based on the publisher class."""
        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.exception(_("Failed to publish message to topic "
                          "'%(topic)s': %(err_str)s") % log_info)

        def _publisher_send():
            publisher = cls(self.conf, self.session, topic)
            publisher.send(msg)

        return self.ensure(_connect_error, _publisher_send)

    def declare_direct_consumer(self, topic, callback):
        """Create a 'direct' queue.
        In nova's use, this is generally a msg_id queue used for
        responses for call/multicall
        """
        self.declare_consumer(DirectConsumer, topic, callback)

    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
                               exchange_name=None):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                exchange_name=exchange_name,
                                                ),
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer."""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)

    def topic_send(self, topic, msg, timeout=None):
        """Send a 'topic' message."""
        #
        # We want to create a message with attributes, e.g. a TTL. We
        # don't really need to keep 'msg' in its JSON format any longer
        # so let's create an actual qpid message here and get some
        # value-add on the go.
        #
        # WARNING: Request timeout happens to be in the same units as
        # qpid's TTL (seconds). If this changes in the future, then this
        # will need to be altered accordingly.
        #
        qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
        self.publisher_send(TopicPublisher, topic, qpid_message)

    def fanout_send(self, topic, msg):
        """Send a 'fanout' message."""
        self.publisher_send(FanoutPublisher, topic, msg)

    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic."""
        self.publisher_send(NotifyPublisher, topic, msg)

    def consume(self, limit=None):
        """Consume from all queues/consumers."""
        it = self.iterconsume(limit=limit)
        while True:
            try:
                it.next()  # Python 2 iterator API
            except StopIteration:
                return

    def consume_in_thread(self):
        """Consumer from all queues/consumers in a greenthread."""
        @excutils.forever_retry_uncaught_exceptions
        def _consumer_thread():
            try:
                self.consume()
            except greenlet.GreenletExit:
                return
        # Only one consumer greenthread per connection.
        if self.consumer_thread is None:
            self.consumer_thread = eventlet.spawn(_consumer_thread)
        return self.consumer_thread

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        if fanout:
            consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
        else:
            consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)

        self._register_consumer(consumer)

        return consumer

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
                                 name=pool_name)

        self._register_consumer(consumer)

        return consumer

    def join_consumer_pool(self, callback, pool_name, topic,
                           exchange_name=None, ack_on_error=True):
        """Register as a member of a group of consumers for a given topic from
        the specified exchange.

        Exactly one member of a given pool will receive each message.

        A message will be delivered to multiple pools, if more than
        one is created.
        """
        callback_wrapper = rpc_amqp.CallbackWrapper(
            conf=self.conf,
            callback=callback,
            connection_pool=rpc_amqp.get_connection_pool(self.conf,
                                                         Connection),
            wait_for_consumers=not ack_on_error
        )
        self.proxy_callbacks.append(callback_wrapper)

        consumer = TopicConsumer(conf=self.conf,
                                 session=self.session,
                                 topic=topic,
                                 callback=callback_wrapper,
                                 name=pool_name,
                                 exchange_name=exchange_name)

        self._register_consumer(consumer)
        return consumer
def create_connection(conf, new=True):
    """Create a connection."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.create_connection(conf, new, pool)
def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.multicall(conf, context, topic, msg, timeout, pool)
def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and wait for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.call(conf, context, topic, msg, timeout, pool)
def cast(conf, context, topic, msg):
    """Sends a message on a topic without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast(conf, context, topic, msg, pool)
def fanout_cast(conf, context, topic, msg):
    """Sends a message on a fanout exchange without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast(conf, context, topic, msg, pool)
def cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a topic to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast_to_server(conf, context, server_params, topic, msg,
                                   pool)
def fanout_cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a fanout exchange to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast_to_server(conf, context, server_params, topic,
                                          msg, pool)
def notify(conf, context, topic, msg, envelope):
    """Sends a notification event on a topic."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.notify(conf, context, topic, msg, pool, envelope)
def cleanup():
    """Drain and dispose of the shared connection pool."""
    pool = Connection.pool
    return rpc_amqp.cleanup(pool)
| {
"content_hash": "d1c285943b6b6e56adc6d3f10a92d92e",
"timestamp": "",
"source": "github",
"line_count": 805,
"max_line_length": 79,
"avg_line_length": 36.03229813664596,
"alnum_prop": 0.5628835413362753,
"repo_name": "JioCloud/ceilometer",
"id": "a0caabd3bd5efbcfff294872dad7116fa14cd245",
"size": "29709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/openstack/common/rpc/impl_qpid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6284"
},
{
"name": "JavaScript",
"bytes": "64962"
},
{
"name": "Python",
"bytes": "1805677"
},
{
"name": "Shell",
"bytes": "1322"
}
],
"symlink_target": ""
} |
import os
import re
from django.conf import settings
from django.core import mail
from django.test import TestCase
from django.contrib.auth.models import User
import pinax
from emailconfirmation.models import EmailAddress, EmailConfirmation
class PasswordResetTest(TestCase):
    """Password-reset flow tests.

    Based on the django.contrib.auth password-reset tests, adapted for
    accounts where only *verified* e-mail addresses may request a reset.
    """

    # tests based on django.contrib.auth tests
    urls = "account.tests.account_urls"

    def setUp(self):
        # BUG FIX: save a *copy* of the list. remove() below mutates the
        # live settings list in place; saving only a reference meant
        # tearDown restored the already-mutated list (a no-op).
        self.old_installed_apps = list(settings.INSTALLED_APPS)
        # remove django-mailer to properly test for outbound e-mail
        if "mailer" in settings.INSTALLED_APPS:
            settings.INSTALLED_APPS.remove("mailer")

    def tearDown(self):
        settings.INSTALLED_APPS = self.old_installed_apps

    def _create_bob(self, verified):
        """Create user 'bob' with a primary address, verified or not."""
        bob = User.objects.create_user("bob", "bob@example.com", "abc123")
        EmailAddress.objects.create(
            user=bob,
            email="bob@example.com",
            verified=verified,
            primary=True,
        )
        return bob

    def context_lookup(self, response, key):
        """Return the first value bound to `key` in any response subcontext.

        Raises KeyError when no subcontext defines it. Used for debugging.
        """
        for subcontext in response.context:
            if key in subcontext:
                return subcontext[key]
        raise KeyError

    def test_password_reset_view(self):
        """
        Test GET on /password_reset/
        """
        response = self.client.get("/account/password_reset/")
        self.assertEquals(response.status_code, 200)

    def test_email_not_found(self):
        """
        Error is raised if the provided e-mail address isn't verified to an
        existing user account
        """
        data = {
            "email": "nothing@example.com",
        }
        response = self.client.post("/account/password_reset/", data)
        self.assertEquals(response.status_code, 200)
        # @@@ instead of hard-coding this error message rely on a error key
        # defined in the form where the site developer would override this
        # error message.
        self.assertContains(response, "Email address not verified for any user account")
        self.assertEquals(len(mail.outbox), 0)

    def test_email_not_verified(self):
        """
        Error is raised if the provided e-mail address isn't verified to an
        existing user account
        """
        self._create_bob(verified=False)
        data = {
            "email": "bob@example.com",
        }
        response = self.client.post("/account/password_reset/", data)
        self.assertEquals(response.status_code, 200)
        # @@@ instead of hard-coding this error message rely on a error key
        # defined in the form where the site developer would override this
        # error message.
        self.assertContains(response, "Email address not verified for any user account")
        self.assertEquals(len(mail.outbox), 0)

    def test_email_found(self):
        """
        E-mail is sent if a valid e-mail address is provided for password reset
        """
        self._create_bob(verified=True)
        data = {
            "email": "bob@example.com",
        }
        response = self.client.post("/account/password_reset/", data)
        self.assertEquals(response.status_code, 302)
        self.assertEquals(len(mail.outbox), 1)

    def _read_reset_email(self, email):
        """Return (full_url, path) of the reset link found in `email`."""
        match = re.search(r"https?://[^/]*(/.*reset_key/\S*)", email.body)
        self.assert_(match is not None, "No URL found in sent e-mail")
        return match.group(), match.groups()[0]

    def _test_confirm_start(self):
        """Request a reset for a verified address; return its (url, path)."""
        self._create_bob(verified=True)
        data = {
            "email": "bob@example.com",
        }
        response = self.client.post("/account/password_reset/", data)
        self.assertEquals(response.status_code, 302)
        self.assertEquals(len(mail.outbox), 1)
        return self._read_reset_email(mail.outbox[0])

    def test_confirm_invalid(self):
        url, path = self._test_confirm_start()
        # munge the token in the path, but keep the same length, in case the
        # URLconf will reject a different length.
        path = path[:-5] + ("0"*4) + path[-1]
        response = self.client.get(path)
        self.assertEquals(response.status_code, 200)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_valid(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        self.assertEquals(response.status_code, 200)
        self.assertContains(response, "New Password (again)")

    def test_confirm_invalid_post(self):
        url, path = self._test_confirm_start()
        # munge the token in the path, but keep the same length, in case the
        # URLconf will reject a different length.
        path = path[:-5] + ("0"*4) + path[-1]
        data = {
            "password1": "newpassword",
            "password2": "newpassword",
        }
        self.client.post(path, data)
        # The invalid link must not have changed the password.
        user = User.objects.get(email="bob@example.com")
        self.assert_(not user.check_password("newpassword"))

    def test_confirm_complete(self):
        url, path = self._test_confirm_start()
        data = {
            "password1": "newpassword",
            "password2": "newpassword",
        }
        response = self.client.post(path, data)
        self.assertEquals(response.status_code, 200)
        # check the password has been changed
        user = User.objects.get(email="bob@example.com")
        self.assert_(user.check_password("newpassword"))
        # check we can't GET with same path
        response = self.client.get(path)
        self.assertEquals(response.status_code, 200)
        self.assertContains(response, "The password reset link was invalid")
        # check we can't POST with same path
        data = {
            "password1": "anothernewpassword",
            "password2": "anothernewpassword",
        }
        # BUG FIX: the original posted without `data`, leaving the dict
        # above unused and never actually exercising a repeat submission.
        response = self.client.post(path, data)
        self.assertEquals(response.status_code, 200)
        user = User.objects.get(email="bob@example.com")
        self.assert_(not user.check_password("anothernewpassword"))

    def test_confirm_different_passwords(self):
        url, path = self._test_confirm_start()
        data = {
            "password1": "newpassword",
            "password2": "anothernewpassword",
        }
        response = self.client.post(path, data)
        self.assertEquals(response.status_code, 200)
        self.assertContains(response, "You must type the same password each time.")
| {
"content_hash": "ea5b213cb8623de882517c490ea7214d",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 88,
"avg_line_length": 36.984455958549226,
"alnum_prop": 0.5776127766881479,
"repo_name": "caseywstark/colab",
"id": "2cf10ebe293fb32d671a73cdcc8d8b198c82fdf8",
"size": "7138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "colab/apps/account/tests/test_password_reset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "79619"
},
{
"name": "Python",
"bytes": "483018"
}
],
"symlink_target": ""
} |
"""Helper library for visualizations.
TODO(googleuser): Find a more reliable way to serve stuff from IPython
notebooks (e.g. determining where the root notebook directory is).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import uuid
from google.protobuf import json_format
from dragnn.protos import trace_pb2
# Make a guess about where the IPython kernel root is.
_IPYTHON_KERNEL_PATH = os.path.realpath(os.getcwd())
# Bazel uses the 'data' attribute for this library to ensure viz.min.js.gz is
# packaged.
module_path = os.path.dirname(os.path.abspath(__file__))
viz_script = os.path.join(os.path.dirname(module_path), 'viz', 'viz.min.js.gz')
def _load_viz_script():
  """Reads the bundled visualization script.

  Raises:
    EnvironmentError: If the visualization script could not be found.

  Returns:
    str JavaScript source code.
  """
  if os.path.isfile(viz_script):
    script_file = gzip.GzipFile(viz_script)
    try:
      return script_file.read()
    finally:
      script_file.close()
  raise EnvironmentError(
      'Visualization script should be built into {}'.format(viz_script))
def parse_trace_json(trace):
  """Converts a binary-encoded MasterTrace proto to a JSON parser trace.

  Args:
    trace: Binary string containing a MasterTrace.

  Returns:
    JSON str, as expected by visualization tools.
  """
  parsed = trace_pb2.MasterTrace.FromString(trace)
  return json_format.MessageToJson(parsed, preserving_proto_field_name=True)
def _optional_master_spec_json(master_spec):
  """Helper function to return 'null' or a master spec JSON string."""
  if master_spec is None:
    return 'null'
  return json_format.MessageToJson(
      master_spec, preserving_proto_field_name=True)
def _container_div(height='700px', contents=''):
elt_id = str(uuid.uuid4())
html = """
<div id="{elt_id}" style="width: 100%; min-width: 200px; height: {height};">
{contents}</div>
""".format(
elt_id=elt_id, height=height, contents=contents)
return elt_id, html
def trace_html(trace,
               convert_to_unicode=True,
               height='700px',
               script=None,
               master_spec=None):
  """Generates HTML that will render a master trace.

  This will result in a self-contained "div" element.

  Args:
    trace: binary-encoded MasterTrace string.
    convert_to_unicode: Whether to convert the output to unicode. Defaults to
      True because IPython.display.HTML expects unicode, and we expect users to
      often pass the output of this function to IPython.display.HTML.
    height: CSS string representing the height of the element, default '700px'.
    script: Visualization script contents, if the defaults are unacceptable.
    master_spec: Master spec proto (parsed), which can improve the layout. May
      be required in future versions.

  Returns:
    unicode or str with HTML contents.
  """
  viz_js = _load_viz_script() if script is None else script
  elt_id, div_html = _container_div(height=height)
  as_str = """
  <meta charset="utf-8"/>
  {div_html}
  <script type='text/javascript'>
  {script}
  visualizeToDiv({json}, "{elt_id}", {master_spec_json});
  </script>
  """.format(
      div_html=div_html,
      script=viz_js,
      json=parse_trace_json(trace),
      elt_id=elt_id,
      master_spec_json=_optional_master_spec_json(master_spec))
  if convert_to_unicode:
    return unicode(as_str, 'utf-8')
  return as_str
def open_in_new_window(html, notebook_html_fcn=None, temp_file_basename=None):
  """Opens an HTML visualization in a new window.

  This function assumes that the module was loaded when the current working
  directory is the IPython/Jupyter notebook root directory. Then it writes a
  file ./tmp/_new_window_html/<random-uuid>.html, and returns an HTML display
  element, which will call `window.open("/files/<filename>")`. This works
  because IPython serves files from the /files root.

  Args:
    html: HTML to write to a file.
    notebook_html_fcn: Function to generate an HTML element; defaults to
      IPython.display.HTML (lazily imported).
    temp_file_basename: File name to write (defaults to <random-uuid>.html).

  Returns:
    HTML notebook element, which will trigger the browser to open a new window.
  """
  if isinstance(html, unicode):
    html = html.encode('utf-8')
  if notebook_html_fcn is None:
    from IPython import display
    notebook_html_fcn = display.HTML
  if temp_file_basename is None:
    temp_file_basename = '{}.html'.format(str(uuid.uuid4()))
  rel_path = os.path.join('tmp', '_new_window_html', temp_file_basename)
  abs_path = os.path.join(_IPYTHON_KERNEL_PATH, rel_path)

  # Write the file, creating the directory if it doesn't exist.
  target_dir = os.path.dirname(abs_path)
  if not os.path.isdir(target_dir):
    os.makedirs(target_dir)
  with open(abs_path, 'w') as f:
    f.write(html)

  return notebook_html_fcn("""
  <script type='text/javascript'>
  window.open("/files/{}");
  </script>
  """.format(rel_path))
class InteractiveVisualization(object):
  """Helper class for displaying visualizations interactively.

  See usage in examples/dragnn/interactive_text_analyzer.ipynb.
  """

  def initial_html(self, height='700px', script=None, init_message=None):
    """Returns HTML for a container, which will be populated later.

    Args:
      height: CSS string representing the height of the element, default
        '700px'.
      script: Visualization script contents, if the defaults are unacceptable.
      init_message: Initial message to display.

    Returns:
      unicode with HTML contents.
    """
    if script is None:
      script = _load_viz_script()
    if init_message is None:
      init_message = 'Type a sentence and press (enter) to see the trace.'
    # Remember the container id so show_trace() can target the same div later.
    self.elt_id, div_html = _container_div(
        height=height, contents='<strong>{}</strong>'.format(init_message))
    html = """
  <meta charset="utf-8"/>
  {div_html}
  <script type='text/javascript'>
  {script}
  </script>
  """.format(
      script=script, div_html=div_html)
    return unicode(html, 'utf-8')  # IPython expects unicode.

  def show_trace(self, trace, master_spec=None):
    """Returns a JS script HTML fragment, which will populate the container.

    Args:
      trace: binary-encoded MasterTrace string.
      master_spec: Master spec proto (parsed), which can improve the layout. May
        be required in future versions.

    Returns:
      unicode with HTML contents.
    """
    html = """
  <meta charset="utf-8"/>
  <script type='text/javascript'>
  document.getElementById("{elt_id}").innerHTML = ""; // Clear previous.
  visualizeToDiv({json}, "{elt_id}", {master_spec_json});
  </script>
  """.format(
      json=parse_trace_json(trace),
      master_spec_json=_optional_master_spec_json(master_spec),
      elt_id=self.elt_id)
    return unicode(html, 'utf-8')  # IPython expects unicode.
| {
"content_hash": "cc4929ea719e4151be670bd15b93369c",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 80,
"avg_line_length": 32.21658986175115,
"alnum_prop": 0.683021027034759,
"repo_name": "hang-qi/models",
"id": "51be1b5725c336c69200b747b9c964a35c9b271d",
"size": "6991",
"binary": false,
"copies": "2",
"ref": "refs/heads/hemingway",
"path": "syntaxnet/dragnn/python/visualization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "662"
},
{
"name": "C++",
"bytes": "1167920"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "61098"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Protocol Buffer",
"bytes": "24249"
},
{
"name": "Python",
"bytes": "2633576"
},
{
"name": "Shell",
"bytes": "45143"
}
],
"symlink_target": ""
} |
"""Inspectors allow you to visually browse through the members of
various python objects. To open an inspector, import this module, and
execute inspector.inspect(anObject) I start IDLE with this command
line: idle.py -c "from inspector import inspect"
so that I can just type: inspect(anObject) any time."""
__all__ = ['inspect', 'inspectorFor', 'Inspector', 'ModuleInspector', 'ClassInspector', 'InstanceInspector', 'FunctionInspector', 'InstanceMethodInspector', 'CodeInspector', 'ComplexInspector', 'DictionaryInspector', 'SequenceInspector', 'SliceInspector', 'InspectorWindow']
from direct.showbase.TkGlobal import *
from Tkinter import *
import Pmw
### public API
def inspect(anObject):
    """Open an InspectorWindow browsing anObject and return the window."""
    window = InspectorWindow(inspectorFor(anObject))
    window.open()
    return window
### private
def inspectorFor(anObject):
    """Instantiate the Inspector subclass registered for anObject's type."""
    typeName = type(anObject).__name__.capitalize() + 'Type'
    inspectorName = _InspectorMap.get(typeName)
    if inspectorName is None:
        # Unknown type: fall back to the generic inspector.
        print("Can't find an inspector for " + typeName)
        inspectorName = 'Inspector'
    return globals()[inspectorName](anObject)
### initializing
def initializeInspectorMap():
    """Build the global type-name -> inspector-class-name dispatch table."""
    global _InspectorMap

    _InspectorMap = {
        'Builtin_function_or_methodType': 'FunctionInspector',
        'BuiltinFunctionType': 'FunctionInspector',
        'BuiltinMethodType': 'FunctionInspector',
        'ClassType': 'ClassInspector',
        'CodeType': 'CodeInspector',
        'ComplexType': 'Inspector',
        'DictionaryType': 'DictionaryInspector',
        'DictType': 'DictionaryInspector',
        'FileType': 'Inspector',
        'FloatType': 'Inspector',
        'FunctionType': 'FunctionInspector',
        'Instance methodType': 'InstanceMethodInspector',
        'InstanceType': 'InstanceInspector',
        'IntType': 'Inspector',
        'LambdaType': 'Inspector',
        'ListType': 'SequenceInspector',
        'LongType': 'Inspector',
        'MethodType': 'FunctionInspector',
        'ModuleType': 'ModuleInspector',
        'NoneType': 'Inspector',
        'SliceType': 'SliceInspector',
        'StringType': 'SequenceInspector',
        'TupleType': 'SequenceInspector',
        'TypeType': 'Inspector',
        'UnboundMethodType': 'FunctionInspector'}

    # Types without a specialized inspector fall back to the generic one.
    for typeName in ['BufferType', 'EllipsisType', 'FrameType',
                     'TracebackType', 'XRangeType']:
        _InspectorMap[typeName] = 'Inspector'
### Classes
class Inspector:
    """Generic inspector: exposes an object's attributes as numbered 'parts'.

    Part 0 is always the inspected object itself (shown as 'up'); parts
    1..N correspond to the sorted names returned by namedParts().
    """

    def __init__(self, anObject):
        self.object = anObject
        # Index of the most recently selected part (0 == the object itself).
        self.lastPartNumber = 0
        self.initializePartsList()
        self.initializePartNames()

    def __str__(self):
        return __name__ + '(' + str(self.object) + ')'

    def initializePartsList(self):
        # Sorted attribute names form the base parts list; subclasses extend
        # this with container keys/indices where appropriate.
        self._partsList = []
        keys = self.namedParts()
        keys.sort()
        for each in keys:
            self._partsList.append(each)
        #if not callable(getattr(self.object, each)):
        #    self._partsList.append(each)

    def initializePartNames(self):
        # Display names: part 0 is rendered as 'up' (the object itself).
        self._partNames = ['up'] + [str(each) for each in self._partsList]

    def title(self):
        "Subclasses may override."
        return self.objectType().__name__.capitalize()

    def getLastPartNumber(self):
        return self.lastPartNumber

    def selectedPart(self):
        return self.partNumber(self.getLastPartNumber())

    def namedParts(self):
        return dir(self.object)

    def stringForPartNumber(self, partNumber):
        # For callables, append the docstring (when available) to the repr.
        object = self.partNumber(partNumber)
        doc = None
        if callable(object):
            try:
                doc = object.__doc__
            except:
                pass
        if doc:
            return (str(object) + '\n' + str(doc))
        else:
            return str(object)

    def partNumber(self, partNumber):
        # Remembers the selection so selectedPart() can return it later.
        self.lastPartNumber = partNumber
        if partNumber == 0:
            return self.object
        else:
            part = self.privatePartNumber(partNumber)
            return getattr(self.object, part)

    def inspectorFor(self, part):
        return inspectorFor(part)

    def privatePartNumber(self, partNumber):
        # The parts list is 0-based while part numbers are 1-based (0 == self).
        return self._partsList[partNumber - 1]

    def partNames(self):
        return self._partNames

    def objectType(self):
        return type(self.object)
###
class ModuleInspector(Inspector):
    """Inspector for modules: only the module dict is worth browsing."""

    def namedParts(self):
        parts = ['__dict__']
        return parts
class ClassInspector(Inspector):
    """Inspector for class objects: bases plus the class dict's names."""

    def title(self):
        name = self.object.__name__
        return name + ' Class'

    def namedParts(self):
        return ['__bases__'] + self.object.__dict__.keys()
class InstanceInspector(Inspector):
    """Inspector for class instances: class link plus all attributes."""

    def title(self):
        cls = self.object.__class__
        return cls.__name__

    def namedParts(self):
        return ['__class__'] + dir(self.object)
###
class FunctionInspector(Inspector):
    """Inspector for functions and methods."""

    def title(self):
        name = self.object.__name__
        return name + "()"
class InstanceMethodInspector(Inspector):
    """Inspector for bound/unbound instance methods."""

    def title(self):
        owner = str(self.object.im_class)
        return owner + "." + self.object.__name__ + "()"
class CodeInspector(Inspector):
    """Inspector for code objects."""

    def title(self):
        return str(self.object)
###
class ComplexInspector(Inspector):
    """Inspector for complex numbers: just the two components."""

    def namedParts(self):
        parts = ['real', 'imag']
        return parts
###
class DictionaryInspector(Inspector):
    """Inspector for dicts: attributes first, then the sorted keys."""

    def initializePartsList(self):
        Inspector.initializePartsList(self)
        self._partsList.extend(sorted(self.object.keys()))

    def partNumber(self, partNumber):
        self.lastPartNumber = partNumber
        if partNumber == 0:
            return self.object
        key = self.privatePartNumber(partNumber)
        try:
            # Dictionary entries take precedence over attributes.
            return self.object[key]
        except KeyError:
            return getattr(self.object, key)
class SequenceInspector(Inspector):
    """Inspector for sequences (lists, tuples, strings).

    Extends the attribute parts list with one entry per index so the
    elements themselves can be browsed.
    """

    def initializePartsList(self):
        Inspector.initializePartsList(self)
        for each in range(len(self.object)):
            self._partsList.append(each)

    def partNumber(self, partNumber):
        """Return part 0 (the sequence), an element, or an attribute."""
        self.lastPartNumber = partNumber
        if partNumber == 0:
            return self.object
        index = self.privatePartNumber(partNumber)
        # FIX: use isinstance() instead of `type(index) == IntType` -- the
        # types module was never imported here, so IntType was an undefined
        # name on this path, and type equality rejects int subclasses.
        if isinstance(index, int):
            return self.object[index]
        else:
            return getattr(self.object, index)
class SliceInspector(Inspector):
    """Inspector for slice objects: the three defining fields."""

    def namedParts(self):
        parts = ['start', 'stop', 'step']
        return parts
### Initialization
# Populate the type -> inspector dispatch table once, at import time.
initializeInspectorMap()
class InspectorWindow:
    def __init__(self, inspector):
        # Stack of inspectors; the last entry is the level currently shown.
        self.inspectors = [inspector]
def topInspector(self):
return self.inspectors[len(self.inspectors) - 1]
    def selectedPart(self):
        # The object currently highlighted in the list box.
        return self.topInspector().selectedPart()
    def inspectedObject(self):
        # The object browsed at the current dive level.
        return self.topInspector().object
    def open(self):
        """Create the toplevel window, build the views, and show level 0."""
        self.top = Toplevel()
        self.top.geometry('650x315')
        self.createViews()
        self.update()
    #Private - view construction
    def createViews(self):
        """Build the menu bar and the paned list/text layout."""
        self.createMenus()
        # Paned widget for dividing two halves
        self.framePane = Pmw.PanedWidget(self.top, orient = HORIZONTAL)
        self.createListWidget()
        self.createTextWidgets()
        self.framePane.pack(expand = 1, fill = BOTH)
    def setTitle(self):
        # Window title reflects the inspector currently on top of the stack.
        self.top.title('Inspecting: ' + self.topInspector().title())
    def createListWidget(self):
        """Build the scrolled part-name list and wire mouse/key bindings."""
        listFrame = self.framePane.add('list')
        listWidget = self.listWidget = Pmw.ScrolledListBox(
            listFrame, vscrollmode = 'static')
        listWidget.pack(side=LEFT, fill=BOTH, expand=1)

        # If you click in the list box, take focus so you can navigate
        # with the cursor keys
        listbox = listWidget.component('listbox')
        listbox.bind('<ButtonPress-1>',
                     lambda e, l = listbox: l.focus_set())
        listbox.bind('<ButtonRelease-1>', self.listSelectionChanged)
        listbox.bind('<Double-Button-1>', self.popOrDive)
        listbox.bind('<ButtonPress-3>', self.popupMenu)
        listbox.bind('<KeyRelease-Up>', self.listSelectionChanged)
        listbox.bind('<KeyRelease-Down>', self.listSelectionChanged)
        # Left pops back up a level, right dives into the selection.
        listbox.bind('<KeyRelease-Left>', lambda e, s = self: s.pop())
        listbox.bind('<KeyRelease-Right>', lambda e, s = self: s.dive())
        listbox.bind('<Return>', self.popOrDive)
    def createTextWidgets(self):
        """Build the value display (read-only) and the command entry panes."""
        textWidgetsFrame = self.framePane.add('textWidgets')
        self.textPane = Pmw.PanedWidget(textWidgetsFrame, orient = VERTICAL)
        textFrame = self.textPane.add('text', size = 200)
        # Read-only pane showing the selected part's string form.
        self.textWidget = Pmw.ScrolledText(
            textFrame, vscrollmode = 'static', text_state = 'disabled')
        self.textWidget.pack(fill=BOTH, expand=1)
        commandFrame = self.textPane.add('command')
        # Interactive command pane with a '>>> ' prompt.
        self.commandWidget = Pmw.ScrolledText(
            commandFrame, vscrollmode = 'static')
        self.commandWidget.insert(1.0, '>>> ')
        self.commandWidget.pack(fill = BOTH, expand = 1)
        self.commandWidget.component('text').bind(
            '<KeyRelease-Return>', self.evalCommand)
        self.textPane.pack(expand = 1, fill = BOTH)
    def createMenus(self):
        """Build the Inspect and Help menus."""
        self.menuBar = Menu(self.top)
        self.top.config(menu=self.menuBar)
        inspectMenu = Menu(self.menuBar)
        self.menuBar.add_cascade(label='Inspect', menu=inspectMenu)
        inspectMenu.add_command(label='Pop', command=self.pop)
        inspectMenu.add_command(label='Dive', command=self.dive)
        inspectMenu.add_command(label='Inspect', command=self.inspect)
        helpMenu = Menu(self.menuBar)
        self.menuBar.add_cascade(label='Help', menu=helpMenu)
        helpMenu.add_command(label='Instructions', command=self.showHelp)
def fillList(self):
self.listWidget.delete(0, END)
for each in self.topInspector().partNames():
self.listWidget.insert(END, each)
self.listWidget.select_clear(0)
# Event Handling
def listSelectionChanged(self, event):
partNumber = self.selectedIndex()
if partNumber == None:
partNumber = 0
string = self.topInspector().stringForPartNumber(partNumber)
self.textWidget.component('text').configure(state = 'normal')
self.textWidget.delete('1.0', END)
self.textWidget.insert(END, string)
self.textWidget.component('text').configure(state = 'disabled')
def popOrDive(self, event):
"""The list has been double-clicked. If the selection is 'self' then pop,
otherwise dive into the selected part"""
if self.selectedIndex() == 0:
self.pop()
else:
self.dive()
def evalCommand(self, event):
"""Eval text in commandWidget"""
insertPt = self.commandWidget.index(INSERT)
commandLineStart = self.commandWidget.search(
'>>> ', INSERT, backwards = 1)
if commandLineStart:
commandStart = self.commandWidget.index(
commandLineStart + ' + 4 chars')
command = self.commandWidget.get(commandStart,
commandStart + ' lineend')
if command:
partDict = { 'this': self.selectedPart(),
'object': self.topInspector().object }
result = eval(command, partDict)
self.commandWidget.insert(INSERT, repr(result) + '\n>>> ')
self.commandWidget.see(INSERT)
# Menu Events
def inspect(self):
inspector = self.inspectorForSelectedPart()
if inspector == None:
return
InspectorWindow(inspector).open()
def pop(self):
if len(self.inspectors) > 1:
self.inspectors = self.inspectors[:-1]
self.update()
def dive(self):
inspector = self.inspectorForSelectedPart()
if inspector == None:
return
self.inspectors.append(inspector)
self.update()
    def update(self):
        """Refresh the whole window after the inspector stack changed:
        retitle, refill the list, restore the remembered selection, refresh
        the value pane and put keyboard focus back in the listbox."""
        self.setTitle()
        self.fillList()
        # What is active part in this inspector
        partNumber = self.topInspector().getLastPartNumber()
        self.listWidget.select_clear(0)
        self.listWidget.activate(partNumber)
        self.listWidget.select_set(partNumber)
        # None: the handler ignores its event argument.
        self.listSelectionChanged(None)
        # Make sure selected item is visible
        self.listWidget.see(partNumber)
        # Make sure left side of listbox visible
        self.listWidget.xview_moveto(0.0)
        # Grab focus in listbox
        self.listWidget.component('listbox').focus_set()
def showHelp(self):
help = Toplevel(tkroot)
help.title("Inspector Help")
frame = Frame(help)
frame.pack()
text = Label(
frame, justify = LEFT,
text = "ListBox shows selected object's attributes\nDouble click or use right arrow on an instance variable to dive down.\nDouble click self or use left arrow to pop back up.\nUse up and down arrow keys to move from item to item in the current level.\n\nValue box (upper right) shows current value of selected item\n\nCommand box (lower right) is used to evaluate python commands\nLocal variables 'object' and 'this' are defined as the current object being inspected\nand the current attribute selected."
)
text.pack()
    # Private helpers
def selectedIndex(self):
indicies = map(int, self.listWidget.curselection())
if len(indicies) == 0:
return None
partNumber = indicies[0]
return partNumber
def inspectorForSelectedPart(self):
partNumber = self.selectedIndex()
if partNumber == None:
return None
part = self.topInspector().partNumber(partNumber)
return self.topInspector().inspectorFor(part)
def popupMenu(self, event):
print(event)
partNumber = self.selectedIndex()
print(partNumber)
if partNumber == None:
return
part = self.topInspector().partNumber(partNumber)
print(part)
from panda3d.core import NodePath
from direct.fsm import ClassicFSM
popupMenu = None
if isinstance(part, NodePath):
popupMenu = self.createPopupMenu(
part,
[('Explore', NodePath.explore),
('Place', NodePath.place),
('Set Color', NodePath.rgbPanel)])
elif isinstance(part, ClassicFSM.ClassicFSM):
import FSMInspector
popupMenu = self.createPopupMenu(
part,
[('Inspect ClassicFSM', FSMInspector.FSMInspector)])
print(popupMenu)
if popupMenu:
popupMenu.post(event.widget.winfo_pointerx(),
event.widget.winfo_pointery())
def createPopupMenu(self, part, menuList):
popupMenu = Menu(self.top, tearoff = 0)
for item, func in menuList:
popupMenu.add_command(
label = item,
command = lambda p = part, f = func: f(p))
return popupMenu
| {
"content_hash": "d6fc17845d52ee2231631db13519c127",
"timestamp": "",
"source": "github",
"line_count": 444,
"max_line_length": 516,
"avg_line_length": 34.39414414414414,
"alnum_prop": 0.6165280597210399,
"repo_name": "hj3938/panda3d",
"id": "2ffa1a5249750ad484f30b594f3eb92ed189d34a",
"size": "15271",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "direct/src/tkpanels/Inspector.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4044"
},
{
"name": "C",
"bytes": "6517520"
},
{
"name": "C++",
"bytes": "31403651"
},
{
"name": "Emacs Lisp",
"bytes": "166274"
},
{
"name": "Groff",
"bytes": "8017"
},
{
"name": "HTML",
"bytes": "8081"
},
{
"name": "Java",
"bytes": "3777"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Logos",
"bytes": "5504"
},
{
"name": "NSIS",
"bytes": "91955"
},
{
"name": "Nemerle",
"bytes": "1461"
},
{
"name": "Objective-C",
"bytes": "14559"
},
{
"name": "Objective-C++",
"bytes": "298229"
},
{
"name": "Pascal",
"bytes": "467818"
},
{
"name": "Perl",
"bytes": "206982"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "337716"
},
{
"name": "Python",
"bytes": "5837581"
},
{
"name": "Rebol",
"bytes": "421"
},
{
"name": "Shell",
"bytes": "55940"
},
{
"name": "Visual Basic",
"bytes": "136"
}
],
"symlink_target": ""
} |
"""Certbot main entry point."""
# pylint: disable=too-many-lines
from contextlib import contextmanager
import functools
import logging.handlers
import sys
from typing import cast
from typing import Generator
from typing import IO
from typing import Iterable
from typing import List
from typing import Optional
from typing import Tuple
from typing import TypeVar
from typing import Union
import configobj
import josepy as jose
import zope.component
import zope.interface
from acme import client as acme_client
from acme import errors as acme_errors
from acme import messages as acme_messages
import certbot
from certbot import configuration
from certbot import crypto_util
from certbot import errors
from certbot import interfaces
from certbot import util
from certbot._internal import account
from certbot._internal import cert_manager
from certbot._internal import cli
from certbot._internal import client
from certbot._internal import constants
from certbot._internal import eff
from certbot._internal import hooks
from certbot._internal import log
from certbot._internal import renewal
from certbot._internal import reporter
from certbot._internal import snap_config
from certbot._internal import storage
from certbot._internal import updater
from certbot._internal.display import obj as display_obj
from certbot._internal.display import util as internal_display_util
from certbot._internal.plugins import disco as plugins_disco
from certbot._internal.plugins import selection as plug_sel
from certbot.compat import filesystem
from certbot.compat import misc
from certbot.compat import os
from certbot.display import ops as display_ops
from certbot.display import util as display_util
from certbot.plugins import enhancements
USER_CANCELLED = ("User chose to cancel the operation and may "
"reinvoke the client.")
logger = logging.getLogger(__name__)
def _suggest_donation_if_appropriate(config: configuration.NamespaceConfig) -> None:
    """Potentially suggest a donation to support Certbot.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :returns: `None`
    :rtype: None

    """
    # This helper is never used on the renew verb.
    assert config.verb != "renew"
    # Skip the suggestion when using the staging server (--staging/--dry-run)
    # or when running with --quiet (the display fd won't be available during
    # atexit calls, see #8995).
    if config.staging or config.quiet:
        return
    donation_msg = (
        "If you like Certbot, please consider supporting our work by:\n"
        " * Donating to ISRG / Let's Encrypt: https://letsencrypt.org/donate\n"
        " * Donating to EFF: https://eff.org/donate-le"
    )
    # Deferred to atexit so the suggestion appears after all other output.
    util.atexit_register(display_util.notification, donation_msg, pause=False)
def _get_and_save_cert(le_client: client.Client, config: configuration.NamespaceConfig,
                       domains: Optional[List[str]] = None, certname: Optional[str] = None,
                       lineage: Optional[storage.RenewableCert] = None
                       ) -> Optional[storage.RenewableCert]:
    """Authenticate and enroll certificate.

    This method finds the relevant lineage, figures out what to do with it,
    then performs that action. Includes calls to hooks, various reports,
    checks, and requests for user input.

    :param le_client: Client object for certificate operations
    :type le_client: client.Client

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param domains: List of domain names to get a certificate. Defaults to `None`
    :type domains: `list` of `str`

    :param certname: Name of new certificate. Defaults to `None`
    :type certname: str

    :param lineage: Certificate lineage object. Defaults to `None`
    :type lineage: storage.RenewableCert

    :returns: the issued certificate or `None` if doing a dry run
    :rtype: storage.RenewableCert or None

    :raises errors.Error: if certificate could not be obtained

    """
    hooks.pre_hook(config)
    try:
        if lineage is not None:
            # Renewal, where we already know the specific lineage we're
            # interested in
            display_util.notify(
                "{action} for {domains}".format(
                    action="Simulating renewal of an existing certificate"
                           if config.dry_run else "Renewing an existing certificate",
                    domains=internal_display_util.summarize_domain_list(domains or lineage.names())
                )
            )
            renewal.renew_cert(config, domains, le_client, lineage)
        else:
            # TREAT AS NEW REQUEST
            if domains is None:
                raise errors.Error("Domain list cannot be none if the lineage is not set.")
            display_util.notify(
                "{action} for {domains}".format(
                    action="Simulating a certificate request" if config.dry_run else
                           "Requesting a certificate",
                    domains=internal_display_util.summarize_domain_list(domains)
                )
            )
            lineage = le_client.obtain_and_enroll_certificate(domains, certname)
            # obtain_and_enroll_certificate can signal failure with False,
            # so this must be an identity check, not just truthiness.
            if lineage is False:
                raise errors.Error("Certificate could not be obtained")
            if lineage is not None:
                hooks.deploy_hook(config, lineage.names(), lineage.live_dir)
    finally:
        # The post hook must run whether or not issuance succeeded.
        hooks.post_hook(config)

    return lineage
def _handle_unexpected_key_type_migration(config: configuration.NamespaceConfig,
                                          cert: storage.RenewableCert) -> None:
    """Prevent a silent key-type change for an existing lineage.

    When the configured key type differs from the lineage's current key type,
    the change must be confirmed either explicitly (both --key-type and
    --cert-name on the CLI) or interactively; otherwise the existing key
    type is kept, or an error is raised when the intent is ambiguous.

    :param config: Current configuration provided by the client
    :param cert: Matching certificate that could be renewed
    """
    desired_key_type = config.key_type.upper()
    current_key_type = cert.private_key_type.upper()
    if desired_key_type == current_key_type:
        return

    # Both --key-type and --cert-name provided: the user's intent to change
    # the key type of this lineage is unambiguous.
    if cli.set_by_cli("key_type") and cli.set_by_cli("certname"):
        return

    # Failing that, interactively ask the user to confirm the change.
    if display_util.yesno(
        f'An {current_key_type} certificate named {cert.lineagename} already exists. Do you want to '
        f'update its key type to {desired_key_type}?',
        yes_label='Update key type', no_label='Keep existing key type',
        default=False, force_interactive=False,
    ):
        return

    # --key-type was set explicitly but the change was not confirmed by
    # either method above: the intent is ambiguous, so refuse to proceed.
    if cli.set_by_cli("key_type"):
        raise errors.Error(
            'Are you trying to change the key type of the certificate named '
            f'{cert.lineagename} from {current_key_type} to {desired_key_type}? Please provide '
            'both --cert-name and --key-type on the command line to confirm the change '
            'you are trying to make.'
        )

    # The mismatch comes from Certbot's default key type; the user is not
    # asking for a change, so keep the lineage's existing key type.
    config.key_type = current_key_type.lower()
def _handle_subset_cert_request(config: configuration.NamespaceConfig,
                                domains: Iterable[str],
                                cert: storage.RenewableCert
                                ) -> Tuple[str, Optional[storage.RenewableCert]]:
    """Decide what to do when an existing certificate covers only some of the
    requested names.

    Offers to expand and replace the existing certificate (automatic when
    --expand or --renew-by-default is set); otherwise tells the user about
    --duplicate and aborts.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param domains: List of domain names
    :type domains: `list` of `str`

    :param cert: Certificate object
    :type cert: storage.RenewableCert

    :returns: Tuple of (str action, cert_or_None) as per _find_lineage_for_domains_and_certname
              action can be: "newcert" | "renew" | "reinstall"
    :rtype: `tuple` of `str`

    """
    _handle_unexpected_key_type_migration(config, cert)

    existing_names = ", ".join(cert.names())
    prompt = (
        "You have an existing certificate that contains a portion of "
        "the domains you requested (ref: {0}){br}{br}It contains these "
        "names: {1}{br}{br}You requested these names for the new "
        "certificate: {2}.{br}{br}Do you want to expand and replace this existing "
        "certificate with the new certificate?"
    ).format(cert.configfile.filename, existing_names, ", ".join(domains),
             br=os.linesep)

    # Only prompt when neither --expand nor --renew-by-default decided it.
    auto_expand = config.expand or config.renew_by_default
    if auto_expand or display_util.yesno(prompt, "Expand", "Cancel",
                                         cli_flag="--expand", force_interactive=True):
        return "renew", cert

    display_util.notify(
        "To obtain a new certificate that contains these names without "
        "replacing your existing certificate for {0}, you must use the "
        "--duplicate option.{br}{br}"
        "For example:{br}{br}{1} --duplicate {2}".format(
            existing_names,
            cli.cli_command, " ".join(sys.argv[1:]),
            br=os.linesep
        ))
    raise errors.Error(USER_CANCELLED)
def _handle_identical_cert_request(config: configuration.NamespaceConfig,
                                  lineage: storage.RenewableCert,
                                  ) -> Tuple[str, Optional[storage.RenewableCert]]:
    """Figure out what to do if a lineage has the same names as a previously obtained one

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param lineage: Certificate lineage object
    :type lineage: storage.RenewableCert

    :returns: Tuple of (str action, cert_or_None) as per _find_lineage_for_domains_and_certname
              action can be: "newcert" | "renew" | "reinstall"
    :rtype: `tuple` of `str`

    """
    _handle_unexpected_key_type_migration(config, lineage)

    # A lineage that was never (fully) deployed should just be reinstalled.
    if not lineage.ensure_deployed():
        return "reinstall", lineage
    if renewal.should_renew(config, lineage):
        return "renew", lineage
    if config.reinstall:
        # Set with --reinstall, force an identical certificate to be
        # reinstalled without further prompting.
        return "reinstall", lineage
    question = (
        "You have an existing certificate that has exactly the same "
        "domains or certificate name you requested and isn't close to expiry."
        "{br}(ref: {0}){br}{br}What would you like to do?"
    ).format(lineage.configfile.filename, br=os.linesep)

    if config.verb == "run":
        keep_opt = "Attempt to reinstall this existing certificate"
    elif config.verb == "certonly":
        keep_opt = "Keep the existing certificate for now"
    # NOTE(review): keep_opt is only bound for the "run" and "certonly"
    # verbs; any other verb reaching this point would raise NameError below
    # -- confirm those are the only verbs that can get here.
    choices = [keep_opt,
               "Renew & replace the certificate (may be subject to CA rate limits)"]

    response = display_util.menu(question, choices,
                                 default=0, force_interactive=True)
    if response[0] == display_util.CANCEL:
        # TODO: Add notification related to command-line options for
        # skipping the menu for this case.
        raise errors.Error(
            "Operation canceled. You may re-run the client.")
    if response[1] == 0:
        return "reinstall", lineage
    elif response[1] == 1:
        return "renew", lineage
    raise AssertionError('This is impossible')
def _find_lineage_for_domains(config: configuration.NamespaceConfig, domains: List[str]
                              ) -> Tuple[Optional[str], Optional[storage.RenewableCert]]:
    """Determine whether there are duplicated names and how to handle
    them (renew, reinstall, newcert, or raising an error to stop
    the client run if the user chooses to cancel the operation when
    prompted).

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param domains: List of domain names
    :type domains: `list` of `str`

    :returns: Two-element tuple containing desired new-certificate behavior as
              a string token ("reinstall", "renew", or "newcert"), plus either
              a RenewableCert instance or `None` if renewal shouldn't occur.
    :rtype: `tuple` of `str` and :class:`storage.RenewableCert` or `None`

    :raises errors.Error: If the user would like to rerun the client again.

    """
    # --duplicate skips all duplicate-detection logic and forces a brand
    # new certificate to be obtained.
    if config.duplicate:
        return "newcert", None

    # TODO: Also address superset case
    ident_names_cert, subset_names_cert = cert_manager.find_duplicative_certs(config, domains)
    # XXX ^ schoen is not sure whether that correctly reads the systemwide
    # configuration file.
    if ident_names_cert is not None:
        return _handle_identical_cert_request(config, ident_names_cert)
    if subset_names_cert is not None:
        return _handle_subset_cert_request(config, domains, subset_names_cert)
    # No related certificate found: request a fresh one.
    return "newcert", None
def _find_cert(config: configuration.NamespaceConfig, domains: List[str], certname: str
               ) -> Tuple[bool, Optional[storage.RenewableCert]]:
    """Finds an existing certificate object given domains and/or a certificate name.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param domains: List of domain names
    :type domains: `list` of `str`

    :param certname: Name of certificate
    :type certname: str

    :returns: Two-element tuple of a boolean that indicates if this function should be
              followed by a call to fetch a certificate from the server, and either a
              RenewableCert instance or None.
    :rtype: `tuple` of `bool` and :class:`storage.RenewableCert` or `None`

    """
    action, lineage = _find_lineage_for_domains_and_certname(config, domains, certname)
    reinstalling = action == "reinstall"
    if reinstalling:
        logger.info("Keeping the existing certificate")
    # Only a non-reinstall action requires contacting the ACME server.
    return not reinstalling, lineage
def _find_lineage_for_domains_and_certname(
    config: configuration.NamespaceConfig, domains: List[str],
    certname: str) -> Tuple[Optional[str], Optional[storage.RenewableCert]]:
    """Find appropriate lineage based on given domains and/or certname.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param domains: List of domain names
    :type domains: `list` of `str`

    :param certname: Name of certificate
    :type certname: str

    :returns: Two-element tuple containing desired new-certificate behavior as
              a string token ("reinstall", "renew", or "newcert"), plus either
              a RenewableCert instance or None if renewal should not occur.
    :rtype: `tuple` of `str` and :class:`storage.RenewableCert` or `None`

    :raises errors.Error: If the user would like to rerun the client again.

    """
    # Without a certificate name, fall back to duplicate detection by domains.
    if not certname:
        return _find_lineage_for_domains(config, domains)

    lineage = cert_manager.lineage_for_certname(config, certname)
    if lineage:
        if domains:
            computed_domains = cert_manager.domains_for_certname(config, certname)
            if computed_domains and set(computed_domains) != set(domains):
                # The requested domain set differs from the lineage's:
                # confirm any implicit key-type change and the name change
                # before renewing with the new names.
                _handle_unexpected_key_type_migration(config, lineage)
                _ask_user_to_confirm_new_names(config, domains, certname,
                                               lineage.names())  # raises if no
                return "renew", lineage
        # unnecessarily specified domains or no domains specified
        return _handle_identical_cert_request(config, lineage)
    elif domains:
        # Named lineage does not exist, but domains were given: new cert.
        return "newcert", None
    raise errors.ConfigurationError("No certificate with name {0} found. "
        "Use -d to specify domains, or run certbot certificates to see "
        "possible certificate names.".format(certname))
T = TypeVar("T")
def _get_added_removed(after: Iterable[T], before: Iterable[T]) -> Tuple[List[T], List[T]]:
"""Get lists of items removed from `before`
and a lists of items added to `after`
"""
added = list(set(after) - set(before))
removed = list(set(before) - set(after))
added.sort()
removed.sort()
return added, removed
def _format_list(character: str, strings: Iterable[str]) -> str:
"""Format list with given character
"""
if not strings:
formatted = "{br}(None)"
else:
formatted = "{br}{ch} " + "{br}{ch} ".join(strings)
return formatted.format(
ch=character,
br=os.linesep
)
def _ask_user_to_confirm_new_names(config: configuration.NamespaceConfig,
                                   new_domains: Iterable[str], certname: str,
                                   old_domains: Iterable[str]) -> None:
    """Ask the user to confirm changing certificate *certname* to cover
    *new_domains* instead of *old_domains*.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param new_domains: List of new domain names
    :type new_domains: `list` of `str`

    :param certname: Name of certificate
    :type certname: str

    :param old_domains: List of old domain names
    :type old_domains: `list` of `str`

    :returns: None
    :rtype: None

    :raises errors.ConfigurationError: if cert name and domains mismatch

    """
    # --renew-with-new-domains skips the confirmation entirely.
    if config.renew_with_new_domains:
        return

    added, removed = _get_added_removed(new_domains, old_domains)

    prompt = ("You are updating certificate {0} to include new domain(s): {1}{br}{br}"
              "You are also removing previously included domain(s): {2}{br}{br}"
              "Did you intend to make this change?".format(
                  certname,
                  _format_list("+", added),
                  _format_list("-", removed),
                  br=os.linesep))
    if not display_util.yesno(prompt, "Update certificate", "Cancel", default=True):
        raise errors.ConfigurationError("Specified mismatched certificate name and domains.")
def _find_domains_or_certname(config: configuration.NamespaceConfig,
                              installer: Optional[interfaces.Installer],
                              question: Optional[str] = None) -> Tuple[List[str], str]:
    """Retrieve domains and certname from config or user input.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param installer: Installer object
    :type installer: interfaces.Installer

    :param `str` question: Overriding default question to ask the user if asked
        to choose from domain names.

    :returns: Two-part tuple of domains and certname
    :rtype: `tuple` of list of `str` and `str`

    :raises errors.Error: Usage message, if parameters are not used correctly

    """
    certname = config.certname
    domains = None

    # Prefer domains given explicitly; otherwise derive them from the named
    # certificate; finally fall back to interactive/installer discovery.
    if config.domains:
        domains = config.domains
    elif certname:
        domains = cert_manager.domains_for_certname(config, certname)

    if not domains:
        # The certname might not have existed, or there was a problem:
        # try to get domains from the user.
        domains = display_ops.choose_names(installer, question)

    if not domains and not certname:
        raise errors.Error("Please specify --domains, or --installer that "
                           "will help in domain names autodiscovery, or "
                           "--cert-name for an existing certificate name.")

    return domains, certname
def _report_next_steps(config: configuration.NamespaceConfig, installer_err: Optional[errors.Error],
                       lineage: Optional[storage.RenewableCert],
                       new_or_renewed_cert: bool = True) -> None:
    """Displays post-run/certonly advice to the user about renewal and installation.

    The output varies by runtime configuration and any errors encountered during installation.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param installer_err: The installer/enhancement error encountered, if any.
    :type installer_err: Optional[errors.Error]

    :param lineage: The resulting certificate lineage from the issuance, if any.
    :type lineage: Optional[storage.RenewableCert]

    :param bool new_or_renewed_cert: Whether the verb execution resulted in a certificate
                                     being saved (created or renewed).

    """
    steps: List[str] = []

    # If the installation or enhancement raised an error, show advice on trying again
    if installer_err:
        # Special case where either --nginx or --apache were used, causing us to
        # run the "installer" (i.e. reloading the nginx/apache config)
        if config.verb == 'certonly':
            steps.append(
                "The certificate was saved, but was not successfully loaded by the installer "
                f"({config.installer}) due to the installer failing to reload. "
                f"After fixing the error shown below, try reloading {config.installer} manually."
            )
        else:
            steps.append(
                "The certificate was saved, but could not be installed (installer: "
                f"{config.installer}). After fixing the error shown below, try installing it again "
                f"by running:\n {cli.cli_command} install --cert-name "
                f"{_cert_name_from_config_or_lineage(config, lineage)}"
            )

    # If a certificate was obtained or renewed, show applicable renewal advice
    if new_or_renewed_cert:
        if config.csr:
            steps.append(
                "Certificates created using --csr will not be renewed automatically by Certbot. "
                "You will need to renew the certificate before it expires, by running the same "
                "Certbot command again.")
        elif _is_interactive_only_auth(config):
            steps.append(
                "This certificate will not be renewed automatically. Autorenewal of "
                "--manual certificates requires the use of an authentication hook script "
                "(--manual-auth-hook) but one was not provided. To renew this certificate, repeat "
                f"this same {cli.cli_command} command before the certificate's expiry date."
            )
        elif not config.preconfigured_renewal:
            steps.append(
                "The certificate will need to be renewed before it expires. Certbot can "
                "automatically renew the certificate in the background, but you may need "
                "to take steps to enable that functionality. "
                "See https://certbot.org/renewal-setup for instructions.")

    if not steps:
        return

    # Bold the heading only when attached to a terminal and not --quiet.
    # TODO: refactor ANSI escapes during https://github.com/certbot/certbot/issues/8848
    (bold_on, nl, bold_off) = [c if sys.stdout.isatty() and not config.quiet else '' \
                               for c in (util.ANSI_SGR_BOLD, '\n', util.ANSI_SGR_RESET)]
    print(bold_on, end=nl)
    display_util.notify("NEXT STEPS:")
    print(bold_off, end='')
    for step in steps:
        display_util.notify(f"- {step}")

    # If there was an installer error, segregate the error output with a trailing newline
    if installer_err:
        print()
def _report_new_cert(config: configuration.NamespaceConfig, cert_path: Optional[str],
                     fullchain_path: Optional[str], key_path: Optional[str] = None) -> None:
    """Reports the creation of a new certificate to the user.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param cert_path: path to certificate
    :type cert_path: str

    :param fullchain_path: path to full chain
    :type fullchain_path: str

    :param key_path: path to private key, if available
    :type key_path: str

    :returns: `None`
    :rtype: None

    """
    if config.dry_run:
        display_util.notify("The dry run was successful.")
        return

    assert cert_path and fullchain_path, "No certificates saved to report."

    renewal_msg = ""
    if config.preconfigured_renewal and not _is_interactive_only_auth(config):
        renewal_msg = ("\nCertbot has set up a scheduled task to automatically renew this "
                       "certificate in the background.")

    # NOTE(review): the "Certificate is saved at" path is filled with
    # fullchain_path (cert_path=fullchain_path below), while the expiry is
    # read from cert_path -- presumably intentional, since the fullchain file
    # is what servers should be configured with; confirm before changing.
    display_util.notify(
        ("\nSuccessfully received certificate.\n"
         "Certificate is saved at: {cert_path}\n{key_msg}"
         "This certificate expires on {expiry}.\n"
         "These files will be updated when the certificate renews.{renewal_msg}{nl}").format(
            cert_path=fullchain_path,
            expiry=crypto_util.notAfter(cert_path).date(),
            key_msg="Key is saved at: {}\n".format(key_path) if key_path else "",
            renewal_msg=renewal_msg,
            nl="\n" if config.verb == "run" else ""  # Normalize spacing across verbs
        )
    )
def _is_interactive_only_auth(config: configuration.NamespaceConfig) -> bool:
    """ Whether the current authenticator params only support interactive renewal.
    """
    # --manual without --manual-auth-hook can never autorenew
    return config.authenticator == "manual" and config.manual_auth_hook is None
def _csr_report_new_cert(config: configuration.NamespaceConfig, cert_path: Optional[str],
                         chain_path: Optional[str], fullchain_path: Optional[str]) -> None:
    """ --csr variant of _report_new_cert.

    Until --csr is overhauled (#8332) this is transitional function to report the creation
    of a new certificate using --csr.

    TODO: remove this function and just call _report_new_cert when --csr is overhauled.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param str cert_path: path to cert.pem
    :param str chain_path: path to chain.pem
    :param str fullchain_path: path to fullchain.pem

    """
    if config.dry_run:
        display_util.notify("The dry run was successful.")
        return

    assert cert_path and fullchain_path, "No certificates saved to report."

    expiry = crypto_util.notAfter(cert_path).date()
    notification = (
        "\nSuccessfully received certificate.\n"
        f"Certificate is saved at: {cert_path}\n"
        f"Intermediate CA chain is saved at: {chain_path}\n"
        f"Full certificate chain is saved at: {fullchain_path}\n"
        f"This certificate expires on {expiry}."
    )
    display_util.notify(notification)
def _determine_account(config: configuration.NamespaceConfig
                       ) -> Tuple[account.Account,
                                  Optional[acme_client.ClientV2]]:
    """Determine which account to use.

    If ``config.account`` is ``None``, it will be updated based on the
    user input. Same for ``config.email``.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :returns: Account and optionally ACME client API (biproduct of new
        registration).
    :rtype: tuple of :class:`certbot._internal.account.Account` and :class:`acme.client.Client`

    :raises errors.Error: If unable to register an account with ACME server

    """
    # Terms-of-service callback used during new registration: no-op when
    # --agree-tos was passed, otherwise prompts interactively.
    def _tos_cb(terms_of_service: str) -> None:
        if config.tos:
            return
        msg = ("Please read the Terms of Service at {0}. You "
               "must agree in order to register with the ACME "
               "server. Do you agree?".format(terms_of_service))
        result = display_util.yesno(msg, cli_flag="--agree-tos", force_interactive=True)
        if not result:
            raise errors.Error(
                "Registration cannot proceed without accepting "
                "Terms of Service.")

    account_storage = account.AccountFileStorage(config)
    # Only populated when a fresh registration creates an ACME client.
    acme: Optional[acme_client.ClientV2] = None

    if config.account is not None:
        # An account id was given explicitly: load that account.
        acc = account_storage.load(config.account)
    else:
        accounts = account_storage.find_all()
        if len(accounts) > 1:
            # Several local accounts: let the user pick one.
            potential_acc = display_ops.choose_account(accounts)
            if not potential_acc:
                raise errors.Error("No account has been chosen.")
            acc = potential_acc
        elif len(accounts) == 1:
            acc = accounts[0]
        else:  # no account registered yet
            if config.email is None and not config.register_unsafely_without_email:
                config.email = display_ops.get_email()
            try:
                acc, acme = client.register(
                    config, account_storage, tos_cb=_tos_cb)
                display_util.notify("Account registered.")
            except errors.MissingCommandlineFlag:
                # Re-raised unchanged so the CLI can explain the missing flag.
                raise
            except (errors.Error, acme_messages.Error) as err:
                logger.debug("", exc_info=True)
                # Translate ACME protocol errors into a friendlier message.
                if acme_messages.is_acme_error(err):
                    err_msg = internal_display_util.describe_acme_error(
                        cast(acme_messages.Error, err))
                    err_msg = f"Error returned by the ACME server: {err_msg}"
                else:
                    err_msg = str(err)
                raise errors.Error(
                    f"Unable to register an account with ACME server. {err_msg}")

    # Record the chosen account id so later steps reuse the same account.
    config.account = acc.id
    return acc, acme
def _delete_if_appropriate(config: configuration.NamespaceConfig) -> None:
    """Does the user want to delete their now-revoked certs? If run in non-interactive mode,
    deleting happens automatically.

    :param config: parsed command line arguments
    :type config: configuration.NamespaceConfig

    :returns: `None`
    :rtype: None

    :raises errors.Error: If anything goes wrong, including bad user input, if an overlapping
        archive dir is found for the specified lineage, etc ...

    """
    attempt_deletion = config.delete_after_revoke
    # None means --delete-after-revoke was not given either way: ask.
    if attempt_deletion is None:
        msg = ("Would you like to delete the certificate(s) you just revoked, "
               "along with all earlier and later versions of the certificate?")
        attempt_deletion = display_util.yesno(msg, yes_label="Yes (recommended)", no_label="No",
                                              force_interactive=True, default=True)

    if not attempt_deletion:
        return

    # config.cert_path must have been set
    # config.certname may have been set
    assert config.cert_path

    if not config.certname:
        config.certname = cert_manager.cert_path_to_lineage(config)

    # don't delete if the archive_dir is used by some other lineage
    archive_dir = storage.full_archive_path(
        configobj.ConfigObj(
            storage.renewal_file_for_certname(config, config.certname),
            encoding='utf-8', default_encoding='utf-8'),
        config, config.certname)
    try:
        cert_manager.match_and_check_overlaps(config, [lambda x: archive_dir],
                                              lambda x: x.archive_dir, lambda x: x.lineagename)
    except errors.OverlappingMatchFound:
        # Deliberately best-effort: warn and keep the files rather than risk
        # deleting an archive dir shared with another lineage.
        logger.warning("Not deleting revoked certificates due to overlapping archive dirs. "
                       "More than one certificate is using %s", archive_dir)
        return
    except Exception as e:
        # Any other failure is surfaced with enough context to debug paths.
        msg = ('config.default_archive_dir: {0}, config.live_dir: {1}, archive_dir: {2},'
               'original exception: {3}')
        msg = msg.format(config.default_archive_dir, config.live_dir, archive_dir, e)
        raise errors.Error(msg)

    cert_manager.delete(config)
def _init_le_client(config: configuration.NamespaceConfig,
                    authenticator: Optional[interfaces.Authenticator],
                    installer: Optional[interfaces.Installer]) -> client.Client:
    """Initialize Let's Encrypt Client

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param authenticator: Acme authentication handler
    :type authenticator: Optional[interfaces.Authenticator]

    :param installer: Installer object
    :type installer: Optional[interfaces.Installer]

    :returns: client: Client object
    :rtype: client.Client
    """
    acc: Optional[account.Account]
    if authenticator is not None:
        # if authenticator was given, then we will need account...
        acc, acme = _determine_account(config)
        logger.debug("Picked account: %r", acc)
    else:
        # Install-only flows never talk to the ACME server, so no account
        # (and no ACME client) is needed.
        acc, acme = None, None

    return client.Client(config, acc, authenticator, installer, acme=acme)
def unregister(config: configuration.NamespaceConfig,
               unused_plugins: plugins_disco.PluginsRegistry) -> Optional[str]:
    """Deactivate account on server

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param unused_plugins: List of plugins (deprecated)
    :type unused_plugins: plugins_disco.PluginsRegistry

    :returns: `None` or a string indicating an error
    :rtype: None or str
    """
    # Bail out early when there is no local account to deactivate.
    if not account.AccountFileStorage(config).find_all():
        return f"Could not find existing account for server {config.server}."

    prompt = ("Are you sure you would like to irrevocably deactivate "
              "your account?")
    wants_deactivate = display_util.yesno(prompt, yes_label='Deactivate', no_label='Abort',
                                          default=True)
    if not wants_deactivate:
        return "Deactivation aborted."

    acc, acme = _determine_account(config)
    cb_client = client.Client(config, acc, None, None, acme=acme)
    if not cb_client.acme:
        raise errors.Error("ACME client is not set.")

    # delete on boulder
    cb_client.acme.deactivate_registration(acc.regr)

    # delete local account files
    account_files = account.AccountFileStorage(config)
    account_files.delete(config.account)

    display_util.notify("Account deactivated.")
    return None
def register(config: configuration.NamespaceConfig,
             unused_plugins: plugins_disco.PluginsRegistry) -> Optional[str]:
    """Create accounts on the server.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param unused_plugins: List of plugins (deprecated)
    :type unused_plugins: plugins_disco.PluginsRegistry

    :returns: `None` or a string indicating an error
    :rtype: None or str
    """
    # Mirror the first part of _determine_account: look for accounts on disk.
    account_storage = account.AccountFileStorage(config)
    existing_accounts = account_storage.find_all()

    if not existing_accounts:
        # _determine_account will register an account
        _determine_account(config)
        return None

    # TODO: add a flag to register a duplicate account (this will
    # also require extending _determine_account's behavior
    # or else extracting the registration code from there)
    return ("There is an existing account; registration of a "
            "duplicate account with this command is currently "
            "unsupported.")
def update_account(config: configuration.NamespaceConfig,
                   unused_plugins: plugins_disco.PluginsRegistry) -> Optional[str]:
    """Modify accounts on the server.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param unused_plugins: List of plugins (deprecated)
    :type unused_plugins: plugins_disco.PluginsRegistry

    :returns: `None` or a string indicating an error
    :rtype: None or str
    """
    # Portion of _determine_account logic to see whether accounts already
    # exist or not.
    account_storage = account.AccountFileStorage(config)
    accounts = account_storage.find_all()

    if not accounts:
        return f"Could not find an existing account for server {config.server}."
    if config.email is None and not config.register_unsafely_without_email:
        # No --email and no explicit opt-out: prompt the user for an address.
        config.email = display_ops.get_email(optional=False)

    acc, acme = _determine_account(config)
    cb_client = client.Client(config, acc, None, None, acme=acme)
    if not cb_client.acme:
        raise errors.Error("ACME client is not set.")
    # Empty list of contacts in case the user is removing all emails
    acc_contacts: Iterable[str] = ()
    if config.email:
        # --email may hold a comma-separated list of addresses.
        acc_contacts = ['mailto:' + email for email in config.email.split(',')]
    # We rely on an exception to interrupt this process if it didn't work.
    prev_regr_uri = acc.regr.uri
    acc.regr = cb_client.acme.update_registration(acc.regr.update(
        body=acc.regr.body.update(contact=acc_contacts)))
    # A v1 account being used as a v2 account will result in changing the uri to
    # the v2 uri. Since it's the same object on disk, put it back to the v1 uri
    # so that we can also continue to use the account object with acmev1.
    acc.regr = acc.regr.update(uri=prev_regr_uri)
    account_storage.update_regr(acc, cb_client.acme)

    if not config.email:
        display_util.notify("Any contact information associated "
                            "with this account has been removed.")
    else:
        eff.prepare_subscription(config, acc)
        display_util.notify("Your e-mail address was updated to {0}.".format(config.email))
    return None
def show_account(config: configuration.NamespaceConfig,
                 unused_plugins: plugins_disco.PluginsRegistry) -> Optional[str]:
    """Fetch account info from the ACME server and show it to the user.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param unused_plugins: List of plugins (deprecated)
    :type unused_plugins: plugins_disco.PluginsRegistry

    :returns: `None` or a string indicating an error
    :rtype: None or str
    """
    # Portion of _determine_account logic to see whether accounts already
    # exist or not.
    account_storage = account.AccountFileStorage(config)
    if not account_storage.find_all():
        return f"Could not find an existing account for server {config.server}."

    acc, acme = _determine_account(config)
    cb_client = client.Client(config, acc, None, None, acme=acme)
    if not cb_client.acme:
        raise errors.Error("ACME client is not set.")

    # Ask the server for the authoritative registration resource.
    regr = cb_client.acme.query_registration(acc.regr)

    # Strip the "mailto:" scheme off every e-mail contact entry.
    emails = [contact[7:] for contact in regr.body.contact
              if contact.startswith('mailto:')]

    output = [f"Account details for server {config.server}:",
              f" Account URL: {regr.uri}"]
    output.append(" Email contact{}: {}".format(
        "s" if len(emails) > 1 else "",
        ", ".join(emails) if len(emails) > 0 else "none"))

    display_util.notify("\n".join(output))
    return None
def _cert_name_from_config_or_lineage(config: configuration.NamespaceConfig,
                                      lineage: Optional[storage.RenewableCert]) -> Optional[str]:
    """Best-effort resolution of a certificate name.

    Precedence: an explicit lineage object, then --cert-name, then a reverse
    lookup from --cert-path. Returns None when none of these yields a name.
    """
    if lineage:
        return lineage.lineagename
    if config.certname:
        return config.certname
    try:
        return cert_manager.cert_path_to_lineage(config)
    except errors.Error:
        # No lineage matches the configured cert path.
        return None
def _install_cert(config: configuration.NamespaceConfig, le_client: client.Client,
                  domains: List[str], lineage: Optional[storage.RenewableCert] = None) -> None:
    """Install a cert

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param le_client: Client object
    :type le_client: client.Client

    :param domains: List of domains
    :type domains: `list` of `str`

    :param lineage: Certificate lineage object. Defaults to `None`
    :type lineage: storage.RenewableCert

    :returns: `None`
    :rtype: None
    """
    # Paths come from the lineage when one is known, otherwise from the
    # explicit --cert-path/--key-path/... options on the config.
    source: Union[storage.RenewableCert,
                  configuration.NamespaceConfig] = lineage if lineage else config
    assert source.cert_path is not None

    le_client.deploy_certificate(domains, source.key_path, source.cert_path,
                                 source.chain_path, source.fullchain_path)
    le_client.enhance_config(domains, source.chain_path)
def install(config: configuration.NamespaceConfig,
            plugins: plugins_disco.PluginsRegistry) -> Optional[str]:
    """Install a previously obtained cert in a server.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param plugins: List of plugins
    :type plugins: plugins_disco.PluginsRegistry

    :returns: `None` or the error message
    :rtype: None or str
    """
    # XXX: Update for renewer/RenewableCert
    # FIXME: be consistent about whether errors are raised or returned from
    # this function ...

    try:
        installer, _ = plug_sel.choose_configurator_plugins(config, plugins, "install")
    except errors.PluginSelectionError as e:
        return str(e)

    # A "custom" cert is one specified directly by path rather than by name.
    custom_cert = (config.key_path and config.cert_path)
    if not config.certname and not custom_cert:
        certname_question = "Which certificate would you like to install?"
        config.certname = cert_manager.get_certnames(
            config, "install", allow_multiple=False,
            custom_prompt=certname_question)[0]

    if not enhancements.are_supported(config, installer):
        # Fixed typo in user-facing message ("One ore more" -> "One or more").
        raise errors.NotSupportedError("One or more of the requested enhancements "
                                       "are not supported by the selected installer")
    # If cert-path is defined, populate missing (ie. not overridden) values.
    # Unfortunately this can't be done in argument parser, as certificate
    # manager needs the access to renewal directory paths
    if config.certname:
        config = _populate_from_certname(config)
    elif enhancements.are_requested(config):
        # Preflight config check
        raise errors.ConfigurationError("One or more of the requested enhancements "
                                        "require --cert-name to be provided")

    if config.key_path and config.cert_path:
        _check_certificate_and_key(config)
        domains, _ = _find_domains_or_certname(config, installer)
        le_client = _init_le_client(config, authenticator=None, installer=installer)
        _install_cert(config, le_client, domains)
    else:
        raise errors.ConfigurationError("Path to certificate or key was not defined. "
            "If your certificate is managed by Certbot, please use --cert-name "
            "to define which certificate you would like to install.")

    if enhancements.are_requested(config):
        # In the case where we don't have certname, we have errored out already
        lineage = cert_manager.lineage_for_certname(config, config.certname)
        enhancements.enable(lineage, domains, installer, config)

    return None
def _populate_from_certname(config: configuration.NamespaceConfig) -> configuration.NamespaceConfig:
    """Helper function for install to populate missing config values from lineage
    defined by --cert-name."""
    lineage = cert_manager.lineage_for_certname(config, config.certname)
    if not lineage:
        return config
    # Only fill in path values the user did not already supply on the
    # command line; write through to the underlying namespace.
    for path_attr in ("key_path", "cert_path", "chain_path", "fullchain_path"):
        if not getattr(config, path_attr):
            setattr(config.namespace, path_attr, getattr(lineage, path_attr))
    return config
def _check_certificate_and_key(config: configuration.NamespaceConfig) -> None:
    """Validate that --cert-path and --key-path both point at readable files.

    :param config: Configuration object; ``cert_path`` and ``key_path`` are read.
    :type config: configuration.NamespaceConfig

    :raises errors.ConfigurationError: if either path does not resolve to an
        existing regular file
    """
    # filesystem.realpath presumably resolves symlinks (project helper) —
    # a dangling link is therefore reported as an unreadable file.
    if not os.path.isfile(filesystem.realpath(config.cert_path)):
        raise errors.ConfigurationError("Error while reading certificate from path "
                                        "{0}".format(config.cert_path))
    if not os.path.isfile(filesystem.realpath(config.key_path)):
        raise errors.ConfigurationError("Error while reading private key from path "
                                        "{0}".format(config.key_path))
def plugins_cmd(config: configuration.NamespaceConfig,
                plugins: plugins_disco.PluginsRegistry) -> None:
    """List server software plugins.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param plugins: List of plugins
    :type plugins: plugins_disco.PluginsRegistry

    :returns: `None`
    :rtype: None
    """
    logger.debug("Expected interfaces: %s", config.ifaces)

    ifaces = [] if config.ifaces is None else config.ifaces
    selected = plugins.visible().ifaces(ifaces)
    logger.debug("Filtered plugins: %r", selected)

    notify = functools.partial(display_util.notification, pause=False)

    # Without --init or --prepare, just show what matched the filter.
    if not config.init and not config.prepare:
        notify(str(selected))
        return

    selected.init(config)
    verified = selected.verify(ifaces)
    logger.debug("Verified plugins: %r", verified)

    # Without --prepare, stop after interface verification.
    if not config.prepare:
        notify(str(verified))
        return

    verified.prepare()
    available = verified.available()
    logger.debug("Prepared plugins: %s", available)
    notify(str(available))
def enhance(config: configuration.NamespaceConfig,
            plugins: plugins_disco.PluginsRegistry) -> Optional[str]:
    """Add security enhancements to existing configuration

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param plugins: List of plugins
    :type plugins: plugins_disco.PluginsRegistry

    :returns: `None` or a string indicating an error
    :rtype: None or str
    """
    supported_enhancements = ["hsts", "redirect", "uir", "staple"]
    # Check that at least one enhancement was requested on command line
    oldstyle_enh = any(getattr(config, enh) for enh in supported_enhancements)
    if not enhancements.are_requested(config) and not oldstyle_enh:
        msg = ("Please specify one or more enhancement types to configure. To list "
               "the available enhancement types, run:\n\n%s --help enhance\n")
        logger.error(msg, cli.cli_command)
        raise errors.MisconfigurationError("No enhancements requested, exiting.")

    try:
        installer, _ = plug_sel.choose_configurator_plugins(config, plugins, "enhance")
    except errors.PluginSelectionError as e:
        return str(e)

    if not enhancements.are_supported(config, installer):
        # Fixed typo in user-facing message ("One ore more" -> "One or more").
        raise errors.NotSupportedError("One or more of the requested enhancements "
                                       "are not supported by the selected installer")

    certname_question = ("Which certificate would you like to use to enhance "
                         "your configuration?")
    config.certname = cert_manager.get_certnames(
        config, "enhance", allow_multiple=False,
        custom_prompt=certname_question)[0]
    cert_domains = cert_manager.domains_for_certname(config, config.certname)
    if cert_domains is None:
        raise errors.Error("Could not find the list of domains for the given certificate name.")
    if config.noninteractive_mode:
        domains = cert_domains
    else:
        domain_question = ("Which domain names would you like to enable the "
                           "selected enhancements for?")
        domains = display_ops.choose_values(cert_domains, domain_question)
        if not domains:
            raise errors.Error("User cancelled the domain selection. No domains "
                               "defined, exiting.")

    lineage = cert_manager.lineage_for_certname(config, config.certname)
    if not lineage:
        raise errors.Error("Could not find the lineage for the given certificate name.")
    if not config.chain_path:
        config.chain_path = lineage.chain_path
    if oldstyle_enh:
        # Legacy flag-style enhancements (--hsts etc.) go through the client.
        le_client = _init_le_client(config, authenticator=None, installer=installer)
        le_client.enhance_config(domains, config.chain_path, redirect_default=False)
    if enhancements.are_requested(config):
        enhancements.enable(lineage, domains, installer, config)

    return None
def rollback(config: configuration.NamespaceConfig, plugins: plugins_disco.PluginsRegistry) -> None:
    """Rollback server configuration changes made during install.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param plugins: List of plugins
    :type plugins: plugins_disco.PluginsRegistry

    :returns: `None`
    :rtype: None
    """
    # Delegate entirely to the client module: restore config.checkpoints
    # checkpoints using the configured installer plugin.
    client.rollback(config.installer, config.checkpoints, config, plugins)
def update_symlinks(config: configuration.NamespaceConfig,
                    unused_plugins: plugins_disco.PluginsRegistry) -> None:
    """Update the certificate file family symlinks

    Use the information in the config file to make symlinks point to
    the correct archive directory.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param unused_plugins: List of plugins (deprecated)
    :type unused_plugins: plugins_disco.PluginsRegistry

    :returns: `None`
    :rtype: None
    """
    # Thin wrapper: the real work lives in cert_manager.
    cert_manager.update_live_symlinks(config)
def rename(config: configuration.NamespaceConfig,
           unused_plugins: plugins_disco.PluginsRegistry) -> None:
    """Rename a certificate

    Use the information in the config file to rename an existing
    lineage.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param unused_plugins: List of plugins (deprecated)
    :type unused_plugins: plugins_disco.PluginsRegistry

    :returns: `None`
    :rtype: None
    """
    # Thin wrapper: the real work lives in cert_manager.
    cert_manager.rename_lineage(config)
def delete(config: configuration.NamespaceConfig,
           unused_plugins: plugins_disco.PluginsRegistry) -> None:
    """Delete a certificate

    Use the information in the config file to delete an existing
    lineage.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param unused_plugins: List of plugins (deprecated)
    :type unused_plugins: plugins_disco.PluginsRegistry

    :returns: `None`
    :rtype: None
    """
    # Thin wrapper: the real work lives in cert_manager.
    cert_manager.delete(config)
def certificates(config: configuration.NamespaceConfig,
                 unused_plugins: plugins_disco.PluginsRegistry) -> None:
    """Display information about certs configured with Certbot

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param unused_plugins: List of plugins (deprecated)
    :type unused_plugins: plugins_disco.PluginsRegistry

    :returns: `None`
    :rtype: None
    """
    # Thin wrapper: the real work lives in cert_manager.
    cert_manager.certificates(config)
def revoke(config: configuration.NamespaceConfig,
           unused_plugins: plugins_disco.PluginsRegistry) -> Optional[str]:
    """Revoke a previously obtained certificate.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param unused_plugins: List of plugins (deprecated)
    :type unused_plugins: plugins_disco.PluginsRegistry

    :returns: `None` or string indicating error in case of error
    :rtype: None or str
    """
    # For user-agent construction
    config.installer = config.authenticator = None

    if config.cert_path is None and config.certname:
        # When revoking via --cert-name, take the cert path and server from renewalparams
        lineage = storage.RenewableCert(
            storage.renewal_file_for_certname(config, config.certname), config)
        config.cert_path = lineage.cert_path
        # --server takes priority over lineage.server
        if lineage.server and not cli.set_by_cli("server"):
            config.server = lineage.server
    elif not config.cert_path or (config.cert_path and config.certname):
        # intentionally not supporting --cert-path & --cert-name together,
        # to avoid dealing with mismatched values
        raise errors.Error("Error! Exactly one of --cert-path or --cert-name must be specified!")

    if config.key_path is not None:  # revocation by cert key
        logger.debug("Revoking %s using certificate key %s",
                     config.cert_path, config.key_path)
        # Guard against revoking with a key that does not match the cert.
        crypto_util.verify_cert_matches_priv_key(config.cert_path, config.key_path)
        with open(config.key_path, 'rb') as f:
            key = jose.JWK.load(f.read())
        acme = client.acme_from_config_key(config, key)
    else:  # revocation by account key
        logger.debug("Revoking %s using Account Key", config.cert_path)
        acc, _ = _determine_account(config)
        acme = client.acme_from_config_key(config, acc.key, acc.regr)

    with open(config.cert_path, 'rb') as f:
        cert = crypto_util.pyopenssl_load_certificate(f.read())[0]
    logger.debug("Reason code for revocation: %s", config.reason)

    try:
        acme.revoke(jose.ComparableX509(cert), config.reason)
        # After a successful revocation, offer to delete the local lineage too.
        _delete_if_appropriate(config)
    except acme_errors.ClientError as e:
        return str(e)

    display_ops.success_revocation(config.cert_path)
    return None
def run(config: configuration.NamespaceConfig,
        plugins: plugins_disco.PluginsRegistry) -> Optional[str]:
    """Obtain a certificate and install.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param plugins: List of plugins
    :type plugins: plugins_disco.PluginsRegistry

    :returns: `None`
    :rtype: None
    """
    # TODO: Make run as close to auth + install as possible
    # Possible difficulties: config.csr was hacked into auth
    try:
        installer, authenticator = plug_sel.choose_configurator_plugins(config, plugins, "run")
    except errors.PluginSelectionError as e:
        return str(e)

    if config.must_staple and installer and "staple-ocsp" not in installer.supported_enhancements():
        raise errors.NotSupportedError(
            "Must-Staple extension requested, but OCSP stapling is not supported by the selected "
            f"installer ({config.installer})\n\n"
            "You can either:\n"
            " * remove the --must-staple option from the command line and obtain a certificate "
            "without the Must-Staple extension, or;\n"
            " * use the `certonly` subcommand and manually install the certificate into the "
            "intended service (e.g. webserver). You must also then manually enable OCSP stapling, "
            "as it is required for certificates with the Must-Staple extension to "
            "function properly.\n"
            " * choose a different installer plugin (such as --nginx or --apache), if possible."
        )

    # Preflight check for enhancement support by the selected installer
    if not enhancements.are_supported(config, installer):
        # Fixed typo in user-facing message ("One ore more" -> "One or more").
        raise errors.NotSupportedError("One or more of the requested enhancements "
                                       "are not supported by the selected installer")

    # TODO: Handle errors from _init_le_client?
    le_client = _init_le_client(config, authenticator, installer)

    domains, certname = _find_domains_or_certname(config, installer)
    should_get_cert, lineage = _find_cert(config, domains, certname)

    new_lineage = lineage
    if should_get_cert:
        new_lineage = _get_and_save_cert(le_client, config, domains,
                                         certname, lineage)

    cert_path = new_lineage.cert_path if new_lineage else None
    fullchain_path = new_lineage.fullchain_path if new_lineage else None
    key_path = new_lineage.key_path if new_lineage else None

    if should_get_cert:
        _report_new_cert(config, cert_path, fullchain_path, key_path)

    # The installer error, if any, is being stored as a value here, in order to first print
    # relevant advice in a nice way, before re-raising the error for normal processing.
    installer_err: Optional[errors.Error] = None
    try:
        _install_cert(config, le_client, domains, new_lineage)

        if enhancements.are_requested(config) and new_lineage:
            enhancements.enable(new_lineage, domains, installer, config)

        if lineage is None or not should_get_cert:
            display_ops.success_installation(domains)
        else:
            display_ops.success_renewal(domains)
    except errors.Error as e:
        installer_err = e
    finally:
        # Always print next steps, whether or not installation succeeded.
        _report_next_steps(config, installer_err, new_lineage,
                           new_or_renewed_cert=should_get_cert)
    # If the installer did fail, re-raise the error to bail out
    if installer_err:
        raise installer_err

    _suggest_donation_if_appropriate(config)
    eff.handle_subscription(config, le_client.account)
    return None
def _csr_get_and_save_cert(config: configuration.NamespaceConfig,
                           le_client: client.Client) -> Tuple[
                               Optional[str], Optional[str], Optional[str]]:
    """Obtain a cert using a user-supplied CSR

    This works differently in the CSR case (for now) because we don't
    have the privkey, and therefore can't construct the files for a lineage.
    So we just save the cert & chain to disk :/

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param le_client: Client object
    :type le_client: client.Client

    :returns: `cert_path`, `chain_path` and `fullchain_path` as absolute
        paths to the actual files, or None for each if it's a dry-run.
    :rtype: `tuple` of `str`
    """
    csr, _ = config.actual_csr
    # Pull the domain names out of the CSR just for user-facing messaging.
    csr_names = crypto_util.get_names_from_req(csr.data)
    display_util.notify(
        "{action} for {domains}".format(
            action="Simulating a certificate request" if config.dry_run else
                   "Requesting a certificate",
            domains=internal_display_util.summarize_domain_list(csr_names)
        )
    )
    cert, chain = le_client.obtain_certificate_from_csr(csr)
    if config.dry_run:
        logger.debug(
            "Dry run: skipping saving certificate to %s", config.cert_path)
        return None, None, None
    cert_path, chain_path, fullchain_path = le_client.save_certificate(
        cert, chain, os.path.normpath(config.cert_path),
        os.path.normpath(config.chain_path), os.path.normpath(config.fullchain_path))
    return cert_path, chain_path, fullchain_path
def renew_cert(config: configuration.NamespaceConfig, plugins: plugins_disco.PluginsRegistry,
               lineage: storage.RenewableCert) -> None:
    """Renew & save an existing cert. Do not install it.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param plugins: List of plugins
    :type plugins: plugins_disco.PluginsRegistry

    :param lineage: Certificate lineage object
    :type lineage: storage.RenewableCert

    :returns: `None`
    :rtype: None

    :raises errors.PluginSelectionError: MissingCommandlineFlag if supplied parameters do not pass
    """
    # installers are used in auth mode to determine domain names
    installer, authenticator = plug_sel.choose_configurator_plugins(config, plugins, "certonly")
    le_client = _init_le_client(config, authenticator, installer)

    renewed = _get_and_save_cert(le_client, config, lineage=lineage)
    if not renewed:
        raise errors.Error("An existing certificate for the given name could not be found.")

    if not installer or config.dry_run:
        # No installer to reload, or a dry run that must not touch the server.
        return

    # In case of a renewal, reload server to pick up new certificate.
    updater.run_renewal_deployer(config, renewed, installer)
    display_util.notify(f"Reloading {config.installer} server after certificate renewal")
    installer.restart()
def certonly(config: configuration.NamespaceConfig, plugins: plugins_disco.PluginsRegistry) -> None:
    """Authenticate & obtain cert, but do not install it.

    This implements the 'certonly' subcommand.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param plugins: List of plugins
    :type plugins: plugins_disco.PluginsRegistry

    :returns: `None`
    :rtype: None

    :raises errors.Error: If specified plugin could not be used
    """
    # SETUP: Select plugins and construct a client instance
    # installers are used in auth mode to determine domain names
    installer, auth = plug_sel.choose_configurator_plugins(config, plugins, "certonly")

    le_client = _init_le_client(config, auth, installer)

    if config.csr:
        # The user supplied a CSR, so no lineage is created — just save files.
        cert_path, chain_path, fullchain_path = _csr_get_and_save_cert(config, le_client)
        _csr_report_new_cert(config, cert_path, chain_path, fullchain_path)
        _report_next_steps(config, None, None, new_or_renewed_cert=not config.dry_run)
        _suggest_donation_if_appropriate(config)
        eff.handle_subscription(config, le_client.account)
        return

    domains, certname = _find_domains_or_certname(config, installer)
    should_get_cert, lineage = _find_cert(config, domains, certname)

    if not should_get_cert:
        display_util.notification("Certificate not yet due for renewal; no action taken.",
                                  pause=False)
        return

    lineage = _get_and_save_cert(le_client, config, domains, certname, lineage)

    # If a new cert was issued and we were passed an installer, we can safely
    # run `installer.restart()` to load the newly issued certificate
    installer_err: Optional[errors.Error] = None
    if lineage and installer and not config.dry_run:
        logger.info("Reloading %s server after certificate issuance", config.installer)
        try:
            installer.restart()
        except errors.Error as e:
            # Stored (not raised) so the new-cert report still prints first.
            installer_err = e

    cert_path = lineage.cert_path if lineage else None
    fullchain_path = lineage.fullchain_path if lineage else None
    key_path = lineage.key_path if lineage else None
    _report_new_cert(config, cert_path, fullchain_path, key_path)
    _report_next_steps(config, installer_err, lineage,
                       new_or_renewed_cert=should_get_cert and not config.dry_run)
    if installer_err:
        raise installer_err
    _suggest_donation_if_appropriate(config)
    eff.handle_subscription(config, le_client.account)
def renew(config: configuration.NamespaceConfig,
          unused_plugins: plugins_disco.PluginsRegistry) -> None:
    """Renew previously-obtained certificates.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :param unused_plugins: List of plugins (deprecated)
    :type unused_plugins: plugins_disco.PluginsRegistry

    :returns: `None`
    :rtype: None
    """
    try:
        renewal.handle_renewal_request(config)
    finally:
        # Post hooks are deferred during the renewal run; flush them even if
        # the renewal request raised.
        hooks.run_saved_post_hooks()
def make_or_verify_needed_dirs(config: configuration.NamespaceConfig) -> None:
    """Create or verify existence of config, work, and hook directories.

    :param config: Configuration object
    :type config: configuration.NamespaceConfig

    :returns: `None`
    :rtype: None
    """
    # The two core directories are set up with CONFIG_DIRS_MODE.
    for core_dir in (config.config_dir, config.work_dir):
        util.set_up_core_dir(core_dir, constants.CONFIG_DIRS_MODE,
                             config.strict_permissions)

    # Hook directories only need to exist (optionally with strict permissions).
    for hook_dir in (config.renewal_pre_hooks_dir,
                     config.renewal_deploy_hooks_dir,
                     config.renewal_post_hooks_dir):
        util.make_or_verify_dir(hook_dir, strict=config.strict_permissions)
@contextmanager
def make_displayer(config: configuration.NamespaceConfig
                   ) -> Generator[Union[display_util.NoninteractiveDisplay,
                                        display_util.FileDisplay], None, None]:
    """Creates a display object appropriate to the flags in the supplied config.

    :param config: Configuration object

    :returns: Display object
    """
    devnull: Optional[IO] = None

    if config.quiet:
        # --quiet implies non-interactive and discards all output.
        config.noninteractive_mode = True
        devnull = open(os.devnull, "w")  # pylint: disable=consider-using-with
        displayer: Union[display_util.NoninteractiveDisplay,
                         display_util.FileDisplay] = \
            display_util.NoninteractiveDisplay(devnull)
    elif config.noninteractive_mode:
        displayer = display_util.NoninteractiveDisplay(sys.stdout)
    else:
        displayer = display_util.FileDisplay(
            sys.stdout, config.force_interactive)

    try:
        yield displayer
    finally:
        # Only --quiet opens a file that needs closing.
        if devnull:
            devnull.close()
def main(cli_args: Optional[List[str]] = None) -> Optional[Union[str, int]]:
    """Run Certbot.

    :param cli_args: command line to Certbot, defaults to ``sys.argv[1:]``
    :type cli_args: `list` of `str`

    :returns: value for `sys.exit` about the exit status of Certbot
    :rtype: `str` or `int` or `None`
    """
    # Annotation fix: the default is None, so the parameter is Optional
    # (PEP 484 disallows the implicit-Optional spelling `List[str] = None`).
    if not cli_args:
        cli_args = sys.argv[1:]

    log.pre_arg_parse_setup()

    if os.environ.get('CERTBOT_SNAPPED') == 'True':
        cli_args = snap_config.prepare_env(cli_args)

    plugins = plugins_disco.PluginsRegistry.find_all()
    logger.debug("certbot version: %s", certbot.__version__)
    logger.debug("Location of certbot entry point: %s", sys.argv[0])
    # do not log `config`, as it contains sensitive data (e.g. revoke --key)!
    logger.debug("Arguments: %r", cli_args)
    logger.debug("Discovered plugins: %r", plugins)

    # Some releases of Windows require escape sequences to be enable explicitly
    misc.prepare_virtual_console()

    # note: arg parser internally handles --help (and exits afterwards)
    args = cli.prepare_and_parse_args(plugins, cli_args)
    config = configuration.NamespaceConfig(args)

    # This call is done only for retro-compatibility purposes.
    # TODO: Remove this call once zope dependencies are removed from Certbot.
    zope.component.provideUtility(config, interfaces.IConfig)

    # On windows, shell without administrative right cannot create symlinks required by certbot.
    # So we check the rights before continuing.
    misc.raise_for_non_administrative_windows_rights()

    try:
        log.post_arg_parse_setup(config)
        make_or_verify_needed_dirs(config)
    except errors.Error:
        # Let plugins_cmd be run as un-privileged user.
        if config.func != plugins_cmd:  # pylint: disable=comparison-with-callable
            raise

    # These calls are done only for retro-compatibility purposes.
    # TODO: Remove these calls once zope dependencies are removed from Certbot.
    report = reporter.Reporter(config)
    zope.component.provideUtility(report, interfaces.IReporter)
    util.atexit_register(report.print_messages)

    with make_displayer(config) as displayer:
        display_obj.set_display(displayer)
        return config.func(config, plugins)
| {
"content_hash": "c2b9a5a6e8bce99f95c63a43077d7247",
"timestamp": "",
"source": "github",
"line_count": 1744,
"max_line_length": 100,
"avg_line_length": 38.778669724770644,
"alnum_prop": 0.6625609936418749,
"repo_name": "letsencrypt/letsencrypt",
"id": "098ce32431c7be827546030fffe08cd47c5c13e6",
"size": "67630",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "certbot/certbot/_internal/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "50702"
},
{
"name": "Augeas",
"bytes": "5062"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1336185"
},
{
"name": "Shell",
"bytes": "147823"
}
],
"symlink_target": ""
} |
"""Print every row of the ``country`` table from the project database."""
import sqlite3

import pandas as pd

DB_LOCATION = "../database.sqlite"

conn = sqlite3.connect(DB_LOCATION)
country_df = pd.read_sql(sql="SELECT * FROM country", con=conn)
print(country_df)
conn.close() | {
"content_hash": "d28e707519b4e203980658504c2e224c",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 55,
"avg_line_length": 12.8,
"alnum_prop": 0.703125,
"repo_name": "ShawnHouCHN/Introduction-to-Machine-Learning-and-Data-Mining-E17",
"id": "078b9edbb9f2009678160d4a721787225cccf0ab",
"size": "192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Project1/database-reader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2585457"
},
{
"name": "Python",
"bytes": "28581"
}
],
"symlink_target": ""
} |
from analysis import astcollector
import collections
import config
from language.python import ast
from translator import intrinsics
from util.io.report import *
import os.path
from util.io.filesystem import ensureDirectoryExists
# Map from internal shader pass class names to the short names used in the
# generated report tables.
remap = {'Shader':'material', 'SkyBox':'skybox', 'SSAO':'ssao',
	'DirectionalBilateralBlur':'bilateral', 'AmbientPass':'ambient', 'LightPass':'light',
	'DirectionalBlur':'blur', 'RadialBlur':'post'}
# Row order for the per-shader op-count table (values of `remap`).
shaderNames = ['material', 'skybox', 'ssao', 'bilateral', 'ambient', 'light', 'blur', 'post']
class ShaderStatCollector(object):
	"""Accumulates per-opcode occurrence counts for a single shader."""

	def __init__(self):
		# Opcode name -> occurrence count; missing names read as zero.
		# defaultdict(int) is the idiomatic form of defaultdict(lambda: 0).
		self.opCount = collections.defaultdict(int)

	def op(self, op):
		"""Record a single AST op, tallying intrinsic memory ops separately."""
		opT = type(op)
		if isinstance(op, (ast.Load, ast.Store)) and intrinsics.isIntrinsicMemoryOp(op):
			# Prefix with "I" so intrinsic loads/stores get their own column.
			name = "I"+opT.__name__
		else:
			name = opT.__name__
		self.opCount[name] += 1

	def copies(self, count):
		"""Record `count` local copy operations (no-op for zero/falsy counts)."""
		if count:
			self.opCount['CopyLocal'] += count
def shaderStats(compiler, stage, name, vscontext, fscontext):
	"""Collect op statistics for a vertex/fragment shader pair and store
	them in compiler.stats under the report name for this shader."""
	stats = ShaderStatCollector()
	for context in (vscontext, fscontext):
		ops, lcls, copyOps = astcollector.getAll(context.code)
		for op in ops:
			stats.op(op)
		stats.copies(len(copyOps))
	compiler.stats[stage][remap[name]] = stats
def functionRatios(collect, classOK):
	"""Write a LaTeX table comparing function (code) counts to context counts.

	NOTE(review): `classes` and `ratio` are not defined in this module;
	presumably they come from the star-import of util.io.report --
	confirm before relying on this function.
	"""
	builder = TableBuilder('functions', '\%', 'contexts', '\%', 'ratio')
	builder.setFormats('%d', '%.1f', '%d', '%.1f', '%.1f')
	totalCode     = 0
	totalContexts = 0
	# First pass: accumulate totals so per-class rows can show percentages.
	for cls in classes:
		codeCount    = collect.codeCount[cls]
		contextCount = collect.contextCount[cls]
		totalCode     += codeCount
		totalContexts += contextCount
	# Second pass: emit one row per class, but only when a class breakdown
	# was requested.
	for cls in classes:
		codeCount    = collect.codeCount[cls]
		contextCount = collect.contextCount[cls]
		if classOK:
			builder.row(cls,
				codeCount, ratio(100.0*codeCount, totalCode),
				contextCount, ratio(100.0*contextCount, totalContexts),
				ratio(contextCount, codeCount))
	builder.row('total', totalCode, 100.0, totalContexts, 100.0, float(totalContexts)/totalCode)
	# Without a class breakdown, collapse the table to the totals columns.
	if not classOK: builder.rewrite(0, 2, 4)
	f = open(os.path.join(collect.reportdir, 'context-ratios.tex'), 'w')
	builder.dumpLatex(f, "%s-context-ratios" % collect.name)
	f.close()
def opTable(stage, lut):
	"""Write the per-shader op-count LaTeX table for one compilation stage.

	lut maps report shader names to ShaderStatCollector instances.
	"""
	reportdir = os.path.join(config.outputDirectory, 'stats', stage)
	ensureDirectoryExists(reportdir)

	# Aggregate counts over all shaders for the 'total' row.
	total = collections.defaultdict(int)
	for shader, opLUT in lut.iteritems():
		for op, count in opLUT.opCount.iteritems():
			total[op] += count

	# Fixed column set. (Previously guarded by a dead `if False:` branch
	# that referenced an undefined name `asts`; the dead branch is removed.)
	opNames = ['DirectCall', 'Load', 'ILoad', 'Store', 'IStore', 'Allocate']

	builder = TableBuilder(*opNames)
	builder.setFormats(*(['%d']*len(opNames)))
	for shader in shaderNames:
		opLUT = lut[shader]
		builder.row(shader, *[opLUT.opCount[name] for name in opNames])
	builder.row('total', *[total[name] for name in opNames])

	# Close the file even if dumpLatex raises.
	f = open(os.path.join(reportdir, 'shader-ops.tex'), 'w')
	try:
		builder.dumpLatex(f, "%s-shader-ops" % stage)
	finally:
		f.close()
def digest(compiler):
	# Emit one op-count table per compilation stage collected in compiler.stats.
	for stage, lut in compiler.stats.iteritems():
		opTable(stage, lut)
| {
"content_hash": "2572325e64cabd74b351be2c7798106d",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 93,
"avg_line_length": 23.946153846153845,
"alnum_prop": 0.6925795053003534,
"repo_name": "ncbray/pystream",
"id": "80e59d9c519bd3ed74f9a5648243b9537f8b8c4d",
"size": "3691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/stats/shader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2982"
},
{
"name": "C++",
"bytes": "23809"
},
{
"name": "Python",
"bytes": "2232087"
},
{
"name": "Shell",
"bytes": "245"
}
],
"symlink_target": ""
} |
import sys
from oslo_config import cfg
from oslo_log import log
from oslo_policy import opts as policy_opts
from monasca_log_api import conf
from monasca_log_api import version
# Global oslo.config options object shared across the application.
CONF = conf.CONF
LOG = log.getLogger(__name__)
# Set to True once parse_args() has loaded configuration for this process.
_CONF_LOADED = False
# Substring identifying a gunicorn-launched process on the command line.
_GUNICORN_MARKER = 'gunicorn'
def _is_running_under_gunicorn():
    """Evaluates if api runs under gunicorn"""
    # Any argv entry (other than the interpreter itself) that mentions
    # gunicorn means we were launched by a gunicorn worker. any() replaces
    # the previous filter/list/len construction.
    return any(_GUNICORN_MARKER in arg
               for arg in (sys.argv or [])
               if arg != sys.executable)
def get_config_files():
    """Locate the configuration files accepted by oslo.config.

    Standard 'monasca-log-api' locations are checked first; the deprecated
    'log-api' locations are used only when nothing standard is found.
    """
    found = cfg.find_config_files(project='monasca',
                                  prog='monasca-log-api')
    if not found:
        legacy = cfg.find_config_files(project='monasca',
                                       prog='log-api')
        if legacy:
            LOG.warning('Found deprecated old location "{}" '
                        'of main configuration file'.format(legacy))
            found += legacy
    return found
def parse_args(argv=None):
    """Load and parse the service configuration, exactly once per process.

    :param argv: optional argument list; defaults to sys.argv[1:].
        Ignored when running under gunicorn, which has its own CLI.
    """
    global _CONF_LOADED
    if _CONF_LOADED:
        LOG.debug('Configuration has been already loaded')
        return

    log.set_defaults()
    log.register_options(CONF)

    argv = (argv if argv is not None else sys.argv[1:])
    args = ([] if _is_running_under_gunicorn() else argv or [])

    CONF(args=args,
         # BUG FIX: `prog` must be the program name string; it was
         # previously passed sys.argv[1:] (a list of arguments).
         prog='monasca-log-api',
         project='monasca',
         version=version.version_str,
         default_config_files=get_config_files(),
         description='RESTful API to collect log files')

    log.setup(CONF,
              product_name='monasca-log-api',
              version=version.version_str)

    conf.register_opts()
    policy_opts.set_defaults(CONF)

    _CONF_LOADED = True
| {
"content_hash": "d5d5ee99dd25d9dd349d9a76c7206a75",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 80,
"avg_line_length": 29.89855072463768,
"alnum_prop": 0.6175472612699952,
"repo_name": "stackforge/monasca-log-api",
"id": "441a6baedb3ca3a9e961a1c1d66361b6d592cfee",
"size": "2644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monasca_log_api/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "139637"
},
{
"name": "Makefile",
"bytes": "7468"
},
{
"name": "Python",
"bytes": "131743"
},
{
"name": "Shell",
"bytes": "1890"
}
],
"symlink_target": ""
} |
from sqlalchemy import *
from migrate import *
from sqlalchemy.dialects.sqlite import \
BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, \
INTEGER, NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, VARCHAR
meta = MetaData()
# Partial schema of the existing 'tasks' table: just enough columns for
# sqlalchemy-migrate to locate the table and alter the 'variance' column.
# Column order is preserved deliberately.
tasks = Table('tasks', meta,
              Column('project', INTEGER),
              Column('description', TEXT),
              Column('estimate', FLOAT),
              Column('risk', TEXT),
              Column('variance', FLOAT),
              Column('count', INTEGER),
              Column('include', BOOLEAN),
              )
def upgrade(migrate_engine):
    # Drop the obsolete 'variance' column from the tasks table.
    meta.bind = migrate_engine
    tasks.c.variance.drop()
def downgrade(migrate_engine):
    # Re-create the 'variance' column when rolling this migration back.
    meta.bind = migrate_engine
    col = Column('variance', FLOAT)
    col.create(tasks)
    # Sanity check that sqlalchemy-migrate attached the new column object.
    # NOTE(review): assert statements are stripped under `python -O`.
    assert col is tasks.c.variance
"content_hash": "b7931cbd743e0d59f846ba1d0197e85d",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 70,
"avg_line_length": 26.75,
"alnum_prop": 0.650200267022697,
"repo_name": "macterra/galton",
"id": "301c259a5f9b1f5dba546bf0301a9051b6015a06",
"size": "749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbrepo/versions/005_Drop_variance.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1886"
},
{
"name": "JavaScript",
"bytes": "12340"
},
{
"name": "Python",
"bytes": "30387"
},
{
"name": "Shell",
"bytes": "30"
}
],
"symlink_target": ""
} |
import cosmolopy.density as cdens
import cosmolopy.distance as cdist
import cosmolopy.perturbation as cp
import numpy as np

import L500analysis.utils.constants as constants
from L500analysis.utils.utils import *
def _get_cosmology(omega_M_0=constants.omega_m, omega_lambda_0=constants.omega_l,
                   omega_b_0=constants.omega_b, omega_n_0=0.0, N_nu=0, omega_k_0=0,
                   h=constants.hubble, n=constants.power_spectrum_index_n,
                   sigma_8=constants.sigma_8, baryonic_effects=True) :
    """Build a cosmolopy-style cosmology dict from the keyword arguments.

    Returns the parameters as an explicit dict (equivalent to the previous
    `return locals()`, but without the implicit introspection).
    """
    return {'omega_M_0': omega_M_0, 'omega_lambda_0': omega_lambda_0,
            'omega_b_0': omega_b_0, 'omega_n_0': omega_n_0, 'N_nu': N_nu,
            'omega_k_0': omega_k_0, 'h': h, 'n': n, 'sigma_8': sigma_8,
            'baryonic_effects': baryonic_effects}
def calculate_peak_height(Mvir=None, redshift=None,aexp=None,
                          cosmology=None) :
    """Return the peak height nu = delta_c / sigma(M, z).

    :param Mvir: virial mass (in the units expected by cosmolopy; divided
        by h before conversion to a radius).
    :param redshift: redshift; alternatively supply aexp.
    :param cosmology: cosmolopy-style parameter dict; defaults to
        _get_cosmology(). (Previously `cosmology=_get_cosmology()` was a
        mutable default evaluated once and shared across calls -- fixed.)
    """
    if cosmology is None:
        cosmology = _get_cosmology()
    redshift = check_redshift_kwargs(redshift=redshift,aexp=aexp)
    mass2radius = cp.mass_to_radius(Mvir/cosmology['h'], **cosmology)
    sigma_m = cp.sigma_r( mass2radius, redshift, **cosmology)[0]
    return constants.delta_c / sigma_m
def calculate_rhoc_cosmopy(redshift=None, aexp=None,
                           cosmology=None) :
    """Return the critical density rho_c(z) = 3 H(z)^2 / (8 pi G).

    Units follow constants.gravc and cosmolopy conventions.
    (Fixes: removed a stray `pass`; `np` now imported at module level;
    mutable default `cosmology=_get_cosmology()` replaced by None sentinel;
    the float cross-check uses a tolerance instead of exact equality.)
    """
    if cosmology is None:
        cosmology = _get_cosmology()
    redshift = check_redshift_kwargs(redshift=redshift,aexp=aexp)
    H_z = cdist.hubble_z(redshift, **cosmology) # /s
    rho_c = 3.*H_z**2/(8.*np.pi*constants.gravc)
    # Cross-check against cosmolopy's z=0 critical density scaled by E(z)^2.
    assert np.isclose(rho_c,
                      cdens.cosmo_densities(**cosmology)[0] *
                      cdist.e_z(redshift, **cosmology)**2)
    return rho_c
def calculate_rhom_cosmopy(redshift=None, aexp=None,
                           cosmology=None) :
    """Return the mean matter density rho_m(z) = Omega_M(z) * rho_c(z).

    Fixes relative to the original:
    - `cosmology=_get_cosmology()` mutable default replaced by None sentinel;
    - the inner call passed `**cosmology` to calculate_rhoc_cosmopy, which
      takes a `cosmology=` dict and would raise TypeError;
    - the consistency check wrote `cosmo_densities(**cosmology[0])` (subscript
      on the dict) and scaled by (1-z)^3; mean matter density scales as
      (1+z)^3 and the mean density is element [1] of cosmo_densities.
      NOTE(review): confirm the [1] element against the cosmolopy docs.
    """
    if cosmology is None:
        cosmology = _get_cosmology()
    redshift = check_redshift_kwargs(redshift=redshift,aexp=aexp)
    OmM_z = cdens.omega_M_z(redshift,**cosmology)
    rho_m = OmM_z*calculate_rhoc_cosmopy(redshift=redshift, aexp=aexp,
                                         cosmology=cosmology)
    assert np.isclose(rho_m,
                      cdens.cosmo_densities(**cosmology)[1]*(1+redshift)**3.)
    return rho_m
| {
"content_hash": "1100a5240502d5820f14b6ad8c2d27ef",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 143,
"avg_line_length": 37.1875,
"alnum_prop": 0.6661064425770308,
"repo_name": "cavestruz/L500analysis",
"id": "d9391671166b7851348f754c656cbd259f98b4ae",
"size": "1785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "derived_fields/derived_field_tools/cosmology_dependent_properties.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "509320"
}
],
"symlink_target": ""
} |
import glob
import os
from configparser import NoSectionError, MissingSectionHeaderError, NoOptionError, ParsingError
from typing import Set
from rlbot.parsing.bot_config_bundle import BotConfigBundle, ScriptConfigBundle, get_bot_config_bundle, get_script_config_bundle
def scan_directory_for_bot_configs(root_dir) -> Set[BotConfigBundle]:
    """
    Recursively scans a directory for all valid bot configs.
    :param root_dir: Directory to scan.
    :return: The set of bot configs that were found.
    """
    configs = set()
    for filename in glob.iglob(os.path.join(root_dir, '**/*.cfg'), recursive=True):
        try:
            bundle = get_bot_config_bundle(filename)
            configs.add(bundle)
        except (NoSectionError, MissingSectionHeaderError, NoOptionError, AttributeError, ParsingError, FileNotFoundError):
            # Not a valid bot config; skip it. (The previous `as ex` binding
            # was never used and was inconsistent with the script scanner.)
            pass
    return configs
def scan_directory_for_script_configs(root_dir) -> Set[ScriptConfigBundle]:
    """
    Recursively scans a directory for all valid script configs.
    :param root_dir: Directory to scan.
    :return: The set of script configs that were found.
    """
    found = set()
    pattern = os.path.join(root_dir, '**/*.cfg')
    for config_path in glob.iglob(pattern, recursive=True):
        try:
            found.add(get_script_config_bundle(config_path))
        except (NoSectionError, MissingSectionHeaderError, NoOptionError, AttributeError, ParsingError, FileNotFoundError):
            # Not a valid script config; ignore and keep scanning.
            pass
    return found
| {
"content_hash": "8c14ad5497272ceadb1f8f2014766c33",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 129,
"avg_line_length": 35,
"alnum_prop": 0.6970099667774087,
"repo_name": "drssoccer55/RLBot",
"id": "8ad1a2e2d3a84201bd621f2395735583001debaa",
"size": "1505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/rlbot/parsing/directory_scanner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "13520"
},
{
"name": "Python",
"bytes": "48042"
}
],
"symlink_target": ""
} |
"""Find modules used by a script, using introspection."""
import dis
import importlib._bootstrap
import importlib.machinery
import marshal
import os
import sys
import types
import struct
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', PendingDeprecationWarning)
import imp
# XXX Clean up once str8's cstor matches bytes.
# Single-byte opcode values, used below to pattern-match the raw bytecode
# stream of compiled modules.
LOAD_CONST = bytes([dis.opname.index('LOAD_CONST')])
IMPORT_NAME = bytes([dis.opname.index('IMPORT_NAME')])
STORE_NAME = bytes([dis.opname.index('STORE_NAME')])
STORE_GLOBAL = bytes([dis.opname.index('STORE_GLOBAL')])
# Opcodes that bind a name at module scope (used to track global names).
STORE_OPS = [STORE_NAME, STORE_GLOBAL]
HAVE_ARGUMENT = bytes([dis.HAVE_ARGUMENT])
# ModuleFinder simulates Python's import machinery well, but it cannot see
# __path__ modifications that packages perform at runtime.  As a workaround,
# extra search paths can be registered per package in this mapping
# (package name -> list of paths) and they will be honored.
packagePathMap = {}

# A Public interface
def AddPackagePath(packagename, path):
    """Register one extra search path to honor for *packagename*."""
    paths = packagePathMap.setdefault(packagename, [])
    paths.append(path)
# Workaround for packages that inject themselves into sys.modules at runtime
# under the name of another package: call
# ReplacePackage("real_package_name", "faked_package_name") before running
# ModuleFinder and the substitution will be applied when loading packages.
replacePackageMap = {}

def ReplacePackage(oldname, newname):
    """Record that *oldname* should be treated as *newname* when loaded."""
    replacePackageMap[oldname] = newname
class Module:
    """A module discovered by ModuleFinder: name, file, path and code."""

    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path
        self.__code__ = None
        # Global names assigned in the module, including names brought in
        # by star-imports of other Python modules.
        self.globalnames = {}
        # Star-imports that could not be resolved (i.e. a star-import from
        # a non-Python module).
        self.starimports = {}

    def __repr__(self):
        parts = [repr(self.__name__)]
        if self.__file__ is not None:
            parts.append(repr(self.__file__))
        if self.__path__ is not None:
            parts.append(repr(self.__path__))
        return "Module(%s)" % ", ".join(parts)
class ModuleFinder:
    """Find the modules a script uses by scanning compiled bytecode.

    Discovered modules are recorded in self.modules (name -> Module);
    imports that could not be resolved end up in self.badmodules
    (name -> {importer name: 1}).
    """
    def __init__(self, path=None, debug=0, excludes=[], replace_paths=[]):
        # NOTE(review): the mutable default arguments are only read, never
        # mutated, so the shared-default pitfall does not bite here.
        if path is None:
            path = sys.path
        self.path = path
        self.modules = {}
        self.badmodules = {}
        self.debug = debug
        self.indent = 0
        self.excludes = excludes
        self.replace_paths = replace_paths
        self.processed_paths = [] # Used in debugging only
    def msg(self, level, str, *args):
        # Debug print at the current indent level.  (The parameter name
        # `str` shadows the builtin; kept as-is for compatibility.)
        if level <= self.debug:
            for i in range(self.indent):
                print(" ", end=' ')
            print(str, end=' ')
            for arg in args:
                print(repr(arg), end=' ')
            print()
    def msgin(self, *args):
        # Debug print on entering a nested operation; bumps the indent.
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent + 1
            self.msg(*args)
    def msgout(self, *args):
        # Debug print on leaving a nested operation; reduces the indent.
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent - 1
            self.msg(*args)
    def run_script(self, pathname):
        """Analyze the file at `pathname` as the program ('__main__')."""
        self.msg(2, "run_script", pathname)
        with open(pathname) as fp:
            stuff = ("", "r", imp.PY_SOURCE)
            self.load_module('__main__', fp, pathname, stuff)
    def load_file(self, pathname):
        """Analyze a single source file under its own module name."""
        dir, name = os.path.split(pathname)
        name, ext = os.path.splitext(name)
        with open(pathname) as fp:
            stuff = (ext, "r", imp.PY_SOURCE)
            self.load_module(name, fp, pathname, stuff)
    def import_hook(self, name, caller=None, fromlist=None, level=-1):
        """Simulate `import name` / `from name import fromlist` by caller.

        Mirrors the signature of the builtin __import__ hook.
        """
        self.msg(3, "import_hook", name, caller, fromlist, level)
        parent = self.determine_parent(caller, level=level)
        q, tail = self.find_head_package(parent, name)
        m = self.load_tail(q, tail)
        if not fromlist:
            return q
        if m.__path__:
            self.ensure_fromlist(m, fromlist)
        return None
    def determine_parent(self, caller, level=-1):
        """Return the package Module an import is relative to, or None."""
        self.msgin(4, "determine_parent", caller, level)
        if not caller or level == 0:
            # Absolute import or no importing module: no parent package.
            self.msgout(4, "determine_parent -> None")
            return None
        pname = caller.__name__
        if level >= 1: # relative import
            if caller.__path__:
                # The caller is itself a package, which counts as one level.
                level -= 1
            if level == 0:
                parent = self.modules[pname]
                assert parent is caller
                self.msgout(4, "determine_parent ->", parent)
                return parent
            if pname.count(".") < level:
                raise ImportError("relative importpath too deep")
            # Strip `level` trailing components to find the anchor package.
            pname = ".".join(pname.split(".")[:-level])
            parent = self.modules[pname]
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if caller.__path__:
            # Caller is a package: imports are relative to itself.
            parent = self.modules[pname]
            assert caller is parent
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if '.' in pname:
            # Caller is a submodule: imports are relative to its package.
            i = pname.rfind('.')
            pname = pname[:i]
            parent = self.modules[pname]
            assert parent.__name__ == pname
            self.msgout(4, "determine_parent ->", parent)
            return parent
        self.msgout(4, "determine_parent -> None")
        return None
    def find_head_package(self, parent, name):
        """Import the first component of dotted `name`; return (module, rest)."""
        self.msgin(4, "find_head_package", parent, name)
        if '.' in name:
            i = name.find('.')
            head = name[:i]
            tail = name[i+1:]
        else:
            head = name
            tail = ""
        if parent:
            qname = "%s.%s" % (parent.__name__, head)
        else:
            qname = head
        q = self.import_module(head, qname, parent)
        if q:
            self.msgout(4, "find_head_package ->", (q, tail))
            return q, tail
        if parent:
            # Fall back to an absolute import of the head component.
            qname = head
            parent = None
            q = self.import_module(head, qname, parent)
            if q:
                self.msgout(4, "find_head_package ->", (q, tail))
                return q, tail
        self.msgout(4, "raise ImportError: No module named", qname)
        raise ImportError("No module named " + qname)
    def load_tail(self, q, tail):
        """Import each remaining dotted component of `tail` under package `q`."""
        self.msgin(4, "load_tail", q, tail)
        m = q
        while tail:
            i = tail.find('.')
            if i < 0: i = len(tail)
            head, tail = tail[:i], tail[i+1:]
            mname = "%s.%s" % (m.__name__, head)
            m = self.import_module(head, mname, m)
            if not m:
                self.msgout(4, "raise ImportError: No module named", mname)
                raise ImportError("No module named " + mname)
        self.msgout(4, "load_tail ->", m)
        return m
    def ensure_fromlist(self, m, fromlist, recursive=0):
        """Import every name in `fromlist` that is a submodule of package `m`."""
        self.msg(4, "ensure_fromlist", m, fromlist, recursive)
        for sub in fromlist:
            if sub == "*":
                if not recursive:
                    # Expand `*` to all submodules, once (no nested stars).
                    all = self.find_all_submodules(m)
                    if all:
                        self.ensure_fromlist(m, all, 1)
            elif not hasattr(m, sub):
                subname = "%s.%s" % (m.__name__, sub)
                submod = self.import_module(sub, subname, m)
                if not submod:
                    raise ImportError("No module named " + subname)
    def find_all_submodules(self, m):
        """Return the names of all submodules found in package `m`'s dirs."""
        if not m.__path__:
            return
        modules = {}
        # 'suffixes' used to be a list hardcoded to [".py", ".pyc", ".pyo"].
        # But we must also collect Python extension modules - although
        # we cannot separate normal dlls from Python extensions.
        suffixes = []
        suffixes += importlib.machinery.EXTENSION_SUFFIXES[:]
        suffixes += importlib.machinery.SOURCE_SUFFIXES[:]
        suffixes += importlib.machinery.BYTECODE_SUFFIXES[:]
        for dir in m.__path__:
            try:
                names = os.listdir(dir)
            except OSError:
                self.msg(2, "can't list directory", dir)
                continue
            for name in names:
                mod = None
                for suff in suffixes:
                    n = len(suff)
                    if name[-n:] == suff:
                        mod = name[:-n]
                        break
                if mod and mod != "__init__":
                    modules[mod] = mod
        return modules.keys()
    def import_module(self, partname, fqname, parent):
        """Import one component; return its Module, or None on failure.

        Results (including failures, via self.badmodules) are cached.
        """
        self.msgin(3, "import_module", partname, fqname, parent)
        try:
            m = self.modules[fqname]
        except KeyError:
            pass
        else:
            self.msgout(3, "import_module ->", m)
            return m
        if fqname in self.badmodules:
            self.msgout(3, "import_module -> None")
            return None
        if parent and parent.__path__ is None:
            # Parent is not a package, so it cannot have submodules.
            self.msgout(3, "import_module -> None")
            return None
        try:
            fp, pathname, stuff = self.find_module(partname,
                                                   parent and parent.__path__, parent)
        except ImportError:
            self.msgout(3, "import_module ->", None)
            return None
        try:
            m = self.load_module(fqname, fp, pathname, stuff)
        finally:
            if fp:
                fp.close()
        if parent:
            setattr(parent, partname, m)
        self.msgout(3, "import_module ->", m)
        return m
    def load_module(self, fqname, fp, pathname, file_info):
        """Create a Module for `fqname`, compile/load its code and scan it."""
        suffix, mode, type = file_info
        self.msgin(2, "load_module", fqname, fp and "fp", pathname)
        if type == imp.PKG_DIRECTORY:
            m = self.load_package(fqname, pathname)
            self.msgout(2, "load_module ->", m)
            return m
        if type == imp.PY_SOURCE:
            co = compile(fp.read()+'\n', pathname, 'exec')
        elif type == imp.PY_COMPILED:
            try:
                # Strip and validate the .pyc header before unmarshalling.
                marshal_data = importlib._bootstrap._validate_bytecode_header(fp.read())
            except ImportError as exc:
                self.msgout(2, "raise ImportError: " + str(exc), pathname)
                raise
            co = marshal.loads(marshal_data)
        else:
            # Extension modules and builtins: no code object to scan.
            co = None
        m = self.add_module(fqname)
        m.__file__ = pathname
        if co:
            if self.replace_paths:
                co = self.replace_paths_in_code(co)
            m.__code__ = co
            self.scan_code(co, m)
        self.msgout(2, "load_module ->", m)
        return m
    def _add_badmodule(self, name, caller):
        # Record that `caller` (or "-" at top level) failed to import `name`.
        if name not in self.badmodules:
            self.badmodules[name] = {}
        if caller:
            self.badmodules[name][caller.__name__] = 1
        else:
            self.badmodules[name]["-"] = 1
    def _safe_import_hook(self, name, caller, fromlist, level=-1):
        # wrapper for self.import_hook() that won't raise ImportError
        if name in self.badmodules:
            self._add_badmodule(name, caller)
            return
        try:
            self.import_hook(name, caller, level=level)
        except ImportError as msg:
            self.msg(2, "ImportError:", str(msg))
            self._add_badmodule(name, caller)
        else:
            if fromlist:
                for sub in fromlist:
                    if sub in self.badmodules:
                        self._add_badmodule(sub, caller)
                        continue
                    try:
                        self.import_hook(name, caller, [sub], level=level)
                    except ImportError as msg:
                        self.msg(2, "ImportError:", str(msg))
                        fullname = name + "." + sub
                        self._add_badmodule(fullname, caller)
    def scan_opcodes_25(self, co,
                        unpack = struct.unpack):
        # Scan the code, and yield 'interesting' opcode combinations
        # Python 2.5 version (has absolute and relative imports)
        # NOTE(review): assumes the pre-3.6 bytecode layout (1-byte opcode
        # followed by a 2-byte argument), as the '<H'/3-byte steps show.
        code = co.co_code
        names = co.co_names
        consts = co.co_consts
        LOAD_LOAD_AND_IMPORT = LOAD_CONST + LOAD_CONST + IMPORT_NAME
        while code:
            c = bytes([code[0]])
            if c in STORE_OPS:
                oparg, = unpack('<H', code[1:3])
                yield "store", (names[oparg],)
                code = code[3:]
                continue
            if code[:9:3] == LOAD_LOAD_AND_IMPORT:
                # LOAD_CONST level; LOAD_CONST fromlist; IMPORT_NAME name
                oparg_1, oparg_2, oparg_3 = unpack('<xHxHxH', code[:9])
                level = consts[oparg_1]
                if level == 0: # absolute import
                    yield "absolute_import", (consts[oparg_2], names[oparg_3])
                else: # relative import
                    yield "relative_import", (level, consts[oparg_2], names[oparg_3])
                code = code[9:]
                continue
            if c >= HAVE_ARGUMENT:
                code = code[3:]
            else:
                code = code[1:]
    def scan_code(self, co, m):
        """Process every import found in code object `co` of module `m`."""
        code = co.co_code
        scanner = self.scan_opcodes_25
        for what, args in scanner(co):
            if what == "store":
                name, = args
                m.globalnames[name] = 1
            elif what == "absolute_import":
                fromlist, name = args
                have_star = 0
                if fromlist is not None:
                    if "*" in fromlist:
                        have_star = 1
                    fromlist = [f for f in fromlist if f != "*"]
                self._safe_import_hook(name, m, fromlist, level=0)
                if have_star:
                    # We've encountered an "import *". If it is a Python module,
                    # the code has already been parsed and we can suck out the
                    # global names.
                    mm = None
                    if m.__path__:
                        # At this point we don't know whether 'name' is a
                        # submodule of 'm' or a global module. Let's just try
                        # the full name first.
                        mm = self.modules.get(m.__name__ + "." + name)
                    if mm is None:
                        mm = self.modules.get(name)
                    if mm is not None:
                        m.globalnames.update(mm.globalnames)
                        m.starimports.update(mm.starimports)
                        if mm.__code__ is None:
                            m.starimports[name] = 1
                    else:
                        m.starimports[name] = 1
            elif what == "relative_import":
                level, fromlist, name = args
                if name:
                    self._safe_import_hook(name, m, fromlist, level=level)
                else:
                    parent = self.determine_parent(m, level=level)
                    self._safe_import_hook(parent.__name__, None, fromlist, level=0)
            else:
                # We don't expect anything else from the generator.
                raise RuntimeError(what)
        # Recurse into nested code objects (functions, classes, lambdas).
        for c in co.co_consts:
            if isinstance(c, type(co)):
                self.scan_code(c, m)
    def load_package(self, fqname, pathname):
        """Load a package directory: set __path__ and load its __init__."""
        self.msgin(2, "load_package", fqname, pathname)
        newname = replacePackageMap.get(fqname)
        if newname:
            fqname = newname
        m = self.add_module(fqname)
        m.__file__ = pathname
        m.__path__ = [pathname]
        # As per comment at top of file, simulate runtime __path__ additions.
        m.__path__ = m.__path__ + packagePathMap.get(fqname, [])
        fp, buf, stuff = self.find_module("__init__", m.__path__)
        try:
            self.load_module(fqname, fp, buf, stuff)
            self.msgout(2, "load_package ->", m)
            return m
        finally:
            if fp:
                fp.close()
    def add_module(self, fqname):
        """Return the Module registered for `fqname`, creating it if needed."""
        if fqname in self.modules:
            return self.modules[fqname]
        self.modules[fqname] = m = Module(fqname)
        return m
    def find_module(self, name, path, parent=None):
        """Locate `name` on `path`; returns (file, pathname, description).

        Honors self.excludes and the builtin module table.
        """
        if parent is not None:
            # assert path is not None
            fullname = parent.__name__+'.'+name
        else:
            fullname = name
        if fullname in self.excludes:
            self.msgout(3, "find_module -> Excluded", fullname)
            raise ImportError(name)
        if path is None:
            if name in sys.builtin_module_names:
                return (None, None, ("", "", imp.C_BUILTIN))
            path = self.path
        return imp.find_module(name, path)
    def report(self):
        """Print a report to stdout, listing the found modules with their
        paths, as well as modules that are missing, or seem to be missing.
        """
        print()
        print("  %-25s %s" % ("Name", "File"))
        print("  %-25s %s" % ("----", "----"))
        # Print modules found
        keys = sorted(self.modules.keys())
        for key in keys:
            m = self.modules[key]
            if m.__path__:
                print("P", end=' ')
            else:
                print("m", end=' ')
            print("%-25s" % key, m.__file__ or "")
        # Print missing modules
        missing, maybe = self.any_missing_maybe()
        if missing:
            print()
            print("Missing modules:")
            for name in missing:
                mods = sorted(self.badmodules[name].keys())
                print("?", name, "imported from", ', '.join(mods))
        # Print modules that may be missing, but then again, maybe not...
        if maybe:
            print()
            print("Submodules that appear to be missing, but could also be", end=' ')
            print("global names in the parent package:")
            for name in maybe:
                mods = sorted(self.badmodules[name].keys())
                print("?", name, "imported from", ', '.join(mods))
    def any_missing(self):
        """Return a list of modules that appear to be missing. Use
        any_missing_maybe() if you want to know which modules are
        certain to be missing, and which *may* be missing.
        """
        missing, maybe = self.any_missing_maybe()
        return missing + maybe
    def any_missing_maybe(self):
        """Return two lists, one with modules that are certainly missing
        and one with modules that *may* be missing. The latter names could
        either be submodules *or* just global names in the package.

        The reason it can't always be determined is that it's impossible to
        tell which names are imported when "from module import *" is done
        with an extension module, short of actually importing it.
        """
        missing = []
        maybe = []
        for name in self.badmodules:
            if name in self.excludes:
                continue
            i = name.rfind(".")
            if i < 0:
                missing.append(name)
                continue
            subname = name[i+1:]
            pkgname = name[:i]
            pkg = self.modules.get(pkgname)
            if pkg is not None:
                if pkgname in self.badmodules[name]:
                    # The package tried to import this module itself and
                    # failed. It's definitely missing.
                    missing.append(name)
                elif subname in pkg.globalnames:
                    # It's a global in the package: definitely not missing.
                    pass
                elif pkg.starimports:
                    # It could be missing, but the package did an "import *"
                    # from a non-Python module, so we simply can't be sure.
                    maybe.append(name)
                else:
                    # It's not a global in the package, the package didn't
                    # do funny star imports, it's very likely to be missing.
                    # The symbol could be inserted into the package from the
                    # outside, but since that's not good style we simply list
                    # it missing.
                    missing.append(name)
            else:
                missing.append(name)
        missing.sort()
        maybe.sort()
        return missing, maybe
    def replace_paths_in_code(self, co):
        """Return a copy of code object `co` with co_filename rewritten
        according to the (prefix, replacement) pairs in self.replace_paths,
        recursing into nested code objects."""
        new_filename = original_filename = os.path.normpath(co.co_filename)
        for f, r in self.replace_paths:
            if original_filename.startswith(f):
                new_filename = r + original_filename[len(f):]
                break
        if self.debug and original_filename not in self.processed_paths:
            if new_filename != original_filename:
                self.msgout(2, "co_filename %r changed to %r" \
                                    % (original_filename,new_filename,))
            else:
                self.msgout(2, "co_filename %r remains unchanged" \
                                    % (original_filename,))
            self.processed_paths.append(original_filename)
        consts = list(co.co_consts)
        for i in range(len(consts)):
            if isinstance(consts[i], type(co)):
                consts[i] = self.replace_paths_in_code(consts[i])
        return types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize,
                         co.co_flags, co.co_code, tuple(consts), co.co_names,
                         co.co_varnames, new_filename, co.co_name,
                         co.co_firstlineno, co.co_lnotab,
                         co.co_freevars, co.co_cellvars)
def test():
    """Command-line driver.

    Usage: modulefinder.py [-d|-q] [-m] [-p path] [-x name] script [args]
    Returns the ModuleFinder instance for interactive (-i) debugging.
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
    except getopt.error as msg:
        print(msg)
        return

    # Process options
    debug = 1          # -d raises verbosity, -q silences it
    domods = 0         # -m: treat remaining args as module names
    addpath = []       # -p: extra search paths (os.pathsep separated)
    exclude = []       # -x: module names to exclude
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + a.split(os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            exclude.append(a)

    # Provide default arguments
    if not args:
        script = "hello.py"
    else:
        script = args[0]

    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print("path:")
        for item in path:
            print("   ", repr(item))

    # Create the module finder and turn its crank
    mf = ModuleFinder(path, debug, exclude)
    for arg in args[1:]:
        if arg == '-m':
            domods = 1
            continue
        if domods:
            # `name.*` means "import name and all its submodules".
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.load_file(arg)
    mf.run_script(script)
    mf.report()
    return mf  # for -i debugging
# Allow running this module directly as a quick command-line test.
if __name__ == '__main__':
    try:
        mf = test()
    except KeyboardInterrupt:
        print("\n[interrupted]")
| {
"content_hash": "fb9f67ff8f70e3fa2d055e0578585f4f",
"timestamp": "",
"source": "github",
"line_count": 641,
"max_line_length": 88,
"avg_line_length": 36.425897035881434,
"alnum_prop": 0.5109854811769241,
"repo_name": "ArcherCraftStore/ArcherVMPeridot",
"id": "cc5b8cc691137b84e7abddb7187a45fb915261bf",
"size": "23349",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Python/Lib/modulefinder.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import copy
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import opts as policy_opts
from oslo_policy import policy
from oslo_utils import excutils
from cloudkitty.common import policies
LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# TODO(gmann): Remove setting the default value of config policy_file
# once oslo_policy change the default value to 'policy.yaml'.
# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
DEFAULT_POLICY_FILE = 'policy.yaml'
policy_opts.set_defaults(cfg.CONF, DEFAULT_POLICY_FILE)

# Lazily-created singleton policy.Enforcer; see init() and reset().
_ENFORCER = None

# oslo_policy will read the policy configuration file again when the file
# is changed in runtime so the old policy rules will be saved to
# saved_file_rules and used to compare with new rules to determine the
# rules whether were updated.
saved_file_rules = []
# TODO(gpocentek): provide a proper parent class to handle such exceptions
class PolicyNotAuthorized(Exception):
    """Raised when a policy check denies the requested action."""
    # %(action)s placeholder is filled from the constructor kwargs.
    message = "Policy doesn't allow %(action)s to be performed."
    # HTTP status code a web layer should map this exception to.
    code = 403
    def __init__(self, **kwargs):
        self.msg = self.message % kwargs
        super(PolicyNotAuthorized, self).__init__(self.msg)
    def __unicode__(self):
        # NOTE(review): __unicode__ is a Python 2 protocol; on Python 3
        # str() uses Exception.__str__, so this appears vestigial.
        return str(self.msg)
def reset():
    """Tear down the enforcer singleton so init() rebuilds it on next use."""
    global _ENFORCER
    if _ENFORCER:
        _ENFORCER.clear()
        _ENFORCER = None
def init():
    """Create the global policy enforcer (once) and track file-rule changes."""
    global _ENFORCER
    global saved_file_rules
    if not _ENFORCER:
        _ENFORCER = policy.Enforcer(CONF)
        register_rules(_ENFORCER)
    # Only the rules which are loaded from file may be changed.
    current_file_rules = _ENFORCER.file_rules
    current_file_rules = _serialize_rules(current_file_rules)
    # Checks whether the rules are updated in the runtime
    if saved_file_rules != current_file_rules:
        saved_file_rules = copy.deepcopy(current_file_rules)
def _serialize_rules(rules):
"""Serialize all the Rule object as string."""
result = [(rule_name, str(rule))
for rule_name, rule in rules.items()]
return sorted(result, key=lambda rule: rule[0])
def authorize(context, action, target):
    """Verifies that the action is valid on the target in this context.

       :param context: cloudkitty context
       :param action: string representing the action to be checked
           this should be colon separated for clarity.
           i.e. ``compute:create_instance``,
           ``compute:attach_volume``,
           ``volume:attach_volume``
       :param object: dictionary representing the object of the action
           for object creation this should be a dictionary representing the
           location of the object e.g. ``{'project_id': context.project_id}``

       :raises PolicyNotAuthorized: if verification fails.
    """
    # Policy enforcement only applies with keystone authentication.
    if CONF.auth_strategy != "keystone":
        return

    init()

    try:
        LOG.debug('Authenticating user with credentials %(credentials)s',
                  {'credentials': context.to_dict()})
        return _ENFORCER.authorize(action, target, context,
                                   do_raise=True,
                                   exc=PolicyNotAuthorized,
                                   action=action)
    except policy.PolicyNotRegistered:
        with excutils.save_and_reraise_exception():
            LOG.exception('Policy not registered')
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error('Policy check for %(action)s failed with credentials '
                      '%(credentials)s',
                      {'action': action, 'credentials': context.to_dict()})
def check_is_admin(context):
    """Whether or not roles contains 'admin' role according to policy setting.

    """
    # Without keystone there is no role model; treat everyone as admin.
    if CONF.auth_strategy != "keystone":
        return True

    init()

    target = {
        'user_id': context.user_id,
        'project_id': context.project_id,
    }
    credentials = context.to_policy_values()

    return _ENFORCER.authorize('context_is_admin', target, credentials)
def register_rules(enforcer):
    """Register cloudkitty's in-code default policy rules with ``enforcer``."""
    enforcer.register_defaults(policies.list_rules())
def get_enforcer():
    """Build and return the policy enforcer for oslopolicy CLI scripts.

    The CLI scripts pass 'output-file' and 'namespace' options that the
    Cloudkitty config loader does not know about; leaving them in sys.argv
    would make config loading fail, so they (and their values) are filtered
    out before cfg.CONF is invoked.
    """
    conf_args = []
    argv = sys.argv
    index = 1  # cfg.CONF expects the equivalent of sys.argv[1:]
    while index < len(argv):
        if argv[index].strip('-') in ['namespace', 'output-file']:
            index += 2  # skip the option together with its value
        else:
            conf_args.append(argv[index])
            index += 1
    cfg.CONF(conf_args, project='cloudkitty')
    init()
    return _ENFORCER
| {
"content_hash": "e62e0077cc10e5af278872d4b370fae6",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 112,
"avg_line_length": 32.32666666666667,
"alnum_prop": 0.6537430398020211,
"repo_name": "openstack/cloudkitty",
"id": "9d55e1f81a01a0b1770b77b35f3162d5972f6c68",
"size": "5532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudkitty/common/policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "4904"
},
{
"name": "Python",
"bytes": "1046196"
},
{
"name": "Shell",
"bytes": "16361"
}
],
"symlink_target": ""
} |
from django.db import migrations
"""
We have data inside the MineralType model that sits in unnecessary fields:
1) fracture1 and fracture2
2) cleavage1 and cleavage2
3) lustre1 and lustre2
We want to combine the values of these fields into a new field, that accepts
several values as its input!
"""
def combine_fields(apps, schema_editor):
    """Copy the legacy paired fields into the new multi-value fields.

    For every MineralType, (fracture1, fracture2) is merged into
    ``fracture``, (cleavage1, cleavage2) into ``cleavage`` and
    (lustre1, lustre2) into ``lustre``.  The second legacy value is only
    included when it is truthy, matching the previous copy-pasted logic.

    :param apps: historical app registry supplied by RunPython
    :param schema_editor: unused, required by the RunPython signature
    """
    MineralType = apps.get_model("stein", "MineralType")

    def _merge_pair(mineral, base_name):
        # Build [field1] or [field1, field2] from the two legacy columns
        # and assign it to the new multi-value field of the same base name.
        values = [getattr(mineral, base_name + '1')]
        second = getattr(mineral, base_name + '2')
        if second:
            values.append(second)
        setattr(mineral, base_name, values)

    for mineral in MineralType.objects.all():
        for base_name in ('fracture', 'cleavage', 'lustre'):
            _merge_pair(mineral, base_name)
        mineral.save()
class Migration(migrations.Migration):
    # Data migration: copies the legacy *1/*2 field pairs into the new
    # multi-value fields via combine_fields().
    # NOTE(review): ``initial = True`` looks wrong for a migration that
    # depends on 0010 -- initial migrations are normally the first of an
    # app; confirm whether this flag is intentional.
    initial = True
    dependencies = [
        ('stein', '0010_auto_20161008_1634'),
    ]
    operations = [
        # No reverse function is provided, so this migration is
        # irreversible (migrating backwards will raise).
        migrations.RunPython(combine_fields)
    ]
| {
"content_hash": "4a7547cccf3cd5297092d809652df532",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 83,
"avg_line_length": 24.471698113207548,
"alnum_prop": 0.6491904394757132,
"repo_name": "GeoMatDigital/django-geomat",
"id": "4136627caecca6e7712e1b001c78f377ac1cb4a7",
"size": "1372",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "geomat/stein/migrations/0011_auto_20161008_1659.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16597"
},
{
"name": "Dockerfile",
"bytes": "1091"
},
{
"name": "HTML",
"bytes": "14474"
},
{
"name": "JavaScript",
"bytes": "31354"
},
{
"name": "Makefile",
"bytes": "371"
},
{
"name": "Python",
"bytes": "197468"
},
{
"name": "Shell",
"bytes": "674"
}
],
"symlink_target": ""
} |
import numpy as np
import Gridworld
########################
# Deterministic Policy #
########################
# hyperparameters
GAMMA = 0.9  # discount rate
NUM_EPISODES = 10  # number of episodes
# Monte Carlo update mode: first-visit averages only the first occurrence
# of a state per episode; every-visit averages all occurrences.
FIRST_VISIT = False  # True: first-visit; False: every-visit
def play_one_episode(gridworld, policy):
    """Roll out one episode of ``policy`` from a uniformly random start state.

    Returns a list of (state, return) pairs ordered start -> terminal,
    where each return is the discounted sum of rewards observed after
    visiting that state.  The terminal state itself gets no entry.
    """
    # Choose a random traversable state as the starting point.
    candidates = list(gridworld.actions.keys())
    chosen = candidates[np.random.choice(len(candidates))]
    gridworld.set_state(chosen)
    s = gridworld.get_state()
    print("Starting state: ", s)  # print starting state
    # Play the episode forward, recording each state and the reward
    # received on entering it.
    trajectory = [(s, 0)]
    while not gridworld.is_terminal():
        r = gridworld.step(policy[s])
        s = gridworld.get_state()
        trajectory.append((s, r))
    # Walk the trajectory backwards accumulating discounted returns.
    # The last (terminal) entry is skipped: only earlier states get G.
    G = 0
    states_and_returns = []
    last_index = len(trajectory) - 1
    for idx in range(last_index, -1, -1):
        state, reward = trajectory[idx]
        if idx != last_index:
            states_and_returns.append((state, G))
        G = reward + GAMMA * G
    # Flip back to chronological (start -> terminal) order.
    states_and_returns.reverse()
    return states_and_returns
if __name__ == '__main__':
    # Monte Carlo policy evaluation demo on the default 3x4 gridworld.
    # create a default gridworld
    # oo -- traversable state
    # xx -- untraversable state
    # ss -- start state for agent
    # +1 -- winning state with reward +1
    # -1 -- losing state with reward -1
    #      x0   x1   x2   x3
    #    ---------------------
    # y0 | oo | oo | oo | +1 |
    #    ---------------------
    # y1 | oo | xx | oo | -1 |
    #    ---------------------
    # y2 | ss | oo | oo | oo |
    #    ---------------------
    gridworld = Gridworld.default_gridworld()
    # all traversable states
    states = gridworld.all_states()
    # draw the reward function
    print("Reward function:")
    Gridworld.draw_value_function(gridworld.rewards, gridworld)
    # input pi, the fixed deterministic policy to be evaluated
    # -----------------
    # | R | R | R | T |
    # -----------------
    # | U | X | R | T |
    # -----------------
    # | U | R | R | U |
    # -----------------
    # X -- untraversable
    # T -- terminal
    policy = {
        (0, 0): 'R',
        (0, 1): 'R',
        (0, 2): 'R',
        (1, 0): 'U',
        (1, 2): 'R',
        (2, 0): 'U',
        (2, 1): 'R',
        (2, 2): 'R',
        (2, 3): 'U',
    }
    # draw the initial deterministic policy
    print("Initial deterministic policy:")
    Gridworld.draw_policy(policy, gridworld)
    # # initialize a dictionary V(s) randomly
    # # for all traversable states
    # V = {}
    # for s in states:
    #     if s in gridworld.actions.keys():
    #         V[s] = np.random.random()
    #     else:
    #         V[s] = 0  # terminal states
    # pseudo-random initial value function (hard-coded for reproducibility)
    V = {
        (0, 0): 0.34,
        (0, 1): 0.54,
        (0, 2): 0.35,
        (0, 3): 0.00,
        (1, 0): 0.51,
        (1, 2): 0.60,
        (1, 3): 0.00,
        (2, 0): 0.98,
        (2, 1): 0.51,
        (2, 2): 0.27,
        (2, 3): 0.52,
    }
    # # empty dictionary for value function
    # V = {}
    # an empty dictionary of lists storing sampled returns per state
    returns = {}
    for s in gridworld.actions.keys():
        returns[s] = []
    # draw the initial value function
    print("Initial value function:")
    Gridworld.draw_value_function(V, gridworld)
    iteration = 0  # counter
    # Sample NUM_EPISODES episodes and average the observed returns.
    for i in range(NUM_EPISODES):
        iteration += 1
        states_and_returns = play_one_episode(gridworld, policy)
        # FIRST-VISIT #
        if FIRST_VISIT:
            # Only the first occurrence of each state in the episode
            # contributes a return sample.
            visited_states = set()
            for s, G in states_and_returns:
                if s not in visited_states:
                    returns[s].append(G)
                    V[s] = np.mean(returns[s])
                    visited_states.add(s)
        # EVERY-VISIT #
        else:
            # Every occurrence contributes a return sample.
            for s, G in states_and_returns:
                returns[s].append(G)
                V[s] = np.mean(returns[s])
        # draw value function at each iteration
        print("Value function at iteration ", iteration, ":")
        Gridworld.draw_value_function(V, gridworld)
    # draw the final value function
    print("Final value function:")
    Gridworld.draw_value_function(V, gridworld)
| {
"content_hash": "3f4c5e98631510a827f195d73fcf770d",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 65,
"avg_line_length": 25.102409638554217,
"alnum_prop": 0.5987520998320134,
"repo_name": "GitYiheng/reinforcement_learning_test",
"id": "40425033553c30e14216be51aa5ee83e9e38977b",
"size": "4246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test03_monte_carlo/every_visit_policy_evaluation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14810"
},
{
"name": "HTML",
"bytes": "15405"
},
{
"name": "JavaScript",
"bytes": "51050"
},
{
"name": "Jupyter Notebook",
"bytes": "3492256"
},
{
"name": "Python",
"bytes": "1033931"
},
{
"name": "Shell",
"bytes": "3108"
}
],
"symlink_target": ""
} |
from django.urls import RegexURLPattern, RegexURLResolver, include
from django.views import defaults
__all__ = ['handler400', 'handler403', 'handler404', 'handler500', 'include', 'url']

# Default error-view handlers; a project URLconf may override these names.
handler400 = defaults.bad_request
handler403 = defaults.permission_denied
handler404 = defaults.page_not_found
handler500 = defaults.server_error
def url(regex, view, kwargs=None, name=None):
    """Build a URL pattern: a resolver for include() results, else a pattern.

    :raises TypeError: when ``view`` is neither callable nor an include()
        triple.
    """
    if isinstance(view, (list, tuple)):
        # include(...) yields a (urlconf_module, app_name, namespace) triple.
        urlconf_module, app_name, namespace = view
        return RegexURLResolver(regex, urlconf_module, kwargs,
                                app_name=app_name, namespace=namespace)
    if callable(view):
        return RegexURLPattern(regex, view, kwargs, name)
    raise TypeError('view must be a callable or a list/tuple in the case of include().')
| {
"content_hash": "25cdd6ad4be1a22e652e5db7e3c73725",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 102,
"avg_line_length": 40.05,
"alnum_prop": 0.7153558052434457,
"repo_name": "elky/django",
"id": "b7ad15612281e045e4bd80072a5ce38c35ac3193",
"size": "801",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/conf/urls/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55975"
},
{
"name": "HTML",
"bytes": "219349"
},
{
"name": "JavaScript",
"bytes": "252940"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12100085"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import os
#
import numpy
import pylab
pylab.ion()
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import linkage, dendrogram
#
import crosscat.utils.general_utils as gu
import crosscat.utils.file_utils as fu
def save_current_figure(filename, dir='./', close=True, format=None):
    """Save the current pylab figure under ``dir``; a None filename is a no-op."""
    if filename is None:
        return
    fu.ensure_dir(dir)
    target_path = os.path.join(dir, filename)
    pylab.savefig(target_path, format=format)
    if close:
        pylab.close()
def get_aspect_ratio(T_array):
    """Return the columns/rows ratio of a 2-D row-major array."""
    return len(T_array[0]) / float(len(T_array))
def plot_T(T_array, M_c, filename=None, dir='./', close=True):
    """Draw T_array as a green heatmap labelled with M_c column names."""
    n_cols = len(T_array[0])
    labels = numpy.array(
        [M_c['idx_to_name'][str(idx)] for idx in range(n_cols)])
    pylab.figure()
    pylab.imshow(T_array, aspect=get_aspect_ratio(T_array),
                 interpolation='none', cmap=pylab.matplotlib.cm.Greens)
    axes = pylab.gca()
    axes.set_xticks(list(range(n_cols)))
    axes.set_xticklabels(labels, rotation=90, size='x-small')
    pylab.show()
    save_current_figure(filename, dir, close)
def plot_views(T_array, X_D, X_L, M_c, filename=None, dir='./', close=True,
               format=None, do_colorbar=False):
    """Plot T_array split into one sub-axes per CrossCat view.

    Columns are grouped by their view assignment in X_L; within each view,
    rows are sorted by their cluster assignment in X_D and large clusters
    (> 5 rows) are separated by red lines.  Each view's columns are
    min/max normalized independently.
    """
    view_assignments = X_L['column_partition']['assignments']
    view_assignments = numpy.array(view_assignments)
    num_features = len(view_assignments)
    column_names = [M_c['idx_to_name'][str(idx)] for idx in range(num_features)]
    column_names = numpy.array(column_names)
    # When a colorbar is requested it occupies one extra axes slot.
    num_views = len(set(view_assignments)) + do_colorbar
    # Manual axes layout: fixed margins, with the remaining width shared
    # among views proportionally to their number of columns.
    disLeft = 0.1
    disRight = 0.1
    viewSpacing = 0.1 / (max(2, num_views) - 1)
    nxtAxDisLeft = disLeft
    axpos2 = 0.2
    axpos4 = 0.75
    view_spacing_2 = (1-viewSpacing*(num_views-1.)-disLeft-disRight) / num_features
    fig = pylab.figure()
    for view_idx, X_D_i in enumerate(X_D):
        # figure out some sizing
        is_this_view = view_assignments==view_idx
        num_cols_i = sum(is_this_view)
        nxtAxWidth = float(num_cols_i) * view_spacing_2
        axes_pos = nxtAxDisLeft, axpos2, nxtAxWidth, axpos4
        nxtAxDisLeft = nxtAxDisLeft+nxtAxWidth+viewSpacing
        # define some helpers
        def norm_T(T_array):
            # NOTE(review): the parameter shadows the outer T_array but the
            # body reads T_array_sub from the enclosing scope instead --
            # presumably intentional (called after T_array_sub is bound),
            # but worth confirming.
            mincols = T_array_sub.min(axis=0)
            maxcols = T_array_sub.max(axis=0)
            T_range = maxcols[numpy.newaxis,:] - mincols[numpy.newaxis,:]
            return (T_array_sub-mincols[numpy.newaxis,:]) / T_range
        def plot_cluster_lines(X_D_i, num_cols_i):
            # Draw a horizontal red line under each row-cluster that has
            # more than 5 rows.
            old_tmp = 0
            for cluster_i in range(max(X_D_i)):
                cluster_num_rows = numpy.sum(numpy.array(X_D_i) == cluster_i)
                if cluster_num_rows > 5:
                    xs = numpy.arange(num_cols_i + 1) - 0.5
                    ys = [old_tmp + cluster_num_rows] * (num_cols_i + 1)
                    pylab.plot(xs, ys, color='red', linewidth=2, hold='true')
                    pass
                old_tmp = old_tmp + cluster_num_rows
                pass
            return
        # plot: select this view's columns, row-sorted by cluster id
        argsorted = numpy.argsort(X_D_i)
        T_array_sub = T_array[:,is_this_view][argsorted]
        normed_T = norm_T(T_array_sub)
        currax = fig.add_axes(axes_pos)
        pylab.imshow(normed_T, aspect = 'auto',
                     interpolation='none', cmap=pylab.matplotlib.cm.Greens)
        plot_cluster_lines(X_D_i, num_cols_i)
        # munge plots
        pylab.gca().set_xticks(list(range(num_cols_i)))
        pylab.gca().set_xticklabels(column_names[is_this_view], rotation=90, size='x-small')
        pylab.gca().set_yticklabels([])
        pylab.xlim([-0.5, num_cols_i-0.5])
        pylab.ylim([0, len(T_array_sub)])
        if view_idx!=0: pylab.gca().set_yticklabels([])
    if do_colorbar:
        # The colorbar takes the final (one-column-wide) axes slot.
        nxtAxWidth = float(1.) * view_spacing_2
        axes_pos = nxtAxDisLeft, axpos2, nxtAxWidth, axpos4
        cax = fig.add_axes(axes_pos)
        cb = pylab.colorbar(cax=cax, ax=currax)
    save_current_figure(filename, dir, close, format=format)
def plot_predicted_value(value, samples, modelType, filename='imputed_value_hist.png', plotcolor='red', truth=None, x_axis_lim=None):
    """Plot a normalized histogram of predictive samples around ``value``.

    Draws the sample distribution as bars, the imputed ``value`` as a solid
    vertical line, the smallest symmetric >50%-mass interval as dashed
    lines, and (optionally) ``truth`` as a dashed green line.

    :param value: the imputed value to mark
    :param samples: predictive draws for the cell
    :param modelType: 'normal_inverse_gamma' (continuous) or
        'symmetric_dirichlet_discrete' (multinomial)
    :param truth: optional true value to overlay
    :param x_axis_lim: optional (min, max) x-axis limits
    :returns: the final x-axis limits of the plot
    :raises ValueError: for an unsupported ``modelType``
    """
    fig = pylab.figure()
    # Find the smallest symmetric interval around ``value`` that contains
    # more than 50% of the samples, searching in steps of 2*std/100.
    curr_std = numpy.std(samples)
    curr_delta = 2 * curr_std / 100
    ndraws = len(samples)
    # Fallback keeps ``thresh`` bound when the search range is empty
    # (all samples identical => std == 0); previously this raised NameError.
    thresh = curr_delta
    for thresh in numpy.arange(curr_delta, 2 * curr_std, curr_delta):
        withinbounds = len([i for i in range(len(samples))
                            if samples[i] < (value + thresh)
                            and samples[i] > (value - thresh)])
        if float(withinbounds) / ndraws > 0.5:
            break
    bounds = [value - thresh, value + thresh]
    # Draw a histogram first only to obtain the bin counts/edges.
    # 'normal_inverse_gamma': continuous_imputation,
    # 'symmetric_dirichlet_discrete': multinomial_imputation,
    if modelType == 'normal_inverse_gamma':
        nx, xbins, rectangles = pylab.hist(samples, bins=40, normed=0, color=plotcolor)
    elif modelType == 'symmetric_dirichlet_discrete':
        bin_edges = numpy.arange(numpy.min(samples) - 0.5, numpy.max(samples) - 0.5, 1)
        nx, xbins, rectangles = pylab.hist(samples, bin_edges, normed=0, color=plotcolor)
    else:
        # Previously this printed a message and fell through to a NameError
        # on ``nx``; fail fast with a clear error instead.
        pylab.close(fig)
        raise ValueError('Unsupported model type: %s' % modelType)
    pylab.clf()
    # Re-draw the histogram as fraction-of-draws bars.
    nx_frac = nx / float(sum(nx))
    x_width = [(xbins[i + 1] - xbins[i]) for i in range(len(xbins) - 1)]
    pylab.bar(xbins[0:len(xbins) - 1], nx_frac, x_width, color=plotcolor)
    pylab.plot([value, value], [0, 1], color=plotcolor, hold=True, linewidth=2)
    pylab.plot([bounds[0], bounds[0]], [0, 1], color=plotcolor, hold=True, linestyle='--', linewidth=2)
    pylab.plot([bounds[1], bounds[1]], [0, 1], color=plotcolor, hold=True, linestyle='--', linewidth=2)
    if truth is not None:
        pylab.plot([truth, truth], [0, 1], color='green', hold=True, linestyle='--', linewidth=2)
    pylab.show()
    if x_axis_lim is not None:
        pylab.xlim(x_axis_lim)
    save_current_figure(filename, './', False)
    return pylab.gca().get_xlim()
def do_gen_feature_z(X_L_list, X_D_list, M_c, filename, tablename=''):
    """Plot the pairwise column co-assignment ("dependence") matrix.

    z_matrix[i, j] is the fraction of latent states in X_L_list that put
    columns i and j in the same view.  Columns are reordered by
    hierarchical clustering (dendrogram leaf order) so that dependent
    columns appear adjacent, then the matrix is saved to ``filename``.
    """
    num_cols = len(X_L_list[0]['column_partition']['assignments'])
    column_names = numpy.array(
        [M_c['idx_to_name'][str(idx)] for idx in range(num_cols)])
    # extract unordered z_matrix
    num_latent_states = len(X_L_list)
    z_matrix = numpy.zeros((num_cols, num_cols))
    for X_L in X_L_list:
        assignments = X_L['column_partition']['assignments']
        for i in range(num_cols):
            for j in range(num_cols):
                if assignments[i] == assignments[j]:
                    z_matrix[i, j] += 1
    z_matrix /= float(num_latent_states)
    # hierarchically cluster z_matrix and take the dendrogram's leaf
    # order as the display order
    Y = pdist(z_matrix)
    Z = linkage(Y)
    pylab.figure()
    dendrogram(Z)
    intify = lambda x: int(x.get_text())
    # list() so the indices work for numpy fancy indexing on Python 3,
    # where map() returns an iterator instead of a list.
    reorder_indices = list(map(intify, pylab.gca().get_xticklabels()))
    pylab.close()
    # REORDER!
    z_matrix_reordered = z_matrix[:, reorder_indices][reorder_indices, :]
    column_names_reordered = column_names[reorder_indices]
    # actually create figure
    fig = pylab.figure()
    fig.set_size_inches(16, 12)
    pylab.imshow(z_matrix_reordered, interpolation='none',
                 cmap=pylab.matplotlib.cm.Greens)
    pylab.colorbar()
    if num_cols < 14:
        # Few columns: label every row and column.
        pylab.gca().set_yticks(list(range(num_cols)))
        pylab.gca().set_yticklabels(column_names_reordered, size='x-small')
        pylab.gca().set_xticks(list(range(num_cols)))
        pylab.gca().set_xticklabels(column_names_reordered, rotation=90, size='x-small')
    else:
        # Many columns: label alternating rows/columns to avoid clutter.
        pylab.gca().set_yticks(list(range(0, num_cols, 2)))
        pylab.gca().set_yticklabels(column_names_reordered[::2], size='x-small')
        pylab.gca().set_xticks(list(range(1, num_cols, 2)))
        pylab.gca().set_xticklabels(column_names_reordered[1::2],
                                    rotation=90, size='small')
    pylab.title('column dependencies for: %s' % tablename)
    pylab.savefig(filename)
    pylab.close()
def legend_outside(ax=None, bbox_to_anchor=(0.5, -.25), loc='upper center',
                   ncol=None, label_cmp=None):
    """Place the axes legend outside the plot area, de-duplicated by label.

    labels must be set in the original plot call: plot(..., label=label).
    NOTE(review): ``sorted(..., cmp=...)`` is Python 2 only, and without
    label_cmp the label order follows dict iteration order.
    """
    if ax is None:
        ax = pylab.gca()
    handles, labels = ax.get_legend_handles_labels()
    # dict() keeps one handle per distinct label (last one wins).
    label_to_handle = dict(zip(labels, handles))
    labels = label_to_handle.keys()
    if label_cmp is not None:
        labels = sorted(labels, cmp=label_cmp)
    handles = [label_to_handle[label] for label in labels]
    if ncol is None:
        ncol = min(len(labels), 3)
    lgd = ax.legend(handles, labels, loc=loc, ncol=ncol,
                    bbox_to_anchor=bbox_to_anchor, prop={"size":14})
    return
# Comparator sorting string labels by their integer value (Python 2 cmp).
int_cmp = lambda x, y: cmp(int(x), int(y))
def legend_outside_from_dicts(marker_dict, color_dict,
                              marker_label_prepend='', color_label_prepend='',
                              ax=None, bbox_to_anchor=(0.5, -.07), loc='upper center',
                              ncol=None, label_cmp=None,
                              marker_color='k'):
    """Build a two-column outside legend from marker and color mappings.

    marker_dict maps label -> marker style, color_dict maps label -> line
    color; proxy Line2D artists are created for each.  The two groups are
    interleaved column-wise, padded with blank entries so both have equal
    length.  NOTE(review): relies on Python 2 ``sorted(..., cmp=int_cmp)``
    and the ``label_cmp`` parameter is currently unused.
    """
    marker_handles = []
    marker_labels = []
    for label in sorted(marker_dict.keys(), cmp=int_cmp):
        marker = marker_dict[label]
        # Invisible-line proxy artist showing only the marker.
        handle = pylab.Line2D([],[], color=marker_color, marker=marker, linewidth=0)
        marker_handles.append(handle)
        marker_labels.append(marker_label_prepend+label)
    color_handles = []
    color_labels = []
    for label in sorted(color_dict.keys(), cmp=int_cmp):
        color = color_dict[label]
        # Line-only proxy artist showing the color.
        handle = pylab.Line2D([],[], color=color, linewidth=3)
        color_handles.append(handle)
        color_labels.append(color_label_prepend+label)
    # Pad the shorter group with empty entries so round-robin interleaving
    # keeps markers and colors in separate legend columns.
    num_marker_handles = len(marker_handles)
    num_color_handles = len(color_handles)
    num_to_add = abs(num_marker_handles - num_color_handles)
    if num_marker_handles < num_color_handles:
        add_to_handles = marker_handles
        add_to_labels = marker_labels
    else:
        add_to_handles = color_handles
        add_to_labels = color_labels
    for add_idx in range(num_to_add):
        add_to_handles.append(pylab.Line2D([],[], color=None, linewidth=0))
        add_to_labels.append('')
    handles = gu.roundrobin(marker_handles, color_handles)
    labels = gu.roundrobin(marker_labels, color_labels)
    if ax is None:
        ax = pylab.gca()
    if ncol is None:
        ncol = max(num_marker_handles, num_color_handles)
    lgd = ax.legend(handles, labels, loc=loc, ncol=ncol,
                    bbox_to_anchor=bbox_to_anchor, prop={"size":14})
    return
def savefig_legend_outside(filename, ax=None, bbox_inches='tight', dir='./'):
    """Save the current figure, keeping an outside-axes legend inside the crop."""
    if ax is None:
        ax = pylab.gca()
    legend = ax.get_legend()
    fu.ensure_dir(dir)
    target_path = os.path.join(dir, filename)
    # Passing the legend as an extra artist stops bbox_inches='tight'
    # from cropping it away.
    pylab.savefig(target_path,
                  bbox_extra_artists=(legend,),
                  bbox_inches=bbox_inches,
                  )
    return
def _plot_diagnostic_with_mean(data_arr, hline=None):
    """Plot each chain in black with the across-chain mean in red.

    Optionally draws a horizontal reference line at ``hline``.
    Returns the created figure handle.
    """
    figure_handle = pylab.figure()
    pylab.plot(data_arr, color='k')
    pylab.plot(data_arr.mean(axis=1), linewidth=3, color='r')
    if hline is not None:
        pylab.axhline(hline)
    return figure_handle
def plot_diagnostics(diagnostics_dict, hline_lookup=None, which_diagnostics=None):
    """Plot each requested diagnostic series; returns the last figure handle."""
    if which_diagnostics is None:
        which_diagnostics = diagnostics_dict.keys()
    if hline_lookup is None:
        hline_lookup = dict()
    for diagnostic_name in which_diagnostics:
        fh = _plot_diagnostic_with_mean(
            diagnostics_dict[diagnostic_name],
            hline=hline_lookup.get(diagnostic_name))
        pylab.xlabel('iter')
        pylab.ylabel(diagnostic_name)
    return fh
def show_parameters(parameters):
    """Render the parameters dict as text in the current axes (no-op if empty)."""
    if len(parameters) == 0:
        return
    axes = pylab.gca()
    rendered = gu.get_dict_as_text(parameters)
    pylab.text(0, 1, rendered, transform=axes.transAxes,
               va='top', size='small', linespacing=1.0)
    return
| {
"content_hash": "6b3a292dff9848f360e199b12a7a4d30",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 133,
"avg_line_length": 40.14666666666667,
"alnum_prop": 0.6152441049485221,
"repo_name": "mit-probabilistic-computing-project/crosscat",
"id": "c2fc6f5fd2bc84f9e743f43a60df7ba8d51778e5",
"size": "12849",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/tests/plot_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "282503"
},
{
"name": "CSS",
"bytes": "477"
},
{
"name": "HTML",
"bytes": "7386"
},
{
"name": "JavaScript",
"bytes": "18"
},
{
"name": "Makefile",
"bytes": "2474"
},
{
"name": "Matlab",
"bytes": "61001"
},
{
"name": "Python",
"bytes": "669565"
},
{
"name": "Ruby",
"bytes": "5045"
},
{
"name": "Shell",
"bytes": "18291"
}
],
"symlink_target": ""
} |
from test_server import Response
from tests.util import BaseGrabTestCase
from tests.util import build_grab
class ExtensionPyqueryTestCase(BaseGrabTestCase):
    """Tests for the pyquery access on grab documents."""

    def setUp(self):
        self.server.reset()

    def test_pyquery_handler(self):
        self.server.add_response(
            Response(data=b"<body><h1>Hello world</h1><footer>2014</footer>")
        )
        browser = build_grab()
        browser.go(self.server.get_url())
        # The pyquery selector should find the h1 element's text.
        self.assertEqual(browser.doc.pyquery("h1").text(), "Hello world")

    def test_national_utf_symbol(self):
        msg = (
            u"P.S. Bir daha öz fikrimi xatırladım ki,rhen ve "
            u"qelben sene bağlı insan başqasına ehtiyac duymaz."
        )
        body = b"<html><body><p>%s</p></body>" % msg.encode("utf-8")
        self.server.add_response(Response(data=body))
        browser = build_grab()
        browser.go(self.server.get_url())
        # Non-ASCII text must round-trip through the document unharmed.
        self.assertEqual(browser.doc.pyquery("p")[0].text_content(), msg)
| {
"content_hash": "07a828d505a2ff70b422ddac96b9d90c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 32.166666666666664,
"alnum_prop": 0.6103626943005181,
"repo_name": "lorien/grab",
"id": "3f3976eeef1ce3d2ee5a1fe688014f9ce8510ba1",
"size": "972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ext_pyquery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "250730"
},
{
"name": "Makefile",
"bytes": "879"
},
{
"name": "Python",
"bytes": "347132"
}
],
"symlink_target": ""
} |
from common_funcs import *
from time import gmtime, strftime
from os import system
# Timestamped results directory for this experiment run.
thetime = strftime("%a-%d-%b-%Y-%H_%M", gmtime())
resultsdir = "results/debug-explicit/"+thetime
system("mkdir -p "+resultsdir)
# Record the git revision the experiment was run from.
system("git log -1 --format=\"%%h;%%ad\" > %s/git-revision.txt" % (resultsdir))
#blow away old configs, etc.
#must be run first so pull works
killall_java("lipstick-hosts")
killall_java("cassandra-hosts")
# Bring up the backing Cassandra ring and the lipstick shims on each
# host group.
set_up_cassandra_ring()
prepare_cassandra_for_lipstick("cassandra-hosts")
prepare_lipstick_all("lipstick-hosts")
prepare_lipstick("east-lipstick-hosts", "east-cassandra-hosts")
prepare_lipstick("west-lipstick-hosts", "west-cassandra-hosts")
enable_backend_reads()
clients = 1024
#potential causality stuff
for variance, prob in [(1, .88)]:
    set_lipstick_variance_potential(variance)
    set_lipstick_reply_prob_explicit(prob)
    enable_local_checkpointing("lipstick-hosts")
    enable_backend_reads()
    set_lipstick_clients_per_server(clients)
    #set_lipstick_shim_param_all("lipstick-hosts", "backend.async.sleepms", "900000")
    # Sweep server thread counts; one iteration of each configuration.
    for iteration in range(0, 1):
        for threads in [8, 16, 32]:
            set_lipstick_threads_per_server(threads)
            reset_lipstick_states()
            set_backend_explicit()
            run_lipstick_experiment(False, "%s/E-shim-%d-%dC-%fP-%d" % (resultsdir, threads, clients, prob, iteration))
| {
"content_hash": "ec4990b24de6efd58a2e4eb68ccdc025",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 119,
"avg_line_length": 32.88095238095238,
"alnum_prop": 0.6944243301955105,
"repo_name": "pbailis/bolton-sigmod2013-code",
"id": "e086940a9b6fdefa1e9214aebae1668c0d16ba54",
"size": "1384",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "shim-code/setup-cluster/old-harness/debug-explicit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1391"
},
{
"name": "Java",
"bytes": "587795"
},
{
"name": "Python",
"bytes": "184518"
},
{
"name": "Shell",
"bytes": "17220"
}
],
"symlink_target": ""
} |
# Mapping of full state names to their two-letter abbreviations.
states = {
    'Oregon': 'OR',
    'Florida': 'FL',
    'California': 'CA',
    'New York': 'NY',
    'Michigan': 'MI'
}
# Mapping of state abbreviations to one city in that state.
cities = {
    'CA': 'San Francisco',
    'MI': 'Detroit',
    'FL': 'Jacksonville'
}
# Dictionaries are mutable: add entries after creation.
cities['NY'] = 'New York'
cities['OR'] = 'Portland'
print '-' * 10
print "NY State has: ", cities['NY']
print "OR State has: ", cities['OR']
print '-' * 10
print "Michigan's abbreviation is: ", states['Michigan']
print "Florida's abbreviation is: ", states['Florida']
print '-' * 10
# Chained lookup: state name -> abbreviation -> city.
print "Michigan has: ", cities[states['Michigan']]
print "Florida has: ", cities[states['Florida']]
print '-' * 10
for state, abbrev in states.items():
    print "%s is abbreviated %s" % (state, abbrev)
print '-' * 10
for abbrev, city in cities.items():
    print "%s has the city %s" % (abbrev, city)
print '-' * 10
for state, abbrev in states.items():
    print "%s state is abbreviated %s and has city %s" % (
        state, abbrev, cities[abbrev])
print '-' * 10
# dict.get returns None (or a supplied default) instead of raising
# KeyError for a missing key.
state = states.get('Texas')
if not state:
    print "Sorry, no Texas."
city = cities.get('TX', 'Does Not Exist')
print "The city for the state 'TX' is: %s" % city | {
"content_hash": "85a59febfd44b811a46db8cd8a6ca5db",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 58,
"avg_line_length": 22.6,
"alnum_prop": 0.5991150442477876,
"repo_name": "chrisortman/CIS-121",
"id": "19ed4aba45989d5be4d1c411fbdc38a42f643490",
"size": "1130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "k0765065/lpthw/ex39.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "413801"
}
],
"symlink_target": ""
} |
import logging
# Skip bson in requirements , pymongo provides
# noinspection PyPackageRequirements
from bson.timestamp import Timestamp
from copy_reg import pickle
from multiprocessing import Pool, TimeoutError
from types import MethodType
from ResolverThread import ResolverThread
from mongodb_consistent_backup.Common import MongoUri
from mongodb_consistent_backup.Errors import Error, OperationError
from mongodb_consistent_backup.Oplog import OplogState
from mongodb_consistent_backup.Pipeline import Task
# Allows pooled .apply_async()s to work on Class-methods:
def _reduce_method(m):
    """Pickle-reduce a Python 2 method to a getattr() lookup.

    multiprocessing pickles callables submitted to the Pool; plain methods
    are not picklable by default, so reduce them to (getattr, (owner,
    name)) which re-resolves the method on unpickling.
    """
    if m.im_self is None:
        # Unbound method: re-resolve from the class by name.
        return getattr, (m.im_class, m.im_func.func_name)
    else:
        # Bound method: re-resolve from the instance by name.
        return getattr, (m.im_self, m.im_func.func_name)
# Register the reducer so pooled .apply_async()s accept class methods.
pickle(MethodType, _reduce_method)
class Resolver(Task):
    """Merge per-shard backup oplogs with the oplogs tailed during backup.

    Computes a single consistent end timestamp across all shards and fans
    the per-shard resolving work out to a multiprocessing Pool of
    ResolverThread workers.
    """

    def __init__(self, manager, config, timer, base_dir, backup_dir, tailed_oplogs, backup_oplogs):
        super(Resolver, self).__init__(self.__class__.__name__, manager, config, timer, base_dir, backup_dir)
        self.tailed_oplogs = tailed_oplogs
        self.backup_oplogs = backup_oplogs
        self.compression_supported = ['none', 'gzip']
        self.resolver_summary = {}
        self.resolver_state = {}
        self.running = False
        self.stopped = False
        self.completed = False
        self._pool = None
        # Names of threads currently in flight / their async results.
        self._pooled = []
        self._results = {}
        self.threads(self.config.oplog.resolver.threads)
        try:
            self._pool = Pool(processes=self.threads())
        except Exception, e:
            logging.fatal("Could not start oplog resolver pool! Error: %s" % e)
            raise Error(e)

    def close(self):
        """Terminate the resolver worker pool.

        NOTE(review): the guard tests ``self.stopped`` (i.e. already
        stopped) rather than ``not self.stopped`` -- confirm this is
        intentional.
        """
        if self._pool and self.stopped:
            logging.debug("Stopping all oplog resolver threads")
            self._pool.terminate()
            logging.info("Stopped all oplog resolver threads")
        self.stopped = True

    def get_backup_end_max_ts(self):
        """Return the newest 'last_ts' across all backup oplogs (or None)."""
        end_ts = None
        for shard in self.backup_oplogs:
            instance = self.backup_oplogs[shard]
            if 'last_ts' in instance and instance['last_ts'] is not None:
                last_ts = instance['last_ts']
                if end_ts is None or last_ts > end_ts:
                    end_ts = last_ts
        return end_ts

    def get_consistent_end_ts(self):
        """Compute the consistent end Timestamp across all shards.

        Uses the oldest tailed-oplog end, bounded below by the newest
        backup-oplog end, bumped to the next whole second.
        """
        end_ts = None
        bkp_end_ts = self.get_backup_end_max_ts()
        for shard in self.tailed_oplogs:
            instance = self.tailed_oplogs[shard]
            if 'last_ts' in instance and instance['last_ts'] is not None:
                last_ts = instance['last_ts']
                if end_ts is None or last_ts < end_ts:
                    end_ts = last_ts
                if last_ts < bkp_end_ts:
                    end_ts = bkp_end_ts
        if end_ts is None:
            # Happens when there were _no_ oplog changes since the backup
            # end, i. e. when all tailed-oplogs are empty
            end_ts = bkp_end_ts
        return Timestamp(end_ts.time + 1, 0)

    def done(self, done_uri):
        """Pool callback: mark the resolver thread for ``done_uri`` finished."""
        if done_uri in self._pooled:
            logging.debug("Resolving completed for: %s" % done_uri)
            self._pooled.remove(done_uri)
        else:
            raise OperationError("Unexpected response from resolver thread: %s" % done_uri)

    def wait(self, max_wait_secs=6 * 3600, poll_secs=2):
        """Block until all pooled resolver threads finish or time out.

        NOTE(review): the OperationError message contains an unformatted
        ``%i`` placeholder with no ``%`` arguments applied.
        """
        if len(self._pooled) > 0:
            waited_secs = 0
            self._pool.close()
            while len(self._pooled):
                logging.debug("Waiting for %i oplog resolver thread(s) to stop" % len(self._pooled))
                try:
                    for thread_name in self._pooled:
                        thread = self._results[thread_name]
                        thread.get(poll_secs)
                except TimeoutError:
                    if waited_secs < max_wait_secs:
                        waited_secs += poll_secs
                    else:
                        raise OperationError("Waited more than %i seconds for Oplog resolver! I will assume there is a problem and exit")

    def run(self):
        """Resolve every shard's backup oplog up to the consistent end ts.

        Returns a dict of per-shard resolver state summaries.
        """
        uri = None
        try:
            logging.info("Resolving oplogs (options: threads=%s, compression=%s)" % (self.threads(), self.compression()))
            self.timer.start(self.timer_name)
            self.running = True
            consistent_end_ts = self.get_consistent_end_ts()
            logging.info("Consistent end timestamp for all shards is %s" % consistent_end_ts)
            for shard in self.backup_oplogs:
                backup_oplog = self.backup_oplogs[shard]
                self.resolver_state[shard] = OplogState(self.manager, None, backup_oplog['file'])
                uri = MongoUri(backup_oplog['uri']).get()
                if shard in self.tailed_oplogs:
                    tailed_oplog = self.tailed_oplogs[shard]
                    backup_last_ts = backup_oplog['last_ts']
                    if 'last_ts' in tailed_oplog and tailed_oplog['last_ts'] is not None:
                        tailed_last_ts = tailed_oplog['last_ts']
                    else:
                        # No tailed changes: treat the tail as ending where
                        # the backup oplog ends.
                        tailed_last_ts = backup_last_ts
                    if backup_last_ts is None and tailed_last_ts is None:
                        logging.info("No oplog changes to resolve for %s" % uri)
                    elif backup_last_ts > tailed_last_ts:
                        logging.fatal(
                            "Backup oplog is newer (%s) than the tailed oplog (%s)! This situation is unsupported. Please retry backup" % (
                                backup_last_ts, tailed_last_ts))
                        raise OperationError("Backup oplog is newer than the tailed oplog!")
                    else:
                        # Hand this shard off to a pooled ResolverThread.
                        thread_name = uri.str()
                        logging.debug("Starting ResolverThread: %s" % thread_name)
                        self._results[thread_name] = self._pool.apply_async(ResolverThread(
                            self.config.dump(),
                            self.resolver_state[shard],
                            uri,
                            tailed_oplog.copy(),
                            backup_oplog.copy(),
                            consistent_end_ts,
                            self.compression()
                        ).run, callback=self.done)
                        self._pooled.append(thread_name)
                else:
                    logging.info("No tailed oplog for host %s" % uri)
            if len(self._pooled) > 0:
                self.wait()
                # Shut down the thread pool to avoid spurious exceptions
                self._pool.join()
            self.completed = True
            logging.info("Oplog resolving completed in %.2f seconds" % self.timer.duration(self.timer_name))
        except Exception, e:
            if uri is not None:
                logging.error("Resolver failed for %s: %s" % (uri, e))
            else:
                logging.error("Resolver failed: %s" % e)
            raise e
        finally:
            self.timer.stop(self.timer_name)
            self.running = False
            self.stopped = True
        for shard in self.resolver_state:
            self.resolver_summary[shard] = self.resolver_state[shard].get()
        return self.resolver_summary
| {
"content_hash": "1a5ec99617bc1cec154bc87e4fb72625",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 139,
"avg_line_length": 43.205882352941174,
"alnum_prop": 0.5473110959836623,
"repo_name": "Percona-Lab/mongodb_consistent_backup",
"id": "3a69f94214c1167f5aca5f161d73684e512f905f",
"size": "7345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongodb_consistent_backup/Oplog/Resolver/Resolver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "707"
},
{
"name": "JavaScript",
"bytes": "536"
},
{
"name": "Makefile",
"bytes": "5715"
},
{
"name": "Python",
"bytes": "235278"
},
{
"name": "Shell",
"bytes": "7177"
}
],
"symlink_target": ""
} |
"""Generic Node base class for all workers that run on hosts."""
import errno
import logging
import os
import random
import signal
import sys
import time
try:
# Importing just the symbol here because the io module does not
# exist in Python 2.6.
from io import UnsupportedOperation # noqa
except ImportError:
# Python 2.6
UnsupportedOperation = None
import eventlet
from eventlet import event
from oslo.config import cfg
from ceilometer.openstack.common import eventlet_backdoor
from ceilometer.openstack.common._i18n import _LE, _LI, _LW
from ceilometer.openstack.common import systemd
from ceilometer.openstack.common import threadgroup
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _sighup_supported():
return hasattr(signal, 'SIGHUP')
def _is_daemon():
    """Return True when this process appears to be running as a daemon.

    Compares the process group against the controlling terminal's process
    group; any failure to query the terminal is treated as "daemon".
    """
    # The process group for a foreground process will match the
    # process group of the controlling terminal. If those values do
    # not match, or ioctl() fails on the stdout file handle, we assume
    # the process is running in the background as a daemon.
    # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
    try:
        is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
    except OSError as err:
        if err.errno == errno.ENOTTY:
            # Assume we are a daemon because there is no terminal.
            is_daemon = True
        else:
            raise
    except UnsupportedOperation:
        # Could not get the fileno for stdout, so we must be a daemon.
        # NOTE(review): on Python 2.6 UnsupportedOperation is None (see the
        # import fallback at module top); this clause assumes the io module
        # exists — TODO confirm the 2.6 path is still needed.
        is_daemon = True
    return is_daemon
def _is_sighup_and_daemon(signo):
    """Return True only for a SIGHUP received by a daemonized process."""
    if _sighup_supported() and signo == signal.SIGHUP:
        # Only daemons should restart on SIGHUP; foreground processes exit.
        return _is_daemon()
    return False
def _signo_to_signame(signo):
    """Translate a handled signal number to its name.

    Raises KeyError for signals this launcher does not handle.
    """
    names = {signal.SIGTERM: 'SIGTERM',
             signal.SIGINT: 'SIGINT'}
    if _sighup_supported():
        names.update({signal.SIGHUP: 'SIGHUP'})
    return names[signo]
def _set_signals_handler(handler):
    """Install *handler* for TERM, INT and (where supported) HUP."""
    handled = [signal.SIGTERM, signal.SIGINT]
    if _sighup_supported():
        handled.append(signal.SIGHUP)
    for signo in handled:
        signal.signal(signo, handler)
class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Set up the backdoor port and an empty service container.

        :returns: None
        """
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
        self.services = Services()

    def launch_service(self, service):
        """Register *service* and start running it.

        :param service: the service instance to start
        :returns: None
        """
        # The backdoor port must be attached before the service starts.
        service.backdoor_port = self.backdoor_port
        self.services.add(service)

    def stop(self):
        """Stop every service that is currently running.

        :returns: None
        """
        self.services.stop()

    def wait(self):
        """Block until every service has been stopped.

        :returns: None
        """
        self.services.wait()

    def restart(self):
        """Reload configuration files and restart all services.

        :returns: None
        """
        cfg.CONF.reload_config_files()
        self.services.restart()
class SignalExit(SystemExit):
    """SystemExit that remembers which signal triggered the shutdown."""

    def __init__(self, signo, exccode=1):
        # exccode becomes the process exit status (SystemExit.code).
        super(SignalExit, self).__init__(exccode)
        self.signo = signo
class ServiceLauncher(Launcher):
    """Launcher that runs services in-process and turns signals into exits."""

    def _handle_signal(self, signo, frame):
        """Signal handler: re-arm default handlers and raise SignalExit."""
        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)
        raise SignalExit(signo)

    def handle_signal(self):
        """Install the SignalExit-raising handler for TERM/INT/HUP."""
        _set_signals_handler(self._handle_signal)

    def _wait_for_exit_or_signal(self, ready_callback=None):
        """Wait for services to finish or a signal to arrive.

        :param ready_callback: optional callable invoked once waiting starts
        :returns: tuple of (exit status, signal number; 0 if no signal)
        """
        status = None
        signo = 0
        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, logging.DEBUG)
        try:
            if ready_callback:
                ready_callback()
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            # Services are stopped whether we exited cleanly or via signal.
            self.stop()
        return status, signo

    def wait(self, ready_callback=None):
        """Run until exit; restart instead of exiting on daemon SIGHUP.

        :returns: the process exit status
        """
        systemd.notify_once()
        while True:
            self.handle_signal()
            status, signo = self._wait_for_exit_or_signal(ready_callback)
            if not _is_sighup_and_daemon(signo):
                return status
            self.restart()
class ServiceWrapper(object):
    """Per-service bookkeeping used by ProcessLauncher."""

    def __init__(self, service, workers):
        self.service = service    # the wrapped service instance
        self.workers = workers    # desired number of worker processes
        self.children = set()     # pids of currently running children
        self.forktimes = []       # recent fork timestamps (fork-rate limiting)
class ProcessLauncher(object):
    """Launcher that forks one process per worker and respawns dead children."""

    def __init__(self, wait_interval=0.01):
        """Constructor.

        :param wait_interval: The interval to sleep for between checks
                              of child process exit.
        """
        self.children = {}        # pid -> ServiceWrapper
        self.sigcaught = None     # last signal caught in the parent
        self.running = True
        self.wait_interval = wait_interval
        # Children block on the read end of this pipe; EOF there means the
        # parent died and the child should exit.
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
        self.handle_signal()

    def handle_signal(self):
        """Install the parent's TERM/INT/HUP handler."""
        _set_signals_handler(self._handle_signal)

    def _handle_signal(self, signo, frame):
        """Record the signal and break out of the respawn loop."""
        self.sigcaught = signo
        self.running = False
        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)

    def _pipe_watcher(self):
        """Exit the child when the parent's end of the pipe is closed."""
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()
        LOG.info(_LI('Parent process has died unexpectedly, exiting'))
        sys.exit(1)

    def _child_process_handle_signal(self):
        """Replace inherited signal handlers with child-appropriate ones."""
        # Setup child signal handlers differently
        def _sigterm(*args):
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            raise SignalExit(signal.SIGTERM)

        def _sighup(*args):
            signal.signal(signal.SIGHUP, signal.SIG_DFL)
            raise SignalExit(signal.SIGHUP)

        signal.signal(signal.SIGTERM, _sigterm)
        if _sighup_supported():
            signal.signal(signal.SIGHUP, _sighup)
        # Block SIGINT and let the parent send us a SIGTERM
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    def _child_wait_for_exit_or_signal(self, launcher):
        """Run *launcher* inside the child; return (status, signo) on exit."""
        status = 0
        signo = 0
        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fallback into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Child caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            LOG.exception(_LE('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()
        return status, signo

    def _child_process(self, service):
        """Post-fork setup; returns a Launcher running *service*."""
        self._child_process_handle_signal()
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()
        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)
        # Reseed random number generator
        random.seed()
        launcher = Launcher()
        launcher.launch_service(service)
        return launcher

    def _start_child(self, wrap):
        """Fork one worker for *wrap*; in the parent, return the child pid.

        The forked child never returns from this method: it runs the service
        loop and terminates via os._exit().
        """
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_LI('Forking too fast, sleeping'))
                time.sleep(1)
            wrap.forktimes.pop(0)
        wrap.forktimes.append(time.time())
        pid = os.fork()
        if pid == 0:
            # Child: run the service until it exits; a daemon SIGHUP
            # restarts the launcher instead of exiting.
            launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(launcher)
                if not _is_sighup_and_daemon(signo):
                    break
                launcher.restart()
            os._exit(status)
        LOG.info(_LI('Started child %d'), pid)
        wrap.children.add(pid)
        self.children[pid] = wrap
        return pid

    def launch_service(self, service, workers=1):
        """Fork *workers* child processes, each running *service*."""
        wrap = ServiceWrapper(service, workers)
        LOG.info(_LI('Starting %d workers'), wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        """Reap one exited child if any; return its ServiceWrapper or None."""
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None
        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))
        if pid not in self.children:
            LOG.warning(_LW('pid %d not in child list'), pid)
            return None
        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap

    def _respawn_children(self):
        """While running, reap dead children and fork replacements."""
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(self.wait_interval)
                continue
            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""
        systemd.notify_once()
        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, logging.DEBUG)
        try:
            while True:
                self.handle_signal()
                self._respawn_children()
                # No signal means that stop was called. Don't clean up here.
                if not self.sigcaught:
                    return
                signame = _signo_to_signame(self.sigcaught)
                LOG.info(_LI('Caught %s, stopping children'), signame)
                if not _is_sighup_and_daemon(self.sigcaught):
                    break
                # Daemon SIGHUP: forward it to the children so they reload,
                # then resume the respawn loop.
                for pid in self.children:
                    os.kill(pid, signal.SIGHUP)
                self.running = True
                self.sigcaught = None
        except eventlet.greenlet.GreenletExit:
            LOG.info(_LI("Wait called after thread killed. Cleaning up."))
        self.stop()

    def stop(self):
        """Terminate child processes and wait on each."""
        self.running = False
        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                # ESRCH: the child is already gone, which is fine.
                if exc.errno != errno.ESRCH:
                    raise
        # Wait for children to die
        if self.children:
            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
class Service(object):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        self.tg = threadgroup.ThreadGroup(threads)
        # Fired once service shutdown/cleanup has finished.
        self._done = event.Event()

    def reset(self):
        """Re-arm the completion event so the service can run again."""
        # NOTE(Fengqian): docs for Event.reset() recommend against using it,
        # so a fresh Event replaces the old one instead.
        self._done = event.Event()

    def start(self):
        """Start hook; the base implementation does nothing."""
        pass

    def stop(self, graceful=False):
        """Stop the thread group, wait for it, then signal completion."""
        self.tg.stop(graceful)
        self.tg.wait()
        if not self._done.ready():
            self._done.send()

    def wait(self):
        """Block until stop() has finished cleaning up."""
        self._done.wait()
class Services(object):
    """Container that runs each added service on its own green thread."""

    def __init__(self):
        self.services = []
        self.tg = threadgroup.ThreadGroup()
        self.done = event.Event()

    def add(self, service):
        """Register *service* and start it inside the thread group."""
        self.services.append(service)
        self.tg.add_thread(self.run_service, service, self.done)

    def stop(self):
        """Gracefully stop every service, then reap the worker threads."""
        for service in self.services:
            service.stop()
            service.wait()
        # Each service has performed cleanup; let the run_service wrapper
        # threads fall through and die.
        if not self.done.ready():
            self.done.send()
        self.tg.stop()

    def wait(self):
        """Block until every service thread has finished."""
        self.tg.wait()

    def restart(self):
        """Stop everything, then re-arm and relaunch each service."""
        self.stop()
        self.done = event.Event()
        for svc in self.services:
            svc.reset()
            self.tg.add_thread(self.run_service, svc, self.done)

    @staticmethod
    def run_service(service, done):
        """Service start wrapper.

        :param service: service to run
        :param done: event to wait on until a shutdown is triggered
        :returns: None
        """
        service.start()
        done.wait()
def launch(service, workers=1):
    """Run *service* under the launcher appropriate for the worker count.

    :param service: the Service instance to run
    :param workers: number of worker processes (None or 1 runs in-process)
    :returns: the launcher that owns the running service
    """
    if workers is not None and workers != 1:
        launcher = ProcessLauncher()
        launcher.launch_service(service, workers=workers)
    else:
        launcher = ServiceLauncher()
        launcher.launch_service(service)
    return launcher
| {
"content_hash": "f2c5b64c6f3187ffc24316501bfc041f",
"timestamp": "",
"source": "github",
"line_count": 486,
"max_line_length": 79,
"avg_line_length": 29.703703703703702,
"alnum_prop": 0.5866583541147132,
"repo_name": "yanheven/ceilometer",
"id": "abdab60fbcfba71fcf3f5e46ff19feff8aac8974",
"size": "15206",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ceilometer/openstack/common/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2969045"
},
{
"name": "Shell",
"bytes": "4227"
}
],
"symlink_target": ""
} |
from flask import Flask, send_file, url_for, render_template
from datetime import datetime
app = Flask(__name__)
dogs = ['Doberman', 'Golden Retriever', 'Snoop', 'Beagle', 'Bull Terrier',\
'Akita', 'Siberian Husky', 'Pit Bull', 'Pug', 'Black Lab']
@app.route('/')
def index():
    """Render the landing page with the current time and a static gif."""
    image_url = url_for('static', filename='x.gif')
    current_time = datetime.now()
    return render_template('index.html',
                           time=current_time,
                           img_url=image_url)
@app.route('/user/<username>')
def render_username(username):
    """Render the per-user page served at /user/<username>."""
    image_url = url_for('static', filename='hal.jpg')
    return render_template('user.html',
                           username=username,
                           img_url=image_url)
@app.route('/dogs/')
def render_dogs_page():
    """Return the name of one randomly chosen dog from the module-level
    ``dogs`` list as the response body (completes the TODO stub)."""
    # Local import keeps the file's top-level import block untouched.
    import random
    return random.choice(dogs)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080, debug=True)
| {
"content_hash": "561fd39b770443f85131d1e98e8f237d",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 75,
"avg_line_length": 28.38095238095238,
"alnum_prop": 0.5713087248322147,
"repo_name": "DevOpsBootcamp/tinsy-flask-app",
"id": "d52cfeb648a30475bd88258684ff101af895c9eb",
"size": "1214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47"
},
{
"name": "HTML",
"bytes": "491"
},
{
"name": "Python",
"bytes": "2017"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
from django.conf import settings
import lepus.models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the lepus CTF app: a custom auth User plus the
    scoring models (Answer, AttackPoint, Category, Config, File, Flag,
    Notice, Question, Team, UserConnection) and their relations.
    """

    dependencies = [
        ('auth', '0006_require_contenttypes_0002'),
    ]

    operations = [
        # Custom auth user: Django's standard fields plus CTF-specific ones
        # (seat, last_score_time, team added below).
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('password', models.CharField(verbose_name='password', max_length=128)),
                ('last_login', models.DateTimeField(blank=True, verbose_name='last login', null=True)),
                ('is_superuser', models.BooleanField(verbose_name='superuser status', help_text='Designates that this user has all permissions without explicitly assigning them.', default=False)),
                ('username', models.CharField(unique=True, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], verbose_name='username', error_messages={'unique': 'A user with that username already exists.'}, max_length=30)),
                ('first_name', models.CharField(blank=True, verbose_name='first name', max_length=30)),
                ('last_name', models.CharField(blank=True, verbose_name='last name', max_length=30)),
                ('email', models.EmailField(blank=True, verbose_name='email address', max_length=254)),
                ('is_staff', models.BooleanField(verbose_name='staff status', help_text='Designates whether the user can log into this admin site.', default=False)),
                ('is_active', models.BooleanField(verbose_name='active', help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', default=True)),
                ('date_joined', models.DateTimeField(verbose_name='date joined', default=django.utils.timezone.now)),
                ('created_at', models.DateTimeField(verbose_name='作成日時', auto_now_add=True)),
                ('updated_at', models.DateTimeField(verbose_name='最終更新日時', auto_now=True)),
                ('seat', models.CharField(blank=True, verbose_name='座席', max_length=32)),
                ('last_score_time', models.DateTimeField(blank=True, verbose_name='最終得点日時', null=True)),
                ('groups', models.ManyToManyField(to='auth.Group', related_name='user_set', blank=True, verbose_name='groups', help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_query_name='user')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', lepus.models.UserManager()),
            ],
        ),
        # A flag submission by a team/user (FKs added below).
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('created_at', models.DateTimeField(verbose_name='作成日時', auto_now_add=True)),
                ('updated_at', models.DateTimeField(verbose_name='最終更新日時', auto_now=True)),
                ('answer', models.CharField(verbose_name='解答', max_length=256)),
            ],
        ),
        # Bonus points awarded via a one-time token.
        migrations.CreateModel(
            name='AttackPoint',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('created_at', models.DateTimeField(verbose_name='作成日時', auto_now_add=True)),
                ('updated_at', models.DateTimeField(verbose_name='最終更新日時', auto_now=True)),
                ('token', models.CharField(unique=True, verbose_name='トークン', max_length=256)),
                ('point', models.IntegerField(verbose_name='得点')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Question category, ordered for display.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('created_at', models.DateTimeField(verbose_name='作成日時', auto_now_add=True)),
                ('updated_at', models.DateTimeField(verbose_name='最終更新日時', auto_now=True)),
                ('name', models.CharField(verbose_name='カテゴリ名', max_length=50)),
                ('ordering', models.IntegerField(verbose_name='表示順序', default=100)),
            ],
            options={
                'ordering': ('ordering',),
            },
        ),
        # Key/value application settings (value stored serialized).
        migrations.CreateModel(
            name='Config',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('created_at', models.DateTimeField(verbose_name='作成日時', auto_now_add=True)),
                ('updated_at', models.DateTimeField(verbose_name='最終更新日時', auto_now=True)),
                ('key', models.CharField(unique=True, verbose_name='設定項目', max_length=256)),
                ('value_str', models.TextField(verbose_name='シリアライズされた値')),
            ],
            options={
                'abstract': False,
            },
        ),
        # File attachment for a question (FK added below).
        migrations.CreateModel(
            name='File',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('created_at', models.DateTimeField(verbose_name='作成日時', auto_now_add=True)),
                ('updated_at', models.DateTimeField(verbose_name='最終更新日時', auto_now=True)),
                ('name', models.CharField(verbose_name='ファイル名', max_length=256)),
                ('file', models.FileField(upload_to='question/', verbose_name='ファイル', max_length=256)),
                ('is_public', models.BooleanField(verbose_name='公開するか', default=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        # A correct flag string and the points it is worth.
        migrations.CreateModel(
            name='Flag',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('created_at', models.DateTimeField(verbose_name='作成日時', auto_now_add=True)),
                ('updated_at', models.DateTimeField(verbose_name='最終更新日時', auto_now=True)),
                ('flag', models.CharField(unique=True, verbose_name='Flag', max_length=200)),
                ('point', models.IntegerField(verbose_name='得点')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Announcements shown to players.
        migrations.CreateModel(
            name='Notice',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('created_at', models.DateTimeField(verbose_name='作成日時', auto_now_add=True)),
                ('updated_at', models.DateTimeField(verbose_name='最終更新日時', auto_now=True)),
                ('title', models.CharField(verbose_name='タイトル', max_length=80)),
                ('body', models.TextField(verbose_name='本文')),
                ('is_public', models.BooleanField(verbose_name='公開にするか', default=False)),
            ],
            options={
                'ordering': ['created_at'],
            },
        ),
        # A CTF challenge, linked to a category.
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('created_at', models.DateTimeField(verbose_name='作成日時', auto_now_add=True)),
                ('updated_at', models.DateTimeField(verbose_name='最終更新日時', auto_now=True)),
                ('ordering', models.IntegerField(unique=True, verbose_name='表示順序', default=100)),
                ('title', models.CharField(verbose_name='タイトル', max_length=50)),
                ('sentence', models.TextField(verbose_name='問題文')),
                ('max_answers', models.IntegerField(blank=True, verbose_name='最大回答者数', null=True)),
                ('max_failure', models.IntegerField(blank=True, verbose_name='最大回答数', null=True)),
                ('is_public', models.BooleanField(verbose_name='公開にするか', default=False)),
                ('category', models.ForeignKey(to='lepus.Category', verbose_name='カテゴリ')),
            ],
            options={
                'ordering': ('ordering',),
            },
        ),
        # A competing team.
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('created_at', models.DateTimeField(verbose_name='作成日時', auto_now_add=True)),
                ('updated_at', models.DateTimeField(verbose_name='最終更新日時', auto_now=True)),
                ('name', models.CharField(unique=True, verbose_name='チーム名', max_length=32)),
                ('password', models.CharField(verbose_name='チームパスワード', max_length=128)),
                ('last_score_time', models.DateTimeField(blank=True, verbose_name='最終得点日時', null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        # IP addresses a user has connected from.
        migrations.CreateModel(
            name='UserConnection',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('created_at', models.DateTimeField(verbose_name='作成日時', auto_now_add=True)),
                ('updated_at', models.DateTimeField(verbose_name='最終更新日時', auto_now=True)),
                ('ip', models.GenericIPAddressField(verbose_name='IPアドレス')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='ユーザー')),
            ],
            options={
                'ordering': ('-updated_at',),
            },
        ),
        # Foreign keys and uniqueness constraints added after model creation
        # to break circular dependencies between the models above.
        migrations.AddField(
            model_name='flag',
            name='question',
            field=models.ForeignKey(to='lepus.Question', verbose_name='問題'),
        ),
        migrations.AddField(
            model_name='file',
            name='question',
            field=models.ForeignKey(to='lepus.Question', verbose_name='問題'),
        ),
        migrations.AlterUniqueTogether(
            name='category',
            unique_together=set([('name', 'ordering')]),
        ),
        migrations.AddField(
            model_name='attackpoint',
            name='question',
            field=models.ForeignKey(to='lepus.Question', verbose_name='問題'),
        ),
        migrations.AddField(
            model_name='attackpoint',
            name='team',
            field=models.ForeignKey(to='lepus.Team', verbose_name='チーム'),
        ),
        migrations.AddField(
            model_name='attackpoint',
            name='user',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='ユーザー'),
        ),
        migrations.AddField(
            model_name='answer',
            name='flag',
            field=models.ForeignKey(to='lepus.Flag', blank=True, null=True),
        ),
        migrations.AddField(
            model_name='answer',
            name='question',
            field=models.ForeignKey(to='lepus.Question', verbose_name='問題'),
        ),
        migrations.AddField(
            model_name='answer',
            name='team',
            field=models.ForeignKey(to='lepus.Team', verbose_name='チーム'),
        ),
        migrations.AddField(
            model_name='answer',
            name='user',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='ユーザー'),
        ),
        migrations.AddField(
            model_name='user',
            name='team',
            field=models.ForeignKey(to='lepus.Team', blank=True, verbose_name='チーム', null=True),
        ),
        migrations.AddField(
            model_name='user',
            name='user_permissions',
            field=models.ManyToManyField(to='auth.Permission', related_name='user_set', blank=True, verbose_name='user permissions', help_text='Specific permissions for this user.', related_query_name='user'),
        ),
        migrations.AlterUniqueTogether(
            name='userconnection',
            unique_together=set([('user', 'ip')]),
        ),
        migrations.AlterUniqueTogether(
            name='answer',
            unique_together=set([('team', 'flag')]),
        ),
    ]
| {
"content_hash": "6edb09f040903f750d21f2d50a0f6922",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 432,
"avg_line_length": 51.473684210526315,
"alnum_prop": 0.5604845052697813,
"repo_name": "lepus-ctf/lepus-api",
"id": "3928c0c905670dfa5f77846bce1f138e37a324d6",
"size": "13258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lepus/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "249"
},
{
"name": "Python",
"bytes": "59916"
}
],
"symlink_target": ""
} |
import json
from api.test import BaseTestCase
class TestUpdateItem(BaseTestCase):
    """Verify that an existing bucket-list item can be modified via PUT."""

    def _headers(self):
        # Every request must carry the JWT auth token.
        return {'Authorization': 'JWT ' + self.token}

    def test_update_bucket_list_item(self):
        # Create the bucket list that will hold the item.
        bucket_list = {
            "description": "Movies i have to watch by the end of the week",
            "status": "Pending",
            "title": "Entertainment",
            "user_id": 1
        }
        self.client.post('/api/v1/bucketlists',
                         headers=self._headers(),
                         data=json.dumps(bucket_list),
                         content_type='application/json')

        # Add the item that will later be modified.
        original_item = {
            "description": "Horror movies",
            "status": "Pending",
            "title": "Wrong turn 6"
        }
        self.client.post('/api/v1/bucketlists/1/items',
                         headers=self._headers(),
                         data=json.dumps(original_item),
                         content_type='application/json')

        # Change the item's title and confirm the response reflects it.
        updated_item = {
            "description": "Horror movies",
            "status": "Pending",
            "title": "The walking dead"
        }
        response = self.client.put('/api/v1/bucketlists/1/items/1',
                                   headers=self._headers(),
                                   data=json.dumps(updated_item),
                                   content_type='application/json')
        self.assertNotIn('Wrong turn 6', str(response.data))
        self.assertIn('The walking dead', str(response.data))
| {
"content_hash": "0bff329a4b8a308d23c676bb5e456893",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 75,
"avg_line_length": 39.06818181818182,
"alnum_prop": 0.44502617801047123,
"repo_name": "EdwinKato/bucket-list",
"id": "5dafde7cc044bee052837d9aa8595df939dae79c",
"size": "1719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/api/tests/test_update_item.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "326270"
},
{
"name": "HTML",
"bytes": "27055"
},
{
"name": "JavaScript",
"bytes": "43847"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "54145"
},
{
"name": "Shell",
"bytes": "520"
},
{
"name": "TypeScript",
"bytes": "39360"
}
],
"symlink_target": ""
} |
'''
Covenant Add-on
Copyright (C) 2017 homik
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, urllib, urlparse, base64, json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
class source:
    """Covenant scraper plugin for trt.pl (Polish movie/TV host)."""

    def __init__(self):
        self.priority = 1
        self.language = ['pl']
        self.domains = ['trt.pl']
        self.base_link = 'http://www.trt.pl/'
        self.search_link = 'szukaj-filmy/%s'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Build the search query for a movie: "<title> <year>"."""
        return title + ' ' + year

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Build the base search query for a TV show (episode added later)."""
        return tvshowtitle

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Append a zero-padded sXXeYY suffix to the show query."""
        return url + ' s' + season.zfill(2) + 'e' + episode.zfill(2)

    def contains_word(self, str_to_check, word):
        """Return a truthy match when *word* occurs as a whole word
        (case-insensitive) in *str_to_check*."""
        return re.search(r'\b' + word + r'\b', str_to_check, re.IGNORECASE)

    def contains_all_words(self, str_to_check, words):
        """Return True when every entry of *words* occurs as a whole word."""
        for word in words:
            if not self.contains_word(str_to_check, word):
                return False
        return True

    # Backward-compatible alias for the original (misspelled) method name.
    contains_all_wors = contains_all_words

    def sources(self, url, hostDict, hostprDict):
        """Scrape trt.pl search results for *url*; return a list of source
        dicts. Best-effort: on any failure, whatever was collected so far
        is returned."""
        # Initialised before the try block so the except path cannot hit a
        # NameError when the HTTP request itself fails (bug in the original).
        sources = []
        try:
            words = cleantitle.getsearch(url).split(' ')
            search_url = urlparse.urljoin(self.base_link, self.search_link) % urllib.quote_plus(url)
            result = client.request(search_url)
            result = client.parseDOM(result, 'div', attrs={'class': 'tile-container'})
            for el in result:
                main = client.parseDOM(el, 'h3')
                link = client.parseDOM(main, 'a', ret='href')[0]
                found_title = client.parseDOM(main, 'a')[0]
                # Skip results that do not contain every query word.
                if not self.contains_all_words(found_title, words):
                    continue
                quality = client.parseDOM(el, 'a', attrs={'class': 'qualityLink'})
                q = 'SD'
                if quality:
                    if quality[0] == '720p':
                        q = 'HD'
                    if quality[0] == '1080p':
                        q = '1080p'
                lang, info = self.get_lang_by_type(found_title)
                sources.append({'source': 'trt', 'quality': q, 'language': lang,
                                'url': link, 'info': info, 'direct': False,
                                'debridonly': False})
            return sources
        except Exception:
            # Narrowed from a bare except; scraping failures are non-fatal.
            return sources

    def get_lang_by_type(self, lang_type):
        """Map language markers in a result title to (language, info label)."""
        if self.contains_word(lang_type, 'lektor'):
            return 'pl', 'Lektor'
        if self.contains_word(lang_type, 'Dubbing'):
            return 'pl', 'Dubbing'
        if self.contains_word(lang_type, 'Napisy'):
            return 'pl', 'Napisy'
        if self.contains_word(lang_type, 'Polski'):
            return 'pl', None
        return 'en', None

    def resolve(self, url):
        """Resolve a relative result link against the site base URL."""
        try:
            return urlparse.urljoin(self.base_link, url)
        except Exception:
            return
| {
"content_hash": "ea489ebf388ebcd22d5f8c4ef0383978",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 146,
"avg_line_length": 37.73076923076923,
"alnum_prop": 0.52217125382263,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "2e9350bb93f35b5545f6126de1757f48b9893248",
"size": "3949",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "script.module.covenant/lib/resources/lib/sources/pl/trt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import argparse
parser = argparse.ArgumentParser(description='''
MITE
This tool finds mutually recursive acronyms, or recursonyms.
Uses the english dictionary in /usr/share/dict/words by default.
''')
parser.add_argument('length', default=4, type=int, nargs='?', help='The number of letters in the recursonym')
parser.add_argument('--dictionary-path', dest='dictionaryPath', default='/usr/share/dict/words')
opts = parser.parse_args()
# Build the working vocabulary: every all-lowercase entry in the dictionary
# file (proper nouns, acronyms, etc. are excluded).
def getWords(dictionaryPath):
    with open(dictionaryPath) as handle:
        tokens = handle.read().split()
    return set(filter(str.islower, tokens))
ALL_WORDS = getWords(opts.dictionaryPath)
# Grouping key: the word's distinct letters, sorted and joined into a string.
def calcKey(word):
    distinct_letters = sorted(set(word))
    return ''.join(distinct_letters)
# Count how many distinct first letters appear among the given strings.
def uniqueStart(stringList):
    first_letters = set()
    for word in stringList:
        first_letters.add(word[0])
    return len(first_letters)
def findRecursonyms(length, wordList):
    # Bucket isograms of the requested length (words whose letters are all
    # distinct) by their sorted letter set.
    buckets = defaultdict(list)
    for word in wordList:
        if len(word) != length:
            continue
        key = calcKey(word)
        if len(key) == length:
            buckets[key].append(word)
    # Keep only groups in which every word starts with a different letter.
    return {key: group for key, group in buckets.items()
            if uniqueStart(group) == length}
def printRecursonyms(wordMap):
    # One upper-cased group per paragraph; print('\n') leaves a blank line
    # between groups.
    for words in wordMap.values():
        for word in words:
            print(word.upper())
        print('\n')
printRecursonyms( findRecursonyms(opts.length, ALL_WORDS) )
| {
"content_hash": "78ec904bf93824e844446753d56410a1",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 109,
"avg_line_length": 32.22,
"alnum_prop": 0.6927374301675978,
"repo_name": "nathanfdunn/MITE",
"id": "51d8077bdd59f32302c5fd5ed2ef608b66d79bf5",
"size": "1611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MITE.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "737"
},
{
"name": "TeX",
"bytes": "571"
}
],
"symlink_target": ""
} |
"""
:author: Thomas Calmant
:copyright: Copyright 2015, isandlaTech
:license: Apache License 2.0
:version: 0.0.1
:status: Alpha
..
Copyright 2015 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
| {
"content_hash": "5ff53ee41d1b72cc2303caddd026dfbe",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 80,
"avg_line_length": 30.78125,
"alnum_prop": 0.6568527918781726,
"repo_name": "gattazr/cohorte-herald",
"id": "8c63d44fae91587e957f0fed110028ad99e42675",
"size": "1035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/herald/utilities/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "362482"
},
{
"name": "Python",
"bytes": "349342"
},
{
"name": "Shell",
"bytes": "1801"
}
],
"symlink_target": ""
} |
"""Create a static WebGL library and run it in the browser."""
from __future__ import absolute_import, print_function
import os, shutil, SimpleHTTPServer, SocketServer
import tvm
from tvm.contrib import emscripten, util
import numpy as np
def try_static_webgl_library():
    """Build a tiny OpenGL "identity" TVM kernel into a static JS library
    and serve it over HTTP for in-browser testing.

    Side effects: changes the working directory to ../../lib, writes
    identity_static.js, tvm_runtime.js, and identity_static.html there,
    then blocks forever serving them on port 8080.

    NOTE: Python 2 only (SimpleHTTPServer / SocketServer modules).
    """
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    # Change to lib/ which contains "libtvm_runtime.bc".
    os.chdir(os.path.join(curr_path, "../../lib"))
    # Create OpenGL module: B is an element-wise copy (identity) of A.
    n = tvm.var("n")
    A = tvm.placeholder((n,), name='A', dtype="float")
    B = tvm.compute((n,), lambda *i: A[i], name="B")
    s = tvm.create_schedule(B.op)
    s[B].opengl()
    # Host side is compiled to asm.js via Emscripten.
    target_host = "llvm -target=asmjs-unknown-emscripten -system-lib"
    f = tvm.build(s, [A, B], name="identity", target="opengl",
                  target_host=target_host)
    # Create a JS library that contains both the module and the tvm runtime.
    path_dso = "identity_static.js"
    f.export_library(path_dso, emscripten.create_js, options=[
        "-s", "USE_GLFW=3",
        "-s", "USE_WEBGL2=1",
        "-lglfw",
    ])
    # Create "tvm_runtime.js" and "identity_static.html" in lib/
    shutil.copyfile(os.path.join(curr_path, "../../web/tvm_runtime.js"),
                    "tvm_runtime.js")
    shutil.copyfile(os.path.join(curr_path, "test_static_webgl_library.html"),
                    "identity_static.html")
    # Serve the generated artifacts; blocks until interrupted.
    port = 8080
    handler = SimpleHTTPServer.SimpleHTTPRequestHandler
    httpd = SocketServer.TCPServer(("", port), handler)
    print("Please open http://localhost:" + str(port) + "/identity_static.html")
    httpd.serve_forever()
if __name__ == "__main__":
    try_static_webgl_library()
| {
"content_hash": "24e03d5b6393b84d00cbbab4be2f8808",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 80,
"avg_line_length": 34.795918367346935,
"alnum_prop": 0.6304985337243402,
"repo_name": "phisiart/tvm",
"id": "262416c42506116e177754fce66fbe97dd91b6cd",
"size": "1705",
"binary": false,
"copies": "2",
"ref": "refs/heads/opengl",
"path": "tests/webgl/test_static_webgl_library.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "23532"
},
{
"name": "C++",
"bytes": "1821664"
},
{
"name": "CMake",
"bytes": "11122"
},
{
"name": "HTML",
"bytes": "2885"
},
{
"name": "Java",
"bytes": "110991"
},
{
"name": "JavaScript",
"bytes": "40902"
},
{
"name": "Makefile",
"bytes": "22895"
},
{
"name": "Objective-C",
"bytes": "8288"
},
{
"name": "Objective-C++",
"bytes": "32208"
},
{
"name": "Python",
"bytes": "1052297"
},
{
"name": "Shell",
"bytes": "17831"
},
{
"name": "Verilog",
"bytes": "24380"
}
],
"symlink_target": ""
} |
import Adafruit_BBIO.PWM as PWM
class Servo(object):
    '''A servo motor driven through the Adafruit_BBIO PWM module.

    Wraps a single PWM-capable pin on a BeagleBone Black (BBB) and
    translates angular positions (degrees) into PPM pulse widths, which
    are emitted as PWM duty cycles.

    Attributes:
        pin (string): The physical pin on the BBB (i.e. 'P9_14')
        servo_range (tuple): Min and max of servo range in degrees
        ppm_range (tuple): Min and max of the PPM pulse width in milliseconds
        ppm_freq (number): Frequency of the PPM/PWM driver in Hz
        position (number): The position of the servo in degrees
        initialized (boolean): True once the PWM channel has been started
    '''

    # Class-level defaults; __init__ may override them per instance.
    pin = ''                # BBB PWM pin in use
    servo_range = (0, 180)  # servo travel, degrees
    ppm_range = (0.5, 2.5)  # PPM pulse width range, milliseconds
    ppm_freq = 50           # PWM/PPM driver frequency, Hz

    def __init__(self, pin, start, **kwargs):
        '''Create a Servo on `pin` and immediately command it to `start`.'''
        self.pin = pin
        for name in ('servo_range', 'ppm_range', 'ppm_freq'):
            if name in kwargs:
                setattr(self, name, kwargs[name])
        self.initialized = False
        # Assignment goes through the property setter, which starts PWM.
        self.position = start

    @property
    def position(self):
        '''Current servo position in degrees - assignment moves the servo.'''
        return self._position

    @position.setter
    def position(self, value):
        # Reject out-of-range commands before touching the hardware.
        if not (self.servo_range[0] <= value <= self.servo_range[1]):
            message_string = 'Servo commanded to {}. Valid range {}.'
            raise Exception(message_string.format(value, self.servo_range))
        self._position = value
        if self.initialized:
            PWM.set_duty_cycle(self.pin, self._position_duty_cycle())
        else:
            PWM.start(self.pin, self._position_duty_cycle(), self.ppm_freq)
            self.initialized = True

    def cleanup(self):
        '''Stop the PWM output and release the hardware.'''
        PWM.stop(self.pin)
        PWM.cleanup()
        self.initialized = False

    def _position_duty_cycle(self):
        '''Translate the current position into a PWM duty-cycle percentage.'''
        pulse_span = self.ppm_range[1] - self.ppm_range[0]
        degree_span = self.servo_range[1] - self.servo_range[0]
        ms_per_degree = pulse_span / float(degree_span)
        offset_ms = (self.position - self.servo_range[0]) * ms_per_degree
        pulse_ms = offset_ms + self.ppm_range[0]
        pulse_seconds = pulse_ms / float(1000)
        # duty cycle (%) = fraction of the PWM period spent high, times 100
        return (pulse_seconds * self.ppm_freq) * 100
| {
"content_hash": "427dd32b03d120f03b542fd1e35d11c9",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 79,
"avg_line_length": 37.84615384615385,
"alnum_prop": 0.6205962059620597,
"repo_name": "SpinStabilized/bbb-primer",
"id": "3f59d2fdd2dd2bd955622b50af90e895daad6ef6",
"size": "2952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chapter12/bbbservo.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1229"
},
{
"name": "JavaScript",
"bytes": "2322"
},
{
"name": "Python",
"bytes": "40014"
},
{
"name": "Shell",
"bytes": "2061"
}
],
"symlink_target": ""
} |
"""URL endpoint to add new graph data to the datastore."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
import logging
import six
from google.appengine.api import datastore_errors
from google.appengine.ext import ndb
from dashboard import add_point
from dashboard import find_anomalies
from dashboard import graph_revisions
from dashboard import units_to_direction
from dashboard import sheriff_config_client
from dashboard.common import datastore_hooks
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
from flask import request, make_response
def AddPointQueuePost():
  """Adds a set of points from the post data (Flask entry point).

  Request parameters:
    data: JSON encoding of a list of dictionaries. Each dictionary represents
        one point to add. For each dict, one Row entity will be added, and
        any required TestMetadata or Master or Bot entities will be created.

  Returns:
    An empty 200 response on success, or a 400 response if a datastore
    request fails.
  """
  # Task-queue requests bypass normal user-based access checks.
  datastore_hooks.SetPrivilegedRequest(flask_flag=True)
  data = json.loads(request.values.get('data'))
  # Issue async gets up front so later lookups hit the in-context cache.
  _PrewarmGets(data)
  all_put_futures = []
  added_rows = []
  parent_tests = []
  for row_dict in data:
    try:
      new_row, parent_test, put_futures = _AddRow(row_dict)
      added_rows.append(new_row)
      parent_tests.append(parent_test)
      all_put_futures.extend(put_futures)
    except add_point.BadRequestError as e:
      # Invalid rows are skipped; the rest of the batch still goes in.
      logging.error('Could not add %s, it was invalid.', str(e))
    except datastore_errors.BadRequestError as e:
      logging.info('While trying to store %s', row_dict)
      logging.error('Datastore request failed: %s.', str(e))
      return request_handler.RequestHandlerReportError(
          'Datastore request failed: %s.' % str(e), status=400)
  ndb.Future.wait_all(all_put_futures)
  client = sheriff_config_client.GetSheriffConfigClient()
  tests_keys = []
  # Only run anomaly detection for tests that are subscribed, have rows,
  # and are not reference-build runs.
  for t in parent_tests:
    reason = []
    subscriptions, _ = client.Match(t.test_path, check=True)
    if not subscriptions:
      reason.append('subscriptions')
    if not t.has_rows:
      reason.append('has_rows')
    if IsRefBuild(t.key):
      reason.append('RefBuild')
    if reason:
      logging.info('Skip test: %s reason=%s', t.key, ','.join(reason))
      continue
    logging.info('Process test: %s', t.key)
    tests_keys.append(t.key)
  # Updating of the cached graph revisions should happen after put because
  # it requires the new row to have a timestamp, which happens upon put.
  futures = [
      graph_revisions.AddRowsToCacheAsync(added_rows),
      find_anomalies.ProcessTestsAsync(tests_keys)
  ]
  ndb.Future.wait_all(futures)
  return make_response('')
if six.PY2:
  # Legacy webapp2-style handler used only on the Python 2 runtime; the
  # Flask function above is the Python 3 equivalent of the same logic.
  class AddPointQueueHandler(request_handler.RequestHandler):
    """Request handler to process points and add them to the datastore.

    This request handler is intended to be used only by requests using the
    task queue; it shouldn't be directly from outside.
    """

    def get(self):
      """A get request is the same a post request for this endpoint."""
      logging.debug('crbug/1298177 - add_point_queue GET triggered')
      self.post()

    def post(self):
      """Adds a set of points from the post data.

      Request parameters:
        data: JSON encoding of a list of dictionaries. Each dictionary
            represents one point to add. For each dict, one Row entity will
            be added, and any required TestMetadata or Master or Bot
            entities will be created.
      """
      logging.debug('crbug/1298177 - add_point_queue POST triggered')
      # Task-queue requests bypass normal user-based access checks.
      datastore_hooks.SetPrivilegedRequest()
      data = json.loads(self.request.get('data'))
      _PrewarmGets(data)
      all_put_futures = []
      added_rows = []
      parent_tests = []
      for row_dict in data:
        try:
          new_row, parent_test, put_futures = _AddRow(row_dict)
          added_rows.append(new_row)
          parent_tests.append(parent_test)
          all_put_futures.extend(put_futures)
        except add_point.BadRequestError as e:
          # Invalid rows are skipped; the rest of the batch still goes in.
          logging.error('Could not add %s, it was invalid.', str(e))
        except datastore_errors.BadRequestError as e:
          logging.info('While trying to store %s', row_dict)
          logging.error('Datastore request failed: %s.', str(e))
          return
      ndb.Future.wait_all(all_put_futures)
      client = sheriff_config_client.GetSheriffConfigClient()
      tests_keys = []
      # Only run anomaly detection for subscribed, non-ref tests with rows.
      for t in parent_tests:
        reason = []
        subscriptions, _ = client.Match(t.test_path, check=True)
        if not subscriptions:
          reason.append('subscriptions')
        if not t.has_rows:
          reason.append('has_rows')
        if IsRefBuild(t.key):
          reason.append('RefBuild')
        if reason:
          logging.info('Skip test: %s reason=%s', t.key, ','.join(reason))
          continue
        logging.info('Process test: %s', t.key)
        tests_keys.append(t.key)
      # Updating of the cached graph revisions should happen after put because
      # it requires the new row to have a timestamp, which happens upon put.
      futures = [
          graph_revisions.AddRowsToCacheAsync(added_rows),
          find_anomalies.ProcessTestsAsync(tests_keys)
      ]
      ndb.Future.wait_all(futures)
def _PrewarmGets(data):
  """Prepares the cache so that fetching is faster later.

  The add_point request handler does a LOT of gets, and it's possible for
  each to take seconds.  NDB does automatic in-context caching:
  https://developers.google.com/appengine/docs/python/ndb/cache#incontext
  so issuing async gets for every Master, Bot, and TestMetadata key up
  front caches the results for the rest of the request.

  Args:
    data: The request json.
  """
  master_keys = set()
  bot_keys = set()
  test_keys = set()
  for row in data:
    master = row['master']
    bot = row['bot']
    master_keys.add(ndb.Key('Master', master))
    bot_keys.add(ndb.Key('Master', master, 'Bot', bot))
    # Each prefix of the test path is its own TestMetadata entity.
    path = '%s/%s' % (master, bot)
    for part in row['test'].split('/'):
      if not part:
        break
      path += '/%s' % part
      test_keys.add(ndb.Key('TestMetadata', path))
  ndb.get_multi_async(list(master_keys) + list(bot_keys) + list(test_keys))
def _AddRow(row_dict):
  """Adds a Row entity to the datastore.

  There are three main things that are needed in order to make a new entity;
  the ID, the parent key, and all of the properties. Making these three
  things, and validating the related input fields, are delegated to
  sub-functions.

  Args:
    row_dict: A dictionary obtained from the JSON that was received.

  Returns:
    A triple: The new row, the parent test, and a list of entity put futures.

  Raises:
    add_point.BadRequestError: The input dict was invalid.
    RuntimeError: The required parent entities couldn't be created.
  """
  parent_test = _GetParentTest(row_dict)
  test_container_key = utils.GetTestContainerKey(parent_test.key)
  columns = add_point.GetAndValidateRowProperties(row_dict)
  row_id = add_point.GetAndValidateRowId(row_dict)
  # Update the last-added revision record for this test.
  master, bot, test = row_dict['master'], row_dict['bot'], row_dict['test']
  test_path = '%s/%s/%s' % (master, bot, test)
  last_added_revision_entity = graph_data.LastAddedRevision(
      id=test_path, revision=row_id)
  entity_put_futures = []
  entity_put_futures.append(last_added_revision_entity.put_async())
  # If the row ID isn't the revision, that means that the data is Chrome OS
  # data, and we want the default revision to be Chrome version.
  if row_id != row_dict.get('revision'):
    columns['a_default_rev'] = 'r_chrome_version'
  # Create the entity and add it asynchronously.
  new_row = graph_data.Row(id=row_id, parent=test_container_key, **columns)
  entity_put_futures.append(new_row.put_async())
  entity_put_futures.append(new_row.UpdateParentAsync())
  return new_row, parent_test, entity_put_futures
def _GetParentTest(row_dict):
  """Gets or creates the TestMetadata entity a Row belongs to.

  Args:
    row_dict: A dictionary from the data parameter.

  Returns:
    A TestMetadata entity.

  Raises:
    RuntimeError: Something went wrong when trying to get the parent test.
  """
  master = row_dict.get('master')
  bot = row_dict.get('bot')
  # Internal-only status is inherited from the bot this row was posted for.
  internal_only = graph_data.Bot.GetInternalOnlySync(master, bot)
  return GetOrCreateAncestors(
      master,
      bot,
      row_dict.get('test').strip('/'),
      internal_only=internal_only,
      benchmark_description=row_dict.get('benchmark_description'),
      units=row_dict.get('units'),
      improvement_direction=_ImprovementDirection(
          row_dict.get('higher_is_better')),
      unescaped_story_name=row_dict.get('unescaped_story_name'))
def _ImprovementDirection(higher_is_better):
"""Returns an improvement direction (constant from alerts_data) or None."""
if higher_is_better is None:
return None
return anomaly.UP if higher_is_better else anomaly.DOWN
def GetOrCreateAncestors(master_name,
                         bot_name,
                         test_name,
                         internal_only=True,
                         benchmark_description='',
                         units=None,
                         improvement_direction=None,
                         unescaped_story_name=None):
  """Gets or creates all parent Master, Bot, TestMetadata entities for a Row.

  Args:
    master_name: Name for the Master entity.
    bot_name: Name for the Bot entity (child of the Master).
    test_name: Slash-separated test path below master/bot; every prefix
        becomes its own TestMetadata entity.
    internal_only: Whether the created entities are internal-only.
    benchmark_description: Description copied onto the suite-level test.
    units: Units string; applied only to the leaf test.
    improvement_direction: Applied only to the leaf test, when given.
    unescaped_story_name: Applied only to the leaf test, when given.

  Returns:
    The TestMetadata entity for the leaf of test_name.
  """
  master_entity = _GetOrCreateMaster(master_name)
  _GetOrCreateBot(bot_name, master_entity.key, internal_only)
  # Add all ancestor tests to the datastore in order.
  ancestor_test_parts = test_name.split('/')
  test_path = '%s/%s' % (master_name, bot_name)
  suite = None
  for index, ancestor_test_name in enumerate(ancestor_test_parts):
    # Certain properties should only be updated if the TestMetadata is for a
    # leaf test.
    is_leaf_test = (index == len(ancestor_test_parts) - 1)
    test_properties = {
        'units': units if is_leaf_test else None,
        'internal_only': internal_only,
    }
    if is_leaf_test and improvement_direction is not None:
      test_properties['improvement_direction'] = improvement_direction
    if is_leaf_test and unescaped_story_name is not None:
      test_properties['unescaped_story_name'] = unescaped_story_name
    ancestor_test = _GetOrCreateTest(ancestor_test_name, test_path,
                                     test_properties)
    if index == 0:
      # The first path component is the suite; remembered so the benchmark
      # description can be attached to it below.
      suite = ancestor_test
    test_path = ancestor_test.test_path
  if benchmark_description and suite.description != benchmark_description:
    # NOTE(review): the new description is assigned but not put() here --
    # presumably persisted by a later put elsewhere; verify against callers.
    suite.description = benchmark_description
  return ancestor_test
def _GetOrCreateMaster(name):
  """Fetches the Master entity with this id, creating it if absent."""
  master = graph_data.Master.get_by_id(name)
  if master is None:
    master = graph_data.Master(id=name)
    master.put()
  return master
def _GetOrCreateBot(name, parent_key, internal_only):
  """Fetches the Bot under the given Master, creating it if absent.

  An existing Bot whose internal_only flag disagrees with the argument is
  updated and re-put.
  """
  bot = graph_data.Bot.get_by_id(name, parent=parent_key)
  if bot is None:
    logging.info('Adding bot %s/%s', parent_key.id(), name)
    bot = graph_data.Bot(
        id=name, parent=parent_key, internal_only=internal_only)
    bot.put()
    return bot
  if bot.internal_only != internal_only:
    bot.internal_only = internal_only
    bot.put()
  return bot
def _GetOrCreateTest(name, parent_test_path, properties):
  """Either gets an entity if it already exists, or creates one.

  If the entity already exists but the properties are different than the ones
  specified, then the properties will be updated first. This implies that a
  new point is being added for an existing TestMetadata, so if the TestMetadata
  has been previously marked as deprecated then it can be updated and marked as
  non-deprecated.

  If the entity doesn't yet exist, a new one will be created with the given
  properties.

  NOTE: this function may mutate the caller's `properties` dict (it can
  insert an 'improvement_direction' entry).

  Args:
    name: The string ID of the Test to get or create.
    parent_test_path: The test_path of the parent entity.
    properties: A dictionary of properties that should be set.

  Returns:
    An entity (which has already been put).

  Raises:
    datastore_errors.BadRequestError: Something went wrong getting the entity.
  """
  test_path = '%s/%s' % (parent_test_path, name)
  existing = graph_data.TestMetadata.get_by_id(test_path)
  if not existing:
    # Add improvement direction if this is a new test.
    if 'units' in properties and 'improvement_direction' not in properties:
      units = properties['units']
      direction = units_to_direction.GetImprovementDirection(units)
      properties['improvement_direction'] = direction
    elif 'units' not in properties or properties['units'] is None:
      properties['improvement_direction'] = anomaly.UNKNOWN
    new_entity = graph_data.TestMetadata(id=test_path, **properties)
    new_entity.UpdateSheriff()
    new_entity.put()
    # TODO(sullivan): Consider putting back Test entity in a scoped down
    # form so we can check if it exists here.
    return new_entity
  # Flag indicating whether we want to re-put the entity before returning.
  properties_changed = False
  if existing.deprecated:
    existing.deprecated = False
    properties_changed = True
  # Special case to update improvement direction from units for TestMetadata
  # entities when units are being updated. If an improvement direction is
  # explicitly provided in the properties, then we can skip this check since it
  # will get overwritten below. Additionally, by skipping we avoid
  # touching the entity and setting off an expensive put() operation.
  if properties.get('improvement_direction') is None:
    units = properties.get('units')
    if units:
      direction = units_to_direction.GetImprovementDirection(units)
      if direction != existing.improvement_direction:
        properties['improvement_direction'] = direction
  # Go through the list of general properties and update if necessary.
  for prop, value in list(properties.items()):
    if (hasattr(existing, prop) and value is not None
        and getattr(existing, prop) != value):
      setattr(existing, prop, value)
      properties_changed = True
  if properties_changed:
    existing.UpdateSheriff()
    existing.put()
  return existing
def IsRefBuild(test_key):
  """Returns True if the TestMetadata key names a reference-build run."""
  last_part = test_key.id().split('/')[-1]
  return last_part == 'ref' or last_part.endswith('_ref')
| {
"content_hash": "48bc4be3cba9a4defcfbb885e88ccbc1",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 81,
"avg_line_length": 35.67142857142857,
"alnum_prop": 0.6883593645708184,
"repo_name": "catapult-project/catapult",
"id": "6be578a5f11ababcb088e225ad9d4b11d6a60240",
"size": "15144",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "dashboard/dashboard/add_point_queue.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
"""Presubmit tests for /tools/android/infobar_deprecation.
Runs Python unit tests in /tools/android/infobar_deprecation on upload.
"""
USE_PYTHON3 = True
def CheckChangeOnUpload(input_api, output_api):
    """Runs the infobar_deprecation unit tests (Python 3 only) on upload."""
    return list(
        input_api.canned_checks.RunUnitTests(
            input_api,
            output_api,
            ['./infobar_deprecation_test.py'],
            run_on_python2=False))
| {
"content_hash": "828b30a643ab46445d5ffeab62e5e850",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 77,
"avg_line_length": 31.625,
"alnum_prop": 0.5513833992094862,
"repo_name": "chromium/chromium",
"id": "4b77833e12099ca3539e66694dce975c207c3d97",
"size": "646",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "tools/android/infobar_deprecation/PRESUBMIT.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""A small wrapper around nosetests.
Avoids disruptive messages when viewing error messages.
"""
import sys
import nose
if __name__ == "__main__":
    import logging
    # Verbose by default so test failures come with full context.
    logging.basicConfig(level=logging.DEBUG)
    # Suppress sigma-clipping debug log:
    logging.getLogger('tkp.sourcefinder.image.sigmaclip').setLevel(logging.ERROR)
    # logging.getLogger().setLevel(logging.ERROR)
    # Forward this script's CLI arguments straight to nose.
    nose.run(argv=sys.argv)
| {
"content_hash": "ab2221c3d7cc7bf83581870ead4ad354",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 81,
"avg_line_length": 27.6,
"alnum_prop": 0.7270531400966184,
"repo_name": "transientskp/tkp",
"id": "a3425fe41710fce6ac508138745c26e8d8e9a290",
"size": "436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/runtests.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PLpgSQL",
"bytes": "1887"
},
{
"name": "Python",
"bytes": "960959"
},
{
"name": "Shell",
"bytes": "377"
}
],
"symlink_target": ""
} |
"""
In charge of OpenFlow 1.0 switches.
NOTE: This module is loaded automatically on startup unless POX is run
with --no-openflow .
"""
from pox.core import core
import pox
import pox.lib.util
from pox.lib.addresses import EthAddr
from pox.lib.revent.revent import EventMixin
import datetime
import time
from pox.lib.socketcapture import CaptureSocket
import pox.openflow.debug
from pox.openflow.util import make_type_to_unpacker_table
from pox.openflow import *
log = core.getLogger()
import socket
import select
# List where the index is an OpenFlow message type (OFPT_xxx), and
# the values are unpack functions that unpack the wire format of that
# type into a message object.
unpackers = make_type_to_unpacker_table()
# PIPE_BUF is the number of bytes guaranteed to be writable to a pipe
# atomically; DeferredSender._sliceup() uses it to size queued chunks.
try:
  PIPE_BUF = select.PIPE_BUF
except:
  try:
    # Try to get it from where PyPy (sometimes) has it
    import IN
    PIPE_BUF = IN.PIPE_BUF
  except:
    # (Hopefully) reasonable default
    PIPE_BUF = 512
import pox.openflow.libopenflow_01 as of
import threading
import os
import sys
import exceptions
from errno import EAGAIN, ECONNRESET, EADDRINUSE, EADDRNOTAVAIL
import traceback
def handle_HELLO (con, msg): #S
  """On HELLO, immediately ask the switch for its features."""
  con.send(of.ofp_features_request())
def handle_ECHO_REPLY (con, msg):
  """Echo replies need no action; receiving one is evidence of liveness."""
  pass
def handle_ECHO_REQUEST (con, msg): #S
  """Answer an echo request by rewriting it in place into an echo reply."""
  msg.header_type = of.OFPT_ECHO_REPLY
  con.send(msg)
def handle_FLOW_REMOVED (con, msg): #A
  """Raise FlowRemoved on the nexus, then per-connection unless halted."""
  nexus_event = con.ofnexus.raiseEventNoErrors(FlowRemoved, con, msg)
  halted = nexus_event is not None and nexus_event.halt == True
  if not halted:
    con.raiseEventNoErrors(FlowRemoved, con, msg)
def handle_FEATURES_REPLY (con, msg):
  """Handle OFPT_FEATURES_REPLY.

  Records the switch's features, ports and DPID on the connection.  The
  first reply for a connection also drives the connect handshake: pick a
  nexus, optionally set miss_send_len / clear flows, then send a barrier
  and only raise ConnectionUp once the barrier reply (or an HP-style
  "bad request" error for it) comes back.
  """
  # connect_time is set only when the handshake below completes, so a
  # non-None value means this is a features reply on an established link.
  connecting = con.connect_time == None
  con.features = msg
  con.original_ports._ports = set(msg.ports)
  con.ports._reset()
  con.dpid = msg.datapath_id
  if not connecting:
    # Already connected: refresh nexus state and re-raise FeaturesReceived.
    con.ofnexus._connect(con)
    e = con.ofnexus.raiseEventNoErrors(FeaturesReceived, con, msg)
    if e is None or e.halt != True:
      con.raiseEventNoErrors(FeaturesReceived, con, msg)
    return
  nexus = core.OpenFlowConnectionArbiter.getNexus(con)
  if nexus is None:
    # Cancel connection
    con.info("No OpenFlow nexus for " +
             pox.lib.util.dpidToStr(msg.datapath_id))
    con.disconnect()
    return
  con.ofnexus = nexus
  con.ofnexus._connect(con)
  #connections[con.dpid] = con
  barrier = of.ofp_barrier_request()
  listeners = []
  def finish_connecting (event):
    # Completes the handshake when our barrier's reply arrives.
    if event.xid != barrier.xid:
      con.dpid = None
      con.err("failed connect")
      con.disconnect()
    else:
      con.info("connected")
      con.connect_time = time.time()
      e = con.ofnexus.raiseEventNoErrors(ConnectionUp, con, msg)
      if e is None or e.halt != True:
        con.raiseEventNoErrors(ConnectionUp, con, msg)
      e = con.ofnexus.raiseEventNoErrors(FeaturesReceived, con, msg)
      if e is None or e.halt != True:
        con.raiseEventNoErrors(FeaturesReceived, con, msg)
    con.removeListeners(listeners)
  listeners.append(con.addListener(BarrierIn, finish_connecting))
  def also_finish_connecting (event):
    # Accept a "bad type" error for our barrier xid as handshake success
    # (see comment below about switches without barrier support).
    if event.xid != barrier.xid: return
    if event.ofp.type != of.OFPET_BAD_REQUEST: return
    if event.ofp.code != of.OFPBRC_BAD_TYPE: return
    # Okay, so this is probably an HP switch that doesn't support barriers
    # (ugh). We'll just assume that things are okay.
    finish_connecting(event)
  listeners.append(con.addListener(ErrorIn, also_finish_connecting))
  #TODO: Add a timeout for finish_connecting
  if con.ofnexus.miss_send_len is not None:
    con.send(of.ofp_set_config(miss_send_len =
                               con.ofnexus.miss_send_len))
  if con.ofnexus.clear_flows_on_connect:
    con.send(of.ofp_flow_mod(match=of.ofp_match(),command=of.OFPFC_DELETE))
  con.send(barrier)
  # Dead code kept from upstream (a string literal; never executed):
  """
  # Hack for old versions of cbench
  class C (object):
    xid = barrier.xid
  finish_connecting(C())
  """
def handle_STATS_REPLY (con, msg):
  """Raise RawStatsReply, then feed the reply into the connection's
  stats bookkeeping (which assembles multi-part replies)."""
  nexus_event = con.ofnexus.raiseEventNoErrors(RawStatsReply, con, msg)
  halted = nexus_event is not None and nexus_event.halt == True
  if not halted:
    con.raiseEventNoErrors(RawStatsReply, con, msg)
  con._incoming_stats_reply(msg)
def handle_PORT_STATUS (con, msg): #A
  """Keep the connection's port collection in sync, then raise PortStatus."""
  if msg.reason == of.OFPPR_DELETE:
    con.ports._forget(msg.desc)
  else:
    con.ports._update(msg.desc)
  nexus_event = con.ofnexus.raiseEventNoErrors(PortStatus, con, msg)
  halted = nexus_event is not None and nexus_event.halt == True
  if not halted:
    con.raiseEventNoErrors(PortStatus, con, msg)
def handle_PACKET_IN (con, msg): #A
  """Raise PacketIn on the nexus, then per-connection unless halted."""
  nexus_event = con.ofnexus.raiseEventNoErrors(PacketIn, con, msg)
  halted = nexus_event is not None and nexus_event.halt == True
  if not halted:
    con.raiseEventNoErrors(PacketIn, con, msg)
def handle_ERROR_MSG (con, msg): #A
  """Wrap the OpenFlow error in one ErrorIn instance and raise it on the
  nexus, then (unless halted) on the connection."""
  err = ErrorIn(con, msg)
  nexus_event = con.ofnexus.raiseEventNoErrors(err)
  halted = nexus_event is not None and nexus_event.halt == True
  if not halted:
    con.raiseEventNoErrors(err)
def handle_BARRIER (con, msg):
  """Raise BarrierIn on the nexus, then per-connection unless halted."""
  nexus_event = con.ofnexus.raiseEventNoErrors(BarrierIn, con, msg)
  halted = nexus_event is not None and nexus_event.halt == True
  if not halted:
    con.raiseEventNoErrors(BarrierIn, con, msg)
# handlers for stats replies
def handle_OFPST_DESC (con, parts):
  """Switch-description stats arrive as a single part; raise its body."""
  body = parts[0].body
  nexus_event = con.ofnexus.raiseEventNoErrors(SwitchDescReceived, con,
                                               parts[0], body)
  if nexus_event is None or nexus_event.halt != True:
    con.raiseEventNoErrors(SwitchDescReceived, con, parts[0], body)
def handle_OFPST_FLOW (con, parts):
  """Flatten flow-stats entries from every part and raise FlowStatsReceived."""
  stats = [entry for part in parts for entry in part.body]
  nexus_event = con.ofnexus.raiseEventNoErrors(FlowStatsReceived, con, parts,
                                               stats)
  if nexus_event is None or nexus_event.halt != True:
    con.raiseEventNoErrors(FlowStatsReceived, con, parts, stats)
def handle_OFPST_AGGREGATE (con, parts):
  """Aggregate stats arrive as a single part; raise its body."""
  body = parts[0].body
  nexus_event = con.ofnexus.raiseEventNoErrors(AggregateFlowStatsReceived,
                                               con, parts[0], body)
  if nexus_event is None or nexus_event.halt != True:
    con.raiseEventNoErrors(AggregateFlowStatsReceived, con, parts[0], body)
def handle_OFPST_TABLE (con, parts):
  """Flatten table-stats entries from every part and raise TableStatsReceived."""
  stats = [entry for part in parts for entry in part.body]
  nexus_event = con.ofnexus.raiseEventNoErrors(TableStatsReceived, con, parts,
                                               stats)
  if nexus_event is None or nexus_event.halt != True:
    con.raiseEventNoErrors(TableStatsReceived, con, parts, stats)
def handle_OFPST_PORT (con, parts):
  """Flatten port-stats entries from every part and raise PortStatsReceived."""
  stats = [entry for part in parts for entry in part.body]
  nexus_event = con.ofnexus.raiseEventNoErrors(PortStatsReceived, con, parts,
                                               stats)
  if nexus_event is None or nexus_event.halt != True:
    con.raiseEventNoErrors(PortStatsReceived, con, parts, stats)
def handle_OFPST_QUEUE (con, parts):
  """Flatten queue-stats entries from every part and raise QueueStatsReceived."""
  stats = [entry for part in parts for entry in part.body]
  nexus_event = con.ofnexus.raiseEventNoErrors(QueueStatsReceived, con, parts,
                                               stats)
  if nexus_event is None or nexus_event.halt != True:
    con.raiseEventNoErrors(QueueStatsReceived, con, parts, stats)
def handle_VENDOR (con, msg):
  """Vendor messages are not dispatched further; just record them."""
  log.info("Vendor msg: %s" % (msg,))
# A list, where the index is an OFPT, and the value is a function to
# call for that type
# This is generated automatically based on handlerMap
# (the fill happens outside this chunk; handlers starts empty here)
handlers = []

# Message handlers: maps OpenFlow message type constants (OFPT_*) to the
# handle_* functions defined above.
handlerMap = {
  of.OFPT_HELLO : handle_HELLO,
  of.OFPT_ECHO_REQUEST : handle_ECHO_REQUEST,
  of.OFPT_ECHO_REPLY : handle_ECHO_REPLY,
  of.OFPT_PACKET_IN : handle_PACKET_IN,
  of.OFPT_FEATURES_REPLY : handle_FEATURES_REPLY,
  of.OFPT_PORT_STATUS : handle_PORT_STATUS,
  of.OFPT_ERROR : handle_ERROR_MSG,
  of.OFPT_BARRIER_REPLY : handle_BARRIER,
  of.OFPT_STATS_REPLY : handle_STATS_REPLY,
  of.OFPT_FLOW_REMOVED : handle_FLOW_REMOVED,
  of.OFPT_VENDOR : handle_VENDOR,
}

# Maps stats-reply subtypes (OFPST_*) to the handle_OFPST_* functions above;
# these receive the full list of reply parts rather than a single message.
statsHandlerMap = {
  of.OFPST_DESC : handle_OFPST_DESC,
  of.OFPST_FLOW : handle_OFPST_FLOW,
  of.OFPST_AGGREGATE : handle_OFPST_AGGREGATE,
  of.OFPST_TABLE : handle_OFPST_TABLE,
  of.OFPST_PORT : handle_OFPST_PORT,
  of.OFPST_QUEUE : handle_OFPST_QUEUE,
}
# Deferred sending should be unusual, so don't worry too much about
# efficiency
class DeferredSender (threading.Thread):
  """
  Class that handles sending when a socket write didn't complete

  NOTE: Python 2 only -- run() below uses the tuple-unpacking
  "except ... as (errno, strerror)" syntax removed in Python 3.
  """
  def __init__ (self):
    threading.Thread.__init__(self)
    core.addListeners(self)
    # Maps a connection to the list of pending data chunks for it.
    self._dataForConnection = {}
    self._lock = threading.RLock()
    # Pipe-like object used to wake the select() loop in run().
    self._waker = pox.lib.util.makePinger()
    self.sending = False
    self.start()
  def _handle_GoingDownEvent (self, event):
    # Wake the loop so it can notice core.running is False and exit.
    self._waker.ping()
  def _sliceup (self, data):
    """
    Takes an array of data bytes, and slices into elements of
    PIPE_BUF bytes each
    """
    out = []
    while len(data) > PIPE_BUF:
      out.append(data[0:PIPE_BUF])
      data = data[PIPE_BUF:]
    if len(data) > 0:
      out.append(data)
    return out
  def send (self, con, data):
    # Queue data for con; the actual write happens on this thread's loop.
    with self._lock:
      self.sending = True
      data = self._sliceup(data)
      if con not in self._dataForConnection:
        self._dataForConnection[con] = data
      else:
        self._dataForConnection[con].extend(data)
      self._waker.ping()
  def kill (self, con):
    # Drop all pending data for a (presumably dead) connection.
    with self._lock:
      try:
        del self._dataForConnection[con]
      except:
        pass
      self._waker.ping()
  def run (self):
    while core.running:
      with self._lock:
        cons = self._dataForConnection.keys()
      # Wait for any backlogged connection to become writable, or a ping.
      rlist, wlist, elist = select.select([self._waker], cons, cons, 5)
      if not core.running: break
      with self._lock:
        if len(rlist) > 0:
          self._waker.pongAll()
        for con in elist:
          # Connection is in an error state: forget its queued data.
          try:
            del self._dataForConnection[con]
          except:
            pass
        for con in wlist:
          try:
            alldata = self._dataForConnection[con]
            while len(alldata):
              data = alldata[0]
              try:
                l = con.sock.send(data)
                if l != len(data):
                  # Partial write: keep the unsent tail at the queue head.
                  alldata[0] = data[l:]
                  break
                del alldata[0]
              except socket.error as (errno, strerror):
                if errno != EAGAIN:
                  con.msg("DeferredSender/Socket error: " + strerror)
                  con.disconnect()
                  del self._dataForConnection[con]
                break
              except:
                con.msg("Unknown error doing deferred sending")
                break
            if len(alldata) == 0:
              # Queue for this connection drained completely.
              try:
                del self._dataForConnection[con]
                if len(self._dataForConnection) == 0:
                  self.sending = False
                  break
              except:
                pass
          except:
            try:
              del self._dataForConnection[con]
            except:
              pass
class DummyOFNexus (object):
  """Stand-in nexus for connections not yet adopted by a real one.

  Nothing should normally be raised here, so every operation simply logs
  a warning.
  """
  def raiseEventNoErrors (self, event, *args, **kw):
    log.warning("%s raised on dummy OpenFlow nexus" % event)
  def raiseEvent (self, event, *args, **kw):
    # Same observable behavior as the no-errors variant here.
    self.raiseEventNoErrors(event, *args, **kw)
  def _disconnect (self, dpid):
    log.warning("%s disconnected on dummy OpenFlow nexus",
                pox.lib.util.dpidToStr(dpid))
_dummyOFNexus = DummyOFNexus()
"""
class FileCloser (object):
def __init__ (self):
from weakref import WeakSet
self.items = WeakSet()
core.addListeners(self)
import atexit
atexit.register(self._handle_DownEvent, None)
def _handle_DownEvent (self, event):
for item in self.items:
try:
item.close()
except Exception:
log.exception("Couldn't close a file while shutting down")
self.items.clear()
_itemcloser = FileCloser()
"""
class OFCaptureSocket (CaptureSocket):
  """
  Captures OpenFlow data to a pcap file

  Buffers partial reads/writes until a whole OpenFlow message (as framed
  by the 2-byte length in header bytes 2..3) is available, then hands it
  to the trace writer.  Any framing or write error disables capture
  permanently for this socket.
  """
  def __init__ (self, *args, **kw):
    super(OFCaptureSocket,self).__init__(*args, **kw)
    # Reassembly buffers for the receive and send directions.
    self._rbuf = bytes()
    self._sbuf = bytes()
    self._enabled = True
    #_itemcloser.items.add(self)
  def _recv_out (self, buf):
    """Called with received bytes; writes complete messages to the trace."""
    if not self._enabled: return
    self._rbuf += buf
    l = len(self._rbuf)
    # Need more than 4 bytes to read version/type/length from the header.
    while l > 4:
      if ord(self._rbuf[0]) != of.OFP_VERSION:
        log.error("Bad OpenFlow version while trying to capture trace")
        self._enabled = False
        break
      # Big-endian message length from header bytes 2..3.
      packet_length = ord(self._rbuf[2]) << 8 | ord(self._rbuf[3])
      if packet_length > l: break
      try:
        self._writer.write(False, self._rbuf[:packet_length])
      except Exception:
        log.exception("Exception while writing controller trace")
        self._enabled = False
      self._rbuf = self._rbuf[packet_length:]
      l = len(self._rbuf)
  def _send_out (self, buf, r):
    """Called with sent bytes; mirrors _recv_out for the outgoing side."""
    if not self._enabled: return
    self._sbuf += buf
    l = len(self._sbuf)
    while l > 4:
      if ord(self._sbuf[0]) != of.OFP_VERSION:
        log.error("Bad OpenFlow version while trying to capture trace")
        self._enabled = False
        break
      packet_length = ord(self._sbuf[2]) << 8 | ord(self._sbuf[3])
      if packet_length > l: break
      try:
        self._writer.write(True, self._sbuf[:packet_length])
      except Exception:
        log.exception("Exception while writing controller trace")
        self._enabled = False
      self._sbuf = self._sbuf[packet_length:]
      l = len(self._sbuf)
class PortCollection (object):
  """
  Keeps track of lists of ports and provides nice indexing.

  Ports may be looked up by port number, by hardware address (EthAddr), or
  by name.  A collection may be chained to a fallback collection via
  ``_chain``; port numbers added to ``_masks`` (see _forget) are hidden
  from the chained collection's results.
  """
  def __init__ (self):
    self._ports = set()   # Port entries local to this collection
    self._masks = set()   # Port numbers hidden from the chained collection
    self._chain = None    # Optional fallback PortCollection

  def _reset (self):
    """Remove all local entries and masks."""
    self._ports.clear()
    self._masks.clear()

  def _forget (self, port_no):
    """Drop any local entry for port_no and hide it from the chain."""
    self._masks.add(port_no)
    self._ports = set([p for p in self._ports if p.port_no != port_no])

  def _update (self, port):
    """Add or replace the local entry for port and unhide it."""
    self._masks.discard(port.port_no)
    self._ports = set([p for p in self._ports if p.port_no != port.port_no])
    self._ports.add(port)

  def __str__ (self):
    if len(self) == 0:
      return "<Ports: Empty>"
    l = ["%s:%i"%(p.name,p.port_no) for p in sorted(self.values())]
    return "<Ports: %s>" % (", ".join(l),)

  def __len__ (self):
    return len(self.keys())

  def __getitem__ (self, index):
    """Look up a port by number (int/long), EthAddr, or name."""
    if isinstance(index, (int,long)):
      for p in self._ports:
        if p.port_no == index:
          return p
    elif isinstance(index, EthAddr):
      for p in self._ports:
        if p.hw_addr == index:
          return p
    else:
      for p in self._ports:
        if p.name == index:
          return p
    # Not found locally; fall back to the chained collection unless the
    # port number is masked.
    if self._chain:
      p = self._chain[index]
      if p.port_no not in self._masks:
        return p
    raise IndexError("No key %s" % (index,))

  def keys (self):
    if self._chain:
      k = set(self._chain.keys())
      k.difference_update(self._masks)
    else:
      k = set()
    k.update([p.port_no for p in self._ports])
    return list(k)

  def __iter__ (self):
    return iter(self.keys())

  def iterkeys (self):
    # NOTE: the original defined this method twice, identically; the
    # duplicate has been removed.
    return iter(self.keys())

  def __contains__ (self, index):
    try:
      self[index]
      return True
    except Exception:
      pass
    return False

  def values (self):
    return [self[k] for k in self.keys()]

  def items (self):
    return [(k,self[k]) for k in self.keys()]

  def itervalues (self):
    return iter(self.values())

  def iteritems (self):
    return iter(self.items())

  def has_key (self, k):
    return k in self

  def get (self, k, default=None):
    try:
      return self[k]
    except IndexError:
      return default

  def copy (self):
    """
    Return a flattened copy of this collection.

    The copy contains the ports currently visible through this collection
    (local entries plus unmasked chained entries) and has no chain of its
    own.  FIX: the original built the copy but never returned it, so
    copy() always returned None.
    """
    r = PortCollection()
    r._ports = set(self.values())
    return r
class Connection (EventMixin):
"""
A Connection object represents a single TCP session with an
openflow-enabled switch.
If the switch reconnects, a new connection object is instantiated.
"""
_eventMixin_events = set([
ConnectionUp,
ConnectionDown,
PortStatus,
FlowRemoved,
PacketIn,
ErrorIn,
BarrierIn,
RawStatsReply,
SwitchDescReceived,
FlowStatsReceived,
AggregateFlowStatsReceived,
TableStatsReceived,
PortStatsReceived,
QueueStatsReceived,
FlowRemoved,
])
# Globally unique identifier for the Connection instance
ID = 0
def msg (self, m):
#print str(self), m
log.debug(str(self) + " " + str(m))
def err (self, m):
#print str(self), m
log.error(str(self) + " " + str(m))
def info (self, m):
pass
#print str(self), m
log.info(str(self) + " " + str(m))
def __init__ (self, sock):
self._previous_stats = []
self.ofnexus = _dummyOFNexus
self.sock = sock
self.buf = ''
Connection.ID += 1
self.ID = Connection.ID
# TODO: dpid and features don't belong here; they should be eventually
# be in topology.switch
self.dpid = None
self.features = None
self.disconnected = False
self.disconnection_raised = False
self.connect_time = None
self.idle_time = time.time()
self.send(of.ofp_hello())
self.original_ports = PortCollection()
self.ports = PortCollection()
self.ports._chain = self.original_ports
#TODO: set a time that makes sure we actually establish a connection by
# some timeout
@property
def eth_addr (self):
dpid = self.dpid
if self.dpid is None:
raise RuntimeError("eth_addr not available")
return EthAddr("%012x" % (dpid & 0xffFFffFFffFF,))
def fileno (self):
return self.sock.fileno()
def close (self):
self.disconnect('closed')
try:
self.sock.close()
except:
pass
def disconnect (self, msg = 'disconnected', defer_event = False):
"""
disconnect this Connection (usually not invoked manually).
"""
if self.disconnected:
self.msg("already disconnected")
self.info(msg)
self.disconnected = True
try:
self.ofnexus._disconnect(self.dpid)
except:
pass
if self.dpid is not None:
if not self.disconnection_raised and not defer_event:
self.disconnection_raised = True
self.ofnexus.raiseEventNoErrors(ConnectionDown, self)
self.raiseEventNoErrors(ConnectionDown, self)
try:
#deferredSender.kill(self)
pass
except:
pass
try:
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
try:
pass
#TODO disconnect notification
except:
pass
def send (self, data):
"""
Send data to the switch.
Data should probably either be raw bytes in OpenFlow wire format, or
an OpenFlow controller-to-switch message object from libopenflow.
"""
if self.disconnected: return
if type(data) is not bytes:
# There's actually no reason the data has to be an instance of
# ofp_header, but this check is likely to catch a lot of bugs,
# so we check it anyway.
assert isinstance(data, of.ofp_header)
data = data.pack()
if deferredSender.sending:
log.debug("deferred sender is sending!")
deferredSender.send(self, data)
return
try:
l = self.sock.send(data)
if l != len(data):
self.msg("Didn't send complete buffer.")
data = data[l:]
deferredSender.send(self, data)
except socket.error as (errno, strerror):
if errno == EAGAIN:
self.msg("Out of send buffer space. " +
"Consider increasing SO_SNDBUF.")
deferredSender.send(self, data)
else:
self.msg("Socket error: " + strerror)
self.disconnect(defer_event=True)
def read (self):
"""
Read data from this connection. Generally this is just called by the
main OpenFlow loop below.
Note: This function will block if data is not available.
"""
try:
d = self.sock.recv(2048)
except:
return False
if len(d) == 0:
return False
self.buf += d
buf_len = len(self.buf)
offset = 0
while buf_len - offset >= 8: # 8 bytes is minimum OF message size
# We pull the first four bytes of the OpenFlow header off by hand
# (using ord) to find the version/length/type so that we can
# correctly call libopenflow to unpack it.
ofp_type = ord(self.buf[offset+1])
if ord(self.buf[offset]) != of.OFP_VERSION:
if ofp_type == of.OFPT_HELLO:
# We let this through and hope the other side switches down.
pass
else:
log.warning("Bad OpenFlow version (0x%02x) on connection %s"
% (ord(self.buf[offset]), self))
return False # Throw connection away
msg_length = ord(self.buf[offset+2]) << 8 | ord(self.buf[offset+3])
if buf_len - offset < msg_length: break
new_offset,msg = unpackers[ofp_type](self.buf, offset)
assert new_offset - offset == msg_length
offset = new_offset
try:
h = handlers[ofp_type]
h(self, msg)
except:
log.exception("%s: Exception while handling OpenFlow message:\n" +
"%s %s", self,self,
("\n" + str(self) + " ").join(str(msg).split('\n')))
continue
if offset != 0:
self.buf = self.buf[offset:]
return True
def _incoming_stats_reply (self, ofp):
# This assumes that you don't receive multiple stats replies
# to different requests out of order/interspersed.
if not ofp.is_last_reply:
if ofp.type not in [of.OFPST_FLOW, of.OFPST_TABLE,
of.OFPST_PORT, of.OFPST_QUEUE]:
log.error("Don't know how to aggregate stats message of type " +
str(ofp.type))
self._previous_stats = []
return
if len(self._previous_stats) != 0:
if ((ofp.xid == self._previous_stats[0].xid) and
(ofp.type == self._previous_stats[0].type)):
self._previous_stats.append(ofp)
else:
log.error("Was expecting continued stats of type %i with xid %i, " +
"but got type %i with xid %i" %
(self._previous_stats_reply.xid,
self._previous_stats_reply.type,
ofp.xid, ofp.type))
self._previous_stats = [ofp]
else:
self._previous_stats = [ofp]
if ofp.is_last_reply:
handler = statsHandlerMap.get(self._previous_stats[0].type, None)
s = self._previous_stats
self._previous_stats = []
if handler is None:
log.warn("No handler for stats of type " +
str(self._previous_stats[0].type))
return
handler(self, s)
def __str__ (self):
#return "[Con " + str(self.ID) + "/" + str(self.dpid) + "]"
if self.dpid is None:
d = str(self.dpid)
else:
d = pox.lib.util.dpidToStr(self.dpid)
return "[%s %i]" % (d, self.ID)
def wrap_socket (new_sock):
  """
  Wrap a freshly accepted switch socket in an OFCaptureSocket.

  A pcap trace file named after the current time and the peer's address is
  created in the working directory.  On any failure the original socket is
  returned unwrapped (tracing is best-effort).
  """
  fname = datetime.datetime.now().strftime("%Y-%m-%d-%I%M%p")
  fname += "_" + new_sock.getpeername()[0].replace(".", "_")
  fname += "_" + str(new_sock.getpeername()[1]) + ".pcap"
  # FIX: pcap is a binary format -- open in binary mode (the original used
  # text-mode "w", which corrupts traces on platforms translating newlines).
  # Also replaced Py2-only backtick-repr and file() with str()/open().
  pcapfile = open(fname, "wb")
  try:
    new_sock = OFCaptureSocket(new_sock, pcapfile,
                               local_addrs=(None,None,6633))
  except Exception:
    import traceback
    traceback.print_exc()
  return new_sock
from pox.lib.recoco.recoco import *
class OpenFlow_01_Task (Task):
  """
  The main recoco thread for listening to openflow messages
  """
  def __init__ (self, port = 6633, address = '0.0.0.0'):
    Task.__init__(self)
    self.port = int(port)
    self.address = address
    self.started = False
    # Defer actually starting until the core signals it is going up.
    core.addListener(pox.core.GoingUpEvent, self._handle_GoingUpEvent)

  def _handle_GoingUpEvent (self, event):
    self.start()

  def start (self):
    # Idempotent: only schedule the task once.
    if self.started:
      return
    self.started = True
    return super(OpenFlow_01_Task,self).start()

  def run (self):
    # List of open sockets/connections to select on
    sockets = []

    # Set up the listening socket (REUSEADDR so quick restarts work).
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
      listener.bind((self.address, self.port))
    except socket.error as (errno, strerror):
      log.error("Error %i while binding socket: %s", errno, strerror)
      if errno == EADDRNOTAVAIL:
        log.error(" You may be specifying a local address which is "
                  "not assigned to any interface.")
      elif errno == EADDRINUSE:
        log.error(" You may have another controller running.")
        log.error(" Use openflow.of_01 --port=<port> to run POX on "
                  "another port.")
      return
    listener.listen(16)
    sockets.append(listener)
    log.debug("Listening on %s:%s" %
              (self.address, self.port))

    # Connection currently being serviced (used in error reporting below).
    con = None
    while core.running:
      try:
        while True:
          con = None
          # Yield to the recoco scheduler until a socket is ready.
          rlist, wlist, elist = yield Select(sockets, [], sockets, 5)
          if len(rlist) == 0 and len(wlist) == 0 and len(elist) == 0:
            # Select timed out with no activity; check for shutdown.
            if not core.running: break

          for con in elist:
            if con is listener:
              raise RuntimeError("Error on listener socket")
            else:
              # Error on a switch connection: close it and forget it.
              try:
                con.close()
              except:
                pass
              try:
                sockets.remove(con)
              except:
                pass

          timestamp = time.time()
          for con in rlist:
            if con is listener:
              # New switch connecting.
              new_sock = listener.accept()[0]
              if pox.openflow.debug.pcap_traces:
                new_sock = wrap_socket(new_sock)
              new_sock.setblocking(0)
              # Note that instantiating a Connection object fires a
              # ConnectionUp event (after negotiation has completed)
              newcon = Connection(new_sock)
              sockets.append( newcon )
              #print str(newcon) + " connected"
            else:
              con.idle_time = timestamp
              # read() returns False when the connection should be dropped.
              if con.read() is False:
                con.close()
                sockets.remove(con)
      except exceptions.KeyboardInterrupt:
        break
      except:
        # Decide whether this is a mundane connection reset or something
        # worth a full traceback.
        doTraceback = True
        if sys.exc_info()[0] is socket.error:
          if sys.exc_info()[1][0] == ECONNRESET:
            con.info("Connection reset")
            doTraceback = False
        if doTraceback:
          log.exception("Exception reading connection " + str(con))
        if con is listener:
          log.error("Exception on OpenFlow listener. Aborting.")
          break
        # Drop the offending connection and keep serving the rest.
        try:
          con.close()
        except:
          pass
        try:
          sockets.remove(con)
        except:
          pass
    log.debug("No longer listening for connections")
    #pox.core.quit()
def _set_handlers ():
  """
  Populate the flat handlers list from handlerMap so that message handlers
  can be looked up by OpenFlow message type (list index).
  """
  highest_type = max(handlerMap.keys())
  handlers.extend([None] * (1 + highest_type))
  for msg_type, handler in handlerMap.items():
    handlers[msg_type] = handler
_set_handlers()
# Used by the Connection class
deferredSender = None
def launch (port = 6633, address = "0.0.0.0"):
  """
  Start the of_01 component: a DeferredSender plus an OpenFlow_01_Task
  listening on the given address/port.  Does nothing (returns None) if the
  component is already registered.
  """
  global deferredSender
  if core.hasComponent('of_01'):
    return None
  deferredSender = DeferredSender()
  if of._logger is None:
    of._logger = core.getLogger('libopenflow_01')
  task = OpenFlow_01_Task(port = int(port), address = address)
  core.register("of_01", task)
  return task
| {
"content_hash": "cd7d524c92bf86d61a2452445ec3c75f",
"timestamp": "",
"source": "github",
"line_count": 953,
"max_line_length": 76,
"avg_line_length": 28.388247639034628,
"alnum_prop": 0.6140681599763436,
"repo_name": "kulawczukmarcin/mypox",
"id": "88db0648dbf73199c391a03b71af1609eba35b5c",
"size": "27639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pox/openflow/of_01.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "381"
},
{
"name": "C++",
"bytes": "18894"
},
{
"name": "HTML",
"bytes": "999"
},
{
"name": "JavaScript",
"bytes": "9048"
},
{
"name": "Python",
"bytes": "1315017"
},
{
"name": "Shell",
"bytes": "18433"
}
],
"symlink_target": ""
} |
""" WARNING! this module is incomplete and may have rough edges. Use only
if necessary
"""
import py
from struct import unpack
from rpython.rlib.rstruct.formatiterator import FormatIterator
from rpython.rlib.rstruct.error import StructError
from rpython.rlib.objectmodel import specialize
from rpython.rlib.buffer import StringBuffer
class MasterReader(object):
    """Tracks a read position within an input string, with bounds checking
    and alignment support.  Per-slot readers (see reader_for_pos) all
    delegate to one shared MasterReader."""

    def __init__(self, s):
        self.inputbuf = StringBuffer(s)
        self.length = len(s)
        self.inputpos = 0

    def can_advance(self, count):
        """True if *count* more bytes fit within the input."""
        return self.inputpos + count <= self.length

    def advance(self, count):
        """Move forward *count* bytes; raise StructError if out of bounds."""
        if not self.can_advance(count):
            raise StructError("unpack str size too short for format")
        self.inputpos += count

    def read(self, count):
        """Consume and return the next *count* bytes."""
        start = self.inputpos
        stop = start + count
        self.advance(count)  # bounds-checks first; raises if we'd run out
        return self.inputbuf.getslice(start, stop, 1, count)

    def align(self, mask):
        """Round the position up to the next multiple of (mask + 1)."""
        self.inputpos = (self.inputpos + mask) & ~mask
class AbstractReader(object):
    """Common base for the per-slot reader classes built by
    reader_for_pos()."""
    pass
def reader_for_pos(pos):
    """Build a fresh AbstractReader subclass for result slot *pos*.

    Each generated unpacker slot gets its own class (named
    ``ReaderForPos<pos>``) so the RPython annotator can specialize them
    independently.  Instances wrap the shared MasterReader and store the
    unpacked value via appendobj().
    """
    class ReaderForPos(AbstractReader):
        def __init__(self, mr, bigendian):
            self.mr = mr
            self.bigendian = bigendian

        def appendobj(self, value):
            # Stash the decoded value; collected later as reader<i>.value.
            self.value = value

        def read(self, count):
            return self.mr.read(count)

        def can_advance(self, size):
            return self.mr.can_advance(size)

        def advance(self, size):
            self.mr.advance(size)

        def get_buffer_and_pos(self):
            return self.mr.inputbuf, self.mr.inputpos

    ReaderForPos.__name__ = 'ReaderForPos' + str(pos)
    return ReaderForPos
class FrozenUnpackIterator(FormatIterator):
    """Format iterator that compiles a struct format string into a
    specialized unpack() function (built once, when _freeze_ is called)."""

    def __init__(self, fmt):
        # List of (fmtdesc, repetitions, alignment-mask-or-None) entries,
        # one per result slot (except count-taking codes; see operate()).
        self.formats = []
        self.fmt = fmt

    def operate(self, fmtdesc, repetitions):
        # Count-taking format codes keep their repetition count in a single
        # entry; all others are expanded to one entry per result value.
        if fmtdesc.needcount:
            self.formats.append((fmtdesc, repetitions, None))
        else:
            for i in range(repetitions):
                self.formats.append((fmtdesc, 1, None))

    def align(self, mask):
        # Record the alignment mask on the most recent format entry.
        if self.formats:
            fmt, rep, _ = self.formats.pop()
            self.formats.append((fmt, rep, mask))

    def _create_unpacking_func(self):
        # Generate source for a straight-line unpack(s): one reader object
        # per result slot, one unpacker call per slot, then collect values.
        rg = range(len(self.formats))
        perform_lst = []
        miniglobals = {}
        miniglobals.update(globals())
        miniglobals['bigendian'] = self.bigendian
        for i in rg:
            fmtdesc, rep, mask = self.formats[i]
            # Bind each slot's unpacker and reader class as a global of the
            # generated function.
            miniglobals['unpacker%d' % i] = fmtdesc.unpack
            if mask is not None:
                perform_lst.append('master_reader.align(%d)' % mask)
            if not fmtdesc.needcount:
                perform_lst.append('unpacker%d(reader%d)' % (i, i))
            else:
                perform_lst.append('unpacker%d(reader%d, %d)' % (i, i, rep))
            miniglobals['reader_cls%d' % i] = reader_for_pos(i)
        readers = ";".join(["reader%d = reader_cls%d(master_reader, bigendian)"
                            % (i, i) for i in rg])
        perform = ";".join(perform_lst)
        unpackers = ','.join(['reader%d.value' % i for i in rg])
        source = py.code.Source("""
        def unpack(s):
            master_reader = MasterReader(s)
            %(readers)s
            %(perform)s
            return (%(unpackers)s)
        """ % locals())
        # Python 2 exec-in form: run the generated code with our globals.
        exec source.compile() in miniglobals
        self.unpack = miniglobals['unpack'] # override not-rpython version

    def _freeze_(self):
        # Called by the RPython toolchain at freeze time; builds the
        # specialized unpacker.  Must have interpreted a format first.
        assert self.formats
        self._create_unpacking_func()
        return True
@specialize.memo()
def create_unpacker(unpack_str):
    """Build a frozen unpacker for *unpack_str* (memoized per format via
    specialize.memo, so each format is compiled only once)."""
    unpacker = FrozenUnpackIterator(unpack_str)
    unpacker.interpret(unpack_str)
    assert unpacker._freeze_()
    return unpacker
@specialize.arg(0)
def runpack(fmt, input):
    """Unpack *input* according to the struct format string *fmt* and
    return the resulting tuple of values."""
    return create_unpacker(fmt).unpack(input)
| {
"content_hash": "d6d01826ef82a9401687fe19120e3608",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 79,
"avg_line_length": 31.701612903225808,
"alnum_prop": 0.5937420503688629,
"repo_name": "oblique-labs/pyVM",
"id": "0da9705bc245f2215dca079728388f4498076a39",
"size": "3932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpython/rlib/rstruct/runpack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Awk",
"bytes": "271"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "771638"
},
{
"name": "C++",
"bytes": "12850"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "M4",
"bytes": "12737"
},
{
"name": "Makefile",
"bytes": "35222"
},
{
"name": "Objective-C",
"bytes": "2224"
},
{
"name": "Python",
"bytes": "18329219"
},
{
"name": "Shell",
"bytes": "15396"
},
{
"name": "Vim script",
"bytes": "1107"
}
],
"symlink_target": ""
} |
"""Config flow for UPNP."""
from __future__ import annotations
import asyncio
from collections.abc import Mapping
from datetime import timedelta
from typing import Any, cast
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.components.ssdp import SsdpChange, SsdpServiceInfo
from homeassistant.const import CONF_SCAN_INTERVAL
from homeassistant.core import HomeAssistant, callback
from homeassistant.data_entry_flow import FlowResult
from .const import (
CONFIG_ENTRY_HOSTNAME,
CONFIG_ENTRY_SCAN_INTERVAL,
CONFIG_ENTRY_ST,
CONFIG_ENTRY_UDN,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
LOGGER,
SSDP_SEARCH_TIMEOUT,
ST_IGD_V1,
ST_IGD_V2,
)
def _friendly_name_from_discovery(discovery_info: ssdp.SsdpServiceInfo) -> str:
    """Extract user-friendly name from discovery."""
    upnp_data = discovery_info.upnp
    # Prefer the advertised friendly name, then the model name, and fall
    # back to the host the discovery came from.
    name = (
        upnp_data.get(ssdp.ATTR_UPNP_FRIENDLY_NAME)
        or upnp_data.get(ssdp.ATTR_UPNP_MODEL_NAME)
        or discovery_info.ssdp_headers.get("_host", "")
    )
    return cast(str, name)
def _is_complete_discovery(discovery_info: ssdp.SsdpServiceInfo) -> bool:
    """Test if discovery is complete and usable."""
    # A usable discovery advertises a UDN and has ST/location/USN set.
    required_fields = (
        discovery_info.ssdp_st,
        discovery_info.ssdp_location,
        discovery_info.ssdp_usn,
    )
    return ssdp.ATTR_UPNP_UDN in discovery_info.upnp and all(required_fields)
async def _async_wait_for_discoveries(hass: HomeAssistant) -> bool:
    """Wait for a device to be discovered.

    Returns True if an IGD v1/v2 device was seen before the search timeout,
    False otherwise.
    """
    discovered = asyncio.Event()

    async def device_discovered(info: SsdpServiceInfo, change: SsdpChange) -> None:
        if change == SsdpChange.BYEBYE:
            return
        LOGGER.info(
            "Device discovered: %s, at: %s",
            info.ssdp_usn,
            info.ssdp_location,
        )
        discovered.set()

    # Listen for both IGD v1 and v2 service types.
    cancel_callbacks = [
        await ssdp.async_register_callback(
            hass,
            device_discovered,
            {ssdp.ATTR_SSDP_ST: service_type},
        )
        for service_type in (ST_IGD_V1, ST_IGD_V2)
    ]

    try:
        await asyncio.wait_for(
            discovered.wait(), timeout=SSDP_SEARCH_TIMEOUT
        )
    except asyncio.TimeoutError:
        return False
    finally:
        for cancel in cancel_callbacks:
            cancel()

    return True
async def _async_discover_igd_devices(
    hass: HomeAssistant,
) -> list[ssdp.SsdpServiceInfo]:
    """Discover IGD devices (both v1 and v2 service types)."""
    v1_devices = await ssdp.async_get_discovery_info_by_st(hass, ST_IGD_V1)
    v2_devices = await ssdp.async_get_discovery_info_by_st(hass, ST_IGD_V2)
    return v1_devices + v2_devices
class UpnpFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a UPnP/IGD config flow."""

    VERSION = 1

    # Paths:
    # - ssdp(discovery_info) --> ssdp_confirm(None) --> ssdp_confirm({}) --> create_entry()
    # - user(None): scan --> user({...}) --> create_entry()
    # - import(None) --> create_entry()

    def __init__(self) -> None:
        """Initialize the UPnP/IGD config flow."""
        # Discoveries the user may choose from (populated per-step).
        self._discoveries: list[SsdpServiceInfo] | None = None

    async def async_step_user(self, user_input: Mapping | None = None) -> FlowResult:
        """Handle a flow start."""
        LOGGER.debug("async_step_user: user_input: %s", user_input)
        if user_input is not None:
            # Ensure wanted device was discovered.
            assert self._discoveries
            matching_discoveries = [
                discovery
                for discovery in self._discoveries
                if discovery.ssdp_usn == user_input["unique_id"]
            ]
            if not matching_discoveries:
                return self.async_abort(reason="no_devices_found")

            discovery = matching_discoveries[0]
            await self.async_set_unique_id(discovery.ssdp_usn, raise_on_progress=False)
            return await self._async_create_entry_from_discovery(discovery)

        # Discover devices.
        discoveries = await _async_discover_igd_devices(self.hass)

        # Store discoveries which have not been configured.
        current_unique_ids = {
            entry.unique_id for entry in self._async_current_entries()
        }
        self._discoveries = [
            discovery
            for discovery in discoveries
            if (
                _is_complete_discovery(discovery)
                and discovery.ssdp_usn not in current_unique_ids
            )
        ]

        # Ensure anything to add.
        if not self._discoveries:
            return self.async_abort(reason="no_devices_found")

        # Let the user pick one of the remaining discoveries by USN.
        data_schema = vol.Schema(
            {
                vol.Required("unique_id"): vol.In(
                    {
                        discovery.ssdp_usn: _friendly_name_from_discovery(discovery)
                        for discovery in self._discoveries
                    }
                ),
            }
        )
        return self.async_show_form(
            step_id="user",
            data_schema=data_schema,
        )

    async def async_step_import(self, import_info: Mapping | None) -> FlowResult:
        """Import a new UPnP/IGD device as a config entry.

        This flow is triggered by `async_setup`. If no device has been
        configured before, find any device and create a config_entry for it.
        Otherwise, do nothing.
        """
        LOGGER.debug("async_step_import: import_info: %s", import_info)

        # Landed here via configuration.yaml entry.
        # Any device already added, then abort.
        if self._async_current_entries():
            LOGGER.debug("Already configured, aborting")
            return self.async_abort(reason="already_configured")

        # Discover devices.
        await _async_wait_for_discoveries(self.hass)
        discoveries = await _async_discover_igd_devices(self.hass)

        # Ensure anything to add. If not, silently abort.
        if not discoveries:
            LOGGER.info("No UPnP devices discovered, aborting")
            return self.async_abort(reason="no_devices_found")

        # Ensure complete discovery.
        discovery = discoveries[0]
        if not _is_complete_discovery(discovery):
            LOGGER.debug("Incomplete discovery, ignoring")
            return self.async_abort(reason="incomplete_discovery")

        # Ensure not already configuring/configured.
        unique_id = discovery.ssdp_usn
        await self.async_set_unique_id(unique_id)

        return await self._async_create_entry_from_discovery(discovery)

    async def async_step_ssdp(self, discovery_info: ssdp.SsdpServiceInfo) -> FlowResult:
        """Handle a discovered UPnP/IGD device.

        This flow is triggered by the SSDP component. It will check if the
        host is already configured and delegate to the import step if not.
        """
        LOGGER.debug("async_step_ssdp: discovery_info: %s", discovery_info)

        # Ensure complete discovery.
        if not _is_complete_discovery(discovery_info):
            LOGGER.debug("Incomplete discovery, ignoring")
            return self.async_abort(reason="incomplete_discovery")

        # Ensure not already configuring/configured.
        unique_id = discovery_info.ssdp_usn
        await self.async_set_unique_id(unique_id)
        hostname = discovery_info.ssdp_headers["_host"]
        self._abort_if_unique_id_configured(
            updates={CONFIG_ENTRY_HOSTNAME: hostname}, reload_on_update=False
        )

        # Handle devices changing their UDN, only allow a single host.
        existing_entries = self._async_current_entries()
        for config_entry in existing_entries:
            entry_hostname = config_entry.data.get(CONFIG_ENTRY_HOSTNAME)
            if entry_hostname == hostname:
                LOGGER.debug(
                    "Found existing config_entry with same hostname, discovery ignored"
                )
                return self.async_abort(reason="discovery_ignored")

        # Store discovery.
        self._discoveries = [discovery_info]

        # Ensure user recognizable.
        self.context["title_placeholders"] = {
            "name": _friendly_name_from_discovery(discovery_info),
        }

        return await self.async_step_ssdp_confirm()

    async def async_step_ssdp_confirm(
        self, user_input: Mapping | None = None
    ) -> FlowResult:
        """Confirm integration via SSDP."""
        LOGGER.debug("async_step_ssdp_confirm: user_input: %s", user_input)
        if user_input is None:
            return self.async_show_form(step_id="ssdp_confirm")

        assert self._discoveries
        discovery = self._discoveries[0]
        return await self._async_create_entry_from_discovery(discovery)

    @staticmethod
    @callback
    def async_get_options_flow(
        config_entry: config_entries.ConfigEntry,
    ) -> config_entries.OptionsFlow:
        """Define the config flow to handle options."""
        return UpnpOptionsFlowHandler(config_entry)

    async def _async_create_entry_from_discovery(
        self,
        discovery: SsdpServiceInfo,
    ) -> FlowResult:
        """Create an entry from discovery."""
        LOGGER.debug(
            "_async_create_entry_from_discovery: discovery: %s",
            discovery,
        )
        title = _friendly_name_from_discovery(discovery)
        data = {
            CONFIG_ENTRY_UDN: discovery.upnp[ssdp.ATTR_UPNP_UDN],
            CONFIG_ENTRY_ST: discovery.ssdp_st,
            CONFIG_ENTRY_HOSTNAME: discovery.ssdp_headers["_host"],
        }
        return self.async_create_entry(title=title, data=data)
class UpnpOptionsFlowHandler(config_entries.OptionsFlow):
    """Handle a UPnP options flow."""

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Initialize."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input: Mapping | None = None) -> FlowResult:
        """Manage the options."""
        if user_input is not None:
            # Apply the new interval to the running coordinator immediately.
            coordinator = self.hass.data[DOMAIN][self.config_entry.entry_id]
            # NOTE(review): the form stores the value under CONF_SCAN_INTERVAL
            # but it is read back here via CONFIG_ENTRY_SCAN_INTERVAL --
            # presumably both constants are the same string; confirm.
            update_interval_sec = user_input.get(
                CONFIG_ENTRY_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
            )
            update_interval = timedelta(seconds=update_interval_sec)
            LOGGER.debug("Updating coordinator, update_interval: %s", update_interval)
            coordinator.update_interval = update_interval
            return self.async_create_entry(title="", data=user_input)

        # Show the form, pre-filled with the currently stored interval.
        scan_interval = self.config_entry.options.get(
            CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
        )
        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema(
                {
                    vol.Optional(
                        CONF_SCAN_INTERVAL,
                        default=scan_interval,
                    ): vol.All(vol.Coerce(int), vol.Range(min=30)),
                }
            ),
        )
| {
"content_hash": "67ad942ef8fdcda46016df94d18530ae",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 91,
"avg_line_length": 34.81875,
"alnum_prop": 0.6118291150601328,
"repo_name": "GenericStudent/home-assistant",
"id": "e339c69d5d8b8a06eff6de3094186fad7b0bdeee",
"size": "11142",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/upnp/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
"""Magnum common internal object model"""
from oslo_versionedobjects import base as ovoo_base
from oslo_versionedobjects import fields as ovoo_fields
remotable_classmethod = ovoo_base.remotable_classmethod
remotable = ovoo_base.remotable
class MagnumObjectRegistry(ovoo_base.VersionedObjectRegistry):
    """Magnum's versioned-object registry; behavior is inherited unchanged
    from oslo.versionedobjects' VersionedObjectRegistry."""
    pass
class MagnumObject(ovoo_base.VersionedObject):
    """Base class and object factory.

    This forms the base of all objects that can be remoted or instantiated
    via RPC. Simply defining a class that inherits from this base class
    will make it remotely instantiatable. Objects should implement the
    necessary "get" classmethod routines as well as "save" object methods
    as appropriate.
    """

    OBJ_SERIAL_NAMESPACE = 'magnum_object'
    OBJ_PROJECT_NAMESPACE = 'magnum'

    def as_dict(self):
        """Return a dict of only the fields that are currently set."""
        result = {}
        for field_name in self.fields:
            if self.obj_attr_is_set(field_name):
                result[field_name] = getattr(self, field_name)
        return result
class MagnumObjectDictCompat(ovoo_base.VersionedObjectDictCompat):
    """Magnum alias for oslo.versionedobjects' VersionedObjectDictCompat
    mixin; behavior is inherited unchanged."""
    pass
class MagnumPersistentObject(object):
    """Mixin class for Persistent objects.

    This adds the fields that we use in common for all persistent objects.
    """

    # Timestamp bookkeeping fields shared by all persistent objects.
    fields = {
        'created_at': ovoo_fields.DateTimeField(nullable=True),
        'updated_at': ovoo_fields.DateTimeField(nullable=True),
    }
class MagnumObjectIndirectionAPI(ovoo_base.VersionedObjectIndirectionAPI):
    """Indirection API that routes object calls through the magnum
    conductor."""

    def __init__(self):
        super(MagnumObjectIndirectionAPI, self).__init__()
        # Imported here (not at module level) to avoid an import cycle.
        from magnum.conductor import api as conductor_api
        self._conductor = conductor_api.API()

    def object_action(self, context, objinst, objmethod, args, kwargs):
        """Run an instance method via the conductor."""
        return self._conductor.object_action(
            context, objinst, objmethod, args, kwargs)

    def object_class_action(self, context, objname, objmethod, objver,
                            args, kwargs):
        """Run a classmethod via the conductor."""
        return self._conductor.object_class_action(
            context, objname, objmethod, objver, args, kwargs)

    def object_backport(self, context, objinst, target_version):
        """Ask the conductor to backport an object to target_version."""
        return self._conductor.object_backport(
            context, objinst, target_version)
class MagnumObjectSerializer(ovoo_base.VersionedObjectSerializer):
    """Serializer that hydrates objects using MagnumObject as the base."""
    # Base class to use for object hydration
    OBJ_BASE_CLASS = MagnumObject
| {
"content_hash": "789e0df2ab85555e8e490e57f7634e78",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 34.070422535211264,
"alnum_prop": 0.6725919801570897,
"repo_name": "openstack/magnum",
"id": "995c6d235c025ad7211aee005aab8cfa5c67de66",
"size": "3024",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "magnum/objects/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "8788"
},
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "2302791"
},
{
"name": "Shell",
"bytes": "547968"
}
],
"symlink_target": ""
} |
"""
Pygments IRC formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import re
import unittest
from pygments.util import StringIO
from pygments.lexers import PythonLexer
from pygments.formatters import IRCFormatter
import support
tokensource = list(PythonLexer().get_tokens("lambda x: 123"))
class IRCFormatterTest(unittest.TestCase):
    def test_correct_output(self):
        """The IRC formatter wraps keywords and numbers in mIRC color codes."""
        formatter = IRCFormatter()
        output = StringIO()
        formatter.format(tokensource, output)
        self.assertEqual(u'\x0302lambda\x03 x: \x0302123\x03\n',
                         output.getvalue())
| {
"content_hash": "c118b736234fa56cb89ea510a0ed23d7",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 85,
"avg_line_length": 25.03448275862069,
"alnum_prop": 0.6928374655647382,
"repo_name": "sol/pygments",
"id": "3b34f0bc829589208edd56a11d586a9925169a96",
"size": "750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_irc_formatter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "636"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "5145"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "3294"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108622"
},
{
"name": "C#",
"bytes": "17784"
},
{
"name": "C++",
"bytes": "79372"
},
{
"name": "Ceylon",
"bytes": "887"
},
{
"name": "Clojure",
"bytes": "22010"
},
{
"name": "CoffeeScript",
"bytes": "19898"
},
{
"name": "ColdFusion",
"bytes": "724"
},
{
"name": "Common Lisp",
"bytes": "91600"
},
{
"name": "Component Pascal",
"bytes": "84519"
},
{
"name": "D",
"bytes": "5475"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "1091"
},
{
"name": "ECL",
"bytes": "2599"
},
{
"name": "Elixir",
"bytes": "10529"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "Fortran",
"bytes": "27700"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groovy",
"bytes": "2542"
},
{
"name": "Haskell",
"bytes": "49148"
},
{
"name": "Haxe",
"bytes": "12921"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Java",
"bytes": "81362"
},
{
"name": "JavaScript",
"bytes": "1328"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "878"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "OpenEdge ABL",
"bytes": "883"
},
{
"name": "PHP",
"bytes": "17242"
},
{
"name": "PowerShell",
"bytes": "4807"
},
{
"name": "Prolog",
"bytes": "738"
},
{
"name": "Python",
"bytes": "2190898"
},
{
"name": "R",
"bytes": "1975"
},
{
"name": "Racket",
"bytes": "1094"
},
{
"name": "Rebol",
"bytes": "1490"
},
{
"name": "Ruby",
"bytes": "91187"
},
{
"name": "Rust",
"bytes": "21280"
},
{
"name": "Scala",
"bytes": "327"
},
{
"name": "Scheme",
"bytes": "45856"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "112341"
},
{
"name": "Smalltalk",
"bytes": "156665"
},
{
"name": "Standard ML",
"bytes": "3006"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "16660"
},
{
"name": "Visual Basic",
"bytes": "17210"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "eC",
"bytes": "26388"
}
],
"symlink_target": ""
} |
from contextlib import contextmanager
from functools import partial
import os
import os.path as op
import numpy as np
from traitlets import observe, HasTraits, Unicode, Bool, Float
from ..io.constants import FIFF
from ..defaults import DEFAULTS
from ..io import read_info, read_fiducials, read_raw
from ..io.pick import pick_types
from ..io.open import fiff_open, dir_tree_find
from ..io.meas_info import _empty_info
from ..io._read_raw import supported as raw_supported_types
from ..coreg import Coregistration, _is_mri_subject
from ..viz._3d import (_plot_head_surface, _plot_head_fiducials,
_plot_head_shape_points, _plot_mri_fiducials,
_plot_hpi_coils, _plot_sensors)
from ..transforms import (read_trans, write_trans, _ensure_trans,
rotation_angles, _get_transforms_to_coord_frame)
from ..utils import get_subjects_dir, check_fname, _check_fname, fill_doc, warn
from ..channels import read_dig_fif
@fill_doc
class CoregistrationUI(HasTraits):
    """Class for coregistration assisted by graphical interface.

    Parameters
    ----------
    info_file : None | str
        The FIFF file with digitizer data for coregistration.
    %(subject)s
    %(subjects_dir)s
    fiducials : list | dict | str
        The fiducials given in the MRI (surface RAS) coordinate
        system. If a dict is provided it must be a dict with 3 entries
        with keys 'lpa', 'rpa' and 'nasion' with as values coordinates in m.
        If a list it must be a list of DigPoint instances as returned
        by the read_fiducials function.
        If set to 'estimated', the fiducials are initialized
        automatically using fiducials defined in MNI space on fsaverage
        template. If set to 'auto', one tries to find the fiducials
        in a file with the canonical name (``bem/{subject}-fiducials.fif``)
        and if absent one falls back to 'estimated'. Defaults to 'auto'.
    head_resolution : bool
        If True, use a high-resolution head surface. Defaults to False.
    head_transparency : bool
        If True, display the head surface with transparency. Defaults to False.
    hpi_coils : bool
        If True, display the HPI coils. Defaults to True.
    head_shape_points : bool
        If True, display the head shape points. Defaults to True.
    eeg_channels : bool
        If True, display the EEG channels. Defaults to True.
    orient_glyphs : bool
        If True, orient the sensors towards the head surface. Defaults to
        False.
    sensor_opacity : float
        The opacity of the sensors between 0 and 1. Defaults to 1.0.
    trans : str
        The path to the Head<->MRI transform FIF file ("-trans.fif").
    size : tuple
        The dimensions (width, height) of the rendering view. The default is
        (800, 600).
    bgcolor : tuple | str
        The background color as a tuple (red, green, blue) of float
        values between 0 and 1 or a valid color name (i.e. 'white'
        or 'w'). Defaults to 'grey'.
    show : bool
        Display the window as soon as it is ready. Defaults to True.
    standalone : bool
        If True, start the Qt application event loop. Defaults to False.
    %(verbose)s
    """

    # GUI state is mirrored in traitlets: the ``_set_*`` helpers write to
    # these and the matching ``@observe`` handlers below react to changes.
    _subject = Unicode()
    _subjects_dir = Unicode()
    _lock_fids = Bool()
    _fiducials_file = Unicode()
    _current_fiducial = Unicode()
    _info_file = Unicode()
    _orient_glyphs = Bool()
    _hpi_coils = Bool()
    _head_shape_points = Bool()
    _eeg_channels = Bool()
    _head_resolution = Bool()
    _head_transparency = Bool()
    _grow_hair = Float()
    _scale_mode = Unicode()
    _icp_fid_match = Unicode()
    def __init__(self, info_file, subject=None, subjects_dir=None,
                 fiducials='auto', head_resolution=None,
                 head_transparency=None, hpi_coils=None,
                 head_shape_points=None, eeg_channels=None, orient_glyphs=None,
                 sensor_opacity=None, trans=None, size=None, bgcolor=None,
                 show=True, standalone=False, verbose=None):
        # Imported here so the 3D backend is only pulled in when the GUI is
        # actually instantiated.
        from ..viz.backends.renderer import _get_renderer

        def _get_default(var, val):
            # Substitute ``val`` for parameters the user left as None.
            return var if var is not None else val

        self._actors = dict()
        self._surfaces = dict()
        self._widgets = dict()
        self._verbose = verbose
        self._plot_locked = False
        self._head_geo = None
        self._coord_frame = "mri"
        self._mouse_no_mvt = -1
        self._to_cf_t = None
        self._omit_hsp_distance = 0.0
        self._head_opacity = 1.0
        self._fid_colors = tuple(
            DEFAULTS['coreg'][f'{key}_color'] for key in
            ('lpa', 'nasion', 'rpa'))
        # All default widget values live in one dict so the UI can be reset
        # to a known state later (see _reset_fitting_parameters).
        self._defaults = dict(
            size=_get_default(size, (800, 600)),
            bgcolor=_get_default(bgcolor, "grey"),
            orient_glyphs=_get_default(orient_glyphs, False),
            hpi_coils=_get_default(hpi_coils, True),
            head_shape_points=_get_default(head_shape_points, True),
            eeg_channels=_get_default(eeg_channels, True),
            head_resolution=_get_default(head_resolution, False),
            head_transparency=_get_default(head_transparency, False),
            head_opacity=0.5,
            sensor_opacity=_get_default(sensor_opacity, 1.0),
            fiducials=("LPA", "Nasion", "RPA"),
            fiducial="LPA",
            lock_fids=False,
            grow_hair=0.0,
            scale_modes=["None", "uniform", "3-axis"],
            scale_mode="None",
            icp_fid_matches=('nearest', 'matched'),
            icp_fid_match='nearest',
            icp_n_iterations=20,
            omit_hsp_distance=10.0,
            lock_head_opacity=self._head_opacity < 1.0,
            weights=dict(
                lpa=1.0,
                nasion=10.0,
                rpa=1.0,
                hsp=1.0,
                eeg=1.0,
                hpi=1.0,
            ),
        )

        # process requirements
        info = None
        subjects_dir = get_subjects_dir(
            subjects_dir=subjects_dir, raise_error=True)
        subject = _get_default(subject, self._get_subjects(subjects_dir)[0])

        # setup the window
        self._renderer = _get_renderer(
            size=self._defaults["size"], bgcolor=self._defaults["bgcolor"])
        self._renderer._window_close_connect(self._clean)

        # setup the model
        self._info = info
        self._fiducials = fiducials
        self._coreg = Coregistration(
            self._info, subject, subjects_dir, fiducials)
        # one ``_<name>_weight`` attribute per fitting weight
        for fid in self._defaults["weights"].keys():
            setattr(self, f"_{fid}_weight", self._defaults["weights"][fid])

        # set main traits (each assignment fires its @observe handler)
        self._set_subjects_dir(subjects_dir)
        self._set_subject(subject)
        self._set_info_file(info_file)
        self._set_orient_glyphs(self._defaults["orient_glyphs"])
        self._set_hpi_coils(self._defaults["hpi_coils"])
        self._set_head_shape_points(self._defaults["head_shape_points"])
        self._set_eeg_channels(self._defaults["eeg_channels"])
        self._set_head_resolution(self._defaults["head_resolution"])
        self._set_head_transparency(self._defaults["head_transparency"])
        self._set_grow_hair(self._defaults["grow_hair"])
        self._set_omit_hsp_distance(self._defaults["omit_hsp_distance"])
        self._set_icp_n_iterations(self._defaults["icp_n_iterations"])
        self._set_icp_fid_match(self._defaults["icp_fid_match"])

        # configure UI
        self._reset_fitting_parameters()
        self._configure_dock()
        self._configure_picking()

        # once the docks are initialized
        self._set_current_fiducial(self._defaults["fiducial"])
        self._set_lock_fids(self._defaults["lock_fids"])
        self._set_scale_mode(self._defaults["scale_mode"])
        if trans is not None:
            self._load_trans(trans)

        # must be done last
        if show:
            self._renderer.show()
        if standalone:
            # blocks until the Qt event loop exits
            self._renderer.figure.store["app"].exec()
    def _set_subjects_dir(self, subjects_dir):
        # need_dir=True: subjects_dir must be an existing directory
        self._subjects_dir = _check_fname(
            subjects_dir, overwrite=True, must_exist=True, need_dir=True)

    def _set_subject(self, subject):
        self._subject = subject

    def _set_lock_fids(self, state):
        self._lock_fids = bool(state)

    def _set_fiducials_file(self, fname):
        # reject (and clear the widget) if the name does not end in .fif
        if not self._check_fif('fiducials', fname):
            return
        self._fiducials_file = _check_fname(
            fname, overwrite=True, must_exist=True, need_dir=False)

    def _set_current_fiducial(self, fid):
        # stored lower-case ('lpa'/'nasion'/'rpa') to match coreg attributes
        self._current_fiducial = fid.lower()

    def _set_info_file(self, fname):
        if fname is None:
            return

        # info file can be anything supported by read_raw
        try:
            check_fname(fname, 'info', tuple(raw_supported_types.keys()),
                        endings_err=tuple(raw_supported_types.keys()))
        except IOError as e:
            warn(e)
            # invalid name: clear the file widget and keep the old state
            self._widgets["info_file"].set_value(0, '')
            return

        # ctf ds `files` are actually directories
        if fname.endswith(('.ds',)):
            self._info_file = _check_fname(
                fname, overwrite=True, must_exist=True, need_dir=True)
        else:
            self._info_file = _check_fname(
                fname, overwrite=True, must_exist=True, need_dir=False)

    def _set_omit_hsp_distance(self, distance):
        # distance is in mm (converted to m in _omit_hsp)
        self._omit_hsp_distance = distance

    def _set_orient_glyphs(self, state):
        self._orient_glyphs = bool(state)

    def _set_hpi_coils(self, state):
        self._hpi_coils = bool(state)

    def _set_head_shape_points(self, state):
        self._head_shape_points = bool(state)

    def _set_eeg_channels(self, state):
        self._eeg_channels = bool(state)

    def _set_head_resolution(self, state):
        self._head_resolution = bool(state)

    def _set_head_transparency(self, state):
        self._head_transparency = bool(state)

    def _set_grow_hair(self, value):
        self._grow_hair = value

    def _set_scale_mode(self, mode):
        self._scale_mode = mode

    def _set_fiducial(self, value, coord):
        # spin boxes work in mm; the coreg model stores meters
        fid = self._current_fiducial.lower()
        coords = ["X", "Y", "Z"]
        idx = coords.index(coord)
        getattr(self._coreg, f"_{fid}")[0][idx] = value / 1e3
        self._update_plot("mri_fids")

    def _set_parameter(self, value, mode_name, coord):
        # UI units are degrees (rotation), mm (translation) and percent
        # (scale); the coreg model stores radians, meters and fractions.
        params = dict(
            rotation=self._coreg._rotation,
            translation=self._coreg._translation,
            scale=self._coreg._scale,
        )
        idx = ["X", "Y", "Z"].index(coord)
        if mode_name == "rotation":
            params[mode_name][idx] = np.deg2rad(value)
        elif mode_name == "translation":
            params[mode_name][idx] = value / 1e3
        else:
            assert mode_name == "scale"
            params[mode_name][idx] = value / 1e2
        self._coreg._update_params(
            rot=params["rotation"],
            tra=params["translation"],
            sca=params["scale"],
        )
        self._update_plot("sensors")

    def _set_icp_n_iterations(self, n_iterations):
        self._icp_n_iterations = n_iterations

    def _set_icp_fid_match(self, method):
        # 'nearest' or 'matched'; forwarded to coreg by the observer
        self._icp_fid_match = method

    def _set_point_weight(self, weight, point):
        setattr(self, f"_{point}_weight", weight)
    @observe("_subjects_dir")
    def _subjects_dir_changed(self, change=None):
        # XXX: add coreg.set_subjects_dir
        self._coreg._subjects_dir = self._subjects_dir
        subjects = self._get_subjects()
        # fall back to the first subject available in the new directory
        self._subject = subjects[0]
        self._reset()

    @observe("_subject")
    def _subject_changed(self, changed=None):
        # XXX: add coreg.set_subject()
        self._coreg._subject = self._subject
        self._coreg._setup_bem()
        self._coreg._setup_fiducials(self._fiducials)
        self._reset()
        # cache the (scaled) low-res head geometry used to orient glyphs
        rr = (self._coreg._processed_low_res_mri_points *
              self._coreg._scale)
        self._head_geo = dict(rr=rr, tris=self._coreg._bem_low_res["tris"],
                              nn=self._coreg._bem_low_res["nn"])

    @observe("_lock_fids")
    def _lock_fids_changed(self, change=None):
        # view widgets are only usable once the fiducials are locked, the
        # fiducial-editing widgets only while they are unlocked
        view_widgets = ["orient_glyphs", "show_hpi", "show_hsp",
                        "show_eeg", "high_res_head"]
        fid_widgets = ["fid_X", "fid_Y", "fid_Z", "fids_file", "fids"]
        if self._lock_fids:
            self._forward_widget_command(view_widgets, "set_enabled", True)
            self._actors["msg"].SetInput("")
        else:
            self._forward_widget_command(view_widgets, "set_enabled", False)
            self._actors["msg"].SetInput("Picking fiducials...")
        self._set_sensors_visibility(self._lock_fids)
        self._forward_widget_command("lock_fids", "set_value", self._lock_fids)
        self._forward_widget_command(fid_widgets, "set_enabled",
                                     not self._lock_fids)

    @observe("_fiducials_file")
    def _fiducials_file_changed(self, change=None):
        fids, _ = read_fiducials(self._fiducials_file)
        self._coreg._setup_fiducials(fids)
        self._reset()

    @observe("_current_fiducial")
    def _current_fiducial_changed(self, change=None):
        self._update_fiducials()
        self._follow_fiducial_view()

    @observe("_info_file")
    def _info_file_changed(self, change=None):
        if not self._info_file:
            return
        elif self._info_file.endswith(('.fif', '.fif.gz')):
            fid, tree, _ = fiff_open(self._info_file)
            fid.close()
            # a .fif file may hold full measurement info or only digitizer
            # points (ISOTRAK); build a minimal info in the latter case
            if len(dir_tree_find(tree, FIFF.FIFFB_MEAS_INFO)) > 0:
                self._info = read_info(self._info_file, verbose=False)
            elif len(dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)) > 0:
                self._info = _empty_info(1)
                self._info['dig'] = read_dig_fif(fname=self._info_file).dig
                self._info._unlocked = False
        else:
            self._info = read_raw(self._info_file).info
        # XXX: add coreg.set_info()
        self._coreg._info = self._info
        self._coreg._setup_digs()
        self._reset()

    @observe("_orient_glyphs")
    def _orient_glyphs_changed(self, change=None):
        self._update_plot(["hpi", "hsp", "eeg"])

    @observe("_hpi_coils")
    def _hpi_coils_changed(self, change=None):
        self._update_plot("hpi")

    @observe("_head_shape_points")
    def _head_shape_point_changed(self, change=None):
        self._update_plot("hsp")

    @observe("_eeg_channels")
    def _eeg_channels_changed(self, change=None):
        self._update_plot("eeg")

    @observe("_head_resolution")
    def _head_resolution_changed(self, change=None):
        self._update_plot("head")
        # re-apply the grow-hair offset to the newly selected surface
        self._grow_hair_changed()

    @observe("_head_transparency")
    def _head_transparency_changed(self, change=None):
        self._head_opacity = self._defaults["head_opacity"] \
            if self._head_transparency else 1.0
        self._actors["head"].GetProperty().SetOpacity(self._head_opacity)
        self._renderer._update()

    @observe("_grow_hair")
    def _grow_hair_changed(self, change=None):
        self._coreg.set_grow_hair(self._grow_hair)
        if "head" in self._surfaces:
            res = "high" if self._head_resolution else "low"
            self._surfaces["head"].points = \
                self._coreg._get_processed_mri_points(res)
            self._renderer._update()

    @observe("_scale_mode")
    def _scale_mode_changed(self, change=None):
        mode = None if self._scale_mode == "None" else self._scale_mode
        self._coreg.set_scale_mode(mode)
        # scaling spin boxes only make sense when a scale mode is active
        self._forward_widget_command(["sX", "sY", "sZ"], "set_enabled",
                                     mode is not None)

    @observe("_icp_fid_match")
    def _icp_fid_match_changed(self, change=None):
        self._coreg.set_fid_match(self._icp_fid_match)
    def _configure_picking(self):
        # wire the VTK interactor events to our handlers
        self._renderer._update_picking_callback(
            self._on_mouse_move,
            self._on_button_press,
            self._on_button_release,
            self._on_pick
        )
        # 2D status message shown while picking fiducials
        self._actors["msg"] = self._renderer.text2d(0, 0, "")

    def _on_mouse_move(self, vtk_picker, event):
        # count down so a click-and-drag is not treated as a pick
        if self._mouse_no_mvt:
            self._mouse_no_mvt -= 1

    def _on_button_press(self, vtk_picker, event):
        self._mouse_no_mvt = 2

    def _on_button_release(self, vtk_picker, event):
        if self._mouse_no_mvt > 0:  # button released without mouse motion
            x, y = vtk_picker.GetEventPosition()
            # XXX: plotter/renderer should not be exposed if possible
            plotter = self._renderer.figure.plotter
            picked_renderer = self._renderer.figure.plotter.renderer
            # trigger the pick
            plotter.picker.Pick(x, y, 0, picked_renderer)
        self._mouse_no_mvt = 0

    def _on_pick(self, vtk_picker, event):
        # move the currently selected fiducial to the picked head vertex
        if self._lock_fids:
            return
        # XXX: taken from Brain, can be refactored
        cell_id = vtk_picker.GetCellId()
        mesh = vtk_picker.GetDataSet()
        if mesh is None or cell_id == -1 or not self._mouse_no_mvt:
            return
        # only the head surface is marked as a valid picking target
        if not getattr(mesh, "_picking_target", False):
            return
        pos = np.array(vtk_picker.GetPickPosition())
        vtk_cell = mesh.GetCell(cell_id)
        cell = [vtk_cell.GetPointId(point_id) for point_id
                in range(vtk_cell.GetNumberOfPoints())]
        vertices = mesh.points[cell]
        # snap the pick position to the closest vertex of the picked cell
        idx = np.argmin(abs(vertices - pos), axis=0)
        vertex_id = cell[idx[0]]
        fiducials = [s.lower() for s in self._defaults["fiducials"]]
        idx = fiducials.index(self._current_fiducial.lower())
        # XXX: add coreg.set_fids
        self._coreg._fid_points[idx] = self._surfaces["head"].points[vertex_id]
        self._coreg._reset_fiducials()
        self._update_fiducials()
        self._update_plot("mri_fids")
    def _reset_fitting_parameters(self):
        """Restore the ICP/weight widgets to their default values."""
        self._forward_widget_command("icp_n_iterations", "set_value",
                                     self._defaults["icp_n_iterations"])
        self._forward_widget_command("icp_fid_match", "set_value",
                                     self._defaults["icp_fid_match"])
        weights_widgets = [f"{w}_weight"
                           for w in self._defaults["weights"].keys()]
        self._forward_widget_command(weights_widgets, "set_value",
                                     list(self._defaults["weights"].values()))

    def _reset_fiducials(self):
        self._set_current_fiducial(self._defaults["fiducial"])

    def _omit_hsp(self):
        # widget distance is in mm; the coreg API expects meters
        self._coreg.omit_head_shape_points(self._omit_hsp_distance / 1e3)
        self._update_plot("hsp")

    def _reset_omit_hsp_filter(self):
        # clearing the filter restores all previously omitted points
        self._coreg._extra_points_filter = None
        self._update_plot("hsp")
    def _update_plot(self, changes="all"):
        """Redraw the actors affected by ``changes`` (a name or list of
        names).

        Known names: "head", "hsp", "hpi", "eeg", "head_fids", "mri_fids".
        "sensors" redraws everything that depends on the transform, and
        "all" forces a full redraw.
        """
        if self._plot_locked:
            return
        if self._info is None:
            # without digitizer info only the MRI-side actors can be drawn
            changes = ["head", "mri_fids"]
            self._to_cf_t = dict(mri=dict(trans=np.eye(4)), head=None)
        else:
            self._to_cf_t = _get_transforms_to_coord_frame(
                self._info, self._coreg.trans, coord_frame=self._coord_frame)
        if not isinstance(changes, list):
            changes = [changes]
        forced = "all" in changes
        sensors = "sensors" in changes
        if "head" in changes or forced:
            self._add_head_surface()
        if "hsp" in changes or forced or sensors:
            self._add_head_shape_points()
        if "hpi" in changes or forced or sensors:
            self._add_hpi_coils()
        if "eeg" in changes or forced or sensors:
            self._add_eeg_channels()
        if "head_fids" in changes or forced or sensors:
            self._add_head_fiducials()
        if "mri_fids" in changes or forced or sensors:
            self._add_mri_fiducials()
    @contextmanager
    def _lock_plot(self):
        """Suspend redrawing while widget values are updated in bulk."""
        old_plot_locked = self._plot_locked
        self._plot_locked = True
        try:
            yield
        finally:
            self._plot_locked = old_plot_locked

    @contextmanager
    def _display_message(self, msg):
        """Temporarily show ``msg`` in the 2D status text actor."""
        old_msg = self._actors["msg"].GetInput()
        self._actors["msg"].SetInput(msg)
        self._renderer._update()
        try:
            yield
        finally:
            self._actors["msg"].SetInput(old_msg)
            self._renderer._update()

    def _follow_fiducial_view(self):
        # move the camera to face the side of the fiducial being edited
        fid = self._current_fiducial.lower()
        view = dict(lpa='left', rpa='right', nasion='front')
        kwargs = dict(front=(90., 90.), left=(180, 90), right=(0., 90))
        kwargs = dict(zip(('azimuth', 'elevation'), kwargs[view[fid]]))
        if not self._lock_fids:
            self._renderer.set_camera(distance=None, **kwargs)
    def _update_fiducials(self):
        # push the current fiducial's coordinates (in mm) into the spin boxes
        fid = self._current_fiducial.lower()
        val = getattr(self._coreg, f"_{fid}")[0] * 1e3
        with self._lock_plot():
            self._forward_widget_command(
                ["fid_X", "fid_Y", "fid_Z"], "set_value", val)

    def _update_parameters(self):
        # push the fitted transform back into the parameter spin boxes,
        # converting to the UI units (degrees / mm / percent)
        with self._lock_plot():
            # rotation
            self._forward_widget_command(["rX", "rY", "rZ"], "set_value",
                                         np.rad2deg(self._coreg._rotation))
            # translation
            self._forward_widget_command(["tX", "tY", "tZ"], "set_value",
                                         self._coreg._translation * 1e3)
            # scale
            self._forward_widget_command(["sX", "sY", "sZ"], "set_value",
                                         self._coreg._scale * 1e2)

    def _reset(self):
        """Reset the fitting options, the coregistration and the display."""
        self._reset_fitting_parameters()
        self._coreg.reset()
        self._update_plot()
        self._update_parameters()
def _forward_widget_command(self, names, command, value):
names = [names] if not isinstance(names, list) else names
value = list(value) if isinstance(value, np.ndarray) else value
for idx, name in enumerate(names):
val = value[idx] if isinstance(value, list) else value
if name in self._widgets:
getattr(self._widgets[name], command)(val)
    def _set_sensors_visibility(self, state):
        """Show or hide all digitization-related actors at once."""
        sensors = ["head_fiducials", "hpi_coils", "head_shape_points",
                   "eeg_channels"]
        for sensor in sensors:
            if sensor in self._actors and self._actors[sensor] is not None:
                # an entry may hold a single actor or a list of actors
                actors = self._actors[sensor]
                actors = actors if isinstance(actors, list) else [actors]
                for actor in actors:
                    actor.SetVisibility(state)
        self._renderer._update()

    def _update_actor(self, actor_name, actor):
        # remove the previous actor (if any) before storing the new one;
        # remove_actor tolerates None
        self._renderer.plotter.remove_actor(self._actors.get(actor_name))
        self._actors[actor_name] = actor
        self._renderer._update()
    def _add_mri_fiducials(self):
        mri_fids_actors = _plot_mri_fiducials(
            self._renderer, self._coreg._fid_points, self._subjects_dir,
            self._subject, self._to_cf_t, self._fid_colors)
        # disable picking on the markers
        for actor in mri_fids_actors:
            actor.SetPickable(False)
        self._update_actor("mri_fiducials", mri_fids_actors)

    def _add_head_fiducials(self):
        head_fids_actors = _plot_head_fiducials(
            self._renderer, self._info, self._to_cf_t, self._fid_colors)
        self._update_actor("head_fiducials", head_fids_actors)

    def _add_hpi_coils(self):
        # passing None to _update_actor removes the actors when hidden
        if self._hpi_coils:
            hpi_actors = _plot_hpi_coils(
                self._renderer, self._info, self._to_cf_t,
                opacity=self._defaults["sensor_opacity"],
                orient_glyphs=self._orient_glyphs, surf=self._head_geo)
        else:
            hpi_actors = None
        self._update_actor("hpi_coils", hpi_actors)

    def _add_head_shape_points(self):
        if self._head_shape_points:
            hsp_actors = _plot_head_shape_points(
                self._renderer, self._info, self._to_cf_t,
                opacity=self._defaults["sensor_opacity"],
                orient_glyphs=self._orient_glyphs, surf=self._head_geo,
                mask=self._coreg._extra_points_filter)
        else:
            hsp_actors = None
        self._update_actor("head_shape_points", hsp_actors)

    def _add_eeg_channels(self):
        if self._eeg_channels:
            eeg = ["original"]
            picks = pick_types(self._info, eeg=(len(eeg) > 0))
            if len(picks) > 0:
                eeg_actors = _plot_sensors(
                    self._renderer, self._info, self._to_cf_t, picks,
                    meg=False, eeg=eeg, fnirs=False, warn_meg=False,
                    head_surf=self._head_geo, units='m',
                    sensor_opacity=self._defaults["sensor_opacity"],
                    orient_glyphs=self._orient_glyphs, surf=self._head_geo)
                eeg_actors = eeg_actors["eeg"]
            else:
                # no EEG channels present in the info
                eeg_actors = None
        else:
            eeg_actors = None
        self._update_actor("eeg_channels", eeg_actors)

    def _add_head_surface(self):
        bem = None
        surface = "head-dense" if self._head_resolution else "head"
        try:
            head_actor, head_surf, _ = _plot_head_surface(
                self._renderer, surface, self._subject,
                self._subjects_dir, bem, self._coord_frame, self._to_cf_t,
                alpha=self._head_opacity)
        except IOError:
            # fall back to the low-resolution surface when the dense one
            # is not available for this subject
            head_actor, head_surf, _ = _plot_head_surface(
                self._renderer, "head", self._subject, self._subjects_dir,
                bem, self._coord_frame, self._to_cf_t,
                alpha=self._head_opacity)
        # mark head surface mesh to restrict picking
        head_surf._picking_target = True
        self._update_actor("head", head_actor)
        self._surfaces["head"] = head_surf
    def _fit_fiducials(self):
        """Run the fiducial-based alignment and refresh the display."""
        self._coreg.fit_fiducials(
            lpa_weight=self._lpa_weight,
            nasion_weight=self._nasion_weight,
            rpa_weight=self._rpa_weight,
            verbose=self._verbose,
        )
        self._update_plot("sensors")
        self._update_parameters()

    def _fit_icp(self):
        """Run the ICP alignment, redrawing after every iteration."""
        with self._display_message("Fitting..."):
            self._coreg.fit_icp(
                n_iterations=self._icp_n_iterations,
                lpa_weight=self._lpa_weight,
                nasion_weight=self._nasion_weight,
                rpa_weight=self._rpa_weight,
                # redraw per iteration so the fit can be followed live
                callback=lambda x, y: self._update_plot("sensors"),
                verbose=self._verbose,
            )
        self._update_parameters()

    def _save_trans(self, fname):
        write_trans(fname, self._coreg.trans)

    def _load_trans(self, fname):
        """Load a -trans.fif file and apply it to the coregistration."""
        mri_head_t = _ensure_trans(read_trans(fname, return_all=True),
                                   'mri', 'head')['trans']
        # decompose the affine into rotation angles and a translation
        rot_x, rot_y, rot_z = rotation_angles(mri_head_t)
        x, y, z = mri_head_t[:3, 3]
        self._coreg._update_params(
            rot=np.array([rot_x, rot_y, rot_z]),
            tra=np.array([x, y, z]),
        )
        self._update_plot("sensors")
        self._update_parameters()
def _get_subjects(self, sdir=None):
# XXX: would be nice to move this function to util
sdir = sdir if sdir is not None else self._subjects_dir
is_dir = sdir and op.isdir(sdir)
if is_dir:
dir_content = os.listdir(sdir)
subjects = [s for s in dir_content if _is_mri_subject(s, sdir)]
if len(subjects) == 0:
subjects.append('')
else:
subjects = ['']
return sorted(subjects)
def _check_fif(self, filetype, fname):
try:
check_fname(fname, filetype, ('.fif'), ('.fif'))
except IOError:
warn(f"The filename {fname} for {filetype} must end with '.fif'.")
self._widgets[f"{filetype}_file"].set_value(0, '')
return False
return True
    def _configure_dock(self):
        """Build the left ("Input") and right ("Parameters") dock widgets.

        Every interactive widget is registered in ``self._widgets`` so it
        can be driven later through ``_forward_widget_command``.
        """
        # --- left dock: MRI subject / fiducials / digitization / view ---
        self._renderer._dock_initialize(name="Input", area="left")
        layout = self._renderer._dock_add_group_box("MRI Subject")
        self._widgets["subjects_dir"] = self._renderer._dock_add_file_button(
            name="subjects_dir",
            desc="Load",
            func=self._set_subjects_dir,
            value=self._subjects_dir,
            placeholder="Subjects Directory",
            directory=True,
            layout=layout,
        )
        self._widgets["subject"] = self._renderer._dock_add_combo_box(
            name="Subject",
            value=self._subject,
            rng=self._get_subjects(),
            callback=self._set_subject,
            compact=True,
            layout=layout
        )
        layout = self._renderer._dock_add_group_box("MRI Fiducials")
        self._widgets["lock_fids"] = self._renderer._dock_add_check_box(
            name="Lock fiducials",
            value=self._lock_fids,
            callback=self._set_lock_fids,
            layout=layout
        )
        self._widgets["fiducials_file"] = self._renderer._dock_add_file_button(
            name="fiducials_file",
            desc="Load",
            func=self._set_fiducials_file,
            value=self._fiducials_file,
            placeholder="Path to fiducials",
            layout=layout,
        )
        self._widgets["fids"] = self._renderer._dock_add_radio_buttons(
            value=self._defaults["fiducial"],
            rng=self._defaults["fiducials"],
            callback=self._set_current_fiducial,
            vertical=False,
            layout=layout,
        )
        # one spin box per fiducial coordinate (values in mm)
        hlayout = self._renderer._dock_add_layout()
        for coord in ("X", "Y", "Z"):
            name = f"fid_{coord}"
            self._widgets[name] = self._renderer._dock_add_spin_box(
                name=coord,
                value=0.,
                rng=[-1e3, 1e3],
                callback=partial(
                    self._set_fiducial,
                    coord=coord,
                ),
                compact=True,
                double=True,
                layout=hlayout
            )
        self._renderer._layout_add_widget(layout, hlayout)
        layout = self._renderer._dock_add_group_box("Digitization Source")
        self._widgets["info_file"] = self._renderer._dock_add_file_button(
            name="info_file",
            desc="Load",
            func=self._set_info_file,
            value=self._info_file,
            placeholder="Path to info",
            layout=layout,
        )
        self._widgets["grow_hair"] = self._renderer._dock_add_spin_box(
            name="Grow Hair",
            value=self._grow_hair,
            rng=[0.0, 10.0],
            callback=self._set_grow_hair,
            layout=layout,
        )
        # omit-distant-points controls (distance in mm)
        hlayout = self._renderer._dock_add_layout(vertical=False)
        self._widgets["omit_distance"] = self._renderer._dock_add_spin_box(
            name="Omit Distance",
            value=self._omit_hsp_distance,
            rng=[0.0, 100.0],
            callback=self._set_omit_hsp_distance,
            layout=hlayout,
        )
        self._widgets["omit"] = self._renderer._dock_add_button(
            name="Omit",
            callback=self._omit_hsp,
            layout=hlayout,
        )
        self._widgets["reset_omit"] = self._renderer._dock_add_button(
            name="Reset",
            callback=self._reset_omit_hsp_filter,
            layout=hlayout,
        )
        self._renderer._layout_add_widget(layout, hlayout)
        layout = self._renderer._dock_add_group_box("View")
        self._widgets["orient_glyphs"] = self._renderer._dock_add_check_box(
            name="Orient glyphs",
            value=self._orient_glyphs,
            callback=self._set_orient_glyphs,
            layout=layout
        )
        self._widgets["show_hpi"] = self._renderer._dock_add_check_box(
            name="Show HPI Coils",
            value=self._hpi_coils,
            callback=self._set_hpi_coils,
            layout=layout
        )
        self._widgets["show_hsp"] = self._renderer._dock_add_check_box(
            name="Show Head Shape Points",
            value=self._head_shape_points,
            callback=self._set_head_shape_points,
            layout=layout
        )
        self._widgets["show_eeg"] = self._renderer._dock_add_check_box(
            name="Show EEG Channels",
            value=self._eeg_channels,
            callback=self._set_eeg_channels,
            layout=layout
        )
        self._widgets["high_res_head"] = self._renderer._dock_add_check_box(
            name="Show High Resolution Head",
            value=self._head_resolution,
            callback=self._set_head_resolution,
            layout=layout
        )
        self._widgets["make_transparent"] = self._renderer._dock_add_check_box(
            name="Make skin surface transparent",
            value=self._head_transparency,
            callback=self._set_head_transparency,
            layout=layout
        )
        self._renderer._dock_add_stretch()

        # --- right dock: transform parameters and fitting controls ---
        self._renderer._dock_initialize(name="Parameters", area="right")
        self._widgets["scaling_mode"] = self._renderer._dock_add_combo_box(
            name="Scaling Mode",
            value=self._defaults["scale_mode"],
            rng=self._defaults["scale_modes"],
            callback=self._set_scale_mode,
            compact=True,
        )
        hlayout = self._renderer._dock_add_group_box(
            name="Scaling Parameters",
        )
        # scale spin boxes sX/sY/sZ (values in percent)
        for coord in ("X", "Y", "Z"):
            name = f"s{coord}"
            self._widgets[name] = self._renderer._dock_add_spin_box(
                name=name,
                value=0.,
                rng=[-1e3, 1e3],
                callback=partial(
                    self._set_parameter,
                    mode_name="scale",
                    coord=coord,
                ),
                compact=True,
                double=True,
                layout=hlayout
            )
        # translation (mm) and rotation (deg) spin boxes: tX..tZ, rX..rZ
        for mode, mode_name in (("t", "Translation"), ("r", "Rotation")):
            hlayout = self._renderer._dock_add_group_box(
                f"{mode_name} ({mode})")
            for coord in ("X", "Y", "Z"):
                name = f"{mode}{coord}"
                self._widgets[name] = self._renderer._dock_add_spin_box(
                    name=name,
                    value=0.,
                    rng=[-1e3, 1e3],
                    callback=partial(
                        self._set_parameter,
                        mode_name=mode_name.lower(),
                        coord=coord,
                    ),
                    compact=True,
                    double=True,
                    step=1,
                    layout=hlayout
                )
        layout = self._renderer._dock_add_group_box("Fitting")
        hlayout = self._renderer._dock_add_layout(vertical=False)
        self._renderer._dock_add_button(
            name="Fit Fiducials",
            callback=self._fit_fiducials,
            layout=hlayout,
        )
        self._renderer._dock_add_button(
            name="Fit ICP",
            callback=self._fit_icp,
            layout=hlayout,
        )
        self._renderer._layout_add_widget(layout, hlayout)
        self._widgets["icp_n_iterations"] = self._renderer._dock_add_spin_box(
            name="Number Of ICP Iterations",
            value=self._defaults["icp_n_iterations"],
            rng=[1, 100],
            callback=self._set_icp_n_iterations,
            compact=True,
            double=False,
            layout=layout,
        )
        self._widgets["icp_fid_match"] = self._renderer._dock_add_combo_box(
            name="Fiducial point matching",
            value=self._defaults["icp_fid_match"],
            rng=self._defaults["icp_fid_matches"],
            callback=self._set_icp_fid_match,
            compact=True,
            layout=layout
        )
        layout = self._renderer._dock_add_group_box(
            name="Weights",
            layout=layout,
        )
        # two weight spin boxes per row: one point type, one fiducial
        for point, fid in zip(("HSP", "EEG", "HPI"),
                              self._defaults["fiducials"]):
            hlayout = self._renderer._dock_add_layout(vertical=False)
            point_lower = point.lower()
            name = f"{point_lower}_weight"
            self._widgets[name] = self._renderer._dock_add_spin_box(
                name=point,
                value=getattr(self, f"_{point_lower}_weight"),
                rng=[1., 100.],
                callback=partial(self._set_point_weight, point=point_lower),
                compact=True,
                double=True,
                layout=hlayout
            )
            fid_lower = fid.lower()
            name = f"{fid_lower}_weight"
            self._widgets[name] = self._renderer._dock_add_spin_box(
                name=fid,
                value=getattr(self, f"_{fid_lower}_weight"),
                rng=[1., 100.],
                callback=partial(self._set_point_weight, point=fid_lower),
                compact=True,
                double=True,
                layout=hlayout
            )
            self._renderer._layout_add_widget(layout, hlayout)
        self._renderer._dock_add_button(
            name="Reset Fitting Options",
            callback=self._reset_fitting_parameters,
            layout=layout,
        )
        # reset/save/load row at the bottom of the dock
        layout = self._renderer._dock_layout
        hlayout = self._renderer._dock_add_layout(vertical=False)
        self._renderer._dock_add_button(
            name="Reset",
            callback=self._reset,
            layout=hlayout,
        )
        self._widgets["save_trans"] = self._renderer._dock_add_file_button(
            name="save_trans",
            desc="Save...",
            save=True,
            func=self._save_trans,
            input_text_widget=False,
            layout=hlayout,
        )
        self._widgets["load_trans"] = self._renderer._dock_add_file_button(
            name="load_trans",
            desc="Load...",
            func=self._load_trans,
            input_text_widget=False,
            layout=hlayout,
        )
        self._renderer._layout_add_widget(layout, hlayout)
        self._renderer._dock_add_stretch()
    def _clean(self):
        # drop all references on window close so the Qt/VTK objects can be
        # garbage collected (connected via _window_close_connect)
        self._renderer = None
        self._coreg = None
        self._widgets.clear()
        self._actors.clear()
        self._surfaces.clear()
        self._defaults.clear()
        self._head_geo = None

    def close(self):
        """Close interface and cleanup data structure."""
        self._renderer.close()
| {
"content_hash": "5661091fa70fc18273ec55002a6b55fc",
"timestamp": "",
"source": "github",
"line_count": 1010,
"max_line_length": 79,
"avg_line_length": 38.43960396039604,
"alnum_prop": 0.5515660416237379,
"repo_name": "bloyl/mne-python",
"id": "7f882b314a91fa403d705e64be7e8bc700922e98",
"size": "38825",
"binary": false,
"copies": "1",
"ref": "refs/heads/placeholder",
"path": "mne/gui/_coreg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "Makefile",
"bytes": "4450"
},
{
"name": "Python",
"bytes": "8190297"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
import numpy as np
from PyQt5 import QtCore, QtGui
class FitParametersModel(QtCore.QAbstractItemModel):
"""A model storing fit parameters.
Columns:
(X) name | (X) lower bound | (X) upper bound | value | uncertainty | uncertainty percent
(X) : has a tick/checker.
The checker before the first column ("name") allows or disables
fitting of the parameter. If fitting is allowed, "lower bound"
and "upper bound" are checkable.
"""
def __init__(self, parameters, unfittables):
self._parameters = []
for l in parameters:
self._parameters.append({'name': l[0],
'lowerbound': 0,
'lowerbound_enabled': False,
'upperbound_enabled': False,
'lowerbound_active': False,
'upperbound_active': False,
'upperbound': 0,
'value': 1,
'uncertainty': 0,
'description': l[1],
'enabled': True,
'fittable': True})
for l in unfittables:
self._parameters.append({'name':l[0],
'description':l[1],
'lowerbound': l[2],
'upperbound': l[3],
'lowerbound_enabled':True,
'upperbound_enabled':True,
'lowerbound_active':False,
'upperbound_active':False,
'value': l[4],
'uncertainty':0,
'enabled':False,
'fittable':False,})
super().__init__()
def index(self, row, column, parent=None, *args, **kwargs):
if column not in [0, 1, 2, 3, 4, 5]:
raise ValueError('Invalid column: {}'.format(column))
if row >= len(self._parameters):
raise ValueError('Invalid row: {}'.format(row))
return self.createIndex(row, column, None)
    def parent(self, modelindex=None):
        # flat model: every index is top-level, so the parent is invalid
        return QtCore.QModelIndex()

    def rowCount(self, parent=None, *args, **kwargs):
        # one row per stored parameter
        return len(self._parameters)

    def columnCount(self, parent=None, *args, **kwargs):
        # name | min | max | value | uncertainty | relative uncertainty
        return 6
def headerData(self, column, orientation, role=None):
if orientation == QtCore.Qt.Vertical:
return None
if role is None:
role = QtCore.Qt.DisplayRole
if role == QtCore.Qt.DisplayRole:
return ['Name', 'Min.', 'Max.', 'Value', 'Uncertainty', 'Rel. unc. (%)'][column]
def flags(self, modelindex):
column = modelindex.column()
row = modelindex.row()
flagstoset = QtCore.Qt.ItemNeverHasChildren
if 'fittable' not in self._parameters[row]:
self._parameters[row]['fittable']=True
if column == 0:
# The name column is user-checkable.
if self._parameters[row]['fittable']:
flagstoset |= QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled
else:
flagstoset |= QtCore.Qt.ItemIsEnabled
elif column in [1, 2]:
# lower and upper bound is user-checkable iff fitting is enabled
if not self._parameters[row]['fittable']:
flagstoset |= 0
if self._parameters[row]['enabled']:
flagstoset |= QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled
if (column == 1) and (self._parameters[row]['lowerbound_enabled']):
flagstoset |= QtCore.Qt.ItemIsEditable
elif (column == 2) and (self._parameters[row]['upperbound_enabled']):
flagstoset |= QtCore.Qt.ItemIsEditable
elif column == 3:
# the value is always enabled and editable
flagstoset |= QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable
elif column in [4,5]:
if self._parameters[row]['enabled']:
flagstoset |= QtCore.Qt.ItemIsEnabled
return flagstoset
def data(self, modelindex, role=None):
row = modelindex.row()
column = modelindex.column()
if role is None:
role = QtCore.Qt.DisplayRole
if column == 0:
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
return self._parameters[row]['name']
elif role == QtCore.Qt.CheckStateRole:
return [QtCore.Qt.Unchecked, QtCore.Qt.Checked][self._parameters[row]['enabled']]
elif column == 1:
if role == QtCore.Qt.DisplayRole:
if self._parameters[row]['lowerbound_enabled']:
return str(self._parameters[row]['lowerbound'])
else:
return 'Unlimited'
elif role == QtCore.Qt.EditRole:
assert self._parameters[row]['lowerbound_enabled']
return self._parameters[row]['lowerbound']
elif role == QtCore.Qt.CheckStateRole:
return [QtCore.Qt.Unchecked, QtCore.Qt.Checked][self._parameters[row]['lowerbound_enabled']]
elif role == QtCore.Qt.DecorationRole:
return [None, QtGui.QIcon.fromTheme('dialog-warning')][self._parameters[row]['lowerbound_active']]
elif column == 2:
if role == QtCore.Qt.DisplayRole:
if self._parameters[row]['upperbound_enabled']:
return str(self._parameters[row]['upperbound'])
else:
return 'Unlimited'
elif role == QtCore.Qt.EditRole:
assert self._parameters[row]['upperbound_enabled']
return self._parameters[row]['upperbound']
elif role == QtCore.Qt.CheckStateRole:
return [QtCore.Qt.Unchecked, QtCore.Qt.Checked][self._parameters[row]['upperbound_enabled']]
elif role == QtCore.Qt.DecorationRole:
return [None, QtGui.QIcon.fromTheme('dialog-warning')][self._parameters[row]['upperbound_active']]
elif column == 3:
if role == QtCore.Qt.DisplayRole:
return str(self._parameters[row]['value'])
elif role == QtCore.Qt.EditRole:
return self._parameters[row]['value']
elif role == QtCore.Qt.BackgroundRole:
if ((self._parameters[row]['upperbound_enabled'] and
self._parameters[row]['value'] > self._parameters[row]['upperbound']) or
(self._parameters[row]['lowerbound_enabled'] and
self._parameters[row]['value'] < self._parameters[row]['lowerbound'])):
return QtGui.QBrush(QtCore.Qt.red)
elif column == 4:
if role == QtCore.Qt.DisplayRole:
if self._parameters[row]['enabled']:
return str(self._parameters[row]['uncertainty'])
else:
return '(fixed)'
elif column == 5:
if role == QtCore.Qt.DisplayRole:
if self._parameters[row]['enabled']:
if np.abs(self._parameters[row]['value'])<=np.finfo(self._parameters[row]['value']).eps:
return 'infinite'
else:
return '{:.2f} %'.format(np.abs(self._parameters[row]['uncertainty']/self._parameters[row]['value'])*100)
else:
return '(fixed)'
if role == QtCore.Qt.ToolTipRole:
return self._parameters[row]['description']
return None
def setData(self, modelindex, data, role=QtCore.Qt.EditRole):
row = modelindex.row()
column = modelindex.column()
if role is None:
role = QtCore.Qt.EditRole
if 'fittable' not in self._parameters[row]:
self._parameters[row]['fittable']=True
if role == QtCore.Qt.CheckStateRole:
if column == 0 and self._parameters[row]['fittable']:
self._parameters[row]['enabled'] = data == QtCore.Qt.Checked
self.dataChanged.emit(modelindex, self.createIndex(row, self.columnCount() - 1, None))
elif column == 1 and self._parameters[row]['fittable']:
self._parameters[row]['lowerbound_enabled'] = data == QtCore.Qt.Checked
self.dataChanged.emit(modelindex, self.createIndex(row, self.columnCount() - 1))
elif column == 2 and self._parameters[row]['fittable']:
self._parameters[row]['upperbound_enabled'] = data == QtCore.Qt.Checked
self.dataChanged.emit(modelindex, self.createIndex(row, self.columnCount() - 1))
else:
return False
elif role == QtCore.Qt.EditRole:
if column == 1 and self._parameters[row]['fittable']:
self._parameters[row]['lowerbound'] = float(data)
self.dataChanged.emit(modelindex, self.createIndex(row, self.columnCount() - 1))
elif column == 2 and self.parameters[row]['fittable']:
self._parameters[row]['upperbound'] = float(data)
self.dataChanged.emit(modelindex, self.createIndex(row, self.columnCount() - 1))
elif column == 3:
self._parameters[row]['value'] = float(data)
self.dataChanged.emit(modelindex, self.createIndex(row, self.columnCount() - 1))
else:
return False
else:
return False
return True
@property
def parameters(self):
return self._parameters
@parameters.setter
def parameters(self, newparams):
self.beginRemoveRows(QtCore.QModelIndex(), 0, len(self._parameters))
self._parameters = []
self.endRemoveRows()
self.beginInsertRows(QtCore.QModelIndex(), 0, len(newparams))
self._parameters = newparams
for p in self._parameters:
if 'fittable' not in p:
p['fittable'] = True
self.endInsertRows()
def update_parameters(self, values, uncertainties):
assert len(values) == len([p for p in self._parameters if p['fittable']])
assert len(uncertainties) == len([p for p in self._parameters if p['fittable']])
for i in range(len(values)):
self._parameters[i]['value'] = values[i]
self._parameters[i]['uncertainty'] = uncertainties[i]
self.dataChanged.emit(self.createIndex(0, 0), self.createIndex(self.rowCount(), self.columnCount()))
def emitParametersChanged(self):
self.dataChanged.emit(self.createIndex(0,0), self.createIndex(self.rowCount(), self.columnCount()))
def update_limits(self, lower=None, upper=None):
fittablepars=[p for p in self._parameters if p['fittable']]
if lower is None:
lower = [None] * len( fittablepars)
if upper is None:
upper = [None] * len(fittablepars)
for par, low, up in zip(fittablepars, lower, upper):
if low is None or low == np.nan:
par['lowerbound_enabled'] = False
par['lowerbound'] = 0
else:
par['lowerbound_enabled'] = True
par['lowerbound'] = low
if up is None or up == np.nan:
par['upperbound_enabled'] = False
par['upperbound'] = 0
else:
par['upperbound_enabled'] = True
par['upperbound'] = up
self.dataChanged.emit(self.createIndex(0, 0), self.createIndex(self.rowCount(), self.columnCount()))
def update_active_mask(self, active_mask):
for par, am in zip(self._parameters, active_mask):
par['upperbound_active'] = (am == 1)
par['lowerbound_active'] = (am == -1)
self.dataChanged.emit(self.createIndex(0, 1), self.createIndex(self.rowCount() - 1, 2),
[QtCore.Qt.DecorationRole])
| {
"content_hash": "78b7399b12e3429acf0337f70b028574",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 129,
"avg_line_length": 47.65503875968992,
"alnum_prop": 0.5367222448149654,
"repo_name": "awacha/saxsfittool",
"id": "d9e25ccff6ad981d8df9610d38d49edea61db943",
"size": "12295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/saxsfittool/mainwindow/fitparametersmodel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "246"
},
{
"name": "Python",
"bytes": "105079"
},
{
"name": "Shell",
"bytes": "35"
}
],
"symlink_target": ""
} |
from .VecHelp import (IMAGE, LINE, CIRCLE, BEZIER, ELLIPSE_BEZIER, POLYGON,
PATH, TEXT)
from .VecHelp import Pen, Brush, Font
from ginga.canvas import render
class RenderContext(render.RenderContextBase):
    """Render context that records drawing operations.

    Instead of painting directly onto a surface, the set_* methods hold
    the current pen/brush/font state, and each draw_* method appends a
    record tuple to the renderer's render list (``renderer.rl``).
    """

    def __init__(self, renderer, viewer, surface):
        render.RenderContextBase.__init__(self, renderer, viewer)
        # current drawing state, filled in by the set_* methods below
        self.pen = None
        self.brush = None
        self.font = None

    def set_line_from_shape(self, shape):
        """Derive the pen (outline style) from *shape*'s attributes."""
        self.pen = Pen(shape.color,
                       linewidth=getattr(shape, 'linewidth', 1.0),
                       linestyle=getattr(shape, 'linestyle', 'solid'),
                       alpha=getattr(shape, 'alpha', 1.0))

    def set_fill_from_shape(self, shape):
        """Derive the brush (fill style) from *shape*'s attributes."""
        if not getattr(shape, 'fill', False):
            self.brush = None
            return
        # fall back to the outline color if no explicit fill color is set
        color = getattr(shape, 'fillcolor', None) or shape.color
        alpha = getattr(shape, 'fillalpha', getattr(shape, 'alpha', 1.0))
        self.brush = Brush(color, alpha=alpha)

    def set_font_from_shape(self, shape):
        """Derive the font from *shape*'s attributes, if it has a font."""
        if not hasattr(shape, 'font'):
            self.font = None
            return
        fontsize = getattr(shape, 'fontsize', None)
        if fontsize is None:
            fontsize = shape.scale_font(self.viewer)
        fontsize = self.scale_fontsize(fontsize)
        self.font = Font(shape.font, fontsize, shape.color,
                         alpha=getattr(shape, 'alpha', 1.0))

    def initialize_from_shape(self, shape, line=True, fill=True, font=True):
        """Initialize the requested parts of the state from *shape*."""
        if line:
            self.set_line_from_shape(shape)
        if fill:
            self.set_fill_from_shape(shape)
        if font:
            self.set_font_from_shape(shape)

    def set_line(self, color, alpha=1.0, linewidth=1, style='solid'):
        """Set the pen directly.  TODO: support line width and style."""
        self.pen = Pen(color, alpha=alpha)

    def set_fill(self, color, alpha=1.0):
        """Set the brush directly; ``None`` disables filling."""
        self.brush = None if color is None else Brush(color, alpha=alpha)

    def set_font(self, fontname, fontsize, color='black', alpha=1.0):
        """Set the font directly; the size is scaled for this viewer."""
        self.font = Font(fontname, self.scale_fontsize(fontsize), color,
                         alpha=alpha)

    def text_extents(self, text):
        """Measure *text* with the current font via the renderer."""
        return self.renderer.text_extents(text, self.font)

    ##### DRAWING OPERATIONS #####
    # Each operation is recorded, not executed; VectorRenderMixin replays
    # the list later.  Bezier/ellipse primitives are not recorded yet
    # (their BEZIER / ELLIPSE_BEZIER opcodes are reserved in VecHelp).

    def draw_image(self, image_id, cpoints, rgb_arr, whence, order='RGB'):
        record = (IMAGE, (image_id, cpoints, rgb_arr, whence, order))
        self.renderer.rl.append(record)

    def draw_text(self, cx, cy, text, rot_deg=0.0):
        record = (TEXT, (cx, cy, text, rot_deg),
                  self.pen, self.brush, self.font)
        self.renderer.rl.append(record)

    def draw_polygon(self, cpoints):
        record = (POLYGON, cpoints, self.pen, self.brush)
        self.renderer.rl.append(record)

    def draw_circle(self, cx, cy, cradius):
        record = (CIRCLE, (cx, cy, cradius), self.pen, self.brush)
        self.renderer.rl.append(record)

    def draw_line(self, cx1, cy1, cx2, cy2):
        record = (LINE, (cx1, cy1, cx2, cy2), self.pen, self.brush)
        self.renderer.rl.append(record)

    def draw_path(self, cpoints):
        record = (PATH, cpoints, self.pen, self.brush)
        self.renderer.rl.append(record)
class VectorRenderMixin:
    """Mixin for renderers keeping a render list (``self.rl``) of recorded
    drawing operations and replaying it onto a concrete render context.

    Expects the host class to provide ``dims``, ``viewer``, ``rgb_order``
    and ``logger`` attributes.
    """

    def __init__(self):
        # the render list: (optype, args[, pen, brush[, font]]) tuples
        self.rl = []

    def initialize(self):
        """Reset the render list to a single background-colored rectangle
        covering the whole surface."""
        wd, ht = self.dims
        # All four surface corners in drawing order.  Fix: the original
        # listed (ht, 0) as the last corner -- a transposed (0, ht) --
        # so the background polygon did not cover the surface.
        cpoints = ((0, 0), (wd, 0), (wd, ht), (0, ht))
        bg = self.viewer.get_bg()
        pen = Pen(color=bg)
        brush = Brush(color=bg, fill=True)
        self.rl = [(POLYGON, cpoints, pen, brush)]

    def draw_vector(self, cr):
        """Replay the recorded operations onto render context *cr*.

        A failing record is logged and skipped so one bad entry does not
        abort the whole replay.
        """
        for tup in self.rl:
            dtyp = None
            try:
                dtyp = tup[0]
                if dtyp == IMAGE:
                    (image_id, cpoints, rgb_arr, whence, order) = tup[1]
                    cr.draw_image(image_id, cpoints, rgb_arr, whence,
                                  order=self.rgb_order)
                elif dtyp == LINE:
                    (cx1, cy1, cx2, cy2) = tup[1]
                    cr.setup_pen_brush(*tup[2:4])
                    cr.draw_line(cx1, cy1, cx2, cy2)
                elif dtyp == CIRCLE:
                    (cx, cy, cradius) = tup[1]
                    cr.setup_pen_brush(*tup[2:4])
                    cr.draw_circle(cx, cy, cradius)
                elif dtyp == BEZIER:
                    cpoints = tup[1]
                    cr.setup_pen_brush(*tup[2:4])
                    cr.draw_bezier_curve(cpoints)
                elif dtyp == ELLIPSE_BEZIER:
                    # the ellipse is drawn as 4 bezier curves
                    cpoints = tup[1]
                    cr.setup_pen_brush(*tup[2:4])
                    cr.draw_ellipse_bezier(cpoints)
                elif dtyp == POLYGON:
                    cpoints = tup[1]
                    cr.setup_pen_brush(*tup[2:4])
                    cr.draw_polygon(cpoints)
                elif dtyp == PATH:
                    cpoints = tup[1]
                    cr.setup_pen_brush(*tup[2:4])
                    cr.draw_path(cpoints)
                elif dtyp == TEXT:
                    (cx, cy, text, rot_deg) = tup[1]
                    cr.setup_pen_brush(*tup[2:4])
                    # text records carry their font as a fifth member
                    font = tup[4]
                    cr.set_font(font.fontname, font.fontsize,
                                color=font.color, alpha=font.alpha)
                    cr.draw_text(cx, cy, text, rot_deg=rot_deg)
            except Exception as e:
                self.logger.error("Error drawing '{}': {}".format(dtyp, e),
                                  exc_info=True)
#END
| {
"content_hash": "4f31de673804b0970f8e0e4ea0fbc902",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 83,
"avg_line_length": 36.26553672316384,
"alnum_prop": 0.5136314067611778,
"repo_name": "pllim/ginga",
"id": "e5051e3b7e34999d0268e85c4651716242bf392d",
"size": "6606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ginga/vec/CanvasRenderVec.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2781"
},
{
"name": "GLSL",
"bytes": "7344"
},
{
"name": "HTML",
"bytes": "2129"
},
{
"name": "JavaScript",
"bytes": "87198"
},
{
"name": "Jupyter Notebook",
"bytes": "2691970"
},
{
"name": "Makefile",
"bytes": "85"
},
{
"name": "Python",
"bytes": "4359761"
}
],
"symlink_target": ""
} |
"""Views for REST APIs for reporting posts and comments"""
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from open_discussions.permissions import IsStaffOrModeratorPermission
from channels.api import Api
from channels.serializers.reports import ReportSerializer, ReportedContentSerializer
from channels.utils import translate_praw_exceptions
class ReportContentView(APIView):
    """
    View to report a comment or post
    """

    permission_classes = (IsAuthenticated,)

    def get_serializer_context(self):
        """Assemble the context handed to the serializer."""
        request = self.request
        return {
            "channel_api": request.channel_api,
            "current_user": request.user,
            "request": request,
            "view": self,
        }

    def post(self, request, *args, **kwargs):  # pylint: disable=unused-argument
        """Create a report from the submitted payload."""
        with translate_praw_exceptions(request.user):
            context = self.get_serializer_context()
            serializer = ReportSerializer(data=request.data, context=context)
            serializer.is_valid(raise_exception=True)
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
class ChannelReportListView(APIView):
    """
    Moderator view for reported comments and posts in a channels
    """

    permission_classes = (IsAuthenticated, IsStaffOrModeratorPermission)

    def get_serializer_context(self):
        """Assemble the context handed to the serializer."""
        request = self.request
        return {
            "channel_api": request.channel_api,
            "current_user": request.user,
            "request": request,
            "view": self,
        }

    def get(self, request, *args, **kwargs):  # pylint: disable=unused-argument
        """Return the list of reported content for the channel."""
        with translate_praw_exceptions(request.user):
            api = Api(user=request.user)
            reports = api.list_reports(self.kwargs["channel_name"])
            context = self.get_serializer_context()
            serializer = ReportedContentSerializer(reports, many=True,
                                                   context=context)
            return Response(serializer.data)
| {
"content_hash": "9723c24f298c39ca9ae2d4e275c6cf14",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 84,
"avg_line_length": 34.40909090909091,
"alnum_prop": 0.6525759577278731,
"repo_name": "mitodl/open-discussions",
"id": "95b215abea605b7bc44603eb59896d44ed2d97e8",
"size": "2271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "channels/views/reports.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1040"
},
{
"name": "HTML",
"bytes": "78316"
},
{
"name": "JavaScript",
"bytes": "1704037"
},
{
"name": "Procfile",
"bytes": "675"
},
{
"name": "Python",
"bytes": "2264549"
},
{
"name": "SCSS",
"bytes": "133442"
},
{
"name": "Shell",
"bytes": "11787"
},
{
"name": "TypeScript",
"bytes": "307134"
}
],
"symlink_target": ""
} |
class ParameterGroup(dict):
    """Represents an RDS parameter group.

    Behaves as a dict mapping parameter names to Parameter objects, plus
    group metadata.  Instances are usually populated by boto's SAX
    response parser through startElement()/endElement().
    """

    def __init__(self, connection=None):
        dict.__init__(self)
        self.connection = connection    # connection used for service calls
        self.name = None                # DBParameterGroupName
        self.description = None
        self.engine = None
        self._current_param = None      # Parameter currently being parsed

    def __repr__(self):
        return 'ParameterGroup:%s' % self.name

    def startElement(self, name, attrs, connection):
        """SAX hook: begin a nested element; return its handler object."""
        if name == 'Parameter':
            # store the previously parsed parameter before starting a new one
            if self._current_param:
                self[self._current_param.name] = self._current_param
            self._current_param = Parameter(self)
            return self._current_param

    def endElement(self, name, value, connection):
        """SAX hook: record a completed simple element's text value."""
        if name == 'DBParameterGroupName':
            self.name = value
        elif name == 'Description':
            self.description = value
        elif name == 'Engine':
            self.engine = value
        else:
            # unrecognized elements are kept as plain attributes
            setattr(self, name, value)

    def modifiable(self):
        """Return the list of parameters that may be modified."""
        return [p for p in self.values() if p.is_modifiable]

    def get_params(self):
        """Refresh this group's parameters from the service."""
        pg = self.connection.get_all_dbparameters(self.name)
        self.update(pg)

    def add_param(self, name, value, apply_method):
        """Create a Parameter and add it to this group.

        Fix: the original appended to a non-existent ``self.params``
        attribute (AttributeError) and created the Parameter without a
        group reference, which would have broken Parameter.apply().
        """
        param = Parameter(self, name)
        param.value = value
        param.apply_method = apply_method
        self[name] = param
class Parameter(object):
    """
    Represents a RDS Parameter.

    Instances are populated by boto's SAX response parser via
    startElement()/endElement() and serialized back into request
    parameters with merge().  NOTE: this module is Python 2 era code
    (``unicode``/``long`` are referenced on some code paths).
    """
    # maps the DataType reported by RDS onto a Python type
    ValidTypes = {'integer' : int,
                  'string' : str,
                  'boolean' : bool}
    ValidSources = ['user', 'system', 'engine-default']
    ValidApplyTypes = ['static', 'dynamic']
    ValidApplyMethods = ['immediate', 'pending-reboot']

    def __init__(self, group=None, name=None):
        self.group = group              # owning ParameterGroup (may be None)
        self.name = name
        self._value = None              # raw value; use the ``value`` property
        self.type = 'string'
        self.source = None
        self.is_modifiable = True
        self.description = None
        self.apply_method = None
        self.allowed_values = None

    def __repr__(self):
        return 'Parameter:%s' % self.name

    def startElement(self, name, attrs, connection):
        """SAX hook: this element has no interesting nested elements."""
        pass

    def endElement(self, name, value, connection):
        """SAX hook: record a completed simple element's text value."""
        if name == 'ParameterName':
            self.name = value
        elif name == 'ParameterValue':
            self._value = value
        elif name == 'DataType':
            if value in self.ValidTypes:
                self.type = value
        elif name == 'Source':
            if value in self.ValidSources:
                self.source = value
        elif name == 'IsModifiable':
            self.is_modifiable = (value.lower() == 'true')
        elif name == 'Description':
            self.description = value
        elif name == 'ApplyType':
            if value in self.ValidApplyTypes:
                self.apply_type = value
        elif name == 'AllowedValues':
            self.allowed_values = value
        else:
            # unrecognized elements are kept as plain attributes
            setattr(self, name, value)

    def merge(self, d, i):
        """Merge this parameter into request dict *d* as member number *i*."""
        prefix = 'Parameters.member.%d.' % i
        if self.name:
            d[prefix + 'ParameterName'] = self.name
        if self._value is not None:
            d[prefix + 'ParameterValue'] = self._value
        # Fix: the original tested self.apply_type, which is only set while
        # parsing a response and otherwise raises AttributeError.
        if self.apply_method:
            d[prefix + 'ApplyMethod'] = self.apply_method

    def _set_string_value(self, value):
        # Fix: the original condition was
        # ``not isinstance(value, str) or isinstance(value, unicode)``
        # which, due to precedence, rejected every unicode value.
        if not isinstance(value, str) and not isinstance(value, unicode):
            raise ValueError('value must be of type str')
        if self.allowed_values:
            choices = self.allowed_values.split(',')
            if value not in choices:
                raise ValueError('value must be in %s' % self.allowed_values)
        self._value = value

    def _set_integer_value(self, value):
        if isinstance(value, str) or isinstance(value, unicode):
            value = int(value)
        if isinstance(value, int) or isinstance(value, long):
            if self.allowed_values:
                # allowed_values for integers is a "min-max" range
                # NOTE(review): breaks for negative bounds -- confirm format
                lo, hi = self.allowed_values.split('-')
                if value < int(lo) or value > int(hi):
                    raise ValueError('range is %s' % self.allowed_values)
            self._value = value
        else:
            raise ValueError('value must be integer')

    def _set_boolean_value(self, value):
        if isinstance(value, bool):
            self._value = value
        elif isinstance(value, str) or isinstance(value, unicode):
            # any string other than 'true' (case-insensitive) means False
            self._value = (value.lower() == 'true')
        else:
            raise ValueError('value must be boolean')

    def set_value(self, value):
        """Validate and store *value* according to the declared type."""
        if self.type == 'string':
            self._set_string_value(value)
        elif self.type == 'integer':
            self._set_integer_value(value)
        elif self.type == 'boolean':
            self._set_boolean_value(value)
        else:
            raise TypeError('unknown type (%s)' % self.type)

    def get_value(self):
        """Return the value coerced to the declared type (None passes through)."""
        if self._value is None:
            return self._value
        if self.type == 'string':
            return self._value
        elif self.type == 'integer':
            if not isinstance(self._value, int) and not isinstance(self._value, long):
                self._set_integer_value(self._value)
            return self._value
        elif self.type == 'boolean':
            if not isinstance(self._value, bool):
                self._set_boolean_value(self._value)
            return self._value
        else:
            raise TypeError('unknown type (%s)' % self.type)

    value = property(get_value, set_value, 'The value of the parameter')

    def apply(self, immediate=False):
        """Queue this parameter for application on the server via its group."""
        self.apply_method = 'immediate' if immediate else 'pending-reboot'
        self.group.connection.modify_parameter_group(self.group.name, [self])
| {
"content_hash": "abe7f3a0a3e5536d6d7a0982e27f96a1",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 86,
"avg_line_length": 33.34444444444444,
"alnum_prop": 0.5461512829056981,
"repo_name": "donny/mako-mori",
"id": "e52890cf737fb49a7d37f67f81bcf65d1ba8a2ea",
"size": "7106",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "external/boto/rds/parametergroup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3180853"
}
],
"symlink_target": ""
} |
# Path of the plain-text user list consumed by this application.
# NOTE(review): relative path -- resolved against the process CWD, not the
# script's directory; verify callers start from the project root.
UserFile='./UserList.txt'
| {
"content_hash": "56f264fec30f9ff79766d7d947b5c2df",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 25,
"avg_line_length": 26,
"alnum_prop": 0.7307692307692307,
"repo_name": "51reboot/actual_09_homework",
"id": "87ccc078652492cc4354c412494d50bfc28daee6",
"size": "26",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "05/qicheng/gconf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4623850"
},
{
"name": "HTML",
"bytes": "90670692"
},
{
"name": "JavaScript",
"bytes": "31827839"
},
{
"name": "Nginx",
"bytes": "1073"
},
{
"name": "PHP",
"bytes": "349512"
},
{
"name": "Python",
"bytes": "1705997"
},
{
"name": "Shell",
"bytes": "10001"
},
{
"name": "Smarty",
"bytes": "342164"
}
],
"symlink_target": ""
} |
import re
from .._compat import to_unicode, to_native
from .basic import Validator
from .consist import isEmail
from .helpers import translate, options_sorter
# TODO port this
class isStrong(object):
    """
    Enforces complexity requirements on a field.

    If ``entropy`` is given, an entropy-based check is applied and the
    other requirements default to unset; otherwise the classic rules
    apply (min length 8, and at least one upper-case, lower-case, digit
    and special character by default).  Set ``es=True`` to get failures
    joined as a plain '|'-separated string instead of HTML.
    """
    # character classes used by the entropy estimator
    lowerset = frozenset(u'abcdefghijklmnopqrstuvwxyz')
    upperset = frozenset(u'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    numberset = frozenset(u'0123456789')
    sym1set = frozenset(u'!@#$%^&*()')
    sym2set = frozenset(u'~`-_=+[]{}\\|;:\'",.<>?/')
    otherset = frozenset(u'0123456789abcdefghijklmnopqrstuvwxyz')

    def __init__(self, min=None, max=None, upper=None, lower=None, number=None,
                 entropy=None,
                 special=None, specials=r'~!@#$%^&*()_+-=?<>,.:;{}[]|',
                 invalid=' "', error_message=None, es=False):
        self.entropy = entropy
        if entropy is None:
            # enforce default requirements
            self.min = 8 if min is None else min
            self.max = max  # was 20, but that doesn't make sense
            self.upper = 1 if upper is None else upper
            self.lower = 1 if lower is None else lower
            self.number = 1 if number is None else number
            self.special = 1 if special is None else special
        else:
            # by default, an entropy spec is exclusive: the other checks
            # run only if explicitly requested
            self.min = min
            self.max = max
            self.upper = upper
            self.lower = lower
            self.number = number
            self.special = special
        self.specials = specials
        self.invalid = invalid
        self.error_message = error_message
        self.estring = es  # return error message as string (for doctest)

    def __call__(self, value):
        """Validate *value*; return (value, None) or (value, error)."""
        failures = []
        # a value of 5+ asterisks only is treated as an already-masked
        # password and accepted unchanged
        if value and len(value) == value.count('*') > 4:
            return (value, None)
        if self.entropy is not None:
            entropy = isStrong.calc_entropy(value)
            if entropy < self.entropy:
                failures.append(
                    translate(
                        "Entropy (%(have)s) less than required (%(need)s)")
                    % dict(have=entropy, need=self.entropy))
        if type(self.min) == int and self.min > 0:
            if not len(value) >= self.min:
                failures.append(translate("Minimum length is %s") % self.min)
        if type(self.max) == int and self.max > 0:
            if not len(value) <= self.max:
                failures.append(translate("Maximum length is %s") % self.max)
        if type(self.special) == int:
            all_special = [ch in value for ch in self.specials]
            if self.special > 0:
                if not all_special.count(True) >= self.special:
                    failures.append(
                        translate(
                            "Must include at least %s of the following: %s")
                        % (self.special, self.specials))
        if self.invalid:
            all_invalid = [ch in value for ch in self.invalid]
            if all_invalid.count(True) > 0:
                failures.append(
                    translate(
                        "May not contain any of the following: %s")
                    % self.invalid)
        if type(self.upper) == int:
            all_upper = re.findall("[A-Z]", value)
            if self.upper > 0:
                if not len(all_upper) >= self.upper:
                    failures.append(
                        translate("Must include at least %s upper case")
                        % str(self.upper))
            else:
                # an explicit 0 forbids the character class entirely
                if len(all_upper) > 0:
                    failures.append(
                        translate("May not include any upper case letters"))
        if type(self.lower) == int:
            all_lower = re.findall("[a-z]", value)
            if self.lower > 0:
                if not len(all_lower) >= self.lower:
                    failures.append(
                        translate("Must include at least %s lower case")
                        % str(self.lower))
            else:
                if len(all_lower) > 0:
                    failures.append(
                        translate("May not include any lower case letters"))
        if type(self.number) == int:
            all_number = re.findall("[0-9]", value)
            if self.number > 0:
                numbers = "number"
                if self.number > 1:
                    numbers = "numbers"
                if not len(all_number) >= self.number:
                    failures.append(translate("Must include at least %s %s")
                                    % (str(self.number), numbers))
            else:
                if len(all_number) > 0:
                    failures.append(translate("May not include any numbers"))
        if len(failures) == 0:
            return (value, None)
        if not self.error_message:
            if self.estring:
                return (value, '|'.join(failures))
            from .templating import NOESCAPE
            return (value, NOESCAPE('<br />'.join(failures)))
        else:
            return (value, translate(self.error_message))

    @staticmethod
    def calc_entropy(string):
        """Calculate a simple entropy estimate (in bits) for *string*."""
        import math
        alphabet = 0    # effective alphabet size
        other = set()   # unique characters already credited
        seen = set()    # character classes already credited
        lastset = None  # class of the previous character
        string = to_unicode(string)
        for c in string:
            # classify this character
            inset = isStrong.otherset
            for cset in (isStrong.lowerset, isStrong.upperset,
                         isStrong.numberset, isStrong.sym1set,
                         isStrong.sym2set):
                if c in cset:
                    inset = cset
                    break
            # calculate effect of character on alphabet size
            if inset not in seen:
                seen.add(inset)
                alphabet += len(inset)  # credit for a new character set
            elif c not in other:
                alphabet += 1  # credit for unique characters
                other.add(c)
            if inset is not lastset:
                alphabet += 1  # credit for set transitions
                # Fix: remember the *matched* class.  The original stored
                # ``cset``, which -- when no class matched and ``inset``
                # fell back to otherset -- held whatever class the
                # classification loop probed last.
                lastset = inset
        entropy = len(
            string) * math.log(alphabet) / 0.6931471805599453  # math.log(2)
        return round(entropy, 2)
# TODO port this
class FilenameMatches(Validator):
    """
    Checks if name and extension of file uploaded through file input matches
    given criteria.

    Does *not* ensure the file type in any way. Returns validation failure
    if no data was uploaded.

    Args:
        filename: filename (before dot) regex
        extension: extension (after dot) regex
        lastdot: which dot should be used as a filename / extension separator:
            True means last dot, eg. file.png -> file / png
            False means first dot, eg. file.tar.gz -> file / tar.gz
        case:
            0 - keep the case,
            1 - transform the string into lowercase (default),
            2 - transform the string into uppercase

    If there is no dot present, extension checks will be done against empty
    string and filename checks against whole value.
    """

    def __init__(self, filename=None, extension=None, lastdot=True, case=1,
                 error_message='Enter valid filename'):
        # plain strings are compiled; pre-compiled patterns pass through
        if isinstance(filename, str):
            filename = re.compile(filename)
        if isinstance(extension, str):
            extension = re.compile(extension)
        self.filename = filename
        self.extension = extension
        self.lastdot = lastdot
        self.case = case
        self.error_message = error_message

    def __call__(self, value):
        """Validate the uploaded file's name against the configured regexes."""
        try:
            string = value.filename
        except AttributeError:
            # Fix: the original used a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; only a missing ``filename``
            # attribute means "nothing was uploaded".
            return (value, translate(self.error_message))
        if self.case == 1:
            string = string.lower()
        elif self.case == 2:
            string = string.upper()
        if self.lastdot:
            dot = string.rfind('.')
        else:
            dot = string.find('.')
        if dot == -1:
            # no dot: the filename check sees the whole value and the
            # extension check sees the empty string
            dot = len(string)
        if self.filename and not self.filename.match(string[:dot]):
            return (value, translate(self.error_message))
        elif self.extension and not self.extension.match(string[dot + 1:]):
            return (value, translate(self.error_message))
        else:
            return (value, None)
# Kept for reference (v0.3 and below)
class isEmailList(object):
    """Validate a list of e-mail addresses separated by commas,
    semicolons or whitespace. (Kept for reference, v0.3 and below.)"""

    split_emails = re.compile(r'[^,;\s]+')

    def __init__(self, error_message='Invalid emails: %s'):
        self.error_message = error_message

    def __call__(self, value):
        """Return (value, None) if every address is valid, else
        (value, message listing the invalid addresses)."""
        checker = isEmail()
        unique, invalid = [], []
        for candidate in self.split_emails.findall(value):
            if candidate in unique:
                continue  # each address is validated only once
            unique.append(candidate)
            if checker(candidate)[1] and candidate not in invalid:
                invalid.append(candidate)
        if invalid:
            return (value,
                    translate(self.error_message) % ', '.join(invalid))
        return (value, None)

    def formatter(self, value, row=None):
        """Render the stored list as a comma-separated string."""
        return ', '.join(value or [])
# Kept for reference (v0.3 and below)
class inDb(Validator):
    """
    Used for reference fields, rendered as a dropbox.

    Validates that the submitted value is the key of an existing record
    in ``dbset`` on ``field``.  Kept for reference (v0.3 and below).

    NOTE(review): Python 2 era code -- ``reduce`` is used without an
    import in build_set() and ``options_sorter`` is a cmp-style sorter;
    this class will not run unmodified on Python 3.
    """
    # "table.field" dotted-name detector
    regex1 = re.compile('\w+\.\w+')
    # %(fieldname)s -style placeholders inside a label format string
    regex2 = re.compile('%\(([^\)]+)\)\d*(?:\.\d+)?[a-zA-Z]')

    def __init__(
        self,
        dbset,
        field,
        label=None,
        error_message='Value not in database',
        orderby=None,
        groupby=None,
        distinct=None,
        cache=None,
        multiple=False,
        zero='',
        sort=False,
        _and=None,
    ):
        """Store the query set, key field and display options.

        ``label`` may be a '%(field)s' format string, a 'table.field'
        dotted name, or a callable taking a record.  ``_and`` chains a
        further validator applied after the membership check.
        """
        from pydal.objects import Table
        if isinstance(field, Table):
            field = field._id
        # accept either a DAL instance or an already-built Set
        if hasattr(dbset, 'define_table'):
            self.dbset = dbset()
        else:
            self.dbset = dbset
        (ktable, kfield) = str(field).split('.')
        if not label:
            label = '%%(%s)s' % kfield
        if isinstance(label, str):
            # 'table.field' shorthand becomes a '%(field)s' format string
            if self.regex1.match(str(label)):
                label = '%%(%s)s' % str(label).split('.')[-1]
            # select every field referenced by the label, plus the key
            ks = self.regex2.findall(label)
            if kfield not in ks:
                ks += [kfield]
            fields = ks
        else:
            # label is a callable: all fields must be selected
            ks = [kfield]
            fields = 'all'
        self.fields = fields
        self.label = label
        self.ktable = ktable
        self.kfield = kfield
        self.ks = ks
        self.error_message = error_message
        self.theset = None
        self.orderby = orderby
        self.groupby = groupby
        self.distinct = distinct
        self.cache = cache
        self.multiple = multiple
        self.zero = zero
        self.sort = sort
        self._and = _and

    def set_self_id(self, id):
        """Propagate the current record id to the chained validator."""
        if self._and:
            self._and.record_id = id

    def build_set(self):
        """Fetch candidate records; cache their keys (theset) and labels."""
        from pydal.objects import FieldVirtual, FieldMethod
        table = self.dbset.db[self.ktable]
        if self.fields == 'all':
            fields = [f for f in table]
        else:
            fields = [table[k] for k in self.fields]
        # virtual/method fields cannot be selected from the database
        ignore = (FieldVirtual, FieldMethod)
        fields = filter(lambda f: not isinstance(f, ignore), fields)
        if self.dbset.db._dbname != 'gae':
            orderby = self.orderby or reduce(lambda a, b: a | b, fields)
            groupby = self.groupby
            distinct = self.distinct
            dd = dict(orderby=orderby, groupby=groupby,
                      distinct=distinct, cache=self.cache,
                      cacheable=True)
            records = self.dbset(table).select(*fields, **dd)
        else:
            # GAE adapter: no groupby/distinct; 'id' cannot be ordered on
            orderby = self.orderby or \
                reduce(lambda a, b: a | b, (
                    f for f in fields if not f.name == 'id'))
            dd = dict(orderby=orderby, cache=self.cache, cacheable=True)
            records = self.dbset(table).select(table.ALL, **dd)
        self.theset = [str(r[self.kfield]) for r in records]
        if isinstance(self.label, str):
            self.labels = [self.label % r for r in records]
        else:
            self.labels = [self.label(r) for r in records]

    def options(self, zero=True):
        """Return (key, label) pairs for building a dropdown widget."""
        self.build_set()
        items = [(k, self.labels[i]) for (i, k) in enumerate(self.theset)]
        if self.sort:
            # NOTE(review): cmp-style sort argument (Python 2 only)
            items.sort(options_sorter)
        if zero and self.zero is not None and not self.multiple:
            items.insert(0, ('', self.zero))
        return items

    def __call__(self, value):
        """Return (value, None) when valid, else (value, error message)."""
        table = self.dbset.db[self.ktable]
        field = table[self.kfield]
        if self.multiple:
            if self._and:
                raise NotImplementedError
            # normalize the submitted value(s) to a list
            if isinstance(value, list):
                values = value
            elif value:
                values = [value]
            else:
                values = []
            # multiple may be a (min, max) pair bounding the selection count
            if isinstance(self.multiple, (tuple, list)) and \
                    not self.multiple[0] <= len(values) < self.multiple[1]:
                return values, translate(self.error_message)
            if self.theset:
                if not [v for v in values if v not in self.theset]:
                    return values, None
            else:
                from pydal.adapters import GoogleDatastoreAdapter
                def count(values, s=self.dbset, f=field):
                    return s(f.belongs(map(int, values))).count()
                if isinstance(self.dbset.db._adapter, GoogleDatastoreAdapter):
                    # GAE limits 'belongs' queries to 30 items each
                    range_ids = range(0, len(values), 30)
                    total = sum(count(values[i:i + 30]) for i in range_ids)
                    if total == len(values):
                        return values, None
                elif count(values) == len(values):
                    return values, None
        elif self.theset:
            # membership against the cached key set
            if str(value) in self.theset:
                if self._and:
                    return self._and(value)
                else:
                    return value, None
        else:
            # no cached set: query the database directly
            if self.dbset(field == value).count():
                if self._and:
                    return self._and(value)
                else:
                    return value, None
        return value, translate(self.error_message)
# Kept for reference (v0.3 and below)
class notInDb(Validator):
    """
    makes the field unique

    Validates that ``value`` does not already appear in the given field of
    the database, i.e. enforces uniqueness. A record id may be registered
    via set_self_id() so an edited record does not collide with itself.
    """

    def __init__(
        self,
        dbset,
        field,
        error_message='Value already in database or empty',
        allowed_override=None,
        ignore_common_filters=False,
    ):
        from pydal.objects import Table
        # Accept a Table directly and fall back to its id field.
        if isinstance(field, Table):
            field = field._id
        # A DAL instance is callable; calling it yields an unfiltered Set.
        if hasattr(dbset, 'define_table'):
            self.dbset = dbset()
        else:
            self.dbset = dbset
        self.field = field
        self.error_message = error_message
        self.record_id = 0
        # BUG FIX: the default was a shared mutable list ([]); build a fresh
        # list per instance instead. Passing an explicit list still works.
        self.allowed_override = (
            allowed_override if allowed_override is not None else [])
        self.ignore_common_filters = ignore_common_filters

    def set_self_id(self, id):
        # Record id (or dict of field values) of the row being edited, so
        # that it is allowed to keep its current value.
        self.record_id = id

    def __call__(self, value):
        """Return (value, None) when no *other* record holds ``value``."""
        value = to_native(to_unicode(value))
        if not value.strip():
            return value, translate(self.error_message)
        if value in self.allowed_override:
            return value, None
        (tablename, fieldname) = str(self.field).split('.')
        table = self.dbset.db[tablename]
        field = table[fieldname]
        subset = self.dbset(field == value,
                            ignore_common_filters=self.ignore_common_filters)
        # Renamed from 'id' to avoid shadowing the builtin.
        record_id = self.record_id
        if isinstance(record_id, dict):
            # Composite key: the match must agree on every provided field.
            fields = [table[f] for f in record_id]
            row = subset.select(*fields, **dict(
                limitby=(0, 1), orderby_on_limitby=False)).first()
            if row and any(str(row[f]) != str(record_id[f])
                           for f in record_id):
                return value, translate(self.error_message)
        else:
            row = subset.select(table._id, field, limitby=(0, 1),
                                orderby_on_limitby=False).first()
            # NOTE(review): row.id assumes the table's id field is literally
            # named 'id'; verify for tables with a custom _id field.
            if row and str(row.id) != str(record_id):
                return value, translate(self.error_message)
        return value, None
| {
"content_hash": "b6eefc332d12eeb29c0edb811c292643",
"timestamp": "",
"source": "github",
"line_count": 443,
"max_line_length": 79,
"avg_line_length": 36.89164785553047,
"alnum_prop": 0.522486691549899,
"repo_name": "daxslab/web2py-model-class-declaration",
"id": "3f0f39bac6f421313acb946528a94ac06ece958c",
"size": "16343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/plugin_model_class_declaration/validators/_old.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "136481"
},
{
"name": "Shell",
"bytes": "191"
}
],
"symlink_target": ""
} |
import unittest
import time
from tools import tob
import sys
import os
import signal
import socket
from subprocess import Popen, PIPE
import tools
from bottle import server_names
try:
from urllib.request import urlopen
except:
from urllib2 import urlopen
serverscript = os.path.join(os.path.dirname(__file__), 'servertest.py')
def ping(server, port):
    ''' Check if a server accepts connections on a specific TCP port '''
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((server, port))
    except socket.error:
        return False
    else:
        return True
    finally:
        # Always release the socket, whether or not the connect succeeded.
        sock.close()
class TestServer(unittest.TestCase):
server = 'wsgiref'
skip = False
def setUp(self):
self.skip = self.skip or 'fast' in sys.argv
if self.skip: return
# Find a free port
for port in range(8800, 8900):
self.port = port
# Start servertest.py in a subprocess
cmd = [sys.executable, serverscript, self.server, str(port)]
cmd += sys.argv[1:] # pass cmdline arguments to subprocesses
self.p = Popen(cmd, stdout=PIPE, stderr=PIPE)
# Wait for the socket to accept connections
for i in range(100):
time.sleep(0.1)
# Accepts connections?
if ping('127.0.0.1', port): return
# Server died for some reason...
if not self.p.poll() is None: break
rv = self.p.poll()
if rv is None:
raise AssertionError("Server took too long to start up.")
if rv is 128: # Import error
tools.warn("Skipping %r test (ImportError)." % self.server)
self.skip = True
return
if rv is 3: # Port in use
continue
raise AssertionError("Server exited with error code %d" % rv)
raise AssertionError("Could not find a free port to test server.")
def tearDown(self):
if self.skip: return
if self.p.poll() == None:
os.kill(self.p.pid, signal.SIGINT)
time.sleep(0.5)
if self.p.poll() == None:
os.kill(self.p.pid, signal.SIGTERM)
time.sleep(0.5)
while self.p.poll() == None:
tools.warn("Trying to kill server %r with pid %d." %
(self.server, self.p.pid))
os.kill(self.p.pid, signal.SIGKILL)
time.sleep(1)
lines = [line for stream in (self.p.stdout, self.p.stderr) for line in stream]
for line in lines:
if tob('warning') in line.lower():
tools.warn(line.strip().decode('utf8'))
elif tob('error') in line.lower():
raise AssertionError(line.strip().decode('utf8'))
def fetch(self, url):
try:
return urlopen('http://127.0.0.1:%d/%s' % (self.port, url), None, 5).read()
except Exception as E:
return repr(E)
def test_simple(self):
''' Test a simple static page with this server adapter. '''
if self.skip: return
self.assertEqual(tob('OK'), self.fetch('test'))
# Dynamically create one TestServerAdapter_<name> TestCase per known bottle
# server adapter, except adapters that cannot be launched this way.
blacklist = ['cgi', 'flup', 'gae']
for name in set(server_names) - set(blacklist):
    classname = 'TestServerAdapter_'+name
    # Each generated subclass only overrides the 'server' class attribute.
    setattr(sys.modules[__name__], classname,
            type(classname, (TestServer,), {'server': name}))
| {
"content_hash": "307d6dacd985a74499ff87557e7bc699",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 87,
"avg_line_length": 32.97115384615385,
"alnum_prop": 0.568387284922718,
"repo_name": "Inndy/bottle",
"id": "610006c04d12ec7460a399e58cd31e86b5b5ce65",
"size": "3453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1806"
},
{
"name": "Python",
"bytes": "307347"
},
{
"name": "Shell",
"bytes": "1592"
},
{
"name": "Smarty",
"bytes": "580"
}
],
"symlink_target": ""
} |
import os
import re
import subprocess
import sys
from infra import git
from infra import go
_TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
_REPO_ROOT = os.path.realpath(os.path.join(_TOOLS_DIR, os.pardir))
_INFRA_BOTS = os.path.join(_REPO_ROOT, 'infra', 'bots')
sys.path.insert(0, _INFRA_BOTS)
import git_utils
# Git ref prefixes; Chrome release branches look like refs/heads/chrome/m<N>.
REFS_HEADS_PREFIX = 'refs/heads/'
CHROME_REF_PREFIX = REFS_HEADS_PREFIX + 'chrome/m'
# Header file that carries the current milestone number.
SK_MILESTONE_H = os.path.join('include', 'core', 'SkMilestone.h')
# Template / regex pair for the '#define SK_MILESTONE <N>' line.
SK_MILESTONE_TMPL = r'#define SK_MILESTONE %s'
SK_MILESTONE_RE = SK_MILESTONE_TMPL % r'(\d+)'
SKIA_REPO = 'https://skia.googlesource.com/skia.git'
# How many Chrome release branches receive infra (CQ) support at a time.
SUPPORTED_CHROME_BRANCHES = 3
UPDATE_MILESTONE_COMMIT_MSG = '''Update Skia milestone to %d'''
def get_current_milestone():
  '''Read SkMilestone.h and parse out the current milestone.'''
  milestone_path = os.path.join(_REPO_ROOT, SK_MILESTONE_H)
  with open(milestone_path, 'r') as f:
    lines = f.read().splitlines()
  for line in lines:
    match = re.match(SK_MILESTONE_RE, line)
    if match:
      return int(match.group(1))
  # No line matched: the header's format has probably changed.
  sys.stderr.write(
      'Failed to parse %s; has the format changed?\n' % SK_MILESTONE_H)
  sys.exit(1)
def create_new_branch(new_branch, branch_at):
  '''Create a temporary checkout of the repo, create the new branch and push.'''
  # Strip the 'refs/heads/' prefix to get the plain branch name.
  branch_name = new_branch[len(REFS_HEADS_PREFIX):]
  with git_utils.NewGitCheckout(SKIA_REPO, local=_REPO_ROOT):
    git.git('checkout', '-b', branch_name)
    git.git('reset', '--hard', branch_at)
    git.git('push', '--set-upstream', 'origin', branch_name)
def update_milestone(m):
  '''Update SkMilestone.h to match the given milestone number.'''
  # Work in a fresh checkout so local state never leaks into the commit.
  with git_utils.NewGitCheckout(SKIA_REPO, local=_REPO_ROOT):
    with git_utils.GitBranch(
        'update_milestone', UPDATE_MILESTONE_COMMIT_MSG % m):
      # Rewrite the milestone macro in place: read, substitute, seek back
      # to the start, then truncate so shorter content leaves no tail.
      with open(SK_MILESTONE_H, 'r+') as f:
        contents = re.sub(
            SK_MILESTONE_RE, SK_MILESTONE_TMPL % str(m), f.read(), flags=re.M)
        f.seek(0)
        f.write(contents)
        f.truncate()
      # Show the resulting diff in the log for easier debugging.
      git.git('diff')
def update_infra_config(old_branch, new_branch):
  '''Create a CL to add infra support for the new branch and remove the old.'''
  owner = git.git('config', 'user.email').rstrip()
  if not owner:
    sys.stderr.write('No configured git user; please run '
                     '"git config user.email <your email>".\n')
    sys.exit(1)
  go.mod_download()
  go.install(go.INFRA_GO+'/go/supported_branches/cmd/new-branch')
  # new-branch adds CQ/infra support for the new branch, drops the oldest,
  # and submits the CL on behalf of the configured git user.
  new_branch_cmd = [
      'new-branch',
      '--branch', new_branch[len(REFS_HEADS_PREFIX):],
      '--delete', old_branch[len(REFS_HEADS_PREFIX):],
      '--owner', owner,
      '--exclude-trybots=chromium.*',
      '--exclude-trybots=.*Android_Framework.*',
      '--exclude-trybots=.*G3_Framework.*',
      '--submit',
  ]
  subprocess.check_call(new_branch_cmd)
def main():
  '''Branch the repo at the given commit and roll SkMilestone.h forward.'''
  if len(sys.argv) != 2 or '--help' in sys.argv or '-h' in sys.argv:
    sys.stderr.write('Usage: %s <commit hash for branch>\n' % sys.argv[0])
    sys.exit(1)
  go.check()
  branch_at = sys.argv[1]
  milestone = get_current_milestone()
  new_branch = '%s%d' % (CHROME_REF_PREFIX, milestone)
  old_branch = '%s%d' % (
      CHROME_REF_PREFIX, milestone - SUPPORTED_CHROME_BRANCHES)
  sys.stdout.write('Creating branch %s and removing support (eg. CQ) for %s\n'
                   % (new_branch, old_branch))
  create_new_branch(new_branch, branch_at)
  update_milestone(milestone + 1)
  update_infra_config(old_branch, new_branch)
# Script entry point.
if __name__ == '__main__':
  main()
| {
"content_hash": "02e6400a8ae447d16308f08e5331ecc3",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 80,
"avg_line_length": 35.38383838383838,
"alnum_prop": 0.6208963745361119,
"repo_name": "endlessm/chromium-browser",
"id": "7ebf4bc42e73714ea4b66ae76bc375aaf6965a5b",
"size": "3662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/skia/tools/chrome_release_branch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop the legacy ``meteor_id`` column from the Task model."""

    dependencies = [
        ('django_todos', '0003_auto_20150708_1043'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='task',
            name='meteor_id',
        ),
    ]
| {
"content_hash": "821717d2b2a62628f6b018faeb5e625c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 52,
"avg_line_length": 19.647058823529413,
"alnum_prop": 0.5898203592814372,
"repo_name": "cxhandley/django-ddp-meteor-todo",
"id": "14be2f441bfc5d6a06e758910e06c2bc4e8ba9b3",
"size": "358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_todos/migrations/0004_remove_task_meteor_id.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1714"
},
{
"name": "HTML",
"bytes": "1098"
},
{
"name": "JavaScript",
"bytes": "3566"
},
{
"name": "Python",
"bytes": "25182"
}
],
"symlink_target": ""
} |
import json
import os
import time
from gym import error
from gym.utils import atomic_write
class StatsRecorder(object):
    """Accumulates per-episode step counts and rewards for a monitored
    environment and writes them, with reset timestamps, to a JSON file."""

    def __init__(self, directory, file_prefix):
        self.directory = directory
        self.file_prefix = file_prefix
        filename = '{}.stats.json'.format(self.file_prefix)
        self.path = os.path.join(self.directory, filename)
        # Completed-episode accumulators.
        self.initial_reset_timestamp = None
        self.episode_lengths = []
        self.episode_rewards = []
        self.timestamps = []
        # In-progress episode state; None until the first reset.
        self.steps = None
        self.rewards = None
        self.done = None
        self.closed = False

    def before_step(self, action):
        """Check that stepping is legal in the current monitor state."""
        assert not self.closed
        if self.done:
            raise error.ResetNeeded("Trying to step environment which is currently done. While the monitor is active, you cannot step beyond the end of an episode. Call 'env.reset()' to start the next episode.")
        if self.steps is None:
            raise error.ResetNeeded("Trying to step an environment before reset. While the monitor is active, you must call 'env.reset()' before taking an initial step.")

    def after_step(self, observation, reward, done, info):
        """Count the step, add its reward, and latch the done flag."""
        self.steps = self.steps + 1
        self.rewards = self.rewards + reward
        if done:
            self.done = True

    def before_reset(self):
        """Mark a new episode as starting; remember the first reset time."""
        assert not self.closed
        self.done = False
        if self.initial_reset_timestamp is None:
            self.initial_reset_timestamp = time.time()

    def after_reset(self, observation):
        """Flush the episode that just ended (if any), then zero counters."""
        self.save_complete()
        self.steps = 0
        self.rewards = 0

    def save_complete(self):
        """Append the finished episode's totals to the accumulators.

        A no-op before the first reset (self.steps is still None)."""
        if self.steps is None:
            return
        self.episode_lengths.append(self.steps)
        self.episode_rewards.append(self.rewards)
        self.timestamps.append(time.time())

    def close(self):
        """Record any in-progress episode, write the file, and seal."""
        self.save_complete()
        self.flush()
        self.closed = True

    def flush(self):
        """Atomically write all accumulated statistics to self.path."""
        if self.closed:
            return
        payload = {
            'initial_reset_timestamp': self.initial_reset_timestamp,
            'timestamps': self.timestamps,
            'episode_lengths': self.episode_lengths,
            'episode_rewards': self.episode_rewards,
        }
        with atomic_write.atomic_write(self.path) as f:
            json.dump(payload, f)
| {
"content_hash": "763d0b973c4f94a6b6ee2342bdcc647e",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 211,
"avg_line_length": 32.013888888888886,
"alnum_prop": 0.6073752711496746,
"repo_name": "machinaut/gym",
"id": "9aaabd126c60090a30edd335ac7d8ebd4851ba52",
"size": "2305",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gym/monitoring/stats_recorder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "461"
},
{
"name": "Python",
"bytes": "448298"
},
{
"name": "Shell",
"bytes": "711"
}
],
"symlink_target": ""
} |
import horizon
from horizon.dashboards.nova import dashboard
class InstancesAndVolumes(horizon.Panel):
    """Dashboard panel for instances and volumes."""
    # Display name and URL slug for this panel.
    name = "Instances & Volumes"
    slug = 'instances_and_volumes'


# Make the panel discoverable under the Nova dashboard.
dashboard.Nova.register(InstancesAndVolumes)
| {
"content_hash": "c4c35aa720e87d92cd69ed965ea6f96d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 45,
"avg_line_length": 22,
"alnum_prop": 0.7818181818181819,
"repo_name": "andrewsmedina/horizon",
"id": "d826f78e95481b6a412adf8146ef51b349cb7cd1",
"size": "870",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "horizon/horizon/dashboards/nova/instances_and_volumes/panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from datetime import datetime
from opendc.models.authorization import Authorization
from opendc.models.datacenter import Datacenter
from opendc.models.path import Path
from opendc.models.section import Section
from opendc.models.simulation import Simulation
from opendc.models.user import User
from opendc.util import database, exceptions
from opendc.util.rest import Response
def POST(request):
    """Create a new simulation, and return that new simulation.

    Also creates the Simulation's initial Authorization (owner), Path,
    Datacenter and Section rows.
    """
    # Make sure required parameters are there
    try:
        request.check_required_parameters(
            body={
                'simulation': {
                    'name': 'string'
                }
            }
        )
    except exceptions.ParameterError as e:
        return Response(400, e.message)

    # CONSISTENCY FIX: compute one timestamp and reuse it, so that
    # datetimeCreated, datetimeLastEdited and the Path's creation time are
    # identical for a freshly created Simulation (previously each used a
    # separate datetime.now() call, which could differ by microseconds).
    now = database.datetime_to_string(datetime.now())

    # Instantiate a Simulation and insert it into the database
    simulation_data = request.params_body['simulation']
    simulation_data['datetimeCreated'] = now
    simulation_data['datetimeLastEdited'] = now
    simulation = Simulation.from_JSON(simulation_data)
    simulation.insert()

    # Authorize the requesting user as owner of the new Simulation
    authorization = Authorization(
        user_id=User.from_google_id(request.google_id).id,
        simulation_id=simulation.id,
        authorization_level='OWN'
    )
    authorization.insert()

    # Instantiate a Path and insert it into the database
    path = Path(
        simulation_id=simulation.id,
        datetime_created=now
    )
    path.insert()

    # Instantiate a Datacenter and insert it into the database
    datacenter = Datacenter(
        starred=0,
        simulation_id=simulation.id
    )
    datacenter.insert()

    # Instantiate a Section tying the Path to the Datacenter from tick 0
    section = Section(
        path_id=path.id,
        datacenter_id=datacenter.id,
        start_tick=0
    )
    section.insert()

    # Return this Simulation
    return Response(
        200,
        'Successfully created {}.'.format(simulation),
        simulation.to_JSON()
    )
| {
"content_hash": "e9692bb429192d152985137412a149e8",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 87,
"avg_line_length": 25.03448275862069,
"alnum_prop": 0.6689623507805326,
"repo_name": "atlarge-research/opendc-web-server",
"id": "a86377284a46a6c24466fb8cffb7a90428c8aaac",
"size": "2178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opendc/api/v1/simulations/endpoint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "848"
},
{
"name": "Python",
"bytes": "145921"
}
],
"symlink_target": ""
} |
"""Console script for sheets."""
import click
import os
import threading
default_data_path = os.path.normpath(os.path.join(os.path.abspath(__file__), "../../data"))
@click.command()
@click.argument("path", default=default_data_path)
def main(path):
    """Console script for sheets."""
    # Imported lazily: these package modules have import-time side effects
    # and expect the working directory / import hooks set up below.
    from . import http
    path = os.path.abspath(path)
    # All file operations below are relative to the data directory.
    os.chdir(path)
    # The HTTP server runs on its own thread; the socket server (started
    # by run_server below) occupies the main thread.
    t = threading.Thread(target=http.start)
    t.start()
    print("Starting socket server on http://localhost:10101")
    from .server import run_server
    from .env import Environment
    from .datalayer import Model
    from .router import Router
    from .history import History
    from . import importwatch
    # NOTE(review): importwatch is activated before the Environment/Model
    # are constructed — presumably so their imports are tracked; confirm
    # this ordering is required before reordering.
    importwatch.activate()
    env = Environment(path)
    history = History(path)
    model = Model(env, history)
    router = Router(env=env, model=model)
    router.register()
    print("Saving files in %s" % env.path)
    # Blocks serving the socket protocol until interrupted.
    run_server(10101)


if __name__ == "__main__":
    main()
| {
"content_hash": "e1fb98cd315c2261531bab42817601a0",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 91,
"avg_line_length": 26.75,
"alnum_prop": 0.6635514018691588,
"repo_name": "ianb/sheets",
"id": "552846ebf6a381605f9699f96a52bbd9430ddbb3",
"size": "988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sheets/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "505"
},
{
"name": "HTML",
"bytes": "407"
},
{
"name": "JavaScript",
"bytes": "32419"
},
{
"name": "Makefile",
"bytes": "2265"
},
{
"name": "Python",
"bytes": "50903"
}
],
"symlink_target": ""
} |
import collections
import collections.abc
import copy
import json
import logging
import re

from test_result_util import ResultCollection, TestResult, TestStatus
# Module-level logger for this file.
LOGGER = logging.getLogger(__name__)

# These labels should match the ones output by gtest's JSON.
TEST_UNKNOWN_LABEL = 'UNKNOWN'
TEST_SUCCESS_LABEL = 'SUCCESS'
TEST_FAILURE_LABEL = 'FAILURE'
TEST_SKIPPED_LABEL = 'SKIPPED'
TEST_TIMEOUT_LABEL = 'TIMEOUT'
TEST_WARNING_LABEL = 'WARNING'
class GTestResult(object):
  """A result of gtest.

  The class will be deprecated soon. Please use
  |test_result_util.ResultCollection| instead. (crbug.com/1132476)

  Properties:
    command: The command argv.
    crashed: Whether or not the test crashed.
    crashed_test: The name of the test during which execution crashed, or
      None if a particular test didn't crash.
    disabled_tests_from_compiled_tests_file: Disabled tests as read from
      the compiled-tests file, if any.
    failed_tests: A dict mapping the names of failed tests to a list of
      lines of output from those tests.
    flaked_tests: A dict mapping the names of failed flaky tests to a list
      of lines of output from those tests.
    passed_tests: A list of passed tests.
    perf_links: A dict mapping the names of perf data points collected
      to links to view those graphs.
    return_code: The return code of the command.
    success: Whether or not this run of the command was considered a
      successful GTest execution.
  """

  @property
  def crashed(self):
    return self._crashed

  @property
  def crashed_test(self):
    return self._crashed_test

  @property
  def command(self):
    return self._command

  # The mutable-container properties below hand out defensive deep copies
  # once finalize() has been called, so callers cannot mutate the results.
  @property
  def disabled_tests_from_compiled_tests_file(self):
    if self.__finalized:
      return copy.deepcopy(self._disabled_tests_from_compiled_tests_file)
    return self._disabled_tests_from_compiled_tests_file

  @property
  def failed_tests(self):
    if self.__finalized:
      return copy.deepcopy(self._failed_tests)
    return self._failed_tests

  @property
  def flaked_tests(self):
    if self.__finalized:
      return copy.deepcopy(self._flaked_tests)
    return self._flaked_tests

  @property
  def passed_tests(self):
    if self.__finalized:
      return copy.deepcopy(self._passed_tests)
    return self._passed_tests

  @property
  def perf_links(self):
    if self.__finalized:
      return copy.deepcopy(self._perf_links)
    return self._perf_links

  @property
  def return_code(self):
    return self._return_code

  @property
  def success(self):
    return self._success

  def __init__(self, command):
    # BUG FIX: the collection ABC aliases (collections.Iterable etc.) were
    # removed from |collections| in Python 3.10; use collections.abc.
    if not isinstance(command, collections.abc.Iterable):
      raise ValueError('Expected an iterable of command arguments.', command)

    if not command:
      raise ValueError('Expected a non-empty command.', command)

    self._command = tuple(command)
    self._crashed = False
    self._crashed_test = None
    self._disabled_tests_from_compiled_tests_file = []
    self._failed_tests = collections.OrderedDict()
    self._flaked_tests = collections.OrderedDict()
    self._passed_tests = []
    self._perf_links = collections.OrderedDict()
    self._return_code = None
    self._success = None
    self.__finalized = False

  def finalize(self, return_code, success):
    """Records the run's outcome and derives crash information."""
    self._return_code = return_code
    self._success = success

    # If the test was not considered to be a GTest success, but had no
    # failing tests, conclude that it must have crashed.
    if not self._success and not self._failed_tests and not self._flaked_tests:
      self._crashed = True

    # At most one test can crash the entire app in a given parsing.
    for test, log_lines in self._failed_tests.items():
      # A test with no output would have crashed. No output is replaced
      # by the GTestLogParser by a sentence indicating non-completion.
      if 'Did not complete.' in log_lines:
        self._crashed = True
        self._crashed_test = test

    # A test marked as flaky may also have crashed the app.
    for test, log_lines in self._flaked_tests.items():
      if 'Did not complete.' in log_lines:
        self._crashed = True
        self._crashed_test = test

    self.__finalized = True
class GTestLogParser(object):
"""This helper class process GTest test output."""
  def __init__(self):
    """Initializes all parsing state; no log lines have been seen yet."""
    # Test results from the parser.
    self._result_collection = ResultCollection()

    # State tracking for log parsing
    self.completed = False
    self._current_test = ''
    self._failure_description = []
    self._parsing_failures = False

    # Line number currently being processed.
    self._line_number = 0

    # List of parsing errors, as human-readable strings.
    self._internal_error_lines = []

    # Tests are stored here as 'test.name': (status, [description]).
    # The status should be one of ('started', 'OK', 'failed', 'timeout',
    # 'warning'). Warning indicates that a test did not pass when run in
    # parallel with other tests but passed when run alone. The description is
    # a list of lines detailing the test's error, as reported in the log.
    self._test_status = {}

    # This may be either text or a number. It will be used in the phrase
    # '%s disabled' or '%s flaky' on the waterfall display.
    self._disabled_tests = 0

    # Disabled tests by parsing the compiled tests json file output from GTest.
    self._disabled_tests_from_compiled_tests_file = []
    self._flaky_tests = 0

    # Regular expressions for parsing GTest logs. Test names look like
    # "x.y", with 0 or more "w/" prefixes and 0 or more "/z" suffixes.
    # e.g.:
    #   SomeName/SomeTestCase.SomeTest/1
    #   SomeName/SomeTestCase/1.SomeTest
    #   SomeName/SomeTestCase/1.SomeTest/SomeModifider
    test_name_regexp = r'((\w+/)*\w+\.\w+(/\w+)*)'
    self._master_name_re = re.compile(r'\[Running for master: "([^"]*)"')
    self.master_name = ''
    self._test_name = re.compile(test_name_regexp)
    self._test_start = re.compile(r'\[\s+RUN\s+\] ' + test_name_regexp)
    self._test_ok = re.compile(r'\[\s+OK\s+\] ' + test_name_regexp)
    self._test_fail = re.compile(r'\[\s+FAILED\s+\] ' + test_name_regexp)
    self._test_passed = re.compile(r'\[\s+PASSED\s+\] \d+ tests?.')
    self._test_skipped = re.compile(r'\[\s+SKIPPED\s+\] ' + test_name_regexp)
    self._run_test_cases_line = re.compile(
        r'\[\s*\d+\/\d+\]\s+[0-9\.]+s ' + test_name_regexp + ' .+')
    self._test_timeout = re.compile(
        r'Test timeout \([0-9]+ ms\) exceeded for ' + test_name_regexp)
    self._disabled = re.compile(r'\s*YOU HAVE (\d+) DISABLED TEST')
    self._flaky = re.compile(r'\s*YOU HAVE (\d+) FLAKY TEST')
    self._retry_message = re.compile('RETRYING FAILED TESTS:')
    self.retrying_failed = False
    self._compiled_tests_file_path = re.compile(
        '.*Wrote compiled tests to file: (\S+)')
    # Maps internal status strings to the labels used in gtest's JSON.
    self.TEST_STATUS_MAP = {
        'OK': TEST_SUCCESS_LABEL,
        'failed': TEST_FAILURE_LABEL,
        'skipped': TEST_SKIPPED_LABEL,
        'timeout': TEST_TIMEOUT_LABEL,
        'warning': TEST_WARNING_LABEL
    }
  def GetCurrentTest(self):
    """Returns the name of the test currently being parsed ('' if none)."""
    return self._current_test

  def GetResultCollection(self):
    """Returns the ResultCollection accumulated by this parser."""
    return self._result_collection
def _StatusOfTest(self, test):
"""Returns the status code for the given test, or 'not known'."""
test_status = self._test_status.get(test, ('not known', []))
return test_status[0]
def _TestsByStatus(self, status, include_fails, include_flaky):
"""Returns list of tests with the given status.
Args:
include_fails: If False, tests containing 'FAILS_' anywhere in their
names will be excluded from the list.
include_flaky: If False, tests containing 'FLAKY_' anywhere in their
names will be excluded from the list.
"""
test_list = [x[0] for x in self._test_status.items()
if self._StatusOfTest(x[0]) == status]
if not include_fails:
test_list = [x for x in test_list if x.find('FAILS_') == -1]
if not include_flaky:
test_list = [x for x in test_list if x.find('FLAKY_') == -1]
return test_list
def _RecordError(self, line, reason):
"""Record a log line that produced a parsing error.
Args:
line: text of the line at which the error occurred
reason: a string describing the error
"""
self._internal_error_lines.append('%s: %s [%s]' %
(self._line_number, line.strip(), reason))
  def RunningTests(self):
    """Returns list of tests that appear to be currently running."""
    # 'started' with no terminal status recorded yet means still running.
    return self._TestsByStatus('started', True, True)

  def ParsingErrors(self):
    """Returns a list of lines that have caused parsing errors."""
    return self._internal_error_lines

  def ClearParsingErrors(self):
    """Clears the currently stored parsing errors."""
    # A sentinel entry is kept so callers can tell errors were cleared.
    self._internal_error_lines = ['Cleared.']

  def PassedTests(self, include_fails=False, include_flaky=False):
    """Returns list of tests that passed."""
    return self._TestsByStatus('OK', include_fails, include_flaky)
  def FailedTests(self, include_fails=False, include_flaky=False):
    """Returns list of tests that failed, timed out, or didn't finish
    (crashed).

    This list will be incorrect until the complete log has been processed,
    because it will show currently running tests as having failed.

    Args:
      include_fails: If true, all failing tests with FAILS_ in their names will
        be included. Otherwise, they will only be included if they crashed or
        timed out.
      include_flaky: If true, all failing tests with FLAKY_ in their names will
        be included. Otherwise, they will only be included if they crashed or
        timed out.
    """
    # Timeouts are always included; still-running tests count as failures.
    return (self._TestsByStatus('failed', include_fails, include_flaky) +
            self._TestsByStatus('timeout', True, True) +
            self._TestsByStatus('warning', include_fails, include_flaky) +
            self.RunningTests())
  def SkippedTests(self, include_fails=False, include_flaky=False):
    """Returns list of tests that were skipped"""
    return self._TestsByStatus('skipped', include_fails, include_flaky)

  def TriesForTest(self, test):
    """Returns a list containing the state for all tries of the given test.
    This parser doesn't support retries so a single result is returned."""
    # Unrecognized internal statuses map to TEST_UNKNOWN_LABEL.
    return [self.TEST_STATUS_MAP.get(self._StatusOfTest(test),
                                     TEST_UNKNOWN_LABEL)]

  def DisabledTests(self):
    """Returns the name of the disabled test (if there is only 1) or the number
    of disabled tests.
    """
    # May also be the string 'some' when the count line could not be parsed.
    return self._disabled_tests

  def DisabledTestsFromCompiledTestsFile(self):
    """Returns the list of disabled tests in format '{TestCaseName}/{TestName}'.

    Find all test names starting with DISABLED_ from the compiled test json
    file if there is one. If there isn't or error in parsing, returns an
    empty list.
    """
    return self._disabled_tests_from_compiled_tests_file

  def FlakyTests(self):
    """Returns the name of the flaky test (if there is only 1) or the number
    of flaky tests.
    """
    # May also be the string 'some' when the count line could not be parsed.
    return self._flaky_tests
def FailureDescription(self, test):
"""Returns a list containing the failure description for the given test.
If the test didn't fail or timeout, returns [].
"""
test_status = self._test_status.get(test, ('', []))
return ['%s: ' % test] + test_status[1]
  def CompletedWithoutFailure(self):
    """Returns True if all tests completed and no tests failed unexpectedly."""
    return self.completed and not self.FailedTests()

  def Finalize(self):
    """Finalize for |self._result_collection|.

    Called at the end to add unfinished tests and crash status for
    self._result_collection.
    """
    # Any test still marked as running at end-of-log is assumed to have
    # crashed the app before completing.
    for test in self.RunningTests():
      self._result_collection.add_test_result(
          TestResult(test, TestStatus.CRASH, test_log='Did not complete.'))
      self._result_collection.crashed = True

    # If the log never reached the '[ PASSED ]' summary, the run crashed.
    if not self.completed:
      self._result_collection.crashed = True
  def ProcessLine(self, line):
    """This is called once with each line of the test log.

    Handles the case where subprocess output got interleaved with a gtest
    directive on the same physical line by splitting it in two.
    """
    # Track line number for error messages.
    self._line_number += 1

    # Some tests (net_unittests in particular) run subprocesses which can write
    # stuff to shared stdout buffer. Sometimes such output appears between new
    # line and gtest directives ('[ RUN ]', etc) which breaks the parser.
    # Code below tries to detect such cases and recognize a mixed line as two
    # separate lines.

    # List of regexps that parses expects to find at the start of a line but
    # which can be somewhere in the middle.
    gtest_regexps = [
      self._test_start,
      self._test_ok,
      self._test_fail,
      self._test_passed,
      self._test_skipped,
    ]

    for regexp in gtest_regexps:
      match = regexp.search(line)
      if match:
        break

    if not match or match.start() == 0:
      self._ProcessLine(line)
    else:
      # A directive was found mid-line: process the prefix and the
      # directive-onward suffix as two separate lines.
      self._ProcessLine(line[:match.start()])
      self._ProcessLine(line[match.start():])
def _ProcessLine(self, line):
  """Parses the line and changes the state of parsed tests accordingly.

  Will recognize newly started tests, OK or FAILED statuses, timeouts, etc.

  Each recognizer below consumes the line and returns early on a match; a
  line that matches nothing is treated as free-form output belonging to
  the currently running test (if any).
  """
  # Note: When sharding, the number of disabled and flaky tests will be read
  # multiple times, so this will only show the most recent values (but they
  # should all be the same anyway).
  # Is it a line listing the master name?
  if not self.master_name:
    results = self._master_name_re.match(line)
    if results:
      self.master_name = results.group(1)
  results = self._run_test_cases_line.match(line)
  if results:
    # A run_test_cases.py output.
    if self._current_test:
      if self._test_status[self._current_test][0] == 'started':
        # The in-flight test never reported a result: treat it as a timeout.
        self._test_status[self._current_test] = (
            'timeout', self._failure_description)
        self._result_collection.add_test_result(
            TestResult(
                self._current_test,
                TestStatus.ABORT,
                test_log='\n'.join(self._failure_description)))
      self._current_test = ''
      self._failure_description = []
    return
  # Is it a line declaring all tests passed?
  results = self._test_passed.match(line)
  if results:
    self.completed = True
    self._current_test = ''
    return
  # Is it a line reporting disabled tests?
  results = self._disabled.match(line)
  if results:
    try:
      disabled = int(results.group(1))
    except ValueError:
      disabled = 0
    if disabled > 0 and isinstance(self._disabled_tests, int):
      self._disabled_tests = disabled
    else:
      # If we can't parse the line, at least give a heads-up. This is a
      # safety net for a case that shouldn't happen but isn't a fatal error.
      self._disabled_tests = 'some'
    return
  # Is it a line reporting flaky tests?
  results = self._flaky.match(line)
  if results:
    try:
      flaky = int(results.group(1))
    except ValueError:
      flaky = 0
    if flaky > 0 and isinstance(self._flaky_tests, int):
      self._flaky_tests = flaky
    else:
      # If we can't parse the line, at least give a heads-up. This is a
      # safety net for a case that shouldn't happen but isn't a fatal error.
      self._flaky_tests = 'some'
    return
  # Is it the start of a test?
  results = self._test_start.match(line)
  if results:
    if self._current_test:
      if self._test_status[self._current_test][0] == 'started':
        # A new test started before the previous one finished: record the
        # previous one as timed out.
        self._test_status[self._current_test] = (
            'timeout', self._failure_description)
        self._result_collection.add_test_result(
            TestResult(
                self._current_test,
                TestStatus.ABORT,
                test_log='\n'.join(self._failure_description)))
    test_name = results.group(1)
    self._test_status[test_name] = ('started', ['Did not complete.'])
    self._current_test = test_name
    if self.retrying_failed:
      # On retry, keep the earlier failure log and append the new output
      # under a RETRY OUTPUT header.
      self._failure_description = self._test_status[test_name][1]
      self._failure_description.extend(['', 'RETRY OUTPUT:', ''])
    else:
      self._failure_description = []
    return
  # Is it a test success line?
  results = self._test_ok.match(line)
  if results:
    test_name = results.group(1)
    status = self._StatusOfTest(test_name)
    if status != 'started':
      self._RecordError(line, 'success while in status %s' % status)
    if self.retrying_failed:
      # A pass on retry is recorded as a warning (it failed earlier).
      self._test_status[test_name] = ('warning', self._failure_description)
      # This is a passed result. Previous failures were reported in separate
      # TestResult objects.
      self._result_collection.add_test_result(
          TestResult(
              test_name,
              TestStatus.PASS,
              test_log='\n'.join(self._failure_description)))
    else:
      self._test_status[test_name] = ('OK', [])
      self._result_collection.add_test_result(
          TestResult(test_name, TestStatus.PASS))
    self._failure_description = []
    self._current_test = ''
    return
  # Is it a test skipped line?
  results = self._test_skipped.match(line)
  if results:
    test_name = results.group(1)
    status = self._StatusOfTest(test_name)
    # Skipped tests are listed again in the summary.
    if status not in ('started', 'skipped'):
      self._RecordError(line, 'skipped while in status %s' % status)
    self._test_status[test_name] = ('skipped', [])
    self._result_collection.add_test_result(
        TestResult(
            test_name,
            TestStatus.SKIP,
            expected_status=TestStatus.SKIP,
            test_log='Test skipped when running suite.'))
    self._failure_description = []
    self._current_test = ''
    return
  # Is it a test failure line?
  results = self._test_fail.match(line)
  if results:
    test_name = results.group(1)
    status = self._StatusOfTest(test_name)
    if status not in ('started', 'failed', 'timeout'):
      self._RecordError(line, 'failure while in status %s' % status)
    if self._current_test != test_name:
      if self._current_test:
        self._RecordError(
            line,
            '%s failure while in test %s' % (test_name, self._current_test))
      return
    # Don't overwrite the failure description when a failing test is listed a
    # second time in the summary, or if it was already recorded as timing
    # out.
    if status not in ('failed', 'timeout'):
      self._test_status[test_name] = ('failed', self._failure_description)
    # Add to |test_results| regardless whether the test ran before.
    self._result_collection.add_test_result(
        TestResult(
            test_name,
            TestStatus.FAIL,
            test_log='\n'.join(self._failure_description)))
    self._failure_description = []
    self._current_test = ''
    return
  # Is it a test timeout line?
  results = self._test_timeout.search(line)
  if results:
    test_name = results.group(1)
    status = self._StatusOfTest(test_name)
    if status not in ('started', 'failed'):
      self._RecordError(line, 'timeout while in status %s' % status)
    self._test_status[test_name] = (
        'timeout', self._failure_description + ['Killed (timed out).'])
    self._result_collection.add_test_result(
        TestResult(
            test_name,
            TestStatus.ABORT,
            test_log='\n'.join(self._failure_description)))
    self._failure_description = []
    self._current_test = ''
    return
  # Is it the start of the retry tests?
  results = self._retry_message.match(line)
  if results:
    self.retrying_failed = True
    return
  # Is it the line containing path to the compiled tests json file?
  results = self._compiled_tests_file_path.match(line)
  if results:
    path = results.group(1)
    LOGGER.info('Compiled tests json file path: %s' % path)
    try:
      # TODO(crbug.com/1091345): Read the file when running on device.
      with open(path) as f:
        disabled_tests_from_json = []
        compiled_tests = json.load(f)
        for single_test in compiled_tests:
          test_case_name = single_test.get('test_case_name')
          test_name = single_test.get('test_name')
          if test_case_name and test_name and test_name.startswith(
              'DISABLED_'):
            full_test_name = str('%s/%s' % (test_case_name, test_name))
            disabled_tests_from_json.append(full_test_name)
            # NOTE(review): the list stores full_test_name but the result
            # below records the bare test_name -- confirm this asymmetry
            # is intended.
            self._result_collection.add_test_result(
                TestResult(
                    test_name,
                    TestStatus.SKIP,
                    expected_status=TestStatus.SKIP,
                    test_log='Test disabled.'))
        self._disabled_tests_from_compiled_tests_file = (
            disabled_tests_from_json)
    except Exception as e:
      LOGGER.warning(
          'Error when finding disabled tests in compiled tests json file: %s'
          % e)
    return
  # Random line: if we're in a test, collect it for the failure description.
  # Tests may run simultaneously, so this might be off, but it's worth a try.
  # This also won't work if a test times out before it begins running.
  if self._current_test:
    self._failure_description.append(line)
  # Parse the "Failing tests:" list at the end of the output, and add any
  # additional failed tests to the list. For example, this includes tests
  # that crash after the OK line.
  if self._parsing_failures:
    results = self._test_name.match(line)
    if results:
      test_name = results.group(1)
      status = self._StatusOfTest(test_name)
      if status in ('not known', 'OK'):
        unknown_error_log = 'Unknown error, see stdio log.'
        self._test_status[test_name] = ('failed', [unknown_error_log])
        self._result_collection.add_test_result(
            TestResult(
                test_name, TestStatus.FAIL, test_log=unknown_error_log))
    else:
      self._parsing_failures = False
  elif line.startswith('Failing tests:'):
    self._parsing_failures = True
| {
"content_hash": "76b53191b8f187c03c42ef21f0061073",
"timestamp": "",
"source": "github",
"line_count": 615,
"max_line_length": 80,
"avg_line_length": 36.31056910569106,
"alnum_prop": 0.63427522278447,
"repo_name": "ric2b/Vivaldi-browser",
"id": "c96ac7ee8d9cf171530d25e1cb3219df6ea0ff1d",
"size": "22498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chromium/ios/build/bots/scripts/gtest_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import re
import sys
import types
######## Global Variables
# Stacks (module level; shared by every operator function below).
dictionary = []  # Dictionary stack: name-definition dicts; the last entry is the innermost scope.
execution = []  # Execution stack: tokens still to be interpreted.
operand = []  # Operand stack: intermediate values during evaluation.
# Sept 21, 2011 -- fixed the handling of }{ -- each brace should be a separate token
# Token pattern, tried left to right: optional-'/' identifier | signed
# integer | a single '{' or '}' | a '%' comment to end of line | any other
# single non-whitespace character.
pattern = '/?[a-zA-Z][a-zA-Z0-9_]*|[-]?[0-9]+|[}{]|%.*|[^\t\n ]'
######## Debug Functions
# Debug: Takes a string, outputs error message, current stacks, and then exits program.
def Debug(*s):
    """Print an error message plus the three interpreter stacks, then exit(1)."""
    print(s)
    for label, stack in (("OPERAND", operand),
                         ("DICTIONARY", dictionary),
                         ("EXECUTION", execution)):
        print("REMAINING %s STACK:" % label)
        for item in stack:
            print(item)
    sys.exit(1)
######## Common Functions
#### Is Variable
# isNumber: checks if variable (x) is a number and returns result
def isNumber(x):
    """Return True when x is exactly an int or a float (bools excluded)."""
    return type(x) in (int, float)
# isBool: checks if variable (x) is a bool and returns result
def isBool(x):
    """Return True when x is a bool."""
    return isinstance(x, bool)
# isString: checks if variable (x) is a string and returns result
def isString(x):
    """Return True when x is a str."""
    return isinstance(x, str)
# isDict: checks if variable (x) is a dict and returns result
def isDict(x):
    """Return True when x is a dict.

    Bug fix: the original test `if type(x) is dict == False:` is a chained
    comparison equivalent to `(type(x) is dict) and (dict == False)`, which
    is always False -- so the function returned True for every input and
    _BEGIN's type check never fired.
    """
    return isinstance(x, dict)
#### Convert Variable
# Converts a variable (x) to a number or bool if possible, otherwise returns variable as is
def convert(x):
    """Coerce token x to an int, float, or bool when possible.

    Tries int first, then float; the literal tokens 'true'/'false' become
    booleans. Anything else is returned unchanged.
    """
    for caster in (int, float):
        try:
            return caster(x)
        except Exception:
            pass
    if x == 'true':
        return True
    if x == 'false':
        return False
    return x
#### Pop Operands
# Pops a number from the list (L) and makes sure its a valid type. If not valid, gives error message (v,s)
def PopNumber(L, s, v):
    """Pop a value from stack L and verify it is a number.

    Args:
      L: the stack (list) to pop from.
      s: name of the calling operation, used in the error message.
      v: name of the operand, used in the error message.

    Bug fix: the error message was passed to Debug as a format string plus
    separate args, but Debug(*s) just prints its argument tuple, so the
    %s placeholders were never filled in. Interpolate before calling.
    """
    x = ListPop(L)
    if isNumber(x) == False:
        Debug("%s in operation %s is not a number or doesn't exist!" % (v, s))
    return x
# Pops a boolean from the list (L) and makes sure its a valid type. If not valid, gives error message (v,s)
def PopBoolean(L, s, v):
    """Pop a value from stack L and verify it is a boolean.

    Args:
      L: the stack (list) to pop from.
      s: name of the calling operation, used in the error message.
      v: name of the operand, used in the error message.

    Bug fix: as in PopNumber, the %s placeholders were never interpolated
    because Debug just prints its raw argument tuple.
    """
    x = ListPop(L)
    if isBool(x) == False:
        Debug("%s in operation %s is not a boolean or doesn't exist!" % (v, s))
    return x
# Pops two numbers from the list (L) for function s
def PopTwoNumbers(L, s):
    """Pop two numeric operands from L for operation s (top of stack first)."""
    first = PopNumber(L, s, "x1")
    second = PopNumber(L, s, "x2")
    return first, second
# Pops two booleans from the list (L) for function s
def PopTwoBooleans(L, s):
    """Pop two boolean operands from L for operation s (top of stack first)."""
    first = PopBoolean(L, s, "x1")
    second = PopBoolean(L, s, "x2")
    return first, second
######## SPS Functions
#### Number Operators
#adds x1 and x2 from the operand stack and puts the result on the operand stack
def _ADD():
    """Pop two numbers off the operand stack and push their sum."""
    a, b = PopTwoNumbers(operand, "_ADD")
    return ListPush(operand, a + b)
#subtracts x1 and x2 from the operand stack and puts the result on the operand stack
def _SUB():
    """Pop two numbers off the operand stack and push top-minus-second."""
    a, b = PopTwoNumbers(operand, "_SUB")
    return ListPush(operand, a - b)
#multiplies x1 and x2 from the operand stack and puts the result on the operand stack
def _MUL():
    """Pop two numbers off the operand stack and push their product."""
    a, b = PopTwoNumbers(operand, "_MUL")
    return ListPush(operand, a * b)
#divides x1 and x2 from the operand stack and puts the result on the operand stack
def _DIV():
    """Pop two numbers off the operand stack and push x1 / x2.

    Bug fix: the result was pushed with ListPush(L, ...), but no name L is
    in scope here, so the 'div' token raised NameError. Every sibling
    operator pushes to the global operand stack; do the same.
    """
    x1, x2 = PopTwoNumbers(operand, "_DIV")
    return ListPush(operand, x1 / x2)
#### Logic Operators
# determines if x1 == x2 and puts the result on the operand stack
def _EQ():
    """Pop two numbers and push True when they are equal, else False."""
    x1, x2 = PopTwoNumbers(operand, "_EQ")
    return ListPush(operand, x1 == x2)
# determines if x1 < x2 and puts the result on the operand stack
def _LT():
    """Pop two numbers and push True when the first popped is smaller."""
    x1, x2 = PopTwoNumbers(operand, "_LT")
    return ListPush(operand, x1 < x2)
# determines if x1 > x2 and puts the result on the operand stack
def _GT():
    """Pop two numbers and push True when the first popped is larger."""
    x1, x2 = PopTwoNumbers(operand, "_GT")
    return ListPush(operand, x1 > x2)
#### Boolean Operators
# determines if x1 and x2 have the same truth value and puts the result on the operand stack
def _AND():
    """Pop two booleans and push their logical conjunction."""
    x1, x2 = PopTwoBooleans(operand, "_AND")
    return ListPush(operand, x1 and x2)
# determines if x1 or x2 have the same truth value and puts the result on the operand stack
def _OR():
    """Pop two booleans and push their logical disjunction."""
    x1, x2 = PopTwoBooleans(operand, "_OR")
    return ListPush(operand, x1 or x2)
# determines if not x1 and puts the result on the operand stack
def _NOT():
    """Pop a boolean off the operand stack and push its negation.

    Bug fix: the error label passed to PopBoolean said "_OR", so a type
    error during 'not' pointed at the wrong operator; it now says "_NOT".
    """
    x = PopBoolean(operand, "_NOT", "x")
    return ListPush(operand, not x)
#### Stack Operators
# Duplicates the top value on the operand stack
def _DUP():
    """Duplicate the top value on the operand stack.

    Bug fix: the body referenced an undefined name L (raising NameError on
    any 'dup'); it now operates on the global operand stack like the other
    stack operators.
    """
    t = ListPop(operand)
    ListPush(operand, t)
    ListPush(operand, t)
    return True
# Exchanges the top two values on the operand stack
def _EXCH():
    """Exchange the top two values on the operand stack.

    Bug fix: the body referenced an undefined name L (raising NameError on
    any 'exch'); it now pops from the global operand stack. Popping the top
    and re-inserting it one slot below the new top swaps the two values.
    """
    t = ListPop(operand)
    ListPushPosition(operand, t, len(operand) - 1)
    return True
# Pops the top value from the operand stack
def _POP():
    """Pop and return the top value of the operand stack.

    Bug fix: referenced the undefined name L; now pops the global operand
    stack, matching the other stack operators.
    """
    return ListPop(operand)
# Gets the value of x from a dictionary stack and gets it
# x = key to find result for
def _GET(x):
    """Resolve name x against the dictionary stack and execute/push its value.

    Scans every dictionary on the stack; because later assignments
    overwrite earlier ones, the match from the most recently pushed
    dictionary wins. A multi-token value is executed as a procedure; a
    single token is pushed if it converts to a number/bool, otherwise it
    is looked up recursively as another name.
    """
    temp = []
    found = False
    for i in dictionary:
        if x in i.keys():
            try:
                # Copy so executing the body does not consume the stored list.
                temp = i[x].copy()
            except:
                temp = i[x]  # value without .copy() (e.g. a plain token)
            found = True
    # NOTE(review): this prints on every lookup, including failed ones --
    # looks like leftover debug output; confirm before removing.
    print(temp)
    if found == True:
        if len(temp) > 1:
            InterpretorLoop(temp)
        else:
            # NOTE(review): assumes temp is a one-element list here; a scalar
            # string stored by _DEF would be indexed by character -- confirm.
            z = convert(temp[0])
            if isNumber(z) == True or isBool(z) == True:
                ListPush(operand, z)
            else:
                _GET(temp[0])
    return True
#### Dictionary Creation
# Creates an empty dictionary on the operand stack
def _DICTZ():
    """Push a fresh empty dictionary onto the operand stack."""
    ListPush(operand, {})
    return True
#### DICTIONARY MANIPULATION
# Gets the top value from the operand stack and, if its a dictionary, pushes it to the dictionary stack
def _BEGIN():
    """Move a dictionary from the operand stack onto the dictionary stack.

    Aborts via Debug when the popped value is not a dictionary.
    """
    top = ListPop(operand)
    if not isDict(top):
        Debug("No Dictionary on the Stack for _BEGIN!")
        return False
    ListPush(dictionary, top)
    return True
# Pops the top dictionary from the dictionary stack
def _END():
    """Pop and return the top dictionary from the dictionary stack."""
    return ListPop(dictionary)
#### Name Defination
# defines a name. creates a new definition if key doesn't exist in top dict, otherwise modifies key
def _DEF(x, L):
    """Define the name in token x (a '/name' token) in the top dictionary.

    The value is either the single next token, or -- when the next token is
    '{' -- every token up to the matching '}' stored as a procedure body.
    Consumes the trailing 'def' token from L and aborts via Debug when it
    is missing.
    """
    tL = []
    t2 = ''
    t1 = x[1:]  # strip the leading '/' from the name token
    if isString(t1) == False:
        Debug("T1 in operation _DEF is not a string!")
    temp = ListPopFirst(L)
    if temp != '{':
        t2 = temp  # simple one-token value
    else:
        # Collect tokens until the matching '}' (braces may nest).
        curly = 1
        while curly > 0:
            x = ListPopFirst(L)  # reuses parameter x; original no longer needed
            tL.append(x)
            if x == '{':
                curly = curly + 1
            elif x == '}':
                curly = curly - 1
        ListPop(tL)  # drop the closing '}' appended by the loop above
    temp = ListPopFirst(L)
    if temp != 'def':
        Debug("No def to end _DEF command!")
    if t2 != '':
        DictionaryPushItem(t1, t2)
    else:
        DictionaryPushItem(t1, tL)
    return True
#### Stack Printing
# Prints everything on a stack
def _STACK(L):
for i in L:
print(i)
return True
# Pops and prints the top value on a stack
def _EQUALS(L):
    """Pop the top value from stack L, print it, and return True."""
    print(ListPop(L))
    return True
######## Stack Control
# Push a dictionary item onto the list
def ListPushItem(L, t, v):
    """Append a single-entry dict {t: v} to the list at L[len(dictionary)].

    Bug fix: the entry was built as {t: value}, but no name 'value' exists
    in this scope (the parameter is v), so every call raised NameError.

    NOTE(review): L[len(dictionary)] indexes one past the last dictionary
    when L is the dictionary stack itself -- looks like an off-by-one, but
    the function appears unused, so only the NameError is fixed here.
    """
    x = {t: v}
    L[len(dictionary)].append(x)
    return True
# Push an item to the top of the list
def ListPush(L, t):
    """Append t to the top (end) of stack L; always returns True."""
    L.append(t)
    return True
# Pop an item from the top of the list and return it
def ListPop(L):
    """Pop and return the top (last) item of L; abort via Debug when empty."""
    if not L:
        Debug("No item on the Selected Stack to pop!")
    return L.pop()
# Pop an item from the bottom of the list and return it
def ListPopFirst(L):
    """Pop and return the bottom (first) item of L; abort via Debug when empty."""
    if not L:
        Debug("No item on the Selected Stack to pop!")
    return L.pop(0)
# Push an item to the location desired on the list
def ListPushPosition(L, t, p):
    """Insert t into L at index p and return t."""
    L.insert(p, t)
    return t
# Push an item to the top dictionary, modifying if key exists, otherwise creating new entry
def DictionaryPushItem(x1, x2):
    """Bind key x1 to value x2 in the top dictionary on the stack.

    NOTE(review): dict.get(x1, x2) returns x2 -- not None -- when the key
    is missing, so the `x == None` branch only triggers when an existing
    value is literally None (in which case the existing value is kept); in
    every other case both branches store x2. Confirm that corner case is
    intended before simplifying.
    """
    t = len(dictionary) - 1  # index of the top (innermost) dictionary
    x = dictionary[t].get(x1, x2)
    if x == None:
        dictionary[t][x1] = dictionary[t].get(x1, x2)
    else:
        dictionary[t][x1] = x2
    return True
######## File Reader
# Given a string, return the tokens it contains
def parse(s):
    """Split string s into SPS tokens using the module-level pattern."""
    return re.findall(pattern, s)
# Given an open file, return the tokens it contains
def parseFile(f):
    """Tokenize the entire contents of the open file f."""
    return parse(f.read())
######## Printing
# Prints the output of the program
def printOutput():
    """Dump the final operand and dictionary stacks to stdout."""
    print("")
    print("OPERAND STACK:")
    _STACK(operand)
    print("")
    print("---------------------------------------------")
    print("")
    print("DICTIONARY STACK:")
    for entry in dictionary:
        print(entry)
        print("=======================")
    return
######## Interpretor
# Main Access for the base interpretor
# L = list to make the execution stack from.
def InterpretorMain(L):
    """Seed the execution stack from token list L and start interpreting.

    An initial empty dictionary is created and begun so definitions always
    have a scope to land in.
    """
    for token in L:
        ListPush(execution, token)
    _DICTZ()
    _BEGIN()
    InterpretorLoop(execution)
# Interpretor loop: cycles all commands in the passed list
# ex: list to take execution input from
def InterpretorLoop(ex):
    """Interpret tokens from list ex (consumed from the front) until empty.

    Number/boolean tokens are pushed on the operand stack; recognized
    operator tokens dispatch to their implementations ('eq'/'lt'/'gt' also
    hand control to LogicLoop for a following conditional); a '/name'
    token starts a definition; any other token is resolved via _GET.

    Bug fix: the 'stack' and '=' branches called _STACK(op) and
    _EQUALS(op), but no name 'op' exists anywhere in the program, so those
    tokens raised NameError. Both operators act on the global operand
    stack. The unused local 'temp' was also removed.
    """
    t = ListPopFirst(ex)
    loopRunning = True
    while loopRunning == True:
        z = convert(t)
        if isNumber(z) or isBool(z):
            ListPush(operand, z)
        elif t == 'add':
            _ADD()
        elif t == 'sub':
            _SUB()
        elif t == 'mul':
            _MUL()
        elif t == 'div':
            _DIV()
        elif t == 'eq':
            _EQ()
            LogicLoop(ex)
        elif t == 'lt':
            _LT()
            LogicLoop(ex)
        elif t == 'gt':
            _GT()
            LogicLoop(ex)
        elif t == 'and':
            _AND()
        elif t == 'or':
            _OR()
        elif t == 'not':
            _NOT()
        elif t == 'dup':
            _DUP()
        elif t == 'exch':
            _EXCH()
        elif t == 'pop':
            _POP()
        elif t == 'dictz':
            _DICTZ()
        elif t == 'begin':
            _BEGIN()
        elif t == 'end':
            _END()
        elif t[0] == '/':
            _DEF(t, ex)
        elif t == 'stack':
            _STACK(operand)  # fixed: was _STACK(op) -- op is undefined
        elif t == '=':
            _EQUALS(operand)  # fixed: was _EQUALS(op) -- op is undefined
        else:
            _GET(t)
        if len(ex) == 0:
            loopRunning = False
        else:
            t = ListPopFirst(ex)
    return
# cycles through the logic statement
# ex: list to take execution input from
def LogicLoop(ex):
    """Execute tokens for a conditional ('if'/'ifelse') construct.

    Runs operator tokens like the main loop until a '{' is reached, then
    collects the brace-delimited branch bodies and hands the branch
    selected by the boolean on the operand stack to InterpretorLoop.

    NOTE(review): loopRunning is never cleared; the loop exits only via
    return or via Debug() aborting -- confirm intended.
    """
    t = ListPopFirst(ex)
    loopRunning = True
    while loopRunning == True:
        z = convert(t)
        if t == 'and':
            _AND()
        elif t == 'or':
            _OR()
        elif t == 'not':
            _NOT()
        elif t == 'eq':
            _EQ()
        elif t == 'lt':
            _LT()
        elif t == 'gt':
            _GT()
        elif isNumber(z) or isBool(z):
            ListPush(operand, z)
        elif t == 'add':
            _ADD()
        elif t == 'sub':
            _SUB()
        elif t == 'mul':
            _MUL()
        elif t == 'div':
            _DIV()
        elif t == '{':
            logicTrue = []
            logicFalse = []
            c = ListPop(operand)  # check if condition is true
            curly = 1
            t = ListPopFirst(ex)
            # Get True Logic Branch
            while curly > 0:
                ListPush(logicTrue, t)
                t = ListPopFirst(ex)
                if t == '{':
                    curly = curly + 1
                elif t == '}':
                    curly = curly - 1
            # NOTE(review): the two prints below look like leftover debug
            # output; confirm before removing.
            print(ex)
            print(logicTrue)
            t = ListPopFirst(ex)
            if t == 'if':  # if this is only an if statement, we're done already!
                if c == True:
                    InterpretorLoop(logicTrue)
                return
            elif t == '{':  # Otherwise, get the false branch
                curly = 1
                # NOTE(review): unlike the true branch, the first token
                # pushed here is the opening '{' itself -- confirm intended.
                while curly > 0:
                    ListPush(logicFalse, t)
                    t = ListPopFirst(ex)
                    if t == '{':
                        curly = curly + 1
                    elif t == '}':
                        curly = curly - 1
                t = ListPopFirst(ex)
                if t == 'ifelse' and c == False:
                    InterpretorLoop(logicFalse)
                    return
                elif t == 'ifelse' and c == True:
                    InterpretorLoop(logicTrue)
                    return
                else:
                    Debug("Invalid ifelse statement!")
            else:
                Debug("Invalid if statement!")
        t = ListPopFirst(ex)
    return
######## Main
# Controls the Program. Can accept filename as an argument, if not supplied will ask for a file.
if __name__ == "__main__":
    # Accept the SPS file name as argv[1], or prompt for it interactively.
    if len(sys.argv) > 1:
        fn = sys.argv[1]
    else:
        fn = input("Enter SPS File Name: ")
    # Bug fix: the file handle was opened inline and never closed; a
    # context manager closes it deterministically after tokenizing.
    with open(fn, "r") as source:
        L = parseFile(source)
    print(L)  # NOTE(review): echoes the raw token list -- looks like debug output
    InterpretorMain(L)
    printOutput()
| {
"content_hash": "4a7b387ec020ac48f6994befc29e8d31",
"timestamp": "",
"source": "github",
"line_count": 500,
"max_line_length": 107,
"avg_line_length": 26.644,
"alnum_prop": 0.5397087524395736,
"repo_name": "vonderborch/CS355",
"id": "fd47d6e24adbb595c153cdfc6d0fb2cc2d1fd74e",
"size": "13440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sps/sps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "119"
},
{
"name": "Java",
"bytes": "15067"
},
{
"name": "Python",
"bytes": "97651"
},
{
"name": "Racket",
"bytes": "5202"
},
{
"name": "Standard ML",
"bytes": "6734"
}
],
"symlink_target": ""
} |
"""Credentials logic for JSON CloudApi implementation."""
# This module duplicates some logic in third_party gcs_oauth2_boto_plugin
# because apitools credentials lib has its own mechanisms for file-locking
# and credential storage. As such, it doesn't require most of the
# gcs_oauth2_boto_plugin logic.
import json
import logging
import os
import traceback
from apitools.base.py import credentials_lib
from apitools.base.py import exceptions as apitools_exceptions
from boto import config
from gslib.cred_types import CredTypes
from gslib.exception import CommandException
from gslib.util import GetBotoConfigFileList
from gslib.util import GetGceCredentialCacheFilename
from gslib.util import GetGsutilClientIdAndSecret
import oauth2client
from oauth2client.client import HAS_CRYPTO
from oauth2client.contrib import devshell
from oauth2client.service_account import ServiceAccountCredentials
from six import BytesIO
# Fallback OAuth2 endpoints, used when the boto config does not override
# provider_token_uri (see _GetProviderTokenUri below).
DEFAULT_GOOGLE_OAUTH2_PROVIDER_AUTHORIZATION_URI = (
    'https://accounts.google.com/o/oauth2/auth')
DEFAULT_GOOGLE_OAUTH2_PROVIDER_TOKEN_URI = (
    'https://accounts.google.com/o/oauth2/token')
# OAuth2 scopes requested for every credential type created in this module.
DEFAULT_SCOPES = [
    u'https://www.googleapis.com/auth/cloud-platform',
    u'https://www.googleapis.com/auth/cloud-platform.read-only',
    u'https://www.googleapis.com/auth/devstorage.full_control',
    u'https://www.googleapis.com/auth/devstorage.read_only',
    u'https://www.googleapis.com/auth/devstorage.read_write'
]
# Default password tried for PKCS12 (.p12) service account key files when
# gs_service_key_file_password is not configured.
GOOGLE_OAUTH2_DEFAULT_FILE_PASSWORD = 'notasecret'
def CheckAndGetCredentials(logger):
  """Returns credentials from the configuration file, if any are present.

  Args:
    logger: logging.Logger instance for outputting messages.

  Returns:
    OAuth2Credentials object if any valid ones are found, otherwise None.

  Raises:
    CommandException: if more than one credential type is configured.
  """
  configured_cred_types = []
  # Robustness fix: initialize before entering the try block. If one of the
  # _Has* checks below raised, the bare-except handler used to hit a
  # NameError reading failed_cred_type instead of reporting the real error.
  failed_cred_type = None
  try:
    if _HasOauth2UserAccountCreds():
      configured_cred_types.append(CredTypes.OAUTH2_USER_ACCOUNT)
    if _HasOauth2ServiceAccountCreds():
      configured_cred_types.append(CredTypes.OAUTH2_SERVICE_ACCOUNT)
    if len(configured_cred_types) > 1:
      # We only allow one set of configured credentials. Otherwise, we're
      # choosing one arbitrarily, which can be very confusing to the user
      # (e.g., if only one is authorized to perform some action) and can
      # also mask errors.
      # Because boto merges config files, GCE credentials show up by default
      # for GCE VMs. We don't want to fail when a user creates a boto file
      # with their own credentials, so in this case we'll use the OAuth2
      # user credentials.
      # failed_cred_type stays None here so the handler below knows no
      # authentication was actually attempted and skips the warning.
      raise CommandException(
          ('You have multiple types of configured credentials (%s), which is '
           'not supported. One common way this happens is if you run gsutil '
           'config to create credentials and later run gcloud auth, and '
           'create a second set of credentials. Your boto config path is: '
           '%s. For more help, see "gsutil help creds".')
          % (configured_cred_types, GetBotoConfigFileList()))
    # Try each credential type in priority order, tracking which one is
    # being attempted so the handler can name it if it proves invalid.
    failed_cred_type = CredTypes.OAUTH2_USER_ACCOUNT
    user_creds = _GetOauth2UserAccountCredentials()
    failed_cred_type = CredTypes.OAUTH2_SERVICE_ACCOUNT
    service_account_creds = _GetOauth2ServiceAccountCredentials()
    failed_cred_type = CredTypes.GCE
    gce_creds = _GetGceCreds()
    failed_cred_type = CredTypes.DEVSHELL
    devshell_creds = _GetDevshellCreds()
    return user_creds or service_account_creds or gce_creds or devshell_creds
  except:  # pylint: disable=bare-except
    # If we didn't actually try to authenticate because there were multiple
    # types of configured credentials, don't emit this warning.
    if failed_cred_type:
      if logger.isEnabledFor(logging.DEBUG):
        logger.debug(traceback.format_exc())
      if os.environ.get('CLOUDSDK_WRAPPER') == '1':
        logger.warn(
            'Your "%s" credentials are invalid. Please run\n'
            ' $ gcloud auth login', failed_cred_type)
      else:
        logger.warn(
            'Your "%s" credentials are invalid. For more help, see '
            '"gsutil help creds", or re-run the gsutil config command (see '
            '"gsutil help config").', failed_cred_type)
    # If there's any set of configured credentials, we'll fail if they're
    # invalid, rather than silently falling back to anonymous config (as
    # boto does). That approach leads to much confusion if users don't
    # realize their credentials are invalid.
    raise
def GetCredentialStoreKeyDict(credentials, api_version):
  """Disambiguates a credential for caching in a credential store.

  Different credential types have different fields that identify them.
  This function assembles relevant information in a dict and returns it.

  Args:
    credentials: An OAuth2Credentials object.
    api_version: JSON API version being used.

  Returns:
    Dict of relevant identifiers for credentials.
  """
  # TODO: If scopes ever become available in the credentials themselves,
  # include them in the key dict.
  key_dict = {'api_version': api_version}
  # The most specific credential classes are tested first; only a plain
  # OAuth2 user credential falls through to the generic branch.
  # pylint: disable=protected-access
  if isinstance(credentials, devshell.DevshellCredentials):
    key_dict['user_email'] = credentials.user_email
  elif isinstance(credentials, ServiceAccountCredentials):
    key_dict['_service_account_email'] = credentials._service_account_email
  elif isinstance(credentials, oauth2client.client.OAuth2Credentials):
    has_real_client_id = (
        credentials.client_id and credentials.client_id != 'null')
    if has_real_client_id:
      key_dict['client_id'] = credentials.client_id
    key_dict['refresh_token'] = credentials.refresh_token
  # pylint: enable=protected-access
  return key_dict
def _GetProviderTokenUri():
  """Returns the configured OAuth2 token URI, or the Google default."""
  return config.get('OAuth2', 'provider_token_uri',
                    DEFAULT_GOOGLE_OAUTH2_PROVIDER_TOKEN_URI)
def _HasOauth2ServiceAccountCreds():
  """Returns True if a service account key file is set in the boto config."""
  return config.has_option('Credentials', 'gs_service_key_file')
def _HasOauth2UserAccountCreds():
  """Returns True if an OAuth2 refresh token is set in the boto config."""
  return config.has_option('Credentials', 'gs_oauth2_refresh_token')
def _HasGceCreds():
  """Returns True if a GCE service account is set in the boto config."""
  return config.has_option('GoogleCompute', 'service_account')
def _GetOauth2ServiceAccountCredentials():
  """Retrieves OAuth2 service account credentials for a private key file.

  Returns:
    ServiceAccountCredentials when a key file is configured; None when no
    key file is configured. Note that a .p12 key without HAS_CRYPTO also
    falls through and implicitly returns None.
  """
  if not _HasOauth2ServiceAccountCreds():
    return
  provider_token_uri = _GetProviderTokenUri()
  service_client_id = config.get('Credentials', 'gs_service_client_id', '')
  private_key_filename = config.get('Credentials', 'gs_service_key_file', '')
  private_key = None
  with open(private_key_filename, 'rb') as private_key_file:
    private_key = private_key_file.read()
  # Detect the key format: JSON keys parse cleanly; anything that fails to
  # parse is assumed to be a PKCS12 (.p12) key.
  json_key_dict = None
  try:
    json_key_dict = json.loads(private_key)
  except ValueError:
    pass
  if json_key_dict:
    # Key file is in JSON format.
    for json_entry in ('client_id', 'client_email', 'private_key_id',
                       'private_key'):
      if json_entry not in json_key_dict:
        raise Exception('The JSON private key file at %s '
                        'did not contain the required entry: %s' %
                        (private_key_filename, json_entry))
    return ServiceAccountCredentials.from_json_keyfile_dict(
        json_key_dict, scopes=DEFAULT_SCOPES, token_uri=provider_token_uri)
  else:
    # Key file is in P12 format.
    if HAS_CRYPTO:
      if not service_client_id:
        raise Exception('gs_service_client_id must be set if '
                        'gs_service_key_file is set to a .p12 key file')
      key_file_pass = config.get(
          'Credentials', 'gs_service_key_file_password',
          GOOGLE_OAUTH2_DEFAULT_FILE_PASSWORD)
      # We use _from_p12_keyfile_contents to avoid reading the key file
      # again unnecessarily.
      return ServiceAccountCredentials.from_p12_keyfile_buffer(
          service_client_id, BytesIO(private_key),
          private_key_password=key_file_pass, scopes=DEFAULT_SCOPES,
          token_uri=provider_token_uri)
def _GetOauth2UserAccountCredentials():
  """Retrieves OAuth2 user account credentials from a refresh token.

  The client id/secret come from the boto config when set, then the
  OAUTH2_CLIENT_ID/OAUTH2_CLIENT_SECRET environment variables, then
  gsutil's built-in values.
  """
  if not _HasOauth2UserAccountCreds():
    return
  provider_token_uri = _GetProviderTokenUri()
  gsutil_client_id, gsutil_client_secret = GetGsutilClientIdAndSecret()
  client_id = config.get(
      'OAuth2', 'client_id',
      os.environ.get('OAUTH2_CLIENT_ID', gsutil_client_id))
  client_secret = config.get(
      'OAuth2', 'client_secret',
      os.environ.get('OAUTH2_CLIENT_SECRET', gsutil_client_secret))
  refresh_token = config.get('Credentials', 'gs_oauth2_refresh_token')
  return oauth2client.client.OAuth2Credentials(
      None, client_id, client_secret, refresh_token, None,
      provider_token_uri, None)
def _GetGceCreds():
  """Returns GCE assertion credentials, or None when unavailable.

  Note: this file targets Python 2 (the `except X, e` syntax below).
  """
  if not _HasGceCreds():
    return
  try:
    return credentials_lib.GceAssertionCredentials(
        service_account_name=config.get(
            'GoogleCompute', 'service_account', 'default'),
        cache_filename=GetGceCredentialCacheFilename())
  except apitools_exceptions.ResourceUnavailableError, e:
    # A VM whose configured service account does not exist is treated as
    # "no GCE credentials" rather than a hard failure.
    if 'service account' in str(e) and 'does not exist' in str(e):
      return None
    raise
def _GetDevshellCreds():
  """Returns Cloud Shell (devshell) credentials, or None when not available.

  Cleanup: the original ended with a bare `except: raise`, which is a no-op
  passthrough that only obscures intent; any exception other than
  NoDevshellServer propagates unchanged either way.
  """
  try:
    return devshell.DevshellCredentials()
  except devshell.NoDevshellServer:
    return None
| {
"content_hash": "66a031f9fd11239b0dc576f81c39e0cb",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 79,
"avg_line_length": 38.54356846473029,
"alnum_prop": 0.7042738723221014,
"repo_name": "Sorsly/subtle",
"id": "6a7d51b57739246f985993eb9d306f59e95edf52",
"size": "9909",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/platform/gsutil/gslib/gcs_json_credentials.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1581"
},
{
"name": "CSS",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "4637"
},
{
"name": "JavaScript",
"bytes": "3037"
},
{
"name": "PHP",
"bytes": "4543"
},
{
"name": "Pascal",
"bytes": "31"
},
{
"name": "Python",
"bytes": "13243860"
},
{
"name": "Roff",
"bytes": "1050600"
},
{
"name": "Shell",
"bytes": "16136"
},
{
"name": "Smarty",
"bytes": "2484"
},
{
"name": "SourcePawn",
"bytes": "308"
}
],
"symlink_target": ""
} |
import os
from copy import deepcopy
import pytest
from dvc.parsing import DEFAULT_PARAMS_FILE, DataResolver, ResolveError
from dvc.parsing.context import Context
from dvc.utils.serialize import dumps_yaml
from . import CONTEXT_DATA, RESOLVED_DVC_YAML_DATA, TEMPLATED_DVC_YAML_DATA
DATA = {"models": {"bar": "bar", "foo": "foo"}}
def test_resolver(tmp_dir, dvc):
    """Templated dvc.yaml data resolves to the expected concrete data."""
    resolver = DataResolver(dvc, tmp_dir.fs_path, TEMPLATED_DVC_YAML_DATA)
    resolver.context.merge_update(Context(CONTEXT_DATA))
    resolved = resolver.resolve()
    assert resolved == RESOLVED_DVC_YAML_DATA
def test_default_params_file_not_exist(tmp_dir, dvc):
    """A missing default params file is fine when inline vars are given."""
    local_vars = DATA["models"]
    resolver = DataResolver(dvc, tmp_dir.fs_path, {"vars": [local_vars]})
    assert resolver.context == local_vars
def test_no_params_yaml_and_vars(tmp_dir, dvc):
    """With no params file and no vars, the resolver context stays empty."""
    resolver = DataResolver(dvc, tmp_dir.fs_path, {})
    assert not resolver.context
def test_local_vars(tmp_dir, dvc):
    """Inline vars dicts land directly in the resolver context."""
    inline_vars = {"foo": "bar", "bar": "foo"}
    resolver = DataResolver(dvc, tmp_dir.fs_path, {"vars": [inline_vars]})
    assert resolver.context == {"foo": "bar", "bar": "foo"}
@pytest.mark.parametrize("vars_", ["${file}_params.yaml", {"foo": "${foo}"}])
def test_vars_interpolation_errors(tmp_dir, dvc, vars_):
    """Interpolation inside top-level vars entries is rejected."""
    with pytest.raises(ResolveError) as exc_info:
        DataResolver(dvc, tmp_dir.fs_path, {"vars": [vars_, {"bar": "foo"}]})
    expected = (
        "failed to parse 'vars' in 'dvc.yaml': interpolating is not allowed"
    )
    assert str(exc_info.value) == expected
@pytest.mark.parametrize(
    "vars_", [{}, {"vars": []}, {"vars": [DEFAULT_PARAMS_FILE]}]
)
def test_default_params_file(tmp_dir, dvc, vars_):
    """params.yaml is loaded by default, listed explicitly or not."""
    (tmp_dir / DEFAULT_PARAMS_FILE).dump(DATA)
    resolver = DataResolver(dvc, tmp_dir.fs_path, vars_)
    assert resolver.context == DATA
def test_load_vars_from_file(tmp_dir, dvc):
    """Vars loaded from several files are merged into one context."""
    (tmp_dir / DEFAULT_PARAMS_FILE).dump(DATA)
    datasets = {"datasets": ["foo", "bar"]}
    (tmp_dir / "params.json").dump(datasets)
    resolver = DataResolver(
        dvc, tmp_dir.fs_path, {"vars": [DEFAULT_PARAMS_FILE, "params.json"]}
    )
    expected = {**deepcopy(DATA), **datasets}
    assert resolver.context == expected
def test_load_vars_with_relpath(tmp_dir, scm, dvc):
    """Relative vars paths resolve against the dvc.yaml directory across revs."""
    tmp_dir.scm_gen(DEFAULT_PARAMS_FILE, dumps_yaml(DATA), commit="add params")
    subdir = tmp_dir / "subdir"
    # Path to params.yaml written relative to the (not yet created) subdir.
    d = {"vars": [os.path.relpath(tmp_dir / DEFAULT_PARAMS_FILE, subdir)]}
    revisions = ["HEAD", "workspace"]
    for rev in dvc.brancher(revs=["HEAD"]):
        # pop() takes from the end, so the first yielded rev must be
        # "workspace", then "HEAD".
        assert rev == revisions.pop()
        resolver = DataResolver(dvc, subdir.fs_path, d)
        assert resolver.context == deepcopy(DATA)
def test_partial_vars_doesnot_exist(tmp_dir, dvc):
    """Requesting a missing sub-key from a vars file fails with a clear error."""
    (tmp_dir / "test_params.yaml").dump({"sub1": "sub1"})
    with pytest.raises(ResolveError) as exc_info:
        DataResolver(dvc, tmp_dir.fs_path, {"vars": ["test_params.yaml:sub2"]})
    expected = (
        "failed to parse 'vars' in 'dvc.yaml': "
        "could not find 'sub2' in 'test_params.yaml'"
    )
    assert str(exc_info.value) == expected
def test_global_overwrite_error_on_imports(tmp_dir, dvc):
    """A second vars file may not redefine keys already loaded from the first."""
    (tmp_dir / DEFAULT_PARAMS_FILE).dump(DATA)
    (tmp_dir / "params.json").dump(DATA)
    d = {"vars": [DEFAULT_PARAMS_FILE, "params.json"]}
    with pytest.raises(ResolveError) as exc_info:
        DataResolver(dvc, tmp_dir.fs_path, d)
    expected = (
        "failed to parse 'vars' in 'dvc.yaml':\n"
        "cannot redefine 'models.bar' from 'params.json' "
        "as it already exists in 'params.yaml'"
    )
    assert str(exc_info.value) == expected
def test_global_overwrite_vars(tmp_dir, dvc):
    """Inline vars may not redefine keys already loaded from params.yaml."""
    (tmp_dir / DEFAULT_PARAMS_FILE).dump(DATA)
    with pytest.raises(ResolveError) as exc_info:
        DataResolver(dvc, tmp_dir.fs_path, {"vars": [DATA]})
    expected = (
        "failed to parse 'vars' in 'dvc.yaml':\n"
        "cannot redefine 'models.bar' from 'vars[0]' "
        "as it already exists in 'params.yaml'"
    )
    assert str(exc_info.value) == expected
def test_local_declared_vars_overwrite(tmp_dir, dvc):
    """Two inline vars entries redefining the same keys are an error."""
    (tmp_dir / DEFAULT_PARAMS_FILE).dump(DATA)

    definition = {"vars": [DATA["models"], DATA["models"]]}
    with pytest.raises(ResolveError) as exc_info:
        DataResolver(dvc, tmp_dir.fs_path, definition)

    expected_message = (
        "failed to parse 'vars' in 'dvc.yaml':\n"
        "cannot redefine 'bar' from 'vars[1]' "
        "as it already exists in 'vars[0]'"
    )
    assert str(exc_info.value) == expected_message
def test_specified_params_file_not_exist(tmp_dir, dvc):
    """A vars entry pointing at a nonexistent file raises ResolveError."""
    with pytest.raises(ResolveError) as exc_info:
        DataResolver(
            dvc, tmp_dir.fs_path, {"vars": ["not_existing_params.yaml"]}
        )

    expected_message = (
        "failed to parse 'vars' in 'dvc.yaml': "
        "'not_existing_params.yaml' does not exist"
    )
    assert str(exc_info.value) == expected_message
@pytest.mark.parametrize("local", [True, False])
@pytest.mark.parametrize(
    "vars_",
    [
        ["test_params.yaml", "test_params.yaml:sub1"],
        ["test_params.yaml:sub1", "test_params.yaml"],
        ["test_params.yaml:sub1", "test_params.yaml:sub1,sub2"],
    ],
)
def test_vars_already_loaded_message(tmp_dir, dvc, local, vars_):
    """Re-importing keys from an already-loaded file mentions 'partially'."""
    (tmp_dir / "test_params.yaml").dump({"sub1": "sub1", "sub2": "sub2"})

    d = {"stages": {"build": {"cmd": "echo ${sub1} ${sub2}"}}}
    # Attach the vars either to the stage or globally, depending on `local`.
    target = d["stages"]["build"] if local else d
    target["vars"] = vars_

    with pytest.raises(ResolveError) as exc_info:
        DataResolver(dvc, tmp_dir.fs_path, d).resolve()
    assert "partially" in str(exc_info.value)
@pytest.mark.parametrize(
    "vars_, loc", [(DATA, "build.vars[0]"), ("params.json", "params.json")]
)
def test_local_overwrite_error(tmp_dir, dvc, vars_, loc):
    """Stage-local vars clashing with global params raise ResolveError."""
    (tmp_dir / DEFAULT_PARAMS_FILE).dump(DATA)
    (tmp_dir / "params.json").dump(DATA)

    d = {"stages": {"build": {"cmd": "echo ${models.foo}", "vars": [vars_]}}}
    resolver = DataResolver(dvc, tmp_dir.fs_path, d)

    with pytest.raises(ResolveError) as exc_info:
        resolver.resolve()

    expected_message = (
        "failed to parse stage 'build' in 'dvc.yaml':\n"
        f"cannot redefine 'models.bar' from '{loc}' "
        "as it already exists in 'params.yaml'"
    )
    assert str(exc_info.value) == expected_message
| {
"content_hash": "e8ac1f81b5d7d97e3ced57abdda460d1",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 79,
"avg_line_length": 32.10471204188482,
"alnum_prop": 0.6232876712328768,
"repo_name": "dmpetrov/dataversioncontrol",
"id": "a2ac0c42b84e3dcaf427e2127d4932b01fad4d47",
"size": "6132",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/func/parsing/test_resolver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "127601"
},
{
"name": "Shell",
"bytes": "1677"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class AdvancedFilter(Choreography):
    """Temboo Choreo wrapper for the Clicky AdvancedFilter endpoint."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the AdvancedFilter Choreo.

        :param temboo_session: a TembooSession object holding a valid set of
            Temboo credentials.
        """
        super(AdvancedFilter, self).__init__(temboo_session, '/Library/Clicky/AdvancedFilter')

    def new_input_set(self):
        # Fresh, empty input set for configuring an execution of this Choreo.
        return AdvancedFilterInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the Choreo-specific result set.
        return AdvancedFilterResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle for tracking an asynchronous execution of this Choreo.
        return AdvancedFilterChoreographyExecution(session, exec_id, path)
class AdvancedFilterInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the
    AdvancedFilter Choreo. Use it to set input parameters before executing
    the Choreo.
    """

    def set_Date(self, value):
        """Set the Date input: (optional, string) the date or date range to
        access, YYYY-MM-DD or YYYY-MM-DD,YYYY-MM-DD for a range. See docs for
        more options. Defaults to 'today'.
        """
        super(AdvancedFilterInputSet, self)._set_input('Date', value)

    def set_FilterName(self, value):
        """Set the FilterName input: (required, string) the name of the data
        to filter by (i.e. ip_address). See docs for the full list of filters.
        """
        super(AdvancedFilterInputSet, self)._set_input('FilterName', value)

    def set_FilterValue(self, value):
        """Set the FilterValue input: (required, string) the filter value to
        apply, e.g. "65.0.0.0,85.0.0.0" when FilterName is "ip_address".
        """
        super(AdvancedFilterInputSet, self)._set_input('FilterValue', value)

    def set_Limit(self, value):
        """Set the Limit input: (optional, integer) the maximum number of
        results returned. Defaults to 10.
        """
        super(AdvancedFilterInputSet, self)._set_input('Limit', value)

    def set_Output(self, value):
        """Set the Output input: (optional, string) response format — one of
        xml, php, json, csv. Defaults to 'xml'.
        """
        super(AdvancedFilterInputSet, self)._set_input('Output', value)

    def set_SiteID(self, value):
        """Set the SiteID input: (required, integer) the ID of the site to
        read data from, available from your site preferences page.
        """
        super(AdvancedFilterInputSet, self)._set_input('SiteID', value)

    def set_SiteKey(self, value):
        """Set the SiteKey input: (required, string) the unique key assigned
        when registering with Clicky, from your site preferences page.
        """
        super(AdvancedFilterInputSet, self)._set_input('SiteKey', value)

    def set_Type(self, value):
        """Set the Type input: (required, string) the type of data to
        retrieve; use visitors-list, segmentation, or actions-list.
        """
        super(AdvancedFilterInputSet, self)._set_input('Type', value)
class AdvancedFilterResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the AdvancedFilter Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, json_str):
        """Parse a JSON string and return the resulting Python object.

        The parameter was renamed from ``str`` to ``json_str`` so it no longer
        shadows the ``str`` builtin; positional callers are unaffected.
        """
        return json.loads(json_str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Clicky formatted as specified in the Output parameter. Default is XML.)
        """
        return self._output.get('Response', None)
class AdvancedFilterChoreographyExecution(ChoreographyExecution):
    # Execution handle for an asynchronous run of the AdvancedFilter Choreo.
    def _make_result_set(self, response, path):
        # Wrap the raw response in the Choreo-specific result set.
        return AdvancedFilterResultSet(response, path)
| {
"content_hash": "cb438c5bc1d2cf2d21e92f6767e0dee8",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 253,
"avg_line_length": 50.02197802197802,
"alnum_prop": 0.6904657293497364,
"repo_name": "jordanemedlock/psychtruths",
"id": "c97304472a717f5e6734bfeab92a7c5e7a72607c",
"size": "5441",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "temboo/Library/Clicky/AdvancedFilter.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
} |
"""TFLite SavedModel conversion test cases.
- Tests converting simple SavedModel graph to TFLite FlatBuffer.
- Tests converting simple SavedModel graph to frozen graph.
- Tests converting MNIST SavedModel to TFLite FlatBuffer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.lite.python import convert_saved_model
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
class TensorFunctionsTest(test_util.TensorFlowTestCase):
  """Tests for the tensor helper functions in convert_saved_model."""

  def testGetTensorsValid(self):
    """get_tensors_from_tensor_names resolves an existing tensor name."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + in_tensor
    sess = session.Session()
    tensors = convert_saved_model.get_tensors_from_tensor_names(
        sess.graph, ["Placeholder"])
    self.assertEqual("Placeholder:0", tensors[0].name)

  def testGetTensorsInvalid(self):
    """get_tensors_from_tensor_names raises on an unknown tensor name."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + in_tensor
    sess = session.Session()
    with self.assertRaises(ValueError) as error:
      convert_saved_model.get_tensors_from_tensor_names(sess.graph,
                                                        ["invalid-input"])
    self.assertEqual("Invalid tensors 'invalid-input' were found.",
                     str(error.exception))

  def testSetTensorShapeValid(self):
    """set_tensor_shapes fills in a fully-known shape for a named tensor."""
    tensor = array_ops.placeholder(shape=[None, 3, 5], dtype=dtypes.float32)
    self.assertEqual([None, 3, 5], tensor.shape.as_list())
    convert_saved_model.set_tensor_shapes([tensor], {"Placeholder": [5, 3, 5]})
    self.assertEqual([5, 3, 5], tensor.shape.as_list())

  def testSetTensorShapeNoneValid(self):
    """set_tensor_shapes assigns a shape to a tensor of unknown shape."""
    tensor = array_ops.placeholder(dtype=dtypes.float32)
    self.assertEqual(None, tensor.shape)
    convert_saved_model.set_tensor_shapes([tensor], {"Placeholder": [1, 3, 5]})
    self.assertEqual([1, 3, 5], tensor.shape.as_list())

  def testSetTensorShapeInvalid(self):
    """set_tensor_shapes leaves shapes untouched for names matching no tensor."""
    tensor = array_ops.placeholder(shape=[None, 3, 5], dtype=dtypes.float32)
    self.assertEqual([None, 3, 5], tensor.shape.as_list())
    convert_saved_model.set_tensor_shapes([tensor],
                                          {"invalid-input": [5, 3, 5]})
    self.assertEqual([None, 3, 5], tensor.shape.as_list())

  def testSetTensorShapeEmpty(self):
    """set_tensor_shapes with an empty mapping is a no-op."""
    tensor = array_ops.placeholder(shape=[None, 3, 5], dtype=dtypes.float32)
    self.assertEqual([None, 3, 5], tensor.shape.as_list())
    convert_saved_model.set_tensor_shapes([tensor], {})
    self.assertEqual([None, 3, 5], tensor.shape.as_list())
class FreezeSavedModelTest(test_util.TensorFlowTestCase):
  """End-to-end tests for convert_saved_model.freeze_saved_model."""

  def _createSimpleSavedModel(self, shape):
    """Create a simple SavedModel on the fly."""
    saved_model_dir = os.path.join(self.get_temp_dir(), "simple_savedmodel")
    with session.Session() as sess:
      in_tensor = array_ops.placeholder(shape=shape, dtype=dtypes.float32)
      out_tensor = in_tensor + in_tensor
      inputs = {"x": in_tensor}
      outputs = {"y": out_tensor}
      saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    return saved_model_dir

  def _createSavedModelTwoInputArrays(self, shape):
    """Create a simple SavedModel with two input placeholders."""
    saved_model_dir = os.path.join(self.get_temp_dir(), "simple_savedmodel")
    with session.Session() as sess:
      # Note: graph names ("inputB"/"inputA") deliberately differ from the
      # signature keys ("x"/"y") below.
      in_tensor_1 = array_ops.placeholder(
          shape=shape, dtype=dtypes.float32, name="inputB")
      in_tensor_2 = array_ops.placeholder(
          shape=shape, dtype=dtypes.float32, name="inputA")
      out_tensor = in_tensor_1 + in_tensor_2
      inputs = {"x": in_tensor_1, "y": in_tensor_2}
      outputs = {"z": out_tensor}
      saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    return saved_model_dir

  def _getArrayNames(self, tensors):
    """Return the name of each tensor."""
    return [tensor.name for tensor in tensors]

  def _getArrayShapes(self, tensors):
    """Return each tensor's shape as a list (None for unknown dimensions)."""
    dims = []
    for tensor in tensors:
      dim_tensor = []
      for dim in tensor.shape:
        # Depending on the TF version, dims are Dimension objects or ints.
        if isinstance(dim, tensor_shape.Dimension):
          dim_tensor.append(dim.value)
        else:
          dim_tensor.append(dim)
      dims.append(dim_tensor)
    return dims

  def _convertSavedModel(self,
                         saved_model_dir,
                         input_arrays=None,
                         input_shapes=None,
                         output_arrays=None,
                         tag_set=None,
                         signature_key=None):
    """Freeze the SavedModel, defaulting the tag set and signature key."""
    if tag_set is None:
      tag_set = set([tag_constants.SERVING])
    if signature_key is None:
      signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    graph_def, in_tensors, out_tensors = convert_saved_model.freeze_saved_model(
        saved_model_dir=saved_model_dir,
        input_arrays=input_arrays,
        input_shapes=input_shapes,
        output_arrays=output_arrays,
        tag_set=tag_set,
        signature_key=signature_key)
    return graph_def, in_tensors, out_tensors

  def testSimpleSavedModel(self):
    """Test a SavedModel."""
    saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
    _, in_tensors, out_tensors = self._convertSavedModel(saved_model_dir)
    self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
    self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
    self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])

  def testSimpleSavedModelWithNoneBatchSizeInShape(self):
    """Test a SavedModel with None in input tensor's shape."""
    saved_model_dir = self._createSimpleSavedModel(shape=[None, 16, 16, 3])
    _, in_tensors, out_tensors = self._convertSavedModel(saved_model_dir)
    self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
    self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
    self.assertEqual(self._getArrayShapes(in_tensors), [[None, 16, 16, 3]])

  def testSimpleSavedModelWithInvalidSignatureKey(self):
    """Test a SavedModel that fails due to an invalid signature_key."""
    saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
    with self.assertRaises(ValueError) as error:
      self._convertSavedModel(saved_model_dir, signature_key="invalid-key")
    self.assertEqual(
        "No 'invalid-key' in the SavedModel's SignatureDefs. "
        "Possible values are 'serving_default'.", str(error.exception))

  def testSimpleSavedModelWithInvalidOutputArray(self):
    """Test a SavedModel that fails due to invalid output arrays."""
    saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
    with self.assertRaises(ValueError) as error:
      self._convertSavedModel(saved_model_dir, output_arrays=["invalid-output"])
    self.assertEqual("Invalid tensors 'invalid-output' were found.",
                     str(error.exception))

  def testSimpleSavedModelWithWrongInputArrays(self):
    """Test a SavedModel that fails due to invalid input arrays."""
    saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
    # Check invalid input_arrays.
    with self.assertRaises(ValueError) as error:
      self._convertSavedModel(saved_model_dir, input_arrays=["invalid-input"])
    self.assertEqual("Invalid tensors 'invalid-input' were found.",
                     str(error.exception))
    # Check valid and invalid input_arrays.
    with self.assertRaises(ValueError) as error:
      self._convertSavedModel(
          saved_model_dir, input_arrays=["Placeholder", "invalid-input"])
    self.assertEqual("Invalid tensors 'invalid-input' were found.",
                     str(error.exception))

  def testSimpleSavedModelWithCorrectArrays(self):
    """Test a SavedModel with correct input_arrays and output_arrays."""
    saved_model_dir = self._createSimpleSavedModel(shape=[None, 16, 16, 3])
    _, in_tensors, out_tensors = self._convertSavedModel(
        saved_model_dir=saved_model_dir,
        input_arrays=["Placeholder"],
        output_arrays=["add"])
    self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
    self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
    self.assertEqual(self._getArrayShapes(in_tensors), [[None, 16, 16, 3]])

  def testSimpleSavedModelWithCorrectInputArrays(self):
    """Test a SavedModel with correct input_arrays and input_shapes."""
    saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
    _, in_tensors, out_tensors = self._convertSavedModel(
        saved_model_dir=saved_model_dir,
        input_arrays=["Placeholder"],
        input_shapes={"Placeholder": [1, 16, 16, 3]})
    self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
    self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
    self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])

  def testTwoInputArrays(self):
    """Test a simple SavedModel."""
    saved_model_dir = self._createSavedModelTwoInputArrays(shape=[1, 16, 16, 3])
    _, in_tensors, out_tensors = self._convertSavedModel(
        saved_model_dir=saved_model_dir, input_arrays=["inputB", "inputA"])
    self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
    self.assertEqual(self._getArrayNames(in_tensors), ["inputA:0", "inputB:0"])
    self.assertEqual(
        self._getArrayShapes(in_tensors), [[1, 16, 16, 3], [1, 16, 16, 3]])

  def testSubsetInputArrays(self):
    """Test a SavedModel with a subset of the input array names of the model."""
    saved_model_dir = self._createSavedModelTwoInputArrays(shape=[1, 16, 16, 3])
    # Check case where input shape is given.
    _, in_tensors, out_tensors = self._convertSavedModel(
        saved_model_dir=saved_model_dir,
        input_arrays=["inputA"],
        input_shapes={"inputA": [1, 16, 16, 3]})
    self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
    self.assertEqual(self._getArrayNames(in_tensors), ["inputA:0"])
    self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])
    # Check case where input shape is None.
    _, in_tensors, out_tensors = self._convertSavedModel(
        saved_model_dir=saved_model_dir, input_arrays=["inputA"])
    self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
    self.assertEqual(self._getArrayNames(in_tensors), ["inputA:0"])
    self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])

  def testMultipleMetaGraphDef(self):
    """Test saved model with multiple MetaGraphDefs."""
    saved_model_dir = os.path.join(self.get_temp_dir(), "savedmodel_two_mgd")
    builder = saved_model.builder.SavedModelBuilder(saved_model_dir)
    with session.Session(graph=ops.Graph()) as sess:
      # MetaGraphDef 1
      in_tensor = array_ops.placeholder(shape=[1, 28, 28], dtype=dtypes.float32)
      out_tensor = in_tensor + in_tensor
      sig_input_tensor = saved_model.utils.build_tensor_info(in_tensor)
      sig_input_tensor_signature = {"x": sig_input_tensor}
      sig_output_tensor = saved_model.utils.build_tensor_info(out_tensor)
      sig_output_tensor_signature = {"y": sig_output_tensor}
      predict_signature_def = (
          saved_model.signature_def_utils.build_signature_def(
              sig_input_tensor_signature, sig_output_tensor_signature,
              saved_model.signature_constants.PREDICT_METHOD_NAME))
      signature_def_map = {
          saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              predict_signature_def
      }
      builder.add_meta_graph_and_variables(
          sess,
          tags=[saved_model.tag_constants.SERVING, "additional_test_tag"],
          signature_def_map=signature_def_map)
      # MetaGraphDef 2
      builder.add_meta_graph(tags=["tflite"])
      builder.save(True)

    # Convert to tflite
    _, in_tensors, out_tensors = self._convertSavedModel(
        saved_model_dir=saved_model_dir,
        tag_set=set([saved_model.tag_constants.SERVING, "additional_test_tag"]))
    self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
    self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
    self.assertEqual(self._getArrayShapes(in_tensors), [[1, 28, 28]])
if __name__ == "__main__":
  # Run all test cases in this module.
  test.main()
| {
"content_hash": "bbbb2b99438199a6357c105339d65664",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 80,
"avg_line_length": 43.167808219178085,
"alnum_prop": 0.6739389131297104,
"repo_name": "brchiu/tensorflow",
"id": "dff582f1a16d2f228df5253652437e4b5266e502",
"size": "13294",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/lite/python/convert_saved_model_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "473950"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "51674376"
},
{
"name": "CMake",
"bytes": "199085"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285435"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "875500"
},
{
"name": "Jupyter Notebook",
"bytes": "2623054"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "63390"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41718475"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "490100"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
import requests
import json
import sys
sys.path.append('../conn/')
import conn
#vol_name = raw_input('Input volume name:')
url = conn.url + 'storage/disk/'  # base endpoint for the disk resource
auth = conn.auth  # credentials shared via the local conn module
headers = conn.headers
# Request body used by the PUT helper below.
payload = {
  "disk_togglesmart": "true"
}
def replication_get():
r = requests.get(url, auth = auth)
result = json.loads(r.text)
i = 0
for i in range(0,len(result)):
print '\n'
for items in result[i]:
print items+':', result[i][items]
def replication_put():
    """Prompt for a disk id and PUT the module-level `payload` to it (Python 2)."""
    id = raw_input('Input id:')+'/'  # NOTE(review): `id` shadows the builtin
    r = requests.put(url+id, auth = auth, data = json.dumps(payload), headers = headers)
    result = json.loads(r.text)
    # Print every field of the decoded response.
    for items in result:
        print items+':', result[items]
# Minimal interactive driver: read a method name and dispatch to the helpers.
# NOTE(review): there is no quit command; the loop only ends on EOF/interrupt.
while True:  # idiomatic spelling of the original `while (1)`
    method = raw_input('Input method:')
    if method == 'get':
        replication_get()
    elif method == 'put':
        replication_put()
| {
"content_hash": "e6ee6dbb3b71c741171440f6704085e6",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 86,
"avg_line_length": 23.22222222222222,
"alnum_prop": 0.631578947368421,
"repo_name": "PatriQ7/freenas-test-api",
"id": "0b73d34221475d504f70be7bba4ca0ff65dd77fd",
"size": "861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storage/disk.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "61858"
}
],
"symlink_target": ""
} |
__author__ = 'ruthlesshelp'  # author tag; this test-package __init__ is otherwise empty
| {
"content_hash": "d180ac58349ff2d2d2197787595bc433",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 27,
"avg_line_length": 28,
"alnum_prop": 0.6428571428571429,
"repo_name": "excellaco/pycabara",
"id": "d12b4182c1686ed621ddd1c63e3792ae43757834",
"size": "28",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycabara/tests_unit/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12961"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
from sympy import Eq, symbols, solve
# Symbols
# One sympy symbol per score component. `format` and `hash` shadow builtins
# here on purpose (names must match subliminal's property names); the
# `@ReservedAssignment` marker below suppresses the IDE warning.
release_group, resolution, format, video_codec, audio_codec = symbols('release_group resolution format video_codec audio_codec')
imdb_id, hash, title, series, tvdb_id, season, episode = symbols('imdb_id hash title series tvdb_id season episode')  # @ReservedAssignment
year = symbols('year')
def get_episode_equations():
    """Get the score equations for a :class:`~subliminal.video.Episode`

    The equations are the following:

    1. hash = resolution + format + video_codec + audio_codec + series + season + episode + year + release_group
    2. series = resolution + video_codec + audio_codec + season + episode + release_group + 1
    3. year = series
    4. tvdb_id = series + year
    5. season = resolution + video_codec + audio_codec + 1
    6. imdb_id = series + season + episode + year
    7. format = video_codec + audio_codec
    8. resolution = video_codec
    9. video_codec = 2 * audio_codec
    10. title = season + episode
    11. season = episode
    12. release_group = season
    13. audio_codec = 1

    :return: the score equations for an episode
    :rtype: list of :class:`sympy.Eq`

    """
    # One Eq per numbered rule above, in the same order.
    return [
        Eq(hash, resolution + format + video_codec + audio_codec + series + season + episode + year + release_group),
        Eq(series, resolution + video_codec + audio_codec + season + episode + release_group + 1),
        Eq(series, year),
        Eq(tvdb_id, series + year),
        Eq(season, resolution + video_codec + audio_codec + 1),
        Eq(imdb_id, series + season + episode + year),
        Eq(format, video_codec + audio_codec),
        Eq(resolution, video_codec),
        Eq(video_codec, 2 * audio_codec),
        Eq(title, season + episode),
        Eq(season, episode),
        Eq(release_group, season),
        Eq(audio_codec, 1),
    ]
def get_movie_equations():
    """Get the score equations for a :class:`~subliminal.video.Movie`

    The equations are the following:

    1. hash = resolution + format + video_codec + audio_codec + title + year + release_group
    2. imdb_id = hash
    3. resolution = video_codec
    4. video_codec = 2 * audio_codec
    5. format = video_codec + audio_codec
    6. title = resolution + video_codec + audio_codec + year + 1
    7. release_group = resolution + video_codec + audio_codec + 1
    8. year = release_group + 1
    9. audio_codec = 1

    :return: the score equations for a movie
    :rtype: list of :class:`sympy.Eq`

    """
    # Fix: the original appended Eq(video_codec, 2 * audio_codec) twice; the
    # duplicate was redundant for sympy.solve and has been removed, so the
    # list now matches the 9 numbered rules above, in order.
    return [
        Eq(hash, resolution + format + video_codec + audio_codec + title + year + release_group),
        Eq(imdb_id, hash),
        Eq(resolution, video_codec),
        Eq(video_codec, 2 * audio_codec),
        Eq(format, video_codec + audio_codec),
        Eq(title, resolution + video_codec + audio_codec + year + 1),
        Eq(release_group, resolution + video_codec + audio_codec + 1),
        Eq(year, release_group + 1),
        Eq(audio_codec, 1),
    ]
if __name__ == '__main__':
    # Solve and print both systems so the resulting weights can be copied
    # into subliminal's scoring tables.
    print(solve(get_episode_equations(), [release_group, resolution, format, video_codec, audio_codec, imdb_id,
                                          hash, series, tvdb_id, season, episode, title, year]))
    print(solve(get_movie_equations(), [release_group, resolution, format, video_codec, audio_codec, imdb_id,
                                        hash, title, year]))
| {
"content_hash": "93f301f189bf9d13bc290b85c8f9454e",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 139,
"avg_line_length": 43.29545454545455,
"alnum_prop": 0.6648293963254593,
"repo_name": "caronc/subliminal",
"id": "f9dcaedee5880fa8f4f7713c6505c0fc47fc3e50",
"size": "3856",
"binary": false,
"copies": "30",
"ref": "refs/heads/master",
"path": "subliminal/score.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""\
Karatsuba multiplication of polynomials
christoph dürr - jill-jênn vie - 2022
"""
# snip{
def eval_poly(P, x):
    """Evaluate the polynomial P at the point x.

    :param P: coefficient vector, P[i] is the coefficient of x**i
    :returns: the value P(x)
    :complexity: linear in the size of P.
    """
    # Horner's rule, scanning from the highest-degree coefficient down.
    value = 0
    for coeff in P[::-1]:
        value = value * x + coeff
    return value
# snip}
# snip{
def add_poly(P, Q):
    """Add two polynomials represented by their coefficient vectors.

    :param P, Q: two vectors representing polynomials
    :returns: a vector representing the sum P + Q
    :complexity: linear in the size of P and Q.
    """
    if len(Q) > len(P):
        P, Q = Q, P          # cumulate the shorter into the longer
    total = list(P)          # copy, so the inputs are left untouched
    for i, coeff in enumerate(Q):
        total[i] += coeff
    return total
# snip}
# snip{
def sub_poly(P, Q):
    """Subtract two polynomials represented by their coefficient vectors.

    :param P, Q: two vectors representing polynomials
    :returns: a vector representing the difference P - Q
    :complexity: linear in the size of P and Q.
    """
    # Negate Q, then add inline: P + (-Q).
    neg = [-coeff for coeff in Q]
    if len(neg) > len(P):
        P, neg = neg, P
    diff = list(P)
    for i, coeff in enumerate(neg):
        diff[i] += coeff
    return diff
# snip}
# snip{
def mul_poly(P, Q):
    r"""Karatsuba's algorithm.

    Multiply two polynomials represented by their coefficients,
    i.e. P(x) = sum P[i] x**i.

    :param P, Q: two vectors representing polynomials
    :returns: a vector representing the product
    :complexity: :math:`O(n^{\log_2 3})=O(n^{1.585})`, where n is
        the total degree of P and Q.

    (The docstring is now a raw string: the previous non-raw version
    contained the invalid escape sequence ``\l``.)
    """
    if not P or not Q:  # case one of P, Q is the constant zero
        return []
    if len(P) == 1:     # base case: multiplication by a scalar
        return [qi * P[0] for qi in Q]
    elif len(Q) == 1:
        return [pi * Q[0] for pi in P]
    k = max(len(P), len(Q)) // 2
    xk = [0] * k        # k zero coefficients, i.e. a shift by x**k
    a = P[:k]           # split: P = a + b * x**k
    b = P[k:]
    c = Q[:k]           # split: Q = c + d * x**k
    d = Q[k:]
    a_b = sub_poly(a, b)
    c_d = sub_poly(c, d)
    ac = mul_poly(a, c)
    bd = mul_poly(b, d)
    abcd = mul_poly(a_b, c_d)
    # Fix of the original comment: ac + bd - (a-b)(c-d) = ad + bc, not ad - bc.
    ad_bc = sub_poly(add_poly(ac, bd), abcd)  # = ad + bc
    # result is ac + [ac + bd - (a - b)(c - d)]*x**k + bd*x**(2k)
    return add_poly(ac, xk + add_poly(ad_bc, xk + bd))
# snip}
| {
"content_hash": "5af43a3475d7c7d0b492ae00352e1f51",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 84,
"avg_line_length": 29.723684210526315,
"alnum_prop": 0.5559982293050022,
"repo_name": "jilljenn/tryalgo",
"id": "9c3678d930d1419cb7655ba882d6ba41bb39444f",
"size": "2308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tryalgo/karatsuba.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "565"
},
{
"name": "Python",
"bytes": "287432"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._file_services_operations import (
build_get_service_properties_request,
build_list_request,
build_set_service_properties_request,
)
# `Literal` moved into `typing` in Python 3.8; fall back to typing_extensions
# on older interpreters.
if sys.version_info >= (3, 8):
    from typing import Literal  # pylint: disable=no-name-in-module, ungrouped-imports
else:
    from typing_extensions import Literal  # type: ignore  # pylint: disable=ungrouped-imports
T = TypeVar("T")
# Signature of the optional custom `cls` response callback accepted by
# every operation: (pipeline response, deserialized body, response headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FileServicesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.storage.v2019_04_01.aio.StorageManagementClient`'s
:attr:`file_services` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace_async
    async def list(self, resource_group_name: str, account_name: str, **kwargs: Any) -> _models.FileServiceItems:
        """List all file services in storage accounts.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FileServiceItems or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2019_04_01.models.FileServiceItems
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map HTTP status codes to the azure-core exception types callers catch.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})  # caller-supplied overrides win
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # API version is pinned for this operation unless explicitly overridden.
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-04-01"))  # type: Literal["2019-04-01"]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.FileServiceItems]

        request = build_list_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.list.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        # Send the request through the client pipeline (no response streaming).
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize("FileServiceItems", pipeline_response)

        if cls:
            # Hand the raw pipeline response to the caller-provided callback.
            return cls(pipeline_response, deserialized, {})

        return deserialized

    list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices"}  # type: ignore
    @overload
    async def set_service_properties(
        self,
        resource_group_name: str,
        account_name: str,
        file_services_name: Union[str, _models.Enum16],
        parameters: _models.FileServiceProperties,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.FileServiceProperties:
        """Sets the properties of file services in storage accounts, including CORS (Cross-Origin
        Resource Sharing) rules.

        Overload accepting the request body as a ``FileServiceProperties`` model, serialized as JSON.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param file_services_name: The name of the file Service within the specified storage account.
         File Service Name must be "default". "default" Required.
        :type file_services_name: str or ~azure.mgmt.storage.v2019_04_01.models.Enum16
        :param parameters: The properties of file services in storage accounts, including CORS
         (Cross-Origin Resource Sharing) rules. Required.
        :type parameters: ~azure.mgmt.storage.v2019_04_01.models.FileServiceProperties
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FileServiceProperties or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2019_04_01.models.FileServiceProperties
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    async def set_service_properties(
        self,
        resource_group_name: str,
        account_name: str,
        file_services_name: Union[str, _models.Enum16],
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.FileServiceProperties:
        """Sets the properties of file services in storage accounts, including CORS (Cross-Origin
        Resource Sharing) rules.

        Overload accepting the request body as an already-serialized binary stream (``IO``).

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param file_services_name: The name of the file Service within the specified storage account.
         File Service Name must be "default". "default" Required.
        :type file_services_name: str or ~azure.mgmt.storage.v2019_04_01.models.Enum16
        :param parameters: The properties of file services in storage accounts, including CORS
         (Cross-Origin Resource Sharing) rules. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FileServiceProperties or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2019_04_01.models.FileServiceProperties
        :raises ~azure.core.exceptions.HttpResponseError:
        """
@distributed_trace_async
async def set_service_properties(
self,
resource_group_name: str,
account_name: str,
file_services_name: Union[str, _models.Enum16],
parameters: Union[_models.FileServiceProperties, IO],
**kwargs: Any
) -> _models.FileServiceProperties:
"""Sets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
Sharing) rules.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param file_services_name: The name of the file Service within the specified storage account.
File Service Name must be "default". "default" Required.
:type file_services_name: str or ~azure.mgmt.storage.v2019_04_01.models.Enum16
:param parameters: The properties of file services in storage accounts, including CORS
(Cross-Origin Resource Sharing) rules. Is either a model type or a IO type. Required.
:type parameters: ~azure.mgmt.storage.v2019_04_01.models.FileServiceProperties or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileServiceProperties or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_04_01.models.FileServiceProperties
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-04-01")) # type: Literal["2019-04-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.FileServiceProperties]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "FileServiceProperties")
request = build_set_service_properties_request(
resource_group_name=resource_group_name,
account_name=account_name,
file_services_name=file_services_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.set_service_properties.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("FileServiceProperties", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_service_properties.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}"} # type: ignore
@distributed_trace_async
async def get_service_properties(
self, resource_group_name: str, account_name: str, file_services_name: Union[str, _models.Enum16], **kwargs: Any
) -> _models.FileServiceProperties:
"""Gets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
Sharing) rules.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param file_services_name: The name of the file Service within the specified storage account.
File Service Name must be "default". "default" Required.
:type file_services_name: str or ~azure.mgmt.storage.v2019_04_01.models.Enum16
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileServiceProperties or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_04_01.models.FileServiceProperties
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-04-01")) # type: Literal["2019-04-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.FileServiceProperties]
request = build_get_service_properties_request(
resource_group_name=resource_group_name,
account_name=account_name,
file_services_name=file_services_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_service_properties.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("FileServiceProperties", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_service_properties.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}"} # type: ignore
| {
"content_hash": "8754fa068469499633f647373e4ab0cd",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 221,
"avg_line_length": 47.63662790697674,
"alnum_prop": 0.6704704949044975,
"repo_name": "Azure/azure-sdk-for-python",
"id": "2c325ca3644b107caabdbfdf903e4a197fe22138",
"size": "16887",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2019_04_01/aio/operations/_file_services_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
'''
Created on Mar 2, 2013
@author: Adam Speakman
@contact: http://github.com/adamsp
@contact: http://speakman.net.nz
@license: http://www.apache.org/licenses/LICENSE-2.0.html
'''
import json
import logging
from google.appengine.api import urlfetch
class DataRetriever:
    """Fetches batches of posts from the App.net global stream and hands
    each batch to every configured post processor."""

    def __init__(self, processors):
        self.processors = processors
        self.prev_max_post_id = 0   # Highest post id seen so far; 0 = never run.
        self.POST_COUNT = 200       # Maximum batch size supported by the API.

    def get_latest_url(self, post_id):
        """Builds the global-stream URL for the next batch of posts."""
        url = ["https://alpha-api.app.net/stream/0/posts/stream/global"]
        # Note we do negative count - returns up to 200 from previous max towards newest.
        # If we do positive count, it counts down from latest post towards the requested
        # 'since' post. Easier to count from the 'since' post towards newest.
        # If first time run though, we just do latest 200.
        if post_id > 0:
            url.append("?since_id=")
            url.append(str(post_id))
            url.append("&count=-")
            # Bug fix: the count value itself was never appended, leaving a
            # dangling "&count=-" in the URL.
            url.append(str(self.POST_COUNT))
        else:
            url.append("?count=")
            url.append(str(self.POST_COUNT))
        return ''.join(url)

    def retrieve_latest_data(self):
        """Repeatedly fetches batches of posts until the API reports no more."""
        more_posts = True
        while more_posts:
            url = self.get_latest_url(self.prev_max_post_id)
            response = urlfetch.fetch(url)
            if response.status_code != 200:
                logging.error('Request failed with status code ' + str(response.status_code)
                              + ' and content: ' + str(response.content))
                return
            result = json.loads(response.content)
            if "data" not in result:
                # Bug fix: previously a response without "data" left the loop
                # state untouched and re-fetched the same URL forever.
                return
            posts = result["data"]
            if len(posts) == 0:
                return
            for processor in self.processors:
                processor.process_posts(posts)
            # We want to short-circuit this if this is the first time
            # we've run - otherwise we'll end up scanning *all* ADN posts.
            if self.prev_max_post_id > 0:
                more_posts = result["meta"]["more"]
            else:
                more_posts = False
            self.prev_max_post_id = int(result["meta"]["max_id"])
| {
"content_hash": "c3fcd5222ce3f768fdb6fc33ecac89af",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 92,
"avg_line_length": 37.741379310344826,
"alnum_prop": 0.5609867519415258,
"repo_name": "adamsp/adnwc",
"id": "f44d0102c22c9d5b223cbe4a8df06d5cd217d89a",
"size": "2189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_retrieval.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "2822"
},
{
"name": "Python",
"bytes": "365363"
}
],
"symlink_target": ""
} |
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.database_accounts_operations import DatabaseAccountsOperations
from . import models
class DocumentDBConfiguration(AzureConfiguration):
    """Configuration for DocumentDB.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: Azure subscription ID.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, base_url=None):

        # Validate required arguments up front so misuse fails fast.
        if credentials is None:
            raise ValueError("Parameter 'credentials' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        if not isinstance(subscription_id, str):
            raise TypeError("Parameter 'subscription_id' must be str.")

        # Fall back to the public Azure management endpoint when none is given.
        super(DocumentDBConfiguration, self).__init__(
            base_url or 'https://management.azure.com')

        self.add_user_agent('documentdb/{}'.format(VERSION))
        self.add_user_agent('Azure-SDK-For-Python')

        self.credentials = credentials
        self.subscription_id = subscription_id
class DocumentDB(object):
    """Azure DocumentDB Database Service Resource Provider REST API.

    :ivar config: Configuration for client.
    :vartype config: DocumentDBConfiguration
    :ivar database_accounts: DatabaseAccounts operations
    :vartype database_accounts: azure.mgmt.documentdb.operations.DatabaseAccountsOperations
    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: Azure subscription ID.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, base_url=None):

        self.config = DocumentDBConfiguration(credentials, subscription_id, base_url)
        self._client = ServiceClient(self.config.credentials, self.config)

        self.api_version = '2015-04-08'

        # Map model-class names to classes for (de)serialization.
        client_models = {name: value for name, value in models.__dict__.items()
                         if isinstance(value, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        self.database_accounts = DatabaseAccountsOperations(
            self._client, self.config, self._serialize, self._deserialize)
| {
"content_hash": "69705694f6d59f12b5485fd7a4a40e10",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 91,
"avg_line_length": 38.875,
"alnum_prop": 0.7063236870310825,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "5eda44ab3e32b6ffe2af30ee5f14e705ec7ac69b",
"size": "3273",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "azure-mgmt-documentdb/azure/mgmt/documentdb/document_db.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
"""PFIF 1.1, 1.2, and 1.3 parsing and serialization (see http://zesty.ca/pfif/).
This module converts between PFIF XML documents (PFIF 1.1, 1.2, or 1.3) and
plain Python dictionaries that have PFIF 1.3 field names as keys (always 1.3)
and Unicode strings as values. Some useful constants are also defined here
according to the PFIF specification. Use parse() to parse PFIF 1.1, 1.2, or
1.3; use PFIF_1_1, PFIF_1_2, or PFIF_1_3 to serialize to the desired version."""
__author__ = 'kpy@google.com (Ka-Ping Yee) and many other Googlers'
import StringIO
import logging
import os
import re
import xml.sax
import xml.sax.handler
# Possible values for the 'sex' field on a person record.
PERSON_SEX_VALUES = [
    '',  # unspecified
    'female',
    'male',
    'other'
]
# Possible values for the 'pet_type' field on a pet record.
ANIMAL_TYPE_VALUES = [
    'dog',
    'cat',
    'other'
]
# Possible values for the 'size' field on a pet record.
ANIMAL_SIZE_VALUES = [
    'small',
    'medium',
    'large'
]
# Possible values for a pet's tail-length field (exact field name is not
# defined in this module).
TAIL_LENGTH_VALUES = [
    'none',
    'short',
    'medium',
    'long'
]
# Possible values for the 'status' field on a note record.
NOTE_STATUS_VALUES = [
    '',  # unspecified
    'information_sought',
    'is_note_author',
    'believed_alive',
    'believed_missing',
    'believed_dead',
]
# Fields to preserve in a placeholder for an expired record.
PLACEHOLDER_FIELDS = [
    'person_record_id',
    'source_date',
    'entry_date',
    'expiry_date'
]
def xml_escape(s):
    """Make a string safe for inclusion in XML text content.

    Strips characters that XML may not contain (even after entity
    references are expanded; see http://www.w3.org/TR/REC-xml/#charsets),
    then escapes the XML metacharacters.  '&' must be replaced first so the
    entities produced for '<' and '>' are not themselves escaped.

    Bug fix: the replace() calls previously mapped each character to itself
    (the entity text had been lost), so no escaping happened at all.
    """
    s = re.sub(u'[^\x09\x0a\x0d\x20-\ud7ff\ue000-\ufffd]', '', s)
    return s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
class PfifVersion:
    """One version of the PFIF schema: its XML namespace, per-record-type
    field ordering and mandatory fields, plus helpers for writing records as
    PFIF XML and for converting entities to plain dictionaries."""

    def __init__(self, version, ns, fields, mandatory_fields, serializers):
        self.version = version
        self.ns = ns
        # A dict mapping each record type to a list of its fields in order.
        self.fields = fields
        # A dict mapping each record type to a list of its mandatory fields.
        self.mandatory_fields = mandatory_fields
        # A dict mapping field names to serializer functions.
        self.serializers = serializers

    # NOTE: the (ns, local) parameter uses Python 2 tuple-unpacking syntax;
    # this module is Python 2 code.
    def check_tag(self, (ns, local), parent=None):
        """Given a namespace-qualified tag and its parent, returns the PFIF
        type or field name if the tag is valid, or None if the tag is not
        recognized."""
        if ns == self.ns:
            if not parent or local in self.fields[parent]:
                return local

    def write_fields(self, file, type, record, indent=''):
        """Writes PFIF tags for a record's fields."""
        # Empty optional fields are skipped; mandatory fields are always
        # written, even when empty.
        for field in self.fields[type]:
            if record.get(field) or field in self.mandatory_fields[type]:
                escaped_value = xml_escape(record.get(field, ''))
                file.write(indent + '<pfif:%s>%s</pfif:%s>\n' %
                           (field, escaped_value.encode('utf-8'), field))

    def write_person(self, file, person, notes=[], indent=''):
        """Writes PFIF for a person record and a list of its note records."""
        # NOTE(review): the mutable default for 'notes' is safe here because
        # the list is only iterated, never mutated.
        file.write(indent + '<pfif:person>\n')
        self.write_fields(file, 'person', person, indent + ' ')
        for note in notes:
            self.write_note(file, note, indent + ' ')
        file.write(indent + '</pfif:person>\n')

    def write_note(self, file, note, indent=''):
        """Writes PFIF for a note record."""
        file.write(indent + '<pfif:note>\n')
        self.write_fields(file, 'note', note, indent + ' ')
        file.write(indent + '</pfif:note>\n')

    def write_file(self, file, persons, get_notes_for_person=lambda p: []):
        """Takes a list of person records and a function that gets the list
        of note records for each person, and writes PFIF to the given file
        object. Each record is a plain dictionary of strings."""
        file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        file.write('<pfif:pfif xmlns:pfif="%s">\n' % self.ns)
        for person in persons:
            self.write_person(file, person, get_notes_for_person(person), ' ')
        file.write('</pfif:pfif>\n')

    def entity_to_dict(self, entity, fields):
        """Convert an entity to a Python dictionary of Unicode strings."""
        record = {}
        for field in fields:
            if field == 'home_zip' and not hasattr(entity, field):
                # When writing PFIF 1.1, rename home_postal_code to home_zip.
                value = getattr(entity, 'home_postal_code', None)
            else:
                value = getattr(entity, field, None)
            if value:
                # NOTE(review): this uses the module-level SERIALIZERS map
                # rather than self.serializers -- confirm that is intentional.
                record[field] = SERIALIZERS.get(field, nop)(value)
        return record

    def person_to_dict(self, entity, expired=False):
        """Convert a person entity to a dict; when expired, keep only the
        fields PLACEHOLDER_FIELDS lists for an expired-record placeholder."""
        dict = self.entity_to_dict(entity, self.fields['person'])
        if expired:  # Clear all fields except those needed for the placeholder.
            for field in set(dict.keys()) - set(PLACEHOLDER_FIELDS):
                del dict[field]
        return dict

    def note_to_dict(self, entity):
        """Convert a note entity to a dict of Unicode strings."""
        return self.entity_to_dict(entity, self.fields['note'])
# Serializers that convert Python values to PFIF strings.
def nop(value):
    """Identity serializer: returns the value unchanged."""
    return value
def format_boolean(value):
    """Serialize a truthy value as 'true' and a falsy one as 'false'."""
    return 'true' if value else 'false'
def format_utc_datetime(dt):
    """Serialize a datetime as an ISO 8601 UTC timestamp, or '' if dt is falsy.

    Microseconds are dropped and a literal 'Z' suffix marks UTC.
    """
    if not dt:
        return ''
    return dt.replace(microsecond=0).isoformat() + 'Z'
# Fields not listed here are passed through unchanged (see nop, used as the
# default in PfifVersion.entity_to_dict).
SERIALIZERS = { # Serialization functions (for fields that need conversion).
    'found': format_boolean,
    'source_date': format_utc_datetime,
    'entry_date': format_utc_datetime,
    'expiry_date': format_utc_datetime
}
# Schema definition for PFIF 1.1 (see http://zesty.ca/pfif/).
PFIF_1_1 = PfifVersion(
    '1.1',
    'http://zesty.ca/pfif/1.1',
    {
        'person': [  # Fields of a <person> element, in PFIF 1.1 standard order.
            'person_record_id',
            'entry_date',
            'author_name',
            'author_email',
            'author_phone',
            'source_name',
            'source_date',
            'source_url',
            'first_name',
            'last_name',
            'home_city',
            'home_state',
            'home_neighborhood',
            'home_street',
            'home_zip',
            'photo_url',
            'other',
        ],
        'note': [  # Fields of a <note> element, in PFIF 1.1 standard order.
            'note_record_id',
            'entry_date',
            'author_name',
            'author_email',
            'author_phone',
            'source_date',
            'found',
            'email_of_found_person',
            'phone_of_found_person',
            'last_known_location',
            'text',
        ]
    },
    {
        'person': ['person_record_id', 'first_name', 'last_name'],
        'note': ['note_record_id', 'author_name', 'source_date', 'text'],
    },
    SERIALIZERS)
# Schema definition for PFIF 1.2: adds sex/date_of_birth/age and home_country
# to <person>, and person linkage plus status to <note>.
PFIF_1_2 = PfifVersion(
    '1.2',
    'http://zesty.ca/pfif/1.2',
    {
        'person': [  # Fields of a <person> element in PFIF 1.2.
            'person_record_id',
            'entry_date',
            'author_name',
            'author_email',
            'author_phone',
            'source_name',
            'source_date',
            'source_url',
            'first_name',
            'last_name',
            'sex',
            'date_of_birth',
            'age',
            'home_street',
            'home_neighborhood',
            'home_city',
            'home_state',
            'home_postal_code',
            'home_country',
            'photo_url',
            'other',
        ],
        'note': [  # Fields of a <note> element in PFIF 1.2.
            'note_record_id',
            'person_record_id',
            'linked_person_record_id',
            'entry_date',
            'author_name',
            'author_email',
            'author_phone',
            'source_date',
            'found',
            'status',
            'email_of_found_person',
            'phone_of_found_person',
            'last_known_location',
            'text',
        ]
    },
    {
        'person': ['person_record_id', 'first_name', 'last_name'],
        'note': ['note_record_id', 'author_name', 'source_date', 'text'],
    },
    SERIALIZERS)
# Schema definition for PFIF 1.3: adds expiry_date and full_name to <person>,
# and full_name (not first/last) becomes the mandatory name field.
PFIF_1_3 = PfifVersion(
    '1.3',
    'http://zesty.ca/pfif/1.3',
    {
        'person': [  # Fields of a <person> element in PFIF 1.3.
            'person_record_id',
            'entry_date',
            'expiry_date',
            'author_name',
            'author_email',
            'author_phone',
            'source_name',
            'source_date',
            'source_url',
            'full_name',
            'first_name',
            'last_name',
            'sex',
            'date_of_birth',
            'age',
            'home_street',
            'home_neighborhood',
            'home_city',
            'home_state',
            'home_postal_code',
            'home_country',
            'photo_url',
            'other',
        ],
        'note': [  # Fields of a <note> element in PFIF 1.3.
            'note_record_id',
            'person_record_id',
            'linked_person_record_id',
            'entry_date',
            'author_name',
            'author_email',
            'author_phone',
            'source_date',
            'found',
            'status',
            'email_of_found_person',
            'phone_of_found_person',
            'last_known_location',
            'text',
        ]
    },
    {
        'person': ['person_record_id', 'full_name'],
        'note': ['note_record_id', 'author_name', 'source_date', 'text'],
    },
    SERIALIZERS)
# Registry of supported schema versions, keyed by version string.
PFIF_VERSIONS = {
    '1.1': PFIF_1_1,
    '1.2': PFIF_1_2,
    '1.3': PFIF_1_3
}
# Version used when a caller does not specify one explicitly.
PFIF_DEFAULT_VERSION = '1.2'
# Sanity check: the default must be one of the registered versions.
assert PFIF_DEFAULT_VERSION in PFIF_VERSIONS
def check_pfif_tag(name, parent=None):
    """Recognizes a PFIF XML tag from any supported version of PFIF.

    Tries the newest schema first and returns the first match, or None if
    no version recognizes the tag.
    """
    for version in (PFIF_1_3, PFIF_1_2, PFIF_1_1):
        result = version.check_tag(name, parent)
        if result:
            return result
    return None
def split_first_last_name(all_names):
    """Attempt to extract a last name for a person from a multi-first-name.

    Returns a (first_names, last_name) pair when more than one word remains
    after stripping parenthesised text and digits; otherwise returns None.
    """
    # Drop parenthesised annotations (closed or unclosed) and digits.
    cleaned = re.sub(r'\(.*\)', ' ', all_names)
    cleaned = re.sub(r'\(\S*', ' ', cleaned)
    cleaned = re.sub(r'\d', '', cleaned)
    if len(cleaned.split()) > 1:
        # The last whitespace-separated token (allowing a hyphenated pair)
        # is taken as the surname.
        match = re.search(r' (\S*(-+ | -+|-+)?\S+)\s*$', cleaned)
        last_name = match.group(1).strip()
        return all_names.replace(last_name, ''), last_name.replace(' ', '')
    return None
class Handler(xml.sax.handler.ContentHandler):
    """SAX event handler that accumulates PFIF person and note records."""

    def __init__(self):
        self.tags = []             # Stack of currently open element tags.
        self.person = {}           # Person record currently being filled in.
        self.note = {}             # Note record currently being filled in.
        self.enclosed_notes = []   # Notes enclosed by the current <person>.
        self.person_records = []
        self.note_records = []

    def startElementNS(self, tag, qname, attrs):
        self.tags.append(tag)
        kind = check_pfif_tag(tag)
        if kind == 'person':
            self.person = {}
            self.enclosed_notes = []
        elif kind == 'note':
            self.note = {}

    def endElementNS(self, tag, qname):
        assert self.tags.pop() == tag
        kind = check_pfif_tag(tag)
        if kind == 'person':
            self.person_records.append(self.person)
            if 'person_record_id' in self.person:
                # Propagate the person's id to any notes nested inside it.
                record_id = self.person['person_record_id']
                for note in self.enclosed_notes:
                    note['person_record_id'] = record_id
        elif kind == 'note':
            # Keep every parsed note, whether or not nested in a <person>.
            self.note_records.append(self.note)
            self.enclosed_notes.append(self.note)

    def append_to_field(self, record, tag, parent, content):
        """Accumulate character data into the field that tag names."""
        field = check_pfif_tag(tag, parent)
        if field:
            record[field] = record.get(field, u'') + content
        elif content.strip():
            logging.warn('ignored tag %r with content %r', tag, content)

    def characters(self, content):
        # Character data only matters directly inside a field element whose
        # parent is a <person> or <note>.
        if content and len(self.tags) >= 2:
            parent, tag = self.tags[-2], self.tags[-1]
            parent_kind = check_pfif_tag(parent)
            if parent_kind == 'person':
                self.append_to_field(self.person, tag, 'person', content)
            elif parent_kind == 'note':
                self.append_to_field(self.note, tag, 'note', content)
def parse_file(pfif_utf8_file):
    """Reads a UTF-8-encoded PFIF file to give a list of person records and a
    list of note records.  Each record is a plain dictionary of strings."""
    handler = Handler()
    sax_parser = xml.sax.make_parser()
    sax_parser.setContentHandler(handler)
    # Namespace processing must be on so tags arrive as (ns, local) pairs.
    sax_parser.setFeature(xml.sax.handler.feature_namespaces, True)
    sax_parser.parse(pfif_utf8_file)
    return handler.person_records, handler.note_records
def parse(pfif_text):
    """Takes the text of a PFIF document, as a Unicode string or UTF-8 string,
    and returns a list of person records and a list of note records.  Each
    record is a plain dictionary of strings."""
    if isinstance(pfif_text, unicode):
        # Bug fix: the SAX parser expects UTF-8 bytes, so a Unicode string
        # must be *encoded*.  The previous decode() call would implicitly
        # ASCII-encode a unicode object first and raise UnicodeEncodeError
        # on any non-ASCII character (Python 2 semantics).
        pfif_text = pfif_text.encode('utf-8')
    return parse_file(StringIO.StringIO(pfif_text))
| {
"content_hash": "4d9faacb28c29cfb5279974d9f1d1d34",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 80,
"avg_line_length": 33.065,
"alnum_prop": 0.5514894903977015,
"repo_name": "pet-finder/pet-finder",
"id": "e63088cb9a774e71558de509a1302820cd1d5c9e",
"size": "13824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/pfif.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "8155"
},
{
"name": "Python",
"bytes": "649173"
},
{
"name": "Shell",
"bytes": "1665"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
    """Renames the JIRA 'issue_template' fields to 'issue_template_dir' and
    adds the Finding_Group model, with an optional one-to-one link from
    JIRA_Issue."""

    dependencies = [
        ('dojo', '0085_add_publish_date_cvssv3_score'),
    ]

    operations = [
        # Rename on the instance-level JIRA settings model.
        migrations.RenameField(
            model_name='jira_instance',
            old_name='issue_template',
            new_name='issue_template_dir',
        ),
        # Same rename on the per-project JIRA settings model.
        migrations.RenameField(
            model_name='jira_project',
            old_name='issue_template',
            new_name='issue_template_dir',
        ),
        # New model: a named group of findings with a creator and a test.
        migrations.CreateModel(
            name='Finding_Group',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('name', models.CharField(max_length=255)),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dojo.Dojo_User')),
                ('findings', models.ManyToManyField(to='dojo.Finding')),
                ('test', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dojo.Test')),
            ],
            options={
                'ordering': ['id'],
            },
        ),
        # Optional (nullable) link from a JIRA issue to a finding group.
        migrations.AddField(
            model_name='jira_issue',
            name='finding_group',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='dojo.Finding_Group'),
        ),
    ]
| {
"content_hash": "784fb773986e64738e05bc69c808cbf5",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 132,
"avg_line_length": 40.348837209302324,
"alnum_prop": 0.5850144092219021,
"repo_name": "rackerlabs/django-DefectDojo",
"id": "476317178b1e8f0adf72bd24f70ec4dcf16775ea",
"size": "1785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dojo/db_migrations/0086_finding_groups.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18132"
},
{
"name": "Groff",
"bytes": "91"
},
{
"name": "HTML",
"bytes": "666571"
},
{
"name": "JavaScript",
"bytes": "6393"
},
{
"name": "Python",
"bytes": "524728"
},
{
"name": "Shell",
"bytes": "20558"
},
{
"name": "XSLT",
"bytes": "6624"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class MaintenanceConfig(AppConfig):
    """Django application configuration for the 'maintenance' app."""
    name = 'maintenance'
| {
"content_hash": "505fddd430fe7dbc1c2bbc842ee53fa3",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 35,
"avg_line_length": 19.4,
"alnum_prop": 0.7731958762886598,
"repo_name": "edisondotme/motoPi",
"id": "36cf6806cccd7dd3314479dd8b54cae63a274872",
"size": "97",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maintenance/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "203"
},
{
"name": "HTML",
"bytes": "3118"
},
{
"name": "JavaScript",
"bytes": "15087"
},
{
"name": "Python",
"bytes": "14204"
}
],
"symlink_target": ""
} |
"""Handles ConsoleProxy API requests."""
from nova.compute import rpcapi as compute_rpcapi
from nova.console import rpcapi as console_rpcapi
from nova.db import base
from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova.openstack.common import uuidutils
CONF = cfg.CONF
# Make sure the 'console_topic' option is registered before it is read below.
CONF.import_opt('console_topic', 'nova.console.rpcapi')
class API(base.Base):
    """API for spinning up or down console proxy connections."""

    def __init__(self, **kwargs):
        super(API, self).__init__(**kwargs)

    def get_consoles(self, context, instance_uuid):
        """Return all console records for the given instance."""
        return self.db.console_get_all_by_instance(context, instance_uuid)

    def get_console(self, context, instance_uuid, console_uuid):
        """Return a single console record for the given instance."""
        return self.db.console_get(context, console_uuid, instance_uuid)

    def delete_console(self, context, instance_uuid, console_uuid):
        """Remove a console via the console service on its pool's host."""
        console = self.db.console_get(context, console_uuid, instance_uuid)
        topic = rpc.queue_get_for(context, CONF.console_topic,
                                  console['pool']['host'])
        rpcapi = console_rpcapi.ConsoleAPI(topic=topic)
        rpcapi.remove_console(context, console['id'])

    def create_console(self, context, instance_uuid):
        # NOTE(mdragon): If we wanted to return the console info here, we
        #                would need to do a call.  Callers can just do an
        #                index later to fetch console info.
        instance = self._get_instance(context, instance_uuid)
        # Bug fix: a stray trailing comma previously made 'topic' a
        # one-element tuple instead of the topic string.
        topic = self._get_console_topic(context, instance['host'])
        rpcapi = console_rpcapi.ConsoleAPI(topic=topic)
        rpcapi.add_console(context, instance['id'])

    def _get_console_topic(self, context, instance_host):
        """Ask the compute service for the console topic of a host."""
        rpcapi = compute_rpcapi.ComputeAPI()
        return rpcapi.get_console_topic(context, instance_host)

    def _get_instance(self, context, instance_uuid):
        """Look up an instance by UUID, or by integer id as a fallback."""
        if uuidutils.is_uuid_like(instance_uuid):
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
        else:
            instance = self.db.instance_get(context, instance_uuid)
        return instance

    def get_backdoor_port(self, context, host):
        """Return the backdoor port reported by the console service on host."""
        topic = self._get_console_topic(context, host)
        rpcapi = console_rpcapi.ConsoleAPI(topic=topic)
        return rpcapi.get_backdoor_port(context, host)
| {
"content_hash": "3c64e20c08e9cd471c5fb34643ed7a0c",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 75,
"avg_line_length": 41.8448275862069,
"alnum_prop": 0.6571899464359291,
"repo_name": "fajoy/nova",
"id": "57c5cb0e3e8db114bd1fb2debd6a60cf6085d4ea",
"size": "3107",
"binary": false,
"copies": "2",
"ref": "refs/heads/grizzly-2",
"path": "nova/console/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7567423"
},
{
"name": "Shell",
"bytes": "15428"
}
],
"symlink_target": ""
} |
"""Defines utility functions."""
from typing import Iterable, Union, Optional, Any
from jax.tree_util import register_pytree_node
from jax.lib import xla_bridge
import jax.numpy as jnp
from jax import jit
from functools import partial
import numpy as onp
# Convenience aliases used throughout the module.
Array = jnp.ndarray
PyTree = Any
# Short dtype aliases (Google/JAX-MD naming convention).
i16 = jnp.int16
i32 = jnp.int32
i64 = jnp.int64
f32 = jnp.float32
f64 = jnp.float64
# Registry of user-defined simulation types; populated at runtime via
# register_custom_simulation_type().
CUSTOM_SIMULATION_TYPE = []
def register_custom_simulation_type(t: Any):
  """Record `t` in the module-level registry of custom simulation types."""
  # list.append mutates the shared registry in place; no rebinding needed.
  CUSTOM_SIMULATION_TYPE.append(t)
def check_custom_simulation_type(x: Any) -> bool:
  """Raise ValueError if `x` is a registered custom simulation type.

  Returns:
    False when `x` is not a registered custom type. (Previously the function
    implicitly returned None despite its `-> bool` annotation.)

  Raises:
    ValueError: if type(x) was registered via register_custom_simulation_type.
  """
  if type(x) in CUSTOM_SIMULATION_TYPE:
    # Include the offending type name; a bare ValueError() is undiagnosable.
    raise ValueError(
        'Unexpected custom simulation type: {}.'.format(type(x).__name__))
  return False
def static_cast(*xs):
  """Cast each argument to the lowest dtype that can express it.

  Returns a generator of jnp arrays, one per argument.
  """
  # NOTE(schsam): static_cast is so named because it cannot be jit.
  on_tpu = xla_bridge.get_backend().platform == 'tpu'
  if on_tpu:
    # TPUs work in float32; skip the min-scalar-type search entirely.
    return (jnp.array(x, jnp.float32) for x in xs)
  return (jnp.array(x, dtype=onp.min_scalar_type(x)) for x in xs)
def register_pytree_namedtuple(cls):
  """Register a namedtuple class as a JAX pytree node.

  Children are the tuple fields; there is no auxiliary data.
  """
  def flatten(xs):
    return tuple(xs), None

  def unflatten(_, children):
    return cls(*children)

  register_pytree_node(cls, flatten, unflatten)
def merge_dicts(a, b, ignore_unused_parameters=False):
  """Merge `b` into `a`, optionally ignoring keys that `a` does not define.

  With ignore_unused_parameters=False this is a plain right-biased merge.
  Otherwise only keys already present in `a` are overwritten, and only when
  the corresponding value in `b` is not None.
  """
  if not ignore_unused_parameters:
    return {**a, **b}
  return {k: b[k] if b.get(k) is not None else v for k, v in a.items()}
@partial(jit, static_argnums=(1,))
def safe_mask(mask, fn, operand, placeholder=0):
  """Apply `fn` only where `mask` is set; fill the rest with `placeholder`.

  The operand is zeroed outside the mask before calling `fn`, so `fn` never
  sees the masked-out entries.
  """
  guarded = jnp.where(mask, operand, 0)
  return jnp.where(mask, fn(guarded), placeholder)
def high_precision_sum(X: Array,
                       axis: Optional[Union[Iterable[int], int]]=None,
                       keepdims: bool=False):
  """Sums over axes at 64-bit precision then casts back to original dtype."""
  dtype_in = X.dtype
  # Pick the widest accumulator in the same dtype family as the input.
  if jnp.issubdtype(dtype_in, jnp.complexfloating):
    acc_dtype = jnp.complex128
  elif jnp.issubdtype(dtype_in, jnp.integer):
    acc_dtype = jnp.int64
  else:
    acc_dtype = jnp.float64
  total = jnp.sum(X, axis=axis, dtype=acc_dtype, keepdims=keepdims)
  return jnp.array(total, dtype=dtype_in)
def maybe_downcast(x):
  """Convert `x` to float32 unless it is already a float64 jax array.

  Presumably float64 arrays were produced deliberately (x64 mode), so they
  are returned untouched; everything else is cast to f32.
  """
  # BUGFIX-ish: dtypes must be compared with `==`, not `is`; identity of
  # dtype objects is an implementation detail and not guaranteed.
  if isinstance(x, jnp.ndarray) and x.dtype == jnp.dtype('float64'):
    return x
  return jnp.array(x, f32)
def is_array(x: Any) -> bool:
  """Return True when `x` is either a jax or a numpy ndarray."""
  return isinstance(x, jnp.ndarray) or isinstance(x, onp.ndarray)
| {
"content_hash": "f4993e5a29251abed756f6975d46b45f",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 77,
"avg_line_length": 24.06382978723404,
"alnum_prop": 0.6741821396993811,
"repo_name": "google/jax-md",
"id": "57f901cc5b731746ff8e3f69127af33041d565e9",
"size": "2838",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jax_md/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "32973"
},
{
"name": "Jupyter Notebook",
"bytes": "6632451"
},
{
"name": "Python",
"bytes": "750714"
}
],
"symlink_target": ""
} |
class PaleBaseResponse(object):
    """Base response carrying a message and a (body, status, headers) triple."""

    def __init__(self, *args):
        super(PaleBaseResponse, self).__init__(*args)
        # First positional argument, if any, becomes the message.
        self.message = args[0] if args else "i am a teapot"

    @property
    def response(self):
        """Return the (body, http_status, headers) tuple for this response.

        Subclasses override the values by defining `response_body`,
        `http_status_code` and/or `headers`; defaults are 418/teapot.
        """
        return (
            getattr(self, 'response_body', "i am a teapot"),
            getattr(self, 'http_status_code', 418),
            getattr(self, 'headers', None),
        )
class PaleRaisedResponse(PaleBaseResponse, Exception):
    """A response that can be raised as an exception to short-circuit handling."""
    pass
class RedirectFound(PaleRaisedResponse):
    """A raisable HTTP 302 redirect response."""

    http_status_code = 302
    response_body = ""

    def __init__(self, redirect_url):
        self.redirect_url = redirect_url
        message = "Redirect to `%s`" % redirect_url
        super(RedirectFound, self).__init__(message)

    @property
    def headers(self):
        """Single Location header pointing at the redirect target."""
        return [('Location', self.redirect_url)]
| {
"content_hash": "70683ba053b8fc2d552bf756cec3fb30",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 78,
"avg_line_length": 29.193548387096776,
"alnum_prop": 0.6143646408839779,
"repo_name": "Loudr/pale",
"id": "8b586635e0c0ed69a320f0aae754ee3d0858dacf",
"size": "905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pale/response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "175823"
},
{
"name": "Vim script",
"bytes": "49"
}
],
"symlink_target": ""
} |
"""
Climate on Zigbee Home Automation networks.
For more details on this platform, please refer to the documentation
at https://home-assistant.io/components/zha.climate/
"""
from datetime import datetime, timedelta
import enum
import functools
import logging
from random import randint
from typing import List, Optional, Tuple
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
DOMAIN,
FAN_AUTO,
FAN_ON,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import ATTR_TEMPERATURE, PRECISION_HALVES, TEMP_CELSIUS
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.event import async_track_time_interval
import homeassistant.util.dt as dt_util
from .core import discovery
from .core.const import (
CHANNEL_FAN,
CHANNEL_THERMOSTAT,
DATA_ZHA,
DATA_ZHA_DISPATCHERS,
SIGNAL_ADD_ENTITIES,
SIGNAL_ATTR_UPDATED,
)
from .core.registries import ZHA_ENTITIES
from .entity import ZhaEntity
DEPENDENCIES = ["zha"]

# Extra state attribute keys exposed via Thermostat.device_state_attributes.
ATTR_SYS_MODE = "system_mode"
ATTR_RUNNING_MODE = "running_mode"
ATTR_SETPT_CHANGE_SRC = "setpoint_change_source"
ATTR_SETPT_CHANGE_AMT = "setpoint_change_amount"
ATTR_OCCUPANCY = "occupancy"
ATTR_PI_COOLING_DEMAND = "pi_cooling_demand"
ATTR_PI_HEATING_DEMAND = "pi_heating_demand"
ATTR_OCCP_COOL_SETPT = "occupied_cooling_setpoint"
ATTR_OCCP_HEAT_SETPT = "occupied_heating_setpoint"
ATTR_UNOCCP_HEAT_SETPT = "unoccupied_heating_setpoint"
ATTR_UNOCCP_COOL_SETPT = "unoccupied_cooling_setpoint"

# Decorator that registers an entity class for exact channel/manufacturer
# matches in the ZHA entity registry.
STRICT_MATCH = functools.partial(ZHA_ENTITIES.strict_match, DOMAIN)

RUNNING_MODE = {0x00: HVAC_MODE_OFF, 0x03: HVAC_MODE_COOL, 0x04: HVAC_MODE_HEAT}
class ThermostatFanMode(enum.IntEnum):
    """Fan channel enum for thermostat Fans."""

    OFF = 0x00
    ON = 0x04
    AUTO = 0x05
class RunningState(enum.IntFlag):
    """ZCL Running state enum."""

    # Bitmask values; multiple flags may be set simultaneously.
    HEAT = 0x0001
    COOL = 0x0002
    FAN = 0x0004
    HEAT_STAGE_2 = 0x0008
    COOL_STAGE_2 = 0x0010
    FAN_STAGE_2 = 0x0020
    FAN_STAGE_3 = 0x0040
# Maps the thermostat's ctrl_seqe_of_oper attribute value to the tuple of
# HVAC modes offered to Home Assistant (see Thermostat.hvac_modes).
SEQ_OF_OPERATION = {
    0x00: (HVAC_MODE_OFF, HVAC_MODE_COOL),  # cooling only
    0x01: (HVAC_MODE_OFF, HVAC_MODE_COOL),  # cooling with reheat
    0x02: (HVAC_MODE_OFF, HVAC_MODE_HEAT),  # heating only
    0x03: (HVAC_MODE_OFF, HVAC_MODE_HEAT),  # heating with reheat
    # cooling and heating 4-pipes
    0x04: (HVAC_MODE_OFF, HVAC_MODE_HEAT_COOL, HVAC_MODE_COOL, HVAC_MODE_HEAT),
    # cooling and heating 4-pipes
    0x05: (HVAC_MODE_OFF, HVAC_MODE_HEAT_COOL, HVAC_MODE_COOL, HVAC_MODE_HEAT),
    0x06: (HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF),  # centralite specific
    0x07: (HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF),  # centralite specific
}
class SystemMode(enum.IntEnum):
    """ZCL System Mode attribute enum."""

    OFF = 0x00
    HEAT_COOL = 0x01
    COOL = 0x03
    HEAT = 0x04
    AUX_HEAT = 0x05
    PRE_COOL = 0x06
    FAN_ONLY = 0x07
    DRY = 0x08
    SLEEP = 0x09
# Forward map: Home Assistant HVAC mode -> ZCL SystemMode value.
HVAC_MODE_2_SYSTEM = {
    HVAC_MODE_OFF: SystemMode.OFF,
    HVAC_MODE_HEAT_COOL: SystemMode.HEAT_COOL,
    HVAC_MODE_COOL: SystemMode.COOL,
    HVAC_MODE_HEAT: SystemMode.HEAT,
    HVAC_MODE_FAN_ONLY: SystemMode.FAN_ONLY,
    HVAC_MODE_DRY: SystemMode.DRY,
}
# Reverse map: ZCL SystemMode -> Home Assistant HVAC mode.
# Not a strict inverse: several system modes collapse onto the same HVAC mode.
SYSTEM_MODE_2_HVAC = {
    SystemMode.OFF: HVAC_MODE_OFF,
    SystemMode.HEAT_COOL: HVAC_MODE_HEAT_COOL,
    SystemMode.COOL: HVAC_MODE_COOL,
    SystemMode.HEAT: HVAC_MODE_HEAT,
    SystemMode.AUX_HEAT: HVAC_MODE_HEAT,
    SystemMode.PRE_COOL: HVAC_MODE_COOL,  # this is 'precooling'. is it the same?
    SystemMode.FAN_ONLY: HVAC_MODE_FAN_ONLY,
    SystemMode.DRY: HVAC_MODE_DRY,
    SystemMode.SLEEP: HVAC_MODE_OFF,
}
# ZCL thermostat attributes encode temperatures in hundredths of a degree
# (values are divided/multiplied by ZCL_TEMP throughout this module).
ZCL_TEMP = 100
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Zigbee Home Automation climate entities from config entry."""
    entities_to_create = hass.data[DATA_ZHA][DOMAIN]
    # Entity creation is deferred to the discovery dispatcher; keep the
    # unsubscribe callback so it can be cleaned up on unload.
    unsub = async_dispatcher_connect(
        hass,
        SIGNAL_ADD_ENTITIES,
        functools.partial(
            discovery.async_add_entities, async_add_entities, entities_to_create
        ),
    )
    hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS].append(unsub)
@STRICT_MATCH(channel_names=CHANNEL_THERMOSTAT, aux_channels=CHANNEL_FAN)
class Thermostat(ZhaEntity, ClimateEntity):
    """Representation of a ZHA Thermostat device."""

    DEFAULT_MAX_TEMP = 35
    DEFAULT_MIN_TEMP = 7

    _domain = DOMAIN
    value_attribute = 0x0000

    def __init__(self, unique_id, zha_device, channels, **kwargs):
        """Initialize ZHA Thermostat instance."""
        super().__init__(unique_id, zha_device, channels, **kwargs)
        self._thrm = self.cluster_channels.get(CHANNEL_THERMOSTAT)
        self._preset = PRESET_NONE
        self._presets = []
        self._supported_flags = SUPPORT_TARGET_TEMPERATURE
        self._fan = self.cluster_channels.get(CHANNEL_FAN)

    @property
    def current_temperature(self):
        """Return the current temperature."""
        if self._thrm.local_temp is None:
            return None
        return self._thrm.local_temp / ZCL_TEMP

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        data = {}
        if self.hvac_mode:
            mode = SYSTEM_MODE_2_HVAC.get(self._thrm.system_mode, "unknown")
            data[ATTR_SYS_MODE] = f"[{self._thrm.system_mode}]/{mode}"
        if self._thrm.occupancy is not None:
            data[ATTR_OCCUPANCY] = self._thrm.occupancy
        if self._thrm.occupied_cooling_setpoint is not None:
            data[ATTR_OCCP_COOL_SETPT] = self._thrm.occupied_cooling_setpoint
        if self._thrm.occupied_heating_setpoint is not None:
            data[ATTR_OCCP_HEAT_SETPT] = self._thrm.occupied_heating_setpoint
        if self._thrm.pi_heating_demand is not None:
            data[ATTR_PI_HEATING_DEMAND] = self._thrm.pi_heating_demand
        if self._thrm.pi_cooling_demand is not None:
            data[ATTR_PI_COOLING_DEMAND] = self._thrm.pi_cooling_demand
        unoccupied_cooling_setpoint = self._thrm.unoccupied_cooling_setpoint
        if unoccupied_cooling_setpoint is not None:
            # BUGFIX: this value was previously stored under the *heating*
            # setpoint attribute key.
            data[ATTR_UNOCCP_COOL_SETPT] = unoccupied_cooling_setpoint
        unoccupied_heating_setpoint = self._thrm.unoccupied_heating_setpoint
        if unoccupied_heating_setpoint is not None:
            # BUGFIX: this value was previously stored under the *cooling*
            # setpoint attribute key.
            data[ATTR_UNOCCP_HEAT_SETPT] = unoccupied_heating_setpoint
        return data

    @property
    def fan_mode(self) -> Optional[str]:
        """Return current FAN mode."""
        if self._thrm.running_state is None:
            return FAN_AUTO
        if self._thrm.running_state & (
            RunningState.FAN | RunningState.FAN_STAGE_2 | RunningState.FAN_STAGE_3
        ):
            return FAN_ON
        return FAN_AUTO

    @property
    def fan_modes(self) -> Optional[List[str]]:
        """Return supported FAN modes."""
        if not self._fan:
            return None
        return [FAN_AUTO, FAN_ON]

    @property
    def hvac_action(self) -> Optional[str]:
        """Return the current HVAC action."""
        # Prefer the pi_demand-based heuristic when the device reports
        # heating/cooling demand; fall back to running mode/state otherwise.
        if (
            self._thrm.pi_heating_demand is None
            and self._thrm.pi_cooling_demand is None
        ):
            return self._rm_rs_action
        return self._pi_demand_action

    @property
    def _rm_rs_action(self) -> Optional[str]:
        """Return the current HVAC action based on running mode and running state."""
        running_mode = self._thrm.running_mode
        if running_mode == SystemMode.HEAT:
            return CURRENT_HVAC_HEAT
        if running_mode == SystemMode.COOL:
            return CURRENT_HVAC_COOL
        running_state = self._thrm.running_state
        if running_state and running_state & (
            RunningState.FAN | RunningState.FAN_STAGE_2 | RunningState.FAN_STAGE_3
        ):
            return CURRENT_HVAC_FAN
        if self.hvac_mode != HVAC_MODE_OFF and running_mode == SystemMode.OFF:
            return CURRENT_HVAC_IDLE
        return CURRENT_HVAC_OFF

    @property
    def _pi_demand_action(self) -> Optional[str]:
        """Return the current HVAC action based on pi_demands."""
        heating_demand = self._thrm.pi_heating_demand
        if heating_demand is not None and heating_demand > 0:
            return CURRENT_HVAC_HEAT
        cooling_demand = self._thrm.pi_cooling_demand
        if cooling_demand is not None and cooling_demand > 0:
            return CURRENT_HVAC_COOL
        if self.hvac_mode != HVAC_MODE_OFF:
            return CURRENT_HVAC_IDLE
        return CURRENT_HVAC_OFF

    @property
    def hvac_mode(self) -> Optional[str]:
        """Return HVAC operation mode."""
        return SYSTEM_MODE_2_HVAC.get(self._thrm.system_mode)

    @property
    def hvac_modes(self) -> Tuple[str, ...]:
        """Return the list of available HVAC operation modes."""
        return SEQ_OF_OPERATION.get(self._thrm.ctrl_seqe_of_oper, (HVAC_MODE_OFF,))

    @property
    def precision(self):
        """Return the precision of the system."""
        return PRECISION_HALVES

    @property
    def preset_mode(self) -> Optional[str]:
        """Return current preset mode."""
        return self._preset

    @property
    def preset_modes(self) -> Optional[List[str]]:
        """Return supported preset modes."""
        return self._presets

    @property
    def supported_features(self):
        """Return the list of supported features."""
        features = self._supported_flags
        if HVAC_MODE_HEAT_COOL in self.hvac_modes:
            features |= SUPPORT_TARGET_TEMPERATURE_RANGE
        if self._fan is not None:
            # BUGFIX: previously self._supported_flags was mutated instead of
            # the local copy, so the first call returned a value without the
            # fan flag.
            features |= SUPPORT_FAN_MODE
        return features

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        temp = None
        if self.hvac_mode == HVAC_MODE_COOL:
            if self.preset_mode == PRESET_AWAY:
                temp = self._thrm.unoccupied_cooling_setpoint
            else:
                temp = self._thrm.occupied_cooling_setpoint
        elif self.hvac_mode == HVAC_MODE_HEAT:
            if self.preset_mode == PRESET_AWAY:
                temp = self._thrm.unoccupied_heating_setpoint
            else:
                temp = self._thrm.occupied_heating_setpoint
        if temp is None:
            return temp
        return round(temp / ZCL_TEMP, 1)

    @property
    def target_temperature_high(self):
        """Return the upper bound temperature we try to reach."""
        if self.hvac_mode != HVAC_MODE_HEAT_COOL:
            return None
        if self.preset_mode == PRESET_AWAY:
            temp = self._thrm.unoccupied_cooling_setpoint
        else:
            temp = self._thrm.occupied_cooling_setpoint
        if temp is None:
            return temp
        return round(temp / ZCL_TEMP, 1)

    @property
    def target_temperature_low(self):
        """Return the lower bound temperature we try to reach."""
        if self.hvac_mode != HVAC_MODE_HEAT_COOL:
            return None
        if self.preset_mode == PRESET_AWAY:
            temp = self._thrm.unoccupied_heating_setpoint
        else:
            temp = self._thrm.occupied_heating_setpoint
        if temp is None:
            return temp
        return round(temp / ZCL_TEMP, 1)

    @property
    def temperature_unit(self):
        """Return the unit of measurement used by the platform."""
        return TEMP_CELSIUS

    @property
    def max_temp(self) -> float:
        """Return the maximum temperature."""
        temps = []
        if HVAC_MODE_HEAT in self.hvac_modes:
            temps.append(self._thrm.max_heat_setpoint_limit)
        if HVAC_MODE_COOL in self.hvac_modes:
            temps.append(self._thrm.max_cool_setpoint_limit)
        if not temps:
            return self.DEFAULT_MAX_TEMP
        return round(max(temps) / ZCL_TEMP, 1)

    @property
    def min_temp(self) -> float:
        """Return the minimum temperature."""
        temps = []
        if HVAC_MODE_HEAT in self.hvac_modes:
            temps.append(self._thrm.min_heat_setpoint_limit)
        if HVAC_MODE_COOL in self.hvac_modes:
            temps.append(self._thrm.min_cool_setpoint_limit)
        if not temps:
            return self.DEFAULT_MIN_TEMP
        return round(min(temps) / ZCL_TEMP, 1)

    async def async_added_to_hass(self):
        """Run when about to be added to hass."""
        await super().async_added_to_hass()
        self.async_accept_signal(
            self._thrm, SIGNAL_ATTR_UPDATED, self.async_attribute_updated
        )

    async def async_attribute_updated(self, record):
        """Handle attribute update from device."""
        if (
            record.attr_name in (ATTR_OCCP_COOL_SETPT, ATTR_OCCP_HEAT_SETPT)
            and self.preset_mode == PRESET_AWAY
        ):
            # occupancy attribute is an unreportable attribute, but if we get
            # an attribute update for an "occupied" setpoint, there's a chance
            # occupancy has changed
            occupancy = await self._thrm.get_occupancy()
            if occupancy is True:
                self._preset = PRESET_NONE
        self.debug("Attribute '%s' = %s update", record.attr_name, record.value)
        self.async_write_ha_state()

    async def async_set_fan_mode(self, fan_mode: str) -> None:
        """Set fan mode."""
        if fan_mode not in self.fan_modes:
            self.warning("Unsupported '%s' fan mode", fan_mode)
            return
        if fan_mode == FAN_ON:
            mode = ThermostatFanMode.ON
        else:
            mode = ThermostatFanMode.AUTO
        await self._fan.async_set_speed(mode)

    async def async_set_hvac_mode(self, hvac_mode: str) -> None:
        """Set new target operation mode."""
        if hvac_mode not in self.hvac_modes:
            self.warning(
                "can't set '%s' mode. Supported modes are: %s",
                hvac_mode,
                self.hvac_modes,
            )
            return
        if await self._thrm.async_set_operation_mode(HVAC_MODE_2_SYSTEM[hvac_mode]):
            self.async_write_ha_state()

    async def async_set_preset_mode(self, preset_mode: str) -> None:
        """Set new preset mode."""
        if preset_mode not in self.preset_modes:
            self.debug("preset mode '%s' is not supported", preset_mode)
            return
        # Turn the currently-active preset off before enabling a new one.
        if self.preset_mode not in (preset_mode, PRESET_NONE):
            if not await self.async_preset_handler(self.preset_mode, enable=False):
                self.debug("Couldn't turn off '%s' preset", self.preset_mode)
                return
        if preset_mode != PRESET_NONE:
            if not await self.async_preset_handler(preset_mode, enable=True):
                self.debug("Couldn't turn on '%s' preset", preset_mode)
                return
        self._preset = preset_mode
        self.async_write_ha_state()

    async def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)
        high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)
        temp = kwargs.get(ATTR_TEMPERATURE)
        hvac_mode = kwargs.get(ATTR_HVAC_MODE)

        if hvac_mode is not None:
            await self.async_set_hvac_mode(hvac_mode)

        thrm = self._thrm
        if self.hvac_mode == HVAC_MODE_HEAT_COOL:
            success = True
            if low_temp is not None:
                low_temp = int(low_temp * ZCL_TEMP)
                success = success and await thrm.async_set_heating_setpoint(
                    low_temp, self.preset_mode == PRESET_AWAY
                )
                self.debug("Setting heating %s setpoint: %s", low_temp, success)
            if high_temp is not None:
                high_temp = int(high_temp * ZCL_TEMP)
                success = success and await thrm.async_set_cooling_setpoint(
                    high_temp, self.preset_mode == PRESET_AWAY
                )
                # BUGFIX: previously logged low_temp for the cooling setpoint.
                self.debug("Setting cooling %s setpoint: %s", high_temp, success)
        elif temp is not None:
            temp = int(temp * ZCL_TEMP)
            if self.hvac_mode == HVAC_MODE_COOL:
                success = await thrm.async_set_cooling_setpoint(
                    temp, self.preset_mode == PRESET_AWAY
                )
            elif self.hvac_mode == HVAC_MODE_HEAT:
                success = await thrm.async_set_heating_setpoint(
                    temp, self.preset_mode == PRESET_AWAY
                )
            else:
                self.debug("Not setting temperature for '%s' mode", self.hvac_mode)
                return
        else:
            self.debug("incorrect %s setting for '%s' mode", kwargs, self.hvac_mode)
            return

        if success:
            self.async_write_ha_state()

    async def async_preset_handler(self, preset: str, enable: bool = False) -> bool:
        """Set the preset mode via handler."""
        handler = getattr(self, f"async_preset_handler_{preset}")
        return await handler(enable)
@STRICT_MATCH(
    channel_names={CHANNEL_THERMOSTAT, "sinope_manufacturer_specific"},
    manufacturers="Sinope Technologies",
)
class SinopeTechnologiesThermostat(Thermostat):
    """Sinope Technologies Thermostat."""

    manufacturer = 0x119C
    # Interval is randomized per process at import time — presumably so a
    # network of these devices doesn't refresh all at once; TODO confirm.
    update_time_interval = timedelta(minutes=randint(45, 75))

    def __init__(self, unique_id, zha_device, channels, **kwargs):
        """Initialize ZHA Thermostat instance."""
        super().__init__(unique_id, zha_device, channels, **kwargs)
        self._presets = [PRESET_AWAY, PRESET_NONE]
        self._supported_flags |= SUPPORT_PRESET_MODE
        self._manufacturer_ch = self.cluster_channels["sinope_manufacturer_specific"]

    @callback
    def _async_update_time(self, timestamp=None) -> None:
        """Update thermostat's time display."""
        # Seconds since 2000-01-01, computed from naive local time —
        # presumably the format the device's clock attribute expects.
        secs_2k = (
            dt_util.now().replace(tzinfo=None) - datetime(2000, 1, 1, 0, 0, 0, 0)
        ).total_seconds()

        self.debug("Updating time: %s", secs_2k)
        self._manufacturer_ch.cluster.create_catching_task(
            self._manufacturer_ch.cluster.write_attributes(
                {"secs_since_2k": secs_2k}, manufacturer=self.manufacturer
            )
        )

    async def async_added_to_hass(self):
        """Run when about to be added to Hass."""
        await super().async_added_to_hass()
        # Keep the device clock in sync: update now and then periodically.
        async_track_time_interval(
            self.hass, self._async_update_time, self.update_time_interval
        )
        self._async_update_time()

    async def async_preset_handler_away(self, is_away: bool = False) -> bool:
        """Set occupancy."""
        mfg_code = self._zha_device.manufacturer_code
        res = await self._thrm.write_attributes(
            {"set_occupancy": 0 if is_away else 1}, manufacturer=mfg_code
        )

        self.debug("set occupancy to %s. Status: %s", 0 if is_away else 1, res)
        return res
@STRICT_MATCH(
    channel_names=CHANNEL_THERMOSTAT,
    aux_channels=CHANNEL_FAN,
    manufacturers="Zen Within",
)
class ZenWithinThermostat(Thermostat):
    """Zen Within Thermostat implementation."""

    @property
    def _rm_rs_action(self) -> Optional[str]:
        """Return the current HVAC action based on running mode and running state."""
        state = self._thrm.running_state
        if state is None:
            return None
        # Check heat, then cool, then fan bits; stage-2/3 bits count too.
        heat_bits = RunningState.HEAT | RunningState.HEAT_STAGE_2
        cool_bits = RunningState.COOL | RunningState.COOL_STAGE_2
        fan_bits = (
            RunningState.FAN | RunningState.FAN_STAGE_2 | RunningState.FAN_STAGE_3
        )
        if state & heat_bits:
            return CURRENT_HVAC_HEAT
        if state & cool_bits:
            return CURRENT_HVAC_COOL
        if state & fan_bits:
            return CURRENT_HVAC_FAN
        if self.hvac_mode != HVAC_MODE_OFF:
            return CURRENT_HVAC_IDLE
        return CURRENT_HVAC_OFF
@STRICT_MATCH(
    channel_names=CHANNEL_THERMOSTAT,
    aux_channels=CHANNEL_FAN,
    manufacturers="Centralite",
    models="3157100",
)
class CentralitePearl(ZenWithinThermostat):
    """Centralite Pearl Thermostat implementation."""

    # Inherits all behavior from ZenWithinThermostat; only the registry
    # matching criteria (manufacturer/model) differ.
| {
"content_hash": "9cca7bc68f6312bd06569f6be3f3ee58",
"timestamp": "",
"source": "github",
"line_count": 599,
"max_line_length": 85,
"avg_line_length": 34.36560934891486,
"alnum_prop": 0.6202574690308477,
"repo_name": "tchellomello/home-assistant",
"id": "7ffb727bacce4e71f29458ede12526850a3e5cfb",
"size": "20585",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zha/climate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "26713364"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from cctbx import uctbx
from scitbx.array_family import flex
def quasi_normalisation(intensities):
    """Quasi-normalisation of the input intensities.

    Args:
        intensities (cctbx.miller.array): The intensities to be normalised.

    Returns:
        cctbx.miller.array: The normalised intensities.
    """
    # Clamp negative observations to zero so they don't distort mean I values.
    work = intensities.deep_copy()
    work.data().set_selected(work.data() < 0.0, 0.0)

    # Scale the number of resolution shells with the number of reflections.
    size = work.size()
    if size > 20000:
        n_refl_shells = 20
    elif size > 15000:
        n_refl_shells = 15
    else:
        n_refl_shells = 10

    # Pad the d*^2 range slightly so the extreme reflections fall inside bins.
    d_star_sq = work.d_star_sq().data()
    low = flex.min(d_star_sq)
    high = flex.max(d_star_sq)
    margin = (high - low) * 1e-6
    low -= margin
    high += margin
    step = (high - low) / n_refl_shells

    work.setup_binner_d_star_sq_step(
        d_min=uctbx.d_star_sq_as_d(low),  # cctbx/cctbx_project#588
        d_max=uctbx.d_star_sq_as_d(high),  # cctbx/cctbx_project#588
        d_star_sq_step=step,
        auto_binning=False,
    )
    normalisations = work.intensity_quasi_normalisations()
    return intensities.customized_copy(
        data=(intensities.data() / normalisations.data()),
        sigmas=(intensities.sigmas() / normalisations.data()),
    )
| {
"content_hash": "f71dc2abd55c8b9a2f13eaff7191759b",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 77,
"avg_line_length": 33.70454545454545,
"alnum_prop": 0.6365475387727579,
"repo_name": "dials/dials",
"id": "238caba4d1c85f94f5ba4fd50ff6d52c91507976",
"size": "1483",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/dials/util/normalisation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "379"
},
{
"name": "C++",
"bytes": "1758129"
},
{
"name": "CMake",
"bytes": "34388"
},
{
"name": "Dockerfile",
"bytes": "329"
},
{
"name": "Gherkin",
"bytes": "400"
},
{
"name": "HTML",
"bytes": "25033"
},
{
"name": "Makefile",
"bytes": "76"
},
{
"name": "Python",
"bytes": "6147100"
},
{
"name": "Shell",
"bytes": "6419"
}
],
"symlink_target": ""
} |
import re
import operator
# Module-level switches: `debug` prints intermediate RAKE structures,
# `test` runs the import-time demo at the bottom of the file.
debug = False
test = False
def is_number(s):
    """Return True if `s` parses as an int, or as a float when it has a '.'."""
    parser = float if '.' in s else int
    try:
        parser(s)
    except ValueError:
        return False
    return True
def load_stop_words(stop_word_file):
    """
    Utility function to load stop words from a file and return as a list of words
    @param stop_word_file Path and file name of a file containing stop words.
    @return list A list of stop words.
    """
    stop_words = []
    # BUGFIX: use a context manager so the file handle is closed
    # deterministically instead of being leaked.
    with open(stop_word_file) as handle:
        for line in handle:
            # Skip comment lines starting with '#'.
            if line.strip()[0:1] != "#":
                for word in line.split():  # in case more than one per line
                    stop_words.append(word)
    return stop_words
def separate_words(text, min_word_return_size):
    """
    Utility function to return a list of all words that are have a length greater than a specified number of characters.
    @param text The text that must be split in to words.
    @param min_word_return_size The minimum no of characters a word must have to be included.
    """
    splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]')
    words = []
    for raw_token in splitter.split(text):
        token = raw_token.strip().lower()
        # Leave numbers in phrases, but don't count them as words, since they
        # tend to invalidate the scores of their phrases.
        if token and len(token) > min_word_return_size and not is_number(token):
            words.append(token)
    return words
def split_sentences(text):
    """
    Utility function to return a list of sentences.
    @param text The text that must be split in to sentences.
    """
    # Splits on common punctuation, tabs, quotes/brackets, and " - ".
    sentence_delimiters = re.compile(u'[.!?,;:\t\\\\"\\(\\)\\\'\u2019\u2013]|\\s\\-\\s')
    return sentence_delimiters.split(text)
def build_stop_word_regex(stop_word_file_path):
    """Compile a case-insensitive regex matching any stop word as a whole word."""
    words = load_stop_words(stop_word_file_path)
    # The negative lookahead keeps hyphenated continuations intact
    # (added look ahead for hyphen, per the original implementation).
    patterns = [r'\b' + word + r'(?![\w-])' for word in words]
    return re.compile('|'.join(patterns), re.IGNORECASE)
def generate_candidate_keywords(sentence_list, stopword_pattern):
    """Split each sentence at stop words and return the remaining phrases, lowercased."""
    phrase_list = []
    for sentence in sentence_list:
        replaced = re.sub(stopword_pattern, '|', sentence.strip())
        for chunk in replaced.split("|"):
            phrase = chunk.strip().lower()
            if phrase:
                phrase_list.append(phrase)
    return phrase_list
def calculate_word_scores(phraseList):
    """Compute the RAKE word score deg(w)/freq(w) for every word in the phrases."""
    word_frequency = {}
    word_degree = {}
    for phrase in phraseList:
        words = separate_words(phrase, 0)
        # Degree contribution of a phrase is its word count minus one.
        degree = len(words) - 1
        for word in words:
            word_frequency[word] = word_frequency.get(word, 0) + 1
            word_degree[word] = word_degree.get(word, 0) + degree
    # A word's degree also counts its own occurrences.
    for word in word_frequency:
        word_degree[word] = word_degree[word] + word_frequency[word]
    # Calculate Word scores = deg(w)/freq(w)
    word_score = {}
    for word in word_frequency:
        word_score[word] = word_degree[word] / (word_frequency[word] * 1.0)
    return word_score
def generate_candidate_keyword_scores(phrase_list, word_score):
    """Score each candidate phrase as the sum of its member word scores."""
    keyword_candidates = {}
    for phrase in phrase_list:
        words = separate_words(phrase, 0)
        keyword_candidates[phrase] = sum(word_score[word] for word in words)
    return keyword_candidates
class Rake(object):
    """RAKE keyword extractor configured with a stop-word file."""

    def __init__(self, stop_words_path):
        self.stop_words_path = stop_words_path
        self.__stop_words_pattern = build_stop_word_regex(stop_words_path)

    def run(self, text):
        """Return (phrase, score) pairs for `text`, best-scoring first."""
        sentence_list = split_sentences(text)
        phrase_list = generate_candidate_keywords(sentence_list, self.__stop_words_pattern)
        word_scores = calculate_word_scores(phrase_list)
        keyword_candidates = generate_candidate_keyword_scores(phrase_list, word_scores)
        # BUGFIX: .items() instead of the Python-2-only .iteritems();
        # sorted() consumes either, so behavior is identical on Python 2
        # and the class now also works on Python 3.
        sorted_keywords = sorted(keyword_candidates.items(), key=operator.itemgetter(1), reverse=True)
        return sorted_keywords
# Self-test/demo: runs at import time when the module-level `test` flag is
# set. (Python 2 syntax: uses `print` statements.)
if test:
    text = "Compatibility of systems of linear constraints over the set of natural numbers. Criteria of compatibility of a system of linear Diophantine equations, strict inequations, and nonstrict inequations are considered. Upper bounds for components of a minimal set of solutions and algorithms of construction of minimal generating sets of solutions for all types of systems are given. These criteria and the corresponding algorithms for constructing a minimal supporting set of solutions can be used in solving all the considered types of systems and systems of mixed types."
    # Split text into sentences
    sentenceList = split_sentences(text)
    #stoppath = "FoxStoplist.txt" #Fox stoplist contains "numbers", so it will not find "natural numbers" like in Table 1.1
    stoppath = "SmartStoplist.txt" #SMART stoplist misses some of the lower-scoring keywords in Figure 1.5, which means that the top 1/3 cuts off one of the 4.0 score words in Table 1.1
    stopwordpattern = build_stop_word_regex(stoppath)
    # generate candidate keywords
    phraseList = generate_candidate_keywords(sentenceList, stopwordpattern)
    # calculate individual word scores
    wordscores = calculate_word_scores(phraseList)
    # generate candidate keyword scores
    keywordcandidates = generate_candidate_keyword_scores(phraseList, wordscores)
    if debug: print keywordcandidates
    sortedKeywords = sorted(keywordcandidates.iteritems(), key=operator.itemgetter(1), reverse=True)
    if debug: print sortedKeywords
    totalKeywords = len(sortedKeywords)
    if debug: print totalKeywords
    # Keep only the top third of candidates (integer division), as in the
    # original RAKE paper.
    print sortedKeywords[0:(totalKeywords / 3)]
    # Same pipeline again, via the Rake convenience class.
    rake = Rake("SmartStoplist.txt")
    keywords = rake.run(text)
    print keywords
| {
"content_hash": "01700f4f4fb7ee6dddd7ac0732baca83",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 580,
"avg_line_length": 39.21604938271605,
"alnum_prop": 0.6713363765150323,
"repo_name": "njordsir/Movie-Script-Analysis",
"id": "550161cba59c82d5f2263ff71bd75e51c4734f79",
"size": "6669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Character-Networks-and-Theme-Centralization/rake_topic_extraction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1684666"
},
{
"name": "Python",
"bytes": "39950"
}
],
"symlink_target": ""
} |
# MOO-style verb code: helpers such as has_dobj()/get_pobj()/edit() are
# injected into the execution environment by the antioch runtime -- they are
# not defined in this file.
# Resolve the object, verb, or property that should be opened in the editor.
if not has_pobj_str('on'):
	# No "on <object>" phrase: edit the direct object itself.
	if(has_dobj()):
		subject = get_dobj()
	else:
		subject = get_object(get_dobj_str())
else:
	# "edit <something> on <object>": resolve the origin object first.
	if(has_pobj('on')):
		origin = get_pobj('on')
	else:
		origin = get_object(get_pobj_str('on'))
	# The direct-object string may be "<type> <name>" or just "<name>".
	subjects = get_dobj_str().split(' ', 1)
	if(len(subjects) == 2):
		stype, name = subjects
	else:
		stype = None
		name = subjects[0]
	if(stype == 'verb'):
		subject = origin.get_verb(name)
		if subject is None:
			raise NoSuchVerbError(name)
	elif(stype in ('property', 'prop', 'value', 'val')):
		subject = origin.get_property(name)
		if subject is None:
			raise NoSuchPropertyError(name, origin)
	else:
		# No explicit type given: prefer a verb, fall back to a property.
		subject = origin.get_verb(name) or origin.get_property(name)
		if subject is None:
			raise NoSuchPropertyError(name, origin)
edit(subject)
| {
"content_hash": "59dd5d95d7d7a73b380b12465c0777a9",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 68,
"avg_line_length": 28.4375,
"alnum_prop": 0.554945054945055,
"repo_name": "philchristensen/antioch",
"id": "9ec873db3f7159f1b6e29b244c1c5ff623a64cda",
"size": "921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "antioch/core/bootstrap/default_verbs/wizard_class_edit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12977"
},
{
"name": "Dockerfile",
"bytes": "850"
},
{
"name": "HTML",
"bytes": "21856"
},
{
"name": "JavaScript",
"bytes": "18844"
},
{
"name": "Python",
"bytes": "353482"
},
{
"name": "Shell",
"bytes": "1260"
}
],
"symlink_target": ""
} |
__author__ = 'nikolojedison'
from wpilib.command import CommandGroup
#Woo, commands
from commands.setpoint_commands.close_claw import CloseClaw
from commands.semiauto_commands.drive_straight import DriveStraight
from commands.setpoint_commands.lift_go_to_level import LiftGoToLevel
from commands.manual_commands.mecanum_drive_with_joystick import MecanumDriveWithJoystick
from commands.setpoint_commands.open_claw import OpenClaw
from commands.set_claw_setpoint import SetClawSetpoint
from commands.set_lift_setpoint import SetLiftSetpoint
from commands.set_mast_setpoint import SetMastSetpoint
from commands.setpoint_commands.grab_tote import GrabTote
from commands.semiauto_commands.turn import Turn
from commands.setpoint_commands.lift_stuff import LiftStuff
class ThreeToteAutonomous(CommandGroup):
    """Autonomous routine that drives, turns, and grabs totes in sequence.

    NOTE(review): the full three-tote stacking sequence (repeated
    drive/turn/drop/re-grab legs) was disabled pending working setpoints;
    only the initial approach and first grabs run.
    """
    def __init__(self, robot):
        super().__init__()
        # Commands execute sequentially in list order.
        self.auton_generator = [
            DriveStraight(robot, 0, -1, timeout=.25),
            Turn(robot, -30),
            DriveStraight(robot, -1, 0, timeout=.1),
            DriveStraight(robot, 0, -1, timeout=.1),
            GrabTote(robot),
            LiftStuff(robot, 1, 1.5),
            GrabTote(robot),
        ]
        for command in self.auton_generator:
            self.addSequential(command)
    def cancel(self):
        """Cancel every queued command, then cancel the group itself."""
        for command in self.auton_generator:
            command._cancel()
        super().cancel()
| {
"content_hash": "017d4e30f970663df668dce303447a60",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 89,
"avg_line_length": 41.58730158730159,
"alnum_prop": 0.6076335877862595,
"repo_name": "DenfeldRobotics4009/2015_Lopez_Jr",
"id": "5e3b4e96ab44c44e7020ce8f8f3e3567d707f798",
"size": "2620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands/auto_commands/three_tote_autonomous.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "63765"
}
],
"symlink_target": ""
} |
"""Copyright 2011 The University of Michigan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors - Jie Yu (jieyu@umich.edu)
"""
from maple.core import logging
from maple.core import static_info
from maple.idiom import iroot
from maple.idiom import memo
from maple.regression import common
"""
Expected Results (predicted iroots):
------------------------------------
1 IDIOM_1
e0: WRITE [70 target 0x742 mem_idiom2.cc +36]
e1: WRITE [72 target 0x788 mem_idiom2.cc +29]
2 IDIOM_1
e0: WRITE [56 target 0x76a mem_idiom2.cc +26]
e1: WRITE [65 target 0x738 mem_idiom2.cc +35]
3 IDIOM_3
e0: WRITE [56 target 0x76a mem_idiom2.cc +26]
e1: WRITE [65 target 0x738 mem_idiom2.cc +35]
e2: WRITE [70 target 0x742 mem_idiom2.cc +36]
e3: WRITE [72 target 0x788 mem_idiom2.cc +29]
(We should not predict any idiom2 iroot.)
"""
def source_name():
  """Name of the C++ test source: this module's name plus the C++ suffix."""
  return __name__ + common.cxx_ext()
def setup_profiler(profiler):
  """Enable complex-idiom prediction with a 2000-instruction window."""
  knobs = profiler.knobs
  knobs['ignore_lib'] = True
  knobs['complex_idioms'] = True
  knobs['vw'] = 2000
def setup_testcase(testcase):
  """Run the test case with an iRoot observation threshold of two."""
  testcase.threshold = 2
def verify(profiler, testcase):
  """Return True iff the predictor produced no idiom-2 iRoots."""
  sinfo = static_info.StaticInfo()
  sinfo.load(profiler.knobs['sinfo_out'])
  iroot_db = iroot.iRootDB(sinfo)
  iroot_db.load(profiler.knobs['iroot_out'])
  # Python 2 dict API (itervalues); this code base predates Python 3.
  offenders = [r for r in iroot_db.iroot_map.itervalues() if r.idiom() == 2]
  if offenders:
    logging.msg('idiom2 iroot should not be predicted\n')
    return False
  return True
| {
"content_hash": "8a4e6c169570ee9743851f32ded36eff",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 72,
"avg_line_length": 33.12903225806452,
"alnum_prop": 0.6689386562804285,
"repo_name": "jieyu/maple",
"id": "28a6ae495ba2b3b1770deb58e8d7133109bdcd3c",
"size": "2054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/idiom/predictor/mem_idiom2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3636"
},
{
"name": "C++",
"bytes": "1407098"
},
{
"name": "Makefile",
"bytes": "14594"
},
{
"name": "Protocol Buffer",
"bytes": "8613"
},
{
"name": "Python",
"bytes": "401005"
},
{
"name": "Shell",
"bytes": "1733"
}
],
"symlink_target": ""
} |
"""
Django settings for taxi project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'oi9r_r=r8m3-5&-fmpxi2y)r(b3i3$a6odk9#!hm7sx6td($#4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'djcelery',
'api',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'taxi.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'taxi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
BROKER_URL = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/1'
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
CELERY_IGNORE_RESULT = True
CELERY_ACCEPT_CONTENT = ['pickle', 'json', ]
REST_FRAMEWORK = {
'PAGINATE_BY': 20,
}
try:
from .settings_local import *
except ImportError:
pass
| {
"content_hash": "3fc20abf6a3a1bd6f4a62070ee635b59",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 71,
"avg_line_length": 24.584745762711865,
"alnum_prop": 0.6963116166839021,
"repo_name": "rombr/agile-fusion-test-task",
"id": "77aacfe6ecc96cf384866a64fab0a7784e2fb53b",
"size": "2901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taxi/taxi/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15049"
}
],
"symlink_target": ""
} |
from scraper import GoogleAnalyticsScraper
| {
"content_hash": "220a284859ab522cc6569619522b5d17",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 42,
"avg_line_length": 43,
"alnum_prop": 0.9069767441860465,
"repo_name": "nprapps/graeae",
"id": "2bdb45001c34d1e3b1c9eab112b4a30201908715",
"size": "43",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapers/google_analytics/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "122377"
},
{
"name": "HTML",
"bytes": "303664"
},
{
"name": "JavaScript",
"bytes": "486080"
},
{
"name": "Nginx",
"bytes": "136"
},
{
"name": "Python",
"bytes": "134356"
},
{
"name": "Shell",
"bytes": "83"
}
],
"symlink_target": ""
} |
"""
WSGI config for tsace project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings.development")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "5b2acdf7f9a040ae0eb863c6aff73c0a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 79,
"avg_line_length": 28.5,
"alnum_prop": 0.7769423558897243,
"repo_name": "c17r/tsace",
"id": "02e43172e1f48f106920c97c21af45ee2196e3b5",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "791"
},
{
"name": "HTML",
"bytes": "3873"
},
{
"name": "JavaScript",
"bytes": "4916"
},
{
"name": "Python",
"bytes": "32287"
}
],
"symlink_target": ""
} |
"""Command-line flag library.
Emulates gflags by wrapping cfg.ConfigOpts.
The idea is to move fully to cfg eventually, and this wrapper is a
stepping stone.
"""
import os
import socket
import sys
from cinder.openstack.common import cfg
# Global configuration object: modules register options on it and read parsed
# values from it (the gflags-style "FLAGS" singleton this module emulates).
FLAGS = cfg.CONF
def parse_args(argv, default_config_files=None):
    """Parse the command line through cfg.CONF.

    Args:
        argv: full argument vector; argv[0] is preserved untouched.
        default_config_files: optional list of config files for cfg.

    Returns:
        argv[:1] plus whatever arguments cfg.CONF leaves unparsed.
    """
    FLAGS.disable_interspersed_args()
    leftovers = FLAGS(argv[1:],
                      project='cinder',
                      default_config_files=default_config_files)
    return argv[:1] + leftovers
class UnrecognizedFlag(Exception):
    """Raised when a referenced flag has not been defined."""
def DECLARE(name, module_string, flag_values=FLAGS):
    """Ensure the module defining a flag is imported and the flag exists.

    Args:
        name: flag name expected to be present in flag_values.
        module_string: dotted module path that should define the flag.
        flag_values: mapping of defined flags (defaults to the global FLAGS).

    Raises:
        UnrecognizedFlag: if the flag is still undefined after the import.
    """
    if module_string not in sys.modules:
        __import__(module_string, globals(), locals())
    if name in flag_values:
        return
    raise UnrecognizedFlag('%s not defined by %s' % (name, module_string))
def _get_my_ip():
"""
Returns the actual ip of the local machine.
This code figures out what source address would be used if some traffic
were to be sent out to some well known address on the Internet. In this
case, a Google DNS server is used, but the specific address does not
matter much. No traffic is actually sent.
"""
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return "127.0.0.1"
# Core options: registered as CLI options below, so they may be set on the
# command line as well as in config files.
core_opts = [
    cfg.StrOpt('connection_type',
               default=None,
               help='Virtualization api connection type : libvirt, xenapi, '
                    'or fake'),
    cfg.StrOpt('sql_connection',
               default='sqlite:///$state_path/$sqlite_db',
               help='The SQLAlchemy connection string used to connect to the '
                    'database'),
    cfg.IntOpt('sql_connection_debug',
               default=0,
               help='Verbosity of SQL debugging information. 0=None, '
                    '100=Everything'),
    cfg.StrOpt('api_paste_config',
               default="api-paste.ini",
               help='File name for the paste.deploy config for cinder-api'),
    cfg.StrOpt('pybasedir',
               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                    '../')),
               help='Directory where the cinder python module is installed'),
    cfg.StrOpt('bindir',
               default='$pybasedir/bin',
               help='Directory where cinder binaries are installed'),
    cfg.StrOpt('state_path',
               default='$pybasedir',
               help="Top-level directory for maintaining cinder's state"),
]
# Placeholder for debug-only CLI options; currently empty.
debug_opts = [
]
FLAGS.register_cli_opts(core_opts)
FLAGS.register_cli_opts(debug_opts)
# Options shared across cinder services; registered (non-CLI) on FLAGS below.
global_opts = [
    cfg.StrOpt('my_ip',
               default=_get_my_ip(),
               help='ip address of this host'),
    cfg.StrOpt('glance_host',
               default='$my_ip',
               help='default glance hostname or ip'),
    cfg.IntOpt('glance_port',
               default=9292,
               help='default glance port'),
    cfg.ListOpt('glance_api_servers',
                default=['$glance_host:$glance_port'],
                help='A list of the glance api servers available to cinder '
                     '([hostname|ip]:port)'),
    cfg.IntOpt('glance_num_retries',
               default=0,
               help='Number retries when downloading an image from glance'),
    cfg.StrOpt('scheduler_topic',
               default='cinder-scheduler',
               help='the topic scheduler nodes listen on'),
    cfg.StrOpt('volume_topic',
               default='cinder-volume',
               help='the topic volume nodes listen on'),
    cfg.BoolOpt('api_rate_limit',
                default=True,
                help='whether to rate limit the api'),
    cfg.ListOpt('osapi_volume_ext_list',
                default=[],
                help='Specify list of extensions to load when using osapi_'
                     'volume_extension option with cinder.api.openstack.'
                     'volume.contrib.select_extensions'),
    cfg.MultiStrOpt('osapi_volume_extension',
                    default=[
                        'cinder.api.openstack.volume.contrib.standard_extensions'
                    ],
                    help='osapi volume extension to load'),
    cfg.StrOpt('osapi_compute_link_prefix',
               default=None,
               help='Base URL that will be presented to users in links '
                    'to the OpenStack Compute API'),
    cfg.IntOpt('osapi_max_limit',
               default=1000,
               help='the maximum number of items returned in a single '
                    'response from a collection resource'),
    cfg.StrOpt('sqlite_db',
               default='cinder.sqlite',
               help='the filename to use with sqlite'),
    cfg.BoolOpt('sqlite_synchronous',
                default=True,
                help='If passed, use synchronous mode for sqlite'),
    cfg.IntOpt('sql_idle_timeout',
               default=3600,
               help='timeout before idle sql connections are reaped'),
    cfg.IntOpt('sql_max_retries',
               default=10,
               help='maximum db connection retries during startup. '
                    '(setting -1 implies an infinite retry count)'),
    cfg.IntOpt('sql_retry_interval',
               default=10,
               help='interval between retries of opening a sql connection'),
    cfg.StrOpt('volume_manager',
               default='cinder.volume.manager.VolumeManager',
               help='full class name for the Manager for volume'),
    cfg.StrOpt('scheduler_manager',
               default='cinder.scheduler.manager.SchedulerManager',
               help='full class name for the Manager for scheduler'),
    cfg.StrOpt('host',
               default=socket.gethostname(),
               help='Name of this node. This can be an opaque identifier. '
                    'It is not necessarily a hostname, FQDN, or IP address.'),
    # NOTE(vish): default to nova for compatibility with nova installs
    cfg.StrOpt('storage_availability_zone',
               default='nova',
               help='availability zone of this node'),
    cfg.ListOpt('memcached_servers',
                default=None,
                help='Memcached servers or None for in process cache.'),
    cfg.StrOpt('instance_usage_audit_period',
               default='month',
               help='time period to generate instance usages for. '
                    'Time period must be hour, day, month or year'),
    cfg.StrOpt('root_helper',
               default='sudo',
               help='Deprecated: command to use for running commands as root'),
    cfg.StrOpt('rootwrap_config',
               default=None,
               help='Path to the rootwrap configuration file to use for '
                    'running commands as root'),
    cfg.BoolOpt('monkey_patch',
                default=False,
                help='Whether to log monkey patching'),
    cfg.ListOpt('monkey_patch_modules',
                default=[],
                help='List of modules/decorators to monkey patch'),
    cfg.IntOpt('reclaim_instance_interval',
               default=0,
               help='Interval in seconds for reclaiming deleted instances'),
    cfg.IntOpt('service_down_time',
               default=60,
               help='maximum time since last check-in for up service'),
    cfg.StrOpt('volume_api_class',
               default='cinder.volume.api.API',
               help='The full class name of the volume API class to use'),
    cfg.StrOpt('auth_strategy',
               default='noauth',
               help='The strategy to use for auth. Supports noauth, keystone, '
                    'and deprecated.'),
    cfg.StrOpt('control_exchange',
               default='cinder',
               help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]
FLAGS.register_opts(global_opts)
| {
"content_hash": "cb1b7a3b44c7c62023db8b9f208e0ccc",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 79,
"avg_line_length": 39.41379310344828,
"alnum_prop": 0.5725534308211474,
"repo_name": "freedomhui/cinder",
"id": "87103c83a3dbee65b07eda06b442014b5fef8a47",
"size": "8809",
"binary": false,
"copies": "2",
"ref": "refs/heads/localstorage",
"path": "cinder/flags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "2021121"
},
{
"name": "Shell",
"bytes": "7441"
}
],
"symlink_target": ""
} |
"""Parses the generic policy files and return a policy object for acl rendering.
"""
__author__ = ['pmoody@google.com',
'watson@google.com']
import datetime
import os
import sys
from lib import nacaddr
from lib import naming
from ply import lex
from ply import yacc
import logging
# Naming-definitions object; must be populated by the caller before policies
# that reference named services/networks are parsed.
DEFINITIONS = None
# Default directory searched for naming definition files.
DEFAULT_DEFINITIONS = './def'
# The set of actions a term may specify.
ACTIONS = set(('accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'))
# Recognized values for a term's logging option.
_LOGGING = set(('true', 'True', 'syslog', 'local', 'disable', 'log-both'))
# Whether addresses are optimized (collapsed) during translation; reset back
# to True after each filter is translated.
_OPTIMIZE = True
# Whether to check for shaded (unreachable) terms; reset back to False after
# each filter is checked.
_SHADE_CHECK = False
# Exception hierarchy for policy parsing; everything derives from Error so
# callers can catch the whole family with one clause.
class Error(Exception):
  """Generic error class."""
# NOTE(review): shadows Python 3's builtin FileNotFoundError; harmless under
# Python 2, which this module targets (see xrange/itervalues usage below).
class FileNotFoundError(Error):
  """Policy file unable to be read."""
class FileReadError(Error):
  """Policy file unable to be read."""
class RecursionTooDeepError(Error):
  """Included files exceed maximum recursion depth."""
class ParseError(Error):
  """ParseError in the input."""
class TermAddressExclusionError(Error):
  """Excluded address block is not contained in the accepted address block."""
class TermObjectTypeError(Error):
  """Error with an object passed to Term."""
class TermPortProtocolError(Error):
  """Error when a requested protocol doesn't have any of the requested ports."""
class TermProtocolEtherTypeError(Error):
  """Error when both ether-type & upper-layer protocol matches are requested."""
class TermNoActionError(Error):
  """Error when a term hasn't defined an action."""
class TermInvalidIcmpType(Error):
  """Error when a term has invalid icmp-types specified."""
class InvalidTermActionError(Error):
  """Error when an action is invalid."""
class InvalidTermLoggingError(Error):
  """Error when a option is set for logging."""
class UndefinedAddressError(Error):
  """Error when an undefined address is referenced."""
class NoTermsError(Error):
  """Error when no terms were found."""
class ShadingError(Error):
  """Error when a term is shaded by a prior term."""
def TranslatePorts(ports, protocols, term_name):
  """Return all ports of all protocols requested.

  Args:
    ports: list of ports, eg ['SMTP', 'DNS', 'HIGH_PORTS']
    protocols: list of protocols, eg ['tcp', 'udp']
    term_name: name of current term, used for warning messages

  Returns:
    list of (low, high) port tuples, eg [(25, 25), (53, 53), (1024, 65535)]

  Note:
    Duplication will be taken care of in Term.CollapsePortList
  """
  translated = []
  for proto in protocols:
    for port in ports:
      service_by_proto = DEFINITIONS.GetServiceByProto(port, proto)
      if not service_by_proto:
        logging.warn('%s %s %s %s %s %s%s %s', 'Term', term_name,
                     'has service', port, 'which is not defined with protocol',
                     proto,
                     ', but will be permitted. Unless intended, you should',
                     'consider splitting the protocols into separate terms!')
      # Each entry is either "low-high" or a single port number.
      for entry in service_by_proto:
        bounds = entry.split('-')
        if len(bounds) == 1:
          translated.append((int(bounds[0]), int(bounds[0])))
        else:
          translated.append((int(bounds[0]), int(bounds[1])))
  return translated
# classes for storing the object types in the policy files.
class Policy(object):
  """The policy object contains everything found in a given policy file."""
  def __init__(self, header, terms):
    """Initiator for the Policy object.
    Args:
      header: __main__.Header object. contains comments which should be passed
        on to the rendered acls as well as the type of acls this policy file
        should render to.
      terms: list __main__.Term. an array of Term objects which must be rendered
        in each of the rendered acls.
    Attributes:
      filters: list of tuples containing (header, terms).
    """
    self.filters = []
    self.AddFilter(header, terms)
  def AddFilter(self, header, terms):
    """Add another header & filter."""
    self.filters.append((header, terms))
    self._TranslateTerms(terms)
    if _SHADE_CHECK:
      self._DetectShading(terms)
  def _TranslateTerms(self, terms):
    """Translate ports/addresses of each term and sanity check it.
    Args:
      terms: list of Term objects to translate in place.
    Raises:
      NoTermsError: if terms is empty.
      TermPortProtocolError: if a term's named service has no ports for the
        term's protocol(s).
    """
    if not terms:
      raise NoTermsError('no terms found')
    for term in terms:
      # TODO(pmoody): this probably belongs in Term.SanityCheck(),
      # or at the very least, in some method under class Term()
      if term.translated:
        continue
      if term.port:
        term.port = TranslatePorts(term.port, term.protocol, term.name)
        if not term.port:
          raise TermPortProtocolError(
              'no ports of the correct protocol for term %s' % (
                  term.name))
      if term.source_port:
        term.source_port = TranslatePorts(term.source_port, term.protocol,
                                          term.name)
        if not term.source_port:
          raise TermPortProtocolError(
              'no source ports of the correct protocol for term %s' % (
                  term.name))
      if term.destination_port:
        term.destination_port = TranslatePorts(term.destination_port,
                                               term.protocol, term.name)
        if not term.destination_port:
          raise TermPortProtocolError(
              'no destination ports of the correct protocol for term %s' % (
                  term.name))
      # If argument is true, we optimize, otherwise just sort addresses
      term.AddressCleanup(_OPTIMIZE)
      # Reset _OPTIMIZE global to default value
      globals()['_OPTIMIZE'] = True
      term.SanityCheck()
      term.translated = True
  @property
  def headers(self):
    """Returns the headers from each of the configured filters.
    Returns:
      headers
    """
    return [x[0] for x in self.filters]
  def _DetectShading(self, terms):
    """Finds terms which are shaded (impossible to reach).
    Iterate through each term, looking at each prior term. If a prior term
    contains every component of the current term then the current term would
    never be hit and is thus shaded. This can be a mistake.
    Args:
      terms: list of Term objects.
    Raises:
      ShadingError: When a term is impossible to reach.
    """
    # Reset _SHADE_CHECK global to default value. (The original comment said
    # _OPTIMIZE, but it is the shade-check flag that is reset here.)
    globals()['_SHADE_CHECK'] = False
    shading_errors = []
    for index, term in enumerate(terms):
      for prior_index in xrange(index):
        # Check each term that came before for shading. Terms with next as an
        # action do not terminate evaluation, so cannot shade.
        if (term in terms[prior_index]
            and 'next' not in terms[prior_index].action):
          shading_errors.append(
              ' %s is shaded by %s.' % (
                  term.name, terms[prior_index].name))
    if shading_errors:
      raise ShadingError('\n'.join(shading_errors))
  def __eq__(self, obj):
    """Compares for equality against another Policy object.
    Note that it is picky and requires the list contents to be in the
    same order.
    Args:
      obj: object to be compared to for equality.
    Returns:
      True if the list of filters in this policy object is equal to the list
      in obj and False otherwise.
    """
    if not isinstance(obj, Policy):
      return False
    return self.filters == obj.filters
  def __str__(self):
    def tuple_str(tup):
      return '%s:%s' % (tup[0], tup[1])
    return 'Policy: {%s}' % ', '.join(map(tuple_str, self.filters))
  def __repr__(self):
    return self.__str__()
class Term(object):
"""The Term object is used to store each of the terms.
Args:
obj: an object of type VarType or a list of objects of type VarType
members:
address/source_address/destination_address/: list of
VarType.(S|D)?ADDRESS's
address_exclude/source_address_exclude/destination_address_exclude: list of
VarType.(S|D)?ADDEXCLUDE's
port/source_port/destination_port: list of VarType.(S|D)?PORT's
options: list of VarType.OPTION's.
protocol: list of VarType.PROTOCOL's.
counter: VarType.COUNTER
action: list of VarType.ACTION's
dscp-set: VarType.DSCP_SET
dscp-match: VarType.DSCP_MATCH
dscp-except: VarType.DSCP_EXCEPT
comments: VarType.COMMENT
forwarding-class: VarType.FORWARDING_CLASS
expiration: VarType.EXPIRATION
verbatim: VarType.VERBATIM
logging: VarType.LOGGING
next-ip: VarType.NEXT_IP
qos: VarType.QOS
policer: VarType.POLICER
vpn: VarType.VPN
"""
ICMP_TYPE = {4: {'echo-reply': 0,
'unreachable': 3,
'source-quench': 4,
'redirect': 5,
'alternate-address': 6,
'echo-request': 8,
'router-advertisement': 9,
'router-solicitation': 10,
'time-exceeded': 11,
'parameter-problem': 12,
'timestamp-request': 13,
'timestamp-reply': 14,
'information-request': 15,
'information-reply': 16,
'mask-request': 17,
'mask-reply': 18,
'conversion-error': 31,
'mobile-redirect': 32,
},
6: {'destination-unreachable': 1,
'packet-too-big': 2,
'time-exceeded': 3,
'parameter-problem': 4,
'echo-request': 128,
'echo-reply': 129,
'multicast-listener-query': 130,
'multicast-listener-report': 131,
'multicast-listener-done': 132,
'router-solicit': 133,
'router-advertisement': 134,
'neighbor-solicit': 135,
'neighbor-advertisement': 136,
'redirect-message': 137,
'router-renumbering': 138,
'icmp-node-information-query': 139,
'icmp-node-information-response': 140,
'inverse-neighbor-discovery-solicitation': 141,
'inverse-neighbor-discovery-advertisement': 142,
'version-2-multicast-listener-report': 143,
'home-agent-address-discovery-request': 144,
'home-agent-address-discovery-reply': 145,
'mobile-prefix-solicitation': 146,
'mobile-prefix-advertisement': 147,
'certification-path-solicitation': 148,
'certification-path-advertisement': 149,
'multicast-router-advertisement': 151,
'multicast-router-solicitation': 152,
'multicast-router-termination': 153,
},
}
_IPV6_BYTE_SIZE = 4
  def __init__(self, obj):
    """Initialize all match/action attributes, then ingest obj.
    Args:
      obj: an object of type VarType or a list of objects of type VarType.
    """
    self.name = None
    self.action = []
    self.address = []
    self.address_exclude = []
    self.comment = []
    self.counter = None
    self.expiration = None
    self.destination_address = []
    self.destination_address_exclude = []
    self.destination_port = []
    self.destination_prefix = []
    self.forwarding_class = None
    self.logging = []
    self.loss_priority = None
    self.option = []
    self.owner = None
    self.policer = None
    self.port = []
    self.precedence = []
    self.principals = []
    self.protocol = []
    self.protocol_except = []
    self.qos = None
    self.routing_instance = None
    self.source_address = []
    self.source_address_exclude = []
    self.source_port = []
    self.source_prefix = []
    self.verbatim = []
    # juniper specific.
    self.packet_length = None
    self.fragment_offset = None
    self.hop_limit = None
    self.icmp_type = []
    self.ether_type = []
    self.traffic_type = []
    self.translated = False
    self.dscp_set = None
    self.dscp_match = []
    self.dscp_except = []
    self.next_ip = None
    # srx specific
    self.vpn = None
    # gce specific
    self.source_tag = []
    self.destination_tag = []
    # iptables specific
    self.source_interface = None
    self.destination_interface = None
    self.platform = []
    self.platform_exclude = []
    self.timeout = None
    # caches of exclude-applied addresses, presumably populated by
    # FlattenAll() (invoked lazily from __contains__) -- confirm.
    self.flattened = False
    self.flattened_addr = None
    self.flattened_saddr = None
    self.flattened_daddr = None
    # AddObject touches variables which might not have been initialized
    # further up so this has to be at the end.
    self.AddObject(obj)
def __contains__(self, other):
"""Determine if other term is contained in this term."""
if self.verbatim or other.verbatim:
# short circuit these
if sorted(self.verbatim) != sorted(other.verbatim):
return False
# check protocols
# either protocol or protocol-except may be used, not both at the same time.
if self.protocol:
if other.protocol:
if not self.CheckProtocolIsContained(other.protocol, self.protocol):
return False
# this term has protocol, other has protocol_except.
elif other.protocol_except:
return False
else:
# other does not have protocol or protocol_except. since we do other
# cannot be contained in self.
return False
elif self.protocol_except:
if other.protocol_except:
if self.CheckProtocolIsContained(
self.protocol_except, other.protocol_except):
return False
elif other.protocol:
for proto in other.protocol:
if proto in self.protocol_except:
return False
else:
return False
# combine addresses with exclusions for proper contains comparisons.
if not self.flattened:
self.FlattenAll()
if not other.flattened:
other.FlattenAll()
# flat 'address' is compared against other flat (saddr|daddr).
# if NONE of these evaluate to True other is not contained.
if not (
self.CheckAddressIsContained(
self.flattened_addr, other.flattened_addr)
or self.CheckAddressIsContained(
self.flattened_addr, other.flattened_saddr)
or self.CheckAddressIsContained(
self.flattened_addr, other.flattened_daddr)):
return False
# compare flat address from other to flattened self (saddr|daddr).
if not (
# other's flat address needs both self saddr & daddr to contain in order
# for the term to be contained. We already compared the flattened_addr
# attributes of both above, which was not contained.
self.CheckAddressIsContained(
other.flattened_addr, self.flattened_saddr)
and self.CheckAddressIsContained(
other.flattened_addr, self.flattened_daddr)):
return False
# basic saddr/daddr check.
if not (
self.CheckAddressIsContained(
self.flattened_saddr, other.flattened_saddr)):
return False
if not (
self.CheckAddressIsContained(
self.flattened_daddr, other.flattened_daddr)):
return False
if not (
self.CheckPrincipalsContained(
self.principals, other.principals)):
return False
# check ports
# like the address directive, the port directive is special in that it can
# be either source or destination.
if self.port:
if not (self.CheckPortIsContained(self.port, other.port) or
self.CheckPortIsContained(self.port, other.sport) or
self.CheckPortIsContained(self.port, other.dport)):
return False
if not self.CheckPortIsContained(self.source_port, other.source_port):
return False
if not self.CheckPortIsContained(self.destination_port,
other.destination_port):
return False
# prefix lists
if self.source_prefix:
if sorted(self.source_prefix) != sorted(other.source_prefix):
return False
if self.destination_prefix:
if sorted(self.destination_prefix) != sorted(
other.destination_prefix):
return False
# check source and destination tags
if self.source_tag:
if sorted(self.source_tag != sorted(other.source_tag)):
return False
if sorted(self.destination_tag != sorted(other.destination_tag)):
return False
# check precedence
if self.precedence:
if not other.precedence:
return False
for precedence in other.precedence:
if precedence not in self.precedence:
return False
# check various options
if self.option:
if not other.option:
return False
for opt in other.option:
if opt not in self.option:
return False
# check forwarding-class
if self.forwarding_class:
if not other.forwarding_class:
return False
if self.next_ip:
if not other.next_ip:
return False
if self.fragment_offset:
# fragment_offset looks like 'integer-integer' or just, 'integer'
sfo = [int(x) for x in self.fragment_offset.split('-')]
if other.fragment_offset:
ofo = [int(x) for x in other.fragment_offset.split('-')]
if sfo[0] < ofo[0] or sorted(sfo[1:]) > sorted(ofo[1:]):
return False
else:
return False
if self.hop_limit:
# hop_limit looks like 'integer-integer' or just, 'integer'
shl = [int(x) for x in self.hop_limit.split('-')]
if other.hop_limit:
ohl = [int(x) for x in other.hop_limit.split('-')]
if shl[0] < ohl[0]:
return False
shll, ohll = shl[1:2], ohl[1:2]
if shll and ohll:
if shl[0] > ohl[0]:
return False
else:
return False
if self.packet_length:
# packet_length looks like 'integer-integer' or just, 'integer'
spl = [int(x) for x in self.packet_length.split('-')]
if other.packet_length:
opl = [int(x) for x in other.packet_length.split('-')]
if spl[0] < opl[0] or sorted(spl[1:]) > sorted(opl[1:]):
return False
else:
return False
if self.icmp_type:
if sorted(self.icmp_type) is not sorted(other.icmp_type):
return False
# check platform
if self.platform:
if sorted(self.platform) is not sorted(other.platform):
return False
if self.platform_exclude:
if sorted(self.platform_exclude) is not sorted(other.platform_exclude):
return False
# we have containment
return True
def __str__(self):
ret_str = []
ret_str.append(' name: %s' % self.name)
if self.address:
ret_str.append(' address: %s' % self.address)
if self.address_exclude:
ret_str.append(' address_exclude: %s' % self.address_exclude)
if self.source_address:
ret_str.append(' source_address: %s' % self.source_address)
if self.source_address_exclude:
ret_str.append(' source_address_exclude: %s' %
self.source_address_exclude)
if self.source_tag:
ret_str.append(' source_tag: %s' % self.source_tag)
if self.destination_address:
ret_str.append(' destination_address: %s' % self.destination_address)
if self.destination_address_exclude:
ret_str.append(' destination_address_exclude: %s' %
self.destination_address_exclude)
if self.destination_tag:
ret_str.append(' destination_tag: %s' % self.destination_tag)
if self.source_prefix:
ret_str.append(' source_prefix: %s' % self.source_prefix)
if self.destination_prefix:
ret_str.append(' destination_prefix: %s' % self.destination_prefix)
if self.forwarding_class:
ret_str.append(' forwarding_class: %s' % self.forwarding_class)
if self.next_ip:
ret_str.append(' next_ip: %s' % self.next_ip)
if self.protocol:
ret_str.append(' protocol: %s' % self.protocol)
if self.protocol_except:
ret_str.append(' protocol-except: %s' % self.protocol_except)
if self.owner:
ret_str.append(' owner: %s' % self.owner)
if self.port:
ret_str.append(' port: %s' % self.port)
if self.source_port:
ret_str.append(' source_port: %s' % self.source_port)
if self.destination_port:
ret_str.append(' destination_port: %s' % self.destination_port)
if self.action:
ret_str.append(' action: %s' % self.action)
if self.option:
ret_str.append(' option: %s' % self.option)
if self.qos:
ret_str.append(' qos: %s' % self.qos)
if self.logging:
ret_str.append(' logging: %s' % self.logging)
if self.counter:
ret_str.append(' counter: %s' % self.counter)
if self.source_interface:
ret_str.append(' source_interface: %s' % self.source_interface)
if self.destination_interface:
ret_str.append(' destination_interface: %s' % self.destination_interface)
if self.expiration:
ret_str.append(' expiration: %s' % self.expiration)
if self.platform:
ret_str.append(' platform: %s' % self.platform)
if self.platform_exclude:
ret_str.append(' platform_exclude: %s' % self.platform_exclude)
if self.timeout:
ret_str.append(' timeout: %s' % self.timeout)
if self.vpn:
vpn_name, pair_policy = self.vpn
if pair_policy:
ret_str.append(' vpn: name = %s, pair_policy = %s' %
(vpn_name, pair_policy))
else:
ret_str.append(' vpn: name = %s' % vpn_name)
return '\n'.join(ret_str)
def __repr__(self):
return self.__str__()
def __eq__(self, other):
# action
if sorted(self.action) != sorted(other.action):
return False
# addresses.
if not (sorted(self.address) == sorted(other.address) and
sorted(self.source_address) == sorted(other.source_address) and
sorted(self.source_address_exclude) ==
sorted(other.source_address_exclude) and
sorted(self.destination_address) ==
sorted(other.destination_address) and
sorted(self.destination_address_exclude) ==
sorted(other.destination_address_exclude)):
return False
# prefix lists
if not (sorted(self.source_prefix) == sorted(other.source_prefix) and
sorted(self.destination_prefix) ==
sorted(other.destination_prefix)):
return False
# ports
if not (sorted(self.port) == sorted(other.port) and
sorted(self.source_port) == sorted(other.source_port) and
sorted(self.destination_port) == sorted(other.destination_port)):
return False
# protocol
if not (sorted(self.protocol) == sorted(other.protocol) and
sorted(self.protocol_except) == sorted(other.protocol_except)):
return False
# option
if sorted(self.option) != sorted(other.option):
return False
# qos
if self.qos != other.qos:
return False
# verbatim
if self.verbatim != other.verbatim:
return False
# policer
if self.policer != other.policer:
return False
# interface
if self.source_interface != other.source_interface:
return False
if self.destination_interface != other.destination_interface:
return False
# tags
if not (sorted(self.source_tag) == sorted(other.source_tag) and
sorted(self.destination_tag) == sorted(other.destination_tag)):
return False
if sorted(self.logging) != sorted(other.logging):
return False
if self.qos != other.qos:
return False
if self.packet_length != other.packet_length:
return False
if self.fragment_offset != other.fragment_offset:
return False
if self.hop_limit != other.hop_limit:
return False
if sorted(self.icmp_type) != sorted(other.icmp_type):
return False
if sorted(self.ether_type) != sorted(other.ether_type):
return False
if sorted(self.traffic_type) != sorted(other.traffic_type):
return False
# vpn
if self.vpn != other.vpn:
return False
# platform
if not (sorted(self.platform) == sorted(other.platform) and
sorted(self.platform_exclude) == sorted(other.platform_exclude)):
return False
# timeout
if self.timeout != other.timeout:
return False
# precedence
if self.precedence != other.precedence:
return False
# forwarding-class
if self.forwarding_class != other.forwarding_class:
return False
# next_ip
if self.next_ip != other.next_ip:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def AddressesByteLength(self):
"""Returns the byte length of all IP addresses in the term.
This is used in the srx generator due to a address size limitation.
Returns:
counter: Byte length of the sum of both source and destination IPs.
"""
counter = 0
for i in self.source_address:
if i.version == 6:
counter += self._IPV6_BYTE_SIZE
else:
counter += 1
for i in self.destination_address:
if i.version == 6:
counter += self._IPV6_BYTE_SIZE
else:
counter += 1
return counter
def FlattenAll(self):
"""Reduce source, dest, and address fields to their post-exclude state.
Populates the self.flattened_addr, self.flattened_saddr,
self.flattened_daddr by removing excludes from includes.
"""
# No excludes, set flattened attributes and move along.
self.flattened = True
if not (self.source_address_exclude or self.destination_address_exclude or
self.address_exclude):
self.flattened_saddr = self.source_address
self.flattened_daddr = self.destination_address
self.flattened_addr = self.address
return
if self.source_address_exclude:
self.flattened_saddr = self._FlattenAddresses(
self.source_address, self.source_address_exclude)
if self.destination_address_exclude:
self.flattened_daddr = self._FlattenAddresses(
self.destination_address, self.destination_address_exclude)
if self.address_exclude:
self.flattened_addr = self._FlattenAddresses(
self.address, self.address_exclude)
  @staticmethod
  def _FlattenAddresses(include, exclude):
    """Reduce an include and exclude list to a single include list.

    Using recursion, whittle away exclude addresses from address include
    addresses which contain the exclusion.

    Args:
      include: list of include addresses.
      exclude: list of exclude addresses.

    Returns:
      a single flattened list of nacaddr objects.
    """
    # Base case: nothing left to subtract.
    if not exclude:
      return include
    for index, in_addr in enumerate(include):
      for ex_addr in exclude:
        if ex_addr in in_addr:
          # The exclude lies inside this include: split the include around
          # it (address_exclude yields the remaining pieces), mark this slot
          # for removal, and recurse on the pieces with the tail of the
          # exclude list, appending only pieces not already present.
          # NOTE(review): the recursion always drops exclude[0] regardless
          # of which exclude matched — presumably intentional; verify.
          reduced_list = in_addr.address_exclude(ex_addr)
          include[index] = None
          for term in Term._FlattenAddresses(reduced_list, exclude[1:]):
            if term not in include:
              include.append(term)
        elif in_addr in ex_addr:
          # The include is wholly covered by an exclude: drop it entirely.
          include[index] = None
    # Remove items from include outside of the enumerate loop
    while None in include:
      include.remove(None)
    return include
def GetAddressOfVersion(self, addr_type, af=None):
"""Returns addresses of the appropriate Address Family.
Args:
addr_type: string, this will be either
'source_address', 'source_address_exclude',
'destination_address' or 'destination_address_exclude'
af: int or None, either Term.INET4 or Term.INET6
Returns:
list of addresses of the correct family.
"""
if not af:
return getattr(self, addr_type)
return filter(lambda x: x.version == af, getattr(self, addr_type))
def AddObject(self, obj):
"""Add an object of unknown type to this term.
Args:
obj: single or list of either
[Address, Port, Option, Protocol, Counter, Action, Comment, Expiration]
Raises:
InvalidTermActionError: if the action defined isn't an accepted action.
eg, action:: godofoobar
TermObjectTypeError: if AddObject is called with an object it doesn't
understand.
InvalidTermLoggingError: when a option is set for logging not known.
"""
if type(obj) is list:
for x in obj:
# do we have a list of addresses?
# expanded address fields consolidate naked address fields with
# saddr/daddr.
if x.var_type is VarType.SADDRESS:
saddr = DEFINITIONS.GetNetAddr(x.value)
self.source_address.extend(saddr)
elif x.var_type is VarType.DADDRESS:
daddr = DEFINITIONS.GetNetAddr(x.value)
self.destination_address.extend(daddr)
elif x.var_type is VarType.ADDRESS:
addr = DEFINITIONS.GetNetAddr(x.value)
self.address.extend(addr)
# do we have address excludes?
elif x.var_type is VarType.SADDREXCLUDE:
saddr_exclude = DEFINITIONS.GetNetAddr(x.value)
self.source_address_exclude.extend(saddr_exclude)
elif x.var_type is VarType.DADDREXCLUDE:
daddr_exclude = DEFINITIONS.GetNetAddr(x.value)
self.destination_address_exclude.extend(daddr_exclude)
elif x.var_type is VarType.ADDREXCLUDE:
addr_exclude = DEFINITIONS.GetNetAddr(x.value)
self.address_exclude.extend(addr_exclude)
# do we have a list of ports?
elif x.var_type is VarType.PORT:
self.port.append(x.value)
elif x.var_type is VarType.SPORT:
self.source_port.append(x.value)
elif x.var_type is VarType.DPORT:
self.destination_port.append(x.value)
# do we have a list of protocols?
elif x.var_type is VarType.PROTOCOL:
self.protocol.append(x.value)
# do we have a list of protocol-exceptions?
elif x.var_type is VarType.PROTOCOL_EXCEPT:
self.protocol_except.append(x.value)
# do we have a list of options?
elif x.var_type is VarType.OPTION:
self.option.append(x.value)
elif x.var_type is VarType.PRINCIPALS:
self.principals.append(x.value)
elif x.var_type is VarType.SPFX:
self.source_prefix.append(x.value)
elif x.var_type is VarType.DPFX:
self.destination_prefix.append(x.value)
elif x.var_type is VarType.ETHER_TYPE:
self.ether_type.append(x.value)
elif x.var_type is VarType.TRAFFIC_TYPE:
self.traffic_type.append(x.value)
elif x.var_type is VarType.PRECEDENCE:
self.precedence.append(x.value)
elif x.var_type is VarType.FORWARDING_CLASS:
self.forwarding_class = obj.value
elif x.var_type is VarType.NEXT_IP:
self.next_ip = DEFINITIONS.GetNetAddr(x.value)
elif x.var_type is VarType.PLATFORM:
self.platform.append(x.value)
elif x.var_type is VarType.PLATFORMEXCLUDE:
self.platform_exclude.append(x.value)
elif x.var_type is VarType.DSCP_MATCH:
self.dscp_match.append(x.value)
elif x.var_type is VarType.DSCP_EXCEPT:
self.dscp_except.append(x.value)
elif x.var_type is VarType.STAG:
self.source_tag.append(x.value)
elif x.var_type is VarType.DTAG:
self.destination_tag.append(x.value)
else:
raise TermObjectTypeError(
'%s isn\'t a type I know how to deal with (contains \'%s\')' % (
type(x), x.value))
else:
# stupid no switch statement in python
if obj.var_type is VarType.COMMENT:
self.comment.append(str(obj))
elif obj.var_type is VarType.OWNER:
self.owner = obj.value
elif obj.var_type is VarType.EXPIRATION:
self.expiration = obj.value
elif obj.var_type is VarType.LOSS_PRIORITY:
self.loss_priority = obj.value
elif obj.var_type is VarType.ROUTING_INSTANCE:
self.routing_instance = obj.value
elif obj.var_type is VarType.PRECEDENCE:
self.precedence = obj.value
elif obj.var_type is VarType.FORWARDING_CLASS:
self.forwarding_class = obj.value
elif obj.var_type is VarType.NEXT_IP:
self.next_ip = DEFINITIONS.GetNetAddr(obj.value)
elif obj.var_type is VarType.VERBATIM:
self.verbatim.append(obj)
elif obj.var_type is VarType.ACTION:
if str(obj) not in ACTIONS:
raise InvalidTermActionError('%s is not a valid action' % obj)
self.action.append(obj.value)
elif obj.var_type is VarType.COUNTER:
self.counter = obj
elif obj.var_type is VarType.ICMP_TYPE:
self.icmp_type.extend(obj.value)
elif obj.var_type is VarType.LOGGING:
if str(obj) not in _LOGGING:
raise InvalidTermLoggingError('%s is not a valid logging option' %
obj)
self.logging.append(obj)
# police man, tryin'a take you jail
elif obj.var_type is VarType.POLICER:
self.policer = obj.value
# qos?
elif obj.var_type is VarType.QOS:
self.qos = obj.value
elif obj.var_type is VarType.PACKET_LEN:
self.packet_length = obj.value
elif obj.var_type is VarType.FRAGMENT_OFFSET:
self.fragment_offset = obj.value
elif obj.var_type is VarType.HOP_LIMIT:
self.hop_limit = obj.value
elif obj.var_type is VarType.SINTERFACE:
self.source_interface = obj.value
elif obj.var_type is VarType.DINTERFACE:
self.destination_interface = obj.value
elif obj.var_type is VarType.TIMEOUT:
self.timeout = obj.value
elif obj.var_type is VarType.DSCP_SET:
self.dscp_set = obj.value
elif obj.var_type is VarType.VPN:
self.vpn = (obj.value[0], obj.value[1])
else:
raise TermObjectTypeError(
'%s isn\'t a type I know how to deal with' % (type(obj)))
def SanityCheck(self):
"""Sanity check the definition of the term.
Raises:
ParseError: if term has both verbatim and non-verbatim tokens
TermInvalidIcmpType: if term has invalid icmp-types specified
TermNoActionError: if the term doesn't have an action defined.
TermPortProtocolError: if the term has a service/protocol definition pair
which don't match up, eg. SNMP and tcp
TermAddressExclusionError: if one of the *-exclude directives is defined,
but that address isn't contained in the non *-exclude directive. eg:
source-address::CORP_INTERNAL source-exclude:: LOCALHOST
TermProtocolEtherTypeError: if the term has both ether-type and
upper-layer protocol restrictions
InvalidTermActionError: action and routing-instance both defined
This should be called when the term is fully formed, and
all of the options are set.
"""
if self.verbatim:
if (self.action or self.source_port or self.destination_port or
self.port or self.protocol or self.option):
raise ParseError(
'term "%s" has both verbatim and non-verbatim tokens.' % self.name)
else:
if not self.action and not self.routing_instance and not self.next_ip:
raise TermNoActionError('no action specified for term %s' % self.name)
# have we specified a port with a protocol that doesn't support ports?
if self.source_port or self.destination_port or self.port:
if not any(proto in self.protocol for proto in ['tcp', 'udp', 'sctp']):
raise TermPortProtocolError(
'ports specified with a protocol that doesn\'t support ports. '
'Term: %s ' % self.name)
# TODO(pmoody): do we have mutually exclusive options?
# eg. tcp-established + tcp-initial?
if self.ether_type and (
self.protocol or
self.address or
self.destination_address or
self.destination_address_exclude or
self.destination_port or
self.destination_prefix or
self.source_address or
self.source_address_exclude or
self.source_port or
self.source_prefix):
raise TermProtocolEtherTypeError(
'ether-type not supported when used with upper-layer protocol '
'restrictions. Term: %s' % self.name)
# validate icmp-types if specified, but addr_family will have to be checked
# in the generators as policy module doesn't know about that at this point.
if self.icmp_type:
for icmptype in self.icmp_type:
if (icmptype not in self.ICMP_TYPE[4] and icmptype not in
self.ICMP_TYPE[6]):
raise TermInvalidIcmpType('Term %s contains an invalid icmp-type:'
'%s' % (self.name, icmptype))
def AddressCleanup(self, optimize=True):
"""Do Address and Port collapsing.
Notes:
Collapses both the address definitions and the port definitions
to their smallest possible length.
Args:
optimize: boolean value indicating whether to optimize addresses
"""
if optimize:
cleanup = nacaddr.CollapseAddrList
else:
cleanup = nacaddr.SortAddrList
# address collapsing.
if self.address:
self.address = cleanup(self.address)
if self.source_address:
self.source_address = cleanup(self.source_address)
if self.source_address_exclude:
self.source_address_exclude = cleanup(self.source_address_exclude)
if self.destination_address:
self.destination_address = cleanup(self.destination_address)
if self.destination_address_exclude:
self.destination_address_exclude = cleanup(
self.destination_address_exclude)
# port collapsing.
if self.port:
self.port = self.CollapsePortList(self.port)
if self.source_port:
self.source_port = self.CollapsePortList(self.source_port)
if self.destination_port:
self.destination_port = self.CollapsePortList(self.destination_port)
def CollapsePortListRecursive(self, ports):
"""Given a sorted list of ports, collapse to the smallest required list.
Args:
ports: sorted list of port tuples
Returns:
ret_ports: collapsed list of ports
"""
optimized = False
ret_ports = []
for port in ports:
if not ret_ports:
ret_ports.append(port)
# we should be able to count on ret_ports[-1][0] <= port[0]
elif ret_ports[-1][1] >= port[1]:
# (10, 20) and (12, 13) -> (10, 20)
optimized = True
elif port[0] < ret_ports[-1][1] < port[1]:
# (10, 20) and (15, 30) -> (10, 30)
ret_ports[-1] = (ret_ports[-1][0], port[1])
optimized = True
elif ret_ports[-1][1] + 1 == port[0]:
# (10, 20) and (21, 30) -> (10, 30)
ret_ports[-1] = (ret_ports[-1][0], port[1])
optimized = True
else:
# (10, 20) and (22, 30) -> (10, 20), (22, 30)
ret_ports.append(port)
if optimized:
return self.CollapsePortListRecursive(ret_ports)
return ret_ports
def CollapsePortList(self, ports):
"""Given a list of ports, Collapse to the smallest required.
Args:
ports: a list of port strings eg: [(80,80), (53,53) (2000, 2009),
(1024,65535)]
Returns:
ret_array: the collapsed sorted list of ports, eg: [(53,53), (80,80),
(1024,65535)]
"""
return self.CollapsePortListRecursive(sorted(ports))
def CheckPrincipalsContained(self, superset, subset):
"""Check to if the given list of principals is wholly contained.
Args:
superset: list of principals
subset: list of principals
Returns:
bool: True if subset is contained in superset. false otherwise.
"""
# Skip set comparison if neither term has principals.
if not superset and not subset:
return True
# Convert these lists to sets to use set comparison.
sup = set(superset)
sub = set(subset)
return sub.issubset(sup)
def CheckProtocolIsContained(self, superset, subset):
"""Check if the given list of protocols is wholly contained.
Args:
superset: list of protocols
subset: list of protocols
Returns:
bool: True if subset is contained in superset. false otherwise.
"""
if not superset:
return True
if not subset:
return False
# Convert these lists to sets to use set comparison.
sup = set(superset)
sub = set(subset)
return sub.issubset(sup)
def CheckPortIsContained(self, superset, subset):
"""Check if the given list of ports is wholly contained.
Args:
superset: list of port tuples
subset: list of port tuples
Returns:
bool: True if subset is contained in superset, false otherwise
"""
if not superset:
return True
if not subset:
return False
for sub_port in subset:
not_contains = True
for sup_port in superset:
if (int(sub_port[0]) >= int(sup_port[0])
and int(sub_port[1]) <= int(sup_port[1])):
not_contains = False
break
if not_contains:
return False
return True
def CheckAddressIsContained(self, superset, subset):
"""Check if subset is wholey contained by superset.
Args:
superset: list of the superset addresses
subset: list of the subset addresses
Returns:
True or False.
"""
if not superset:
return True
if not subset:
return False
for sub_addr in subset:
sub_contained = False
for sup_addr in superset:
# ipaddr ensures that version numbers match for inclusion.
if sub_addr in sup_addr:
sub_contained = True
break
if not sub_contained:
return False
return True
class VarType(object):
  """Generic object meant to store lots of basic policy types."""

  # Integer tags identifying what kind of policy token a VarType carries.
  COMMENT = 0
  COUNTER = 1
  ACTION = 2
  SADDRESS = 3
  DADDRESS = 4
  ADDRESS = 5
  SPORT = 6
  DPORT = 7
  PROTOCOL_EXCEPT = 8
  OPTION = 9
  PROTOCOL = 10
  SADDREXCLUDE = 11
  DADDREXCLUDE = 12
  LOGGING = 13
  QOS = 14
  POLICER = 15
  PACKET_LEN = 16
  FRAGMENT_OFFSET = 17
  ICMP_TYPE = 18
  SPFX = 19
  DPFX = 20
  ETHER_TYPE = 21
  TRAFFIC_TYPE = 22
  VERBATIM = 23
  LOSS_PRIORITY = 24
  ROUTING_INSTANCE = 25
  PRECEDENCE = 26
  SINTERFACE = 27
  EXPIRATION = 28
  DINTERFACE = 29
  PLATFORM = 30
  PLATFORMEXCLUDE = 31
  PORT = 32
  TIMEOUT = 33
  OWNER = 34
  PRINCIPALS = 35
  ADDREXCLUDE = 36
  VPN = 37
  APPLY_GROUPS = 38
  APPLY_GROUPS_EXCEPT = 39
  DSCP_SET = 40
  DSCP_MATCH = 41
  DSCP_EXCEPT = 42
  FORWARDING_CLASS = 43
  STAG = 44
  DTAG = 45
  NEXT_IP = 46
  HOP_LIMIT = 47

  def __init__(self, var_type, value):
    """Store the type tag and (possibly normalized) value.

    Args:
      var_type: one of the integer constants defined on this class.
      value: the parsed value; COMMENT values get quote/indent cleanup.
    """
    self.var_type = var_type
    if self.var_type == self.COMMENT:
      # remove the double quotes
      comment = value.strip('"')
      # make all of the lines start w/o leading whitespace.
      self.value = '\n'.join([x.lstrip() for x in comment.splitlines()])
    else:
      self.value = value

  def __str__(self):
    return str(self.value)

  def __repr__(self):
    return self.__str__()

  def __eq__(self, other):
    # Robustness fix: comparing against a non-VarType previously raised
    # AttributeError; deferring to the other operand yields False instead.
    if not isinstance(other, VarType):
      return NotImplemented
    return self.var_type == other.var_type and self.value == other.value
class Header(object):
  """The header of the policy file contains the targets and a global comment."""

  def __init__(self):
    self.target = []
    self.comment = []
    self.apply_groups = []
    self.apply_groups_except = []

  def AddObject(self, obj):
    """Add and object to the Header.

    Args:
      obj: of type VarType.COMMENT, VarType.APPLY_GROUPS,
           VarType.APPLY_GROUPS_EXCEPT, or Target

    Raises:
      RuntimeError: if object type cannot be determined
    """
    if type(obj) == Target:
      self.target.append(obj)
    elif isinstance(obj, list) and all(isinstance(x, VarType) for x in obj):
      for x in obj:
        if x.var_type == VarType.APPLY_GROUPS:
          self.apply_groups.append(str(x))
        elif x.var_type == VarType.APPLY_GROUPS_EXCEPT:
          self.apply_groups_except.append(str(x))
    # getattr: an object without var_type now hits the documented
    # RuntimeError below instead of raising AttributeError.
    elif getattr(obj, 'var_type', None) == VarType.COMMENT:
      self.comment.append(str(obj))
    else:
      raise RuntimeError('Unable to add object from header.')

  @property
  def platforms(self):
    """The platform targets of this particular header."""
    # List comprehension rather than map(): map() is a lazy iterator on
    # Python 3, so callers comparing/indexing the result would break.
    return [x.platform for x in self.target]

  def FilterOptions(self, platform):
    """Given a platform return the options.

    Args:
      platform: string

    Returns:
      list or None
    """
    for target in self.target:
      if target.platform == platform:
        return target.options
    return []

  def FilterName(self, platform):
    """Given a filter_type, return the filter name.

    Args:
      platform: string

    Returns:
      filter_name: string or None

    Notes:
      !! Deprecated in favor of Header.FilterOptions(platform) !!
    """
    for target in self.target:
      if target.platform == platform:
        if target.options:
          return target.options[0]
    return None

  def __str__(self):
    return 'Target[%s], Comments [%s], Apply groups: [%s], except: [%s]' % (
        ', '.join(map(str, self.target)),
        ', '.join(self.comment),
        ', '.join(self.apply_groups),
        ', '.join(self.apply_groups_except))

  def __repr__(self):
    return self.__str__()

  def __eq__(self, obj):
    """Compares for equality against another Header object.

    Note that it is picky and requires the list contents to be in the
    same order.

    Args:
      obj: object to be compared to for equality.

    Returns:
      True if all the list member variables of this object are equal to the
      list member variables of obj and False otherwise.
    """
    if not isinstance(obj, Header):
      return False
    if self.target != obj.target:
      return False
    if self.comment != obj.comment:
      return False
    if self.apply_groups != obj.apply_groups:
      return False
    if self.apply_groups_except != obj.apply_groups_except:
      return False
    return True
# This could be a VarType object, but I'm keeping it as its own class
# b/c we're almost certainly going to have to do something more exotic with
# it shortly to account for various rendering options like default iptables
# policies or output file names, etc. etc.
class Target(object):
  """The type of acl to be rendered from this policy file."""

  def __init__(self, target):
    # First element names the platform; anything after it becomes options.
    self.platform = target[0]
    self.options = target[1:] if len(target) > 1 else None

  def __str__(self):
    return self.platform

  def __repr__(self):
    return str(self)

  def __eq__(self, other):
    return self.platform == other.platform and self.options == other.options

  def __ne__(self, other):
    return not (self == other)
# Lexing/Parsing starts here
# Token names recognized by the PLY lexer.  Each name corresponds either to
# a t_<NAME> rule below or to an entry in the `reserved` keyword map.
tokens = (
    'ACTION',
    'ADDR',
    'ADDREXCLUDE',
    'COMMENT',
    'COUNTER',
    'DADDR',
    'DADDREXCLUDE',
    'DINTERFACE',
    'DPFX',
    'DPORT',
    'DQUOTEDSTRING',
    'DSCP',
    'DSCP_EXCEPT',
    'DSCP_MATCH',
    'DSCP_RANGE',
    'DSCP_SET',
    'DTAG',
    'ESCAPEDSTRING',
    'ETHER_TYPE',
    'EXPIRATION',
    'FORWARDING_CLASS',
    'FRAGMENT_OFFSET',
    'HOP_LIMIT',
    'APPLY_GROUPS',
    'APPLY_GROUPS_EXCEPT',
    'HEADER',
    'ICMP_TYPE',
    'INTEGER',
    'LOGGING',
    'LOSS_PRIORITY',
    'NEXT_IP',
    'OPTION',
    'OWNER',
    'PACKET_LEN',
    'PLATFORM',
    'PLATFORMEXCLUDE',
    'POLICER',
    'PORT',
    'PRECEDENCE',
    'PRINCIPALS',
    'PROTOCOL',
    'PROTOCOL_EXCEPT',
    'QOS',
    'ROUTING_INSTANCE',
    'SADDR',
    'SADDREXCLUDE',
    'SINTERFACE',
    'SPFX',
    'SPORT',
    'STAG',
    'STRING',
    'TARGET',
    'TERM',
    'TIMEOUT',
    'TRAFFIC_TYPE',
    'VERBATIM',
    'VPN',
)

# Single characters PLY may return as literal tokens.
literals = r':{},-'
# Inter-token whitespace silently skipped by the lexer.
t_ignore = ' \t'
# Maps policy-language keywords to their token names; consulted by t_STRING
# so that bare identifiers matching a keyword lex as that keyword.
reserved = {
    'action': 'ACTION',
    'address': 'ADDR',
    'address-exclude': 'ADDREXCLUDE',
    'comment': 'COMMENT',
    'counter': 'COUNTER',
    'destination-address': 'DADDR',
    'destination-exclude': 'DADDREXCLUDE',
    'destination-interface': 'DINTERFACE',
    'destination-prefix': 'DPFX',
    'destination-port': 'DPORT',
    'destination-tag': 'DTAG',
    'dscp-except': 'DSCP_EXCEPT',
    'dscp-match': 'DSCP_MATCH',
    'dscp-set': 'DSCP_SET',
    'ether-type': 'ETHER_TYPE',
    'expiration': 'EXPIRATION',
    'forwarding-class': 'FORWARDING_CLASS',
    'fragment-offset': 'FRAGMENT_OFFSET',
    'hop-limit': 'HOP_LIMIT',
    'apply-groups': 'APPLY_GROUPS',
    'apply-groups-except': 'APPLY_GROUPS_EXCEPT',
    'header': 'HEADER',
    'icmp-type': 'ICMP_TYPE',
    'logging': 'LOGGING',
    'loss-priority': 'LOSS_PRIORITY',
    'next-ip': 'NEXT_IP',
    'option': 'OPTION',
    'owner': 'OWNER',
    'packet-length': 'PACKET_LEN',
    'platform': 'PLATFORM',
    'platform-exclude': 'PLATFORMEXCLUDE',
    'policer': 'POLICER',
    'port': 'PORT',
    'precedence': 'PRECEDENCE',
    'principals': 'PRINCIPALS',
    'protocol': 'PROTOCOL',
    'protocol-except': 'PROTOCOL_EXCEPT',
    'qos': 'QOS',
    'routing-instance': 'ROUTING_INSTANCE',
    'source-address': 'SADDR',
    'source-exclude': 'SADDREXCLUDE',
    'source-interface': 'SINTERFACE',
    'source-prefix': 'SPFX',
    'source-port': 'SPORT',
    'source-tag': 'STAG',
    'target': 'TARGET',
    'term': 'TERM',
    'timeout': 'TIMEOUT',
    'traffic-type': 'TRAFFIC_TYPE',
    'verbatim': 'VERBATIM',
    'vpn': 'VPN',
}
# disable linting warnings for lexx/yacc code
# pylint: disable=unused-argument,invalid-name,g-short-docstring-punctuation
# pylint: disable=g-docstring-quotes,g-short-docstring-space
# pylint: disable=g-space-before-docstring-summary,g-doc-args
# pylint: disable=g-no-space-after-docstring-summary
# pylint: disable=g-docstring-missing-newline
# Lexer rule: swallow '#' line comments (no token emitted).  The docstring
# below is the PLY regex for this rule — do not edit it.
def t_IGNORE_COMMENT(t):
  r'\#.*'
  pass
# Lexer rule: a double-quoted string containing at least one escaped quote
# (\").  Defined before t_DQUOTEDSTRING so PLY tries it first.  The
# docstring below is the PLY regex — do not edit it.
def t_ESCAPEDSTRING(t):
  r'"([^"\\]*(?:\\"[^"\\]*)+)"'
  # Keep lexer line numbering accurate across multi-line strings.
  t.lexer.lineno += str(t.value).count('\n')
  return t
# Lexer rule: a plain double-quoted string (no escaped quotes).  The
# docstring below is the PLY regex — do not edit it.
def t_DQUOTEDSTRING(t):
  r'"[^"]*?"'
  # Keep lexer line numbering accurate across multi-line strings.
  t.lexer.lineno += str(t.value).count('\n')
  return t
# Lexer rule: count newlines for line tracking; emits no token.  The
# docstring below is the PLY regex — do not edit it.
def t_newline(t):
  r'\n+'
  t.lexer.lineno += len(t.value)
# PLY error handler: report the illegal character and resume lexing one
# character later.  (No docstring: PLY treats t_* docstrings specially.)
def t_error(t):
  # print() call form works on both Python 2 and Python 3; the original
  # `print "..."` statement is a syntax error under Python 3.
  print("Illegal character '%s' on line %s" % (t.value[0], t.lineno))
  t.lexer.skip(1)
# Lexer rule: a hyphenated DSCP range like 'af11-af13' or 'cs0-cs7'.  The
# docstring below is the PLY regex — do not edit it.
def t_DSCP_RANGE(t):
  # pylint: disable=line-too-long
  r'\b((b[0-1]{6})|(af[1-4]{1}[1-3]{1})|(be)|(ef)|(cs[0-7]{1}))([-]{1})((b[0-1]{6})|(af[1-4]{1}[1-3]{1})|(be)|(ef)|(cs[0-7]{1}))\b'
  # DSCP literals are not in `reserved`, so this lookup falls through to
  # the 'DSCP_RANGE' default.
  t.type = reserved.get(t.value, 'DSCP_RANGE')
  return t
# Lexer rule: a single DSCP value (b-binary, afXY, be, ef, or csN).  The
# docstring below is the PLY regex — do not edit it.
def t_DSCP(t):
  r'\b((b[0-1]{6})|(af[1-4]{1}[1-3]{1})|(be)|(ef)|(cs[0-7]{1}))\b'
  # DSCP literals are not in `reserved`, so the lookup keeps 'DSCP'.
  t.type = reserved.get(t.value, 'DSCP')
  return t
# Lexer rule: a run of digits; value stays a string.  The docstring below
# is the PLY regex — do not edit it.
def t_INTEGER(t):
  r'\d+'
  return t
# Lexer rule: identifiers (including hyphen/underscore/dot/at/slash
# separators).  The docstring below is the PLY regex — do not edit it.
def t_STRING(t):
  r'\w+([-_+.@/]\w*)*'
  # we have an identifier; let's check if it's a keyword or just a string.
  t.type = reserved.get(t.value, 'STRING')
  return t
###
## parser starts here
###
# Grammar action: a policy file is a sequence of (header, terms) filter
# pairs accumulated into one Policy.  Docstring = grammar; do not edit.
def p_target(p):
  """ target : target header terms
             | """
  if len(p) > 1:
    if type(p[1]) is Policy:
      # Subsequent filters are appended to the existing Policy.
      p[1].AddFilter(p[2], p[3])
      p[0] = p[1]
    else:
      # First filter seen: create the Policy object.
      p[0] = Policy(p[2], p[3])
# Grammar action: a braced header block reduces to its Header object.
# Docstring = grammar production; do not edit.
def p_header(p):
  """ header : HEADER '{' header_spec '}' """
  p[0] = p[3]
# Grammar action: accumulate target/comment/apply-groups specs into a
# single Header object.  Docstring = grammar production; do not edit.
def p_header_spec(p):
  """ header_spec : header_spec target_spec
                  | header_spec comment_spec
                  | header_spec apply_groups_spec
                  | header_spec apply_groups_except_spec
                  | """
  if len(p) > 1:
    if type(p[1]) == Header:
      p[1].AddObject(p[2])
      p[0] = p[1]
    else:
      # First spec seen: create the Header and add to it.
      p[0] = Header()
      p[0].AddObject(p[2])
# we may want to change this at some point if we want to be clever with things
# like being able to set a default input/output policy for iptables policies.
# Grammar action: 'target:: <platform> [options...]' becomes a Target.
# Docstring = grammar production; do not edit.
def p_target_spec(p):
  """ target_spec : TARGET ':' ':' strings_or_ints """
  p[0] = Target(p[4])
# Grammar action: accumulate named, braced term blocks into a list of Term
# objects.  Docstring = grammar production; do not edit.
def p_terms(p):
  """ terms : terms TERM STRING '{' term_spec '}'
            | """
  if len(p) > 1:
    # The term's name comes from the STRING following the TERM keyword.
    p[5].name = p[3]
    if type(p[1]) == list:
      p[1].append(p[5])
      p[0] = p[1]
    else:
      p[0] = [p[5]]
# Grammar action: fold each recognized term field into a single Term via
# Term.AddObject.  Docstring = grammar production; do not edit.
def p_term_spec(p):
  """ term_spec : term_spec action_spec
                | term_spec addr_spec
                | term_spec comment_spec
                | term_spec counter_spec
                | term_spec dscp_set_spec
                | term_spec dscp_match_spec
                | term_spec dscp_except_spec
                | term_spec ether_type_spec
                | term_spec exclude_spec
                | term_spec expiration_spec
                | term_spec forwarding_class_spec
                | term_spec fragment_offset_spec
                | term_spec hop_limit_spec
                | term_spec icmp_type_spec
                | term_spec interface_spec
                | term_spec logging_spec
                | term_spec losspriority_spec
                | term_spec next_ip_spec
                | term_spec option_spec
                | term_spec owner_spec
                | term_spec packet_length_spec
                | term_spec platform_spec
                | term_spec policer_spec
                | term_spec port_spec
                | term_spec precedence_spec
                | term_spec principals_spec
                | term_spec prefix_list_spec
                | term_spec protocol_spec
                | term_spec qos_spec
                | term_spec routinginstance_spec
                | term_spec tag_list_spec
                | term_spec timeout_spec
                | term_spec traffic_type_spec
                | term_spec verbatim_spec
                | term_spec vpn_spec
                | """
  if len(p) > 1:
    if type(p[1]) == Term:
      p[1].AddObject(p[2])
      p[0] = p[1]
    else:
      # First field seen: it seeds the Term constructor.
      p[0] = Term(p[2])
# Grammar action: 'routing-instance:: STRING'.  Docstring = grammar.
def p_routinginstance_spec(p):
  """ routinginstance_spec : ROUTING_INSTANCE ':' ':' STRING """
  p[0] = VarType(VarType.ROUTING_INSTANCE, p[4])
# Grammar action: 'loss-priority:: STRING'.  Docstring = grammar.
def p_losspriority_spec(p):
  """ losspriority_spec : LOSS_PRIORITY ':' ':' STRING """
  p[0] = VarType(VarType.LOSS_PRIORITY, p[4])
# Grammar action: 'precedence:: <ints>' — note the whole list is wrapped in
# one VarType here.  Docstring = grammar production; do not edit.
def p_precedence_spec(p):
  """ precedence_spec : PRECEDENCE ':' ':' one_or_more_ints """
  p[0] = VarType(VarType.PRECEDENCE, p[4])
# Grammar action: 'forwarding-class:: STRING'.  Docstring = grammar.
def p_forwarding_class_spec(p):
  """ forwarding_class_spec : FORWARDING_CLASS ':' ':' STRING """
  p[0] = VarType(VarType.FORWARDING_CLASS, p[4])
# Grammar action: 'next-ip:: STRING'.  Docstring = grammar.
def p_next_ip_spec(p):
  """ next_ip_spec : NEXT_IP ':' ':' STRING """
  p[0] = VarType(VarType.NEXT_IP, p[4])
# Grammar action: 'icmp-type:: <names>' — one VarType wrapping the list.
# Docstring = grammar production; do not edit.
def p_icmp_type_spec(p):
  """ icmp_type_spec : ICMP_TYPE ':' ':' one_or_more_strings """
  p[0] = VarType(VarType.ICMP_TYPE, p[4])
# Grammar action: 'packet-length:: N' or 'packet-length:: N-M'; value is
# stored as 'N' or 'N-M' string.  Docstring = grammar; do not edit.
def p_packet_length_spec(p):
  """ packet_length_spec : PACKET_LEN ':' ':' INTEGER
                         | PACKET_LEN ':' ':' INTEGER '-' INTEGER """
  if len(p) == 5:
    p[0] = VarType(VarType.PACKET_LEN, str(p[4]))
  else:
    p[0] = VarType(VarType.PACKET_LEN, str(p[4]) + '-' + str(p[6]))
# Grammar action: 'fragment-offset:: N' or 'N-M'; stored as a string.
# Docstring = grammar production; do not edit.
def p_fragment_offset_spec(p):
  """ fragment_offset_spec : FRAGMENT_OFFSET ':' ':' INTEGER
                           | FRAGMENT_OFFSET ':' ':' INTEGER '-' INTEGER """
  if len(p) == 5:
    p[0] = VarType(VarType.FRAGMENT_OFFSET, str(p[4]))
  else:
    p[0] = VarType(VarType.FRAGMENT_OFFSET, str(p[4]) + '-' + str(p[6]))
# Grammar action: 'hop-limit:: N' or 'N-M'; stored as a string.
# Docstring = grammar production; do not edit.
def p_hop_limit_spec(p):
  """ hop_limit_spec : HOP_LIMIT ':' ':' INTEGER
                     | HOP_LIMIT ':' ':' INTEGER '-' INTEGER """
  if len(p) == 5:
    p[0] = VarType(VarType.HOP_LIMIT, str(p[4]))
  else:
    p[0] = VarType(VarType.HOP_LIMIT, str(p[4]) + '-' + str(p[6]))
# Grammar action: collect one or more DSCP values/ranges/integers into a
# flat list.  Docstring = grammar production; do not edit.
def p_one_or_more_dscps(p):
  """ one_or_more_dscps : one_or_more_dscps DSCP_RANGE
                        | one_or_more_dscps DSCP
                        | one_or_more_dscps INTEGER
                        | DSCP_RANGE
                        | DSCP
                        | INTEGER """
  if len(p) > 1:
    if type(p[1]) is list:
      p[1].append(p[2])
      p[0] = p[1]
    else:
      # Single value: start a fresh list.
      p[0] = [p[1]]
# Grammar action: 'dscp-set:: <dscp|int>' — exactly one value.
# Docstring = grammar production; do not edit.
def p_dscp_set_spec(p):
  """ dscp_set_spec : DSCP_SET ':' ':' DSCP
                    | DSCP_SET ':' ':' INTEGER """
  p[0] = VarType(VarType.DSCP_SET, p[4])
# Grammar action: 'dscp-match:: <dscps>' — one VarType per value.
# Docstring = grammar production; do not edit.
def p_dscp_match_spec(p):
  """ dscp_match_spec : DSCP_MATCH ':' ':' one_or_more_dscps """
  p[0] = []
  for dscp in p[4]:
    p[0].append(VarType(VarType.DSCP_MATCH, dscp))
# Grammar action: 'dscp-except:: <dscps>' — one VarType per value.
# Docstring = grammar production; do not edit.
def p_dscp_except_spec(p):
  """ dscp_except_spec : DSCP_EXCEPT ':' ':' one_or_more_dscps """
  p[0] = []
  for dscp in p[4]:
    p[0].append(VarType(VarType.DSCP_EXCEPT, dscp))
# Grammar action: the four *-exclude/except fields share one rule; p[1]
# (the keyword text) selects the VarType.  Docstring = grammar; do not edit.
def p_exclude_spec(p):
  """ exclude_spec : SADDREXCLUDE ':' ':' one_or_more_strings
                   | DADDREXCLUDE ':' ':' one_or_more_strings
                   | ADDREXCLUDE ':' ':' one_or_more_strings
                   | PROTOCOL_EXCEPT ':' ':' one_or_more_strings """
  p[0] = []
  for ex in p[4]:
    if p[1].find('source-exclude') >= 0:
      p[0].append(VarType(VarType.SADDREXCLUDE, ex))
    elif p[1].find('destination-exclude') >= 0:
      p[0].append(VarType(VarType.DADDREXCLUDE, ex))
    elif p[1].find('address-exclude') >= 0:
      p[0].append(VarType(VarType.ADDREXCLUDE, ex))
    elif p[1].find('protocol-except') >= 0:
      p[0].append(VarType(VarType.PROTOCOL_EXCEPT, ex))
def p_prefix_list_spec(p):
  """ prefix_list_spec : DPFX ':' ':' one_or_more_strings
                   | SPFX ':' ':' one_or_more_strings """
  # Keyword text distinguishes source vs. destination prefix lists.
  if 'source-prefix' in p[1]:
    pfx_type = VarType.SPFX
  elif 'destination-prefix' in p[1]:
    pfx_type = VarType.DPFX
  else:
    pfx_type = None
  p[0] = [] if pfx_type is None else [VarType(pfx_type, pfx) for pfx in p[4]]
def p_addr_spec(p):
  """ addr_spec : SADDR ':' ':' one_or_more_strings
            | DADDR ':' ':' one_or_more_strings
            | ADDR ':' ':' one_or_more_strings """
  # Keyword text picks the address direction; plain 'address' is the fallback.
  if 'source-address' in p[1]:
    addr_type = VarType.SADDRESS
  elif 'destination-address' in p[1]:
    addr_type = VarType.DADDRESS
  else:
    addr_type = VarType.ADDRESS
  p[0] = [VarType(addr_type, addr) for addr in p[4]]
def p_port_spec(p):
  """ port_spec : SPORT ':' ':' one_or_more_strings
            | DPORT ':' ':' one_or_more_strings
            | PORT ':' ':' one_or_more_strings """
  # Keyword text picks the port direction; plain 'port' is the fallback.
  if 'source-port' in p[1]:
    port_type = VarType.SPORT
  elif 'destination-port' in p[1]:
    port_type = VarType.DPORT
  else:
    port_type = VarType.PORT
  p[0] = [VarType(port_type, port) for port in p[4]]
def p_protocol_spec(p):
  """ protocol_spec : PROTOCOL ':' ':' strings_or_ints """
  # Protocols may be names (STRING) or numbers (INTEGER); wrap each one.
  p[0] = [VarType(VarType.PROTOCOL, proto) for proto in p[4]]
def p_tag_list_spec(p):
  """ tag_list_spec : DTAG ':' ':' one_or_more_strings
                | STAG ':' ':' one_or_more_strings """
  # Keyword text distinguishes source vs. destination tags.
  if 'source-tag' in p[1]:
    tag_type = VarType.STAG
  elif 'destination-tag' in p[1]:
    tag_type = VarType.DTAG
  else:
    tag_type = None
  p[0] = [] if tag_type is None else [VarType(tag_type, tag) for tag in p[4]]
def p_ether_type_spec(p):
  """ ether_type_spec : ETHER_TYPE ':' ':' one_or_more_strings """
  # One VarType entry per listed ether-type name.
  p[0] = [VarType(VarType.ETHER_TYPE, name) for name in p[4]]
def p_traffic_type_spec(p):
  """ traffic_type_spec : TRAFFIC_TYPE ':' ':' one_or_more_strings """
  # One VarType entry per listed traffic-type name.
  p[0] = [VarType(VarType.TRAFFIC_TYPE, name) for name in p[4]]
def p_policer_spec(p):
  """ policer_spec : POLICER ':' ':' STRING """
  # The policer name is the single STRING operand.
  p[0] = VarType(VarType.POLICER, p[4])
def p_logging_spec(p):
  """ logging_spec : LOGGING ':' ':' STRING """
  # Logging option value (e.g. 'true'/'syslog') kept verbatim as a string.
  p[0] = VarType(VarType.LOGGING, p[4])
def p_option_spec(p):
  """ option_spec : OPTION ':' ':' one_or_more_strings """
  # One VarType entry per listed option keyword.
  p[0] = [VarType(VarType.OPTION, opt) for opt in p[4]]
def p_principals_spec(p):
  """ principals_spec : PRINCIPALS ':' ':' one_or_more_strings """
  # One VarType entry per listed principal.
  p[0] = [VarType(VarType.PRINCIPALS, principal) for principal in p[4]]
def p_action_spec(p):
  """ action_spec : ACTION ':' ':' STRING """
  # Term action keyword (e.g. accept/deny) kept verbatim as a string.
  p[0] = VarType(VarType.ACTION, p[4])
def p_counter_spec(p):
  """ counter_spec : COUNTER ':' ':' STRING """
  # Counter name is the single STRING operand.
  p[0] = VarType(VarType.COUNTER, p[4])
def p_expiration_spec(p):
  """ expiration_spec : EXPIRATION ':' ':' INTEGER '-' INTEGER '-' INTEGER """
  # The three INTEGER tokens form a YYYY-MM-DD date; datetime.date raises
  # ValueError on out-of-range components, which surfaces as a parse failure.
  p[0] = VarType(VarType.EXPIRATION, datetime.date(int(p[4]),
                                                   int(p[6]),
                                                   int(p[8])))
def p_comment_spec(p):
  """ comment_spec : COMMENT ':' ':' DQUOTEDSTRING """
  # Comment text (still double-quoted) kept verbatim.
  p[0] = VarType(VarType.COMMENT, p[4])
def p_owner_spec(p):
  """ owner_spec : OWNER ':' ':' STRING """
  # Owner identifier kept verbatim as a string.
  p[0] = VarType(VarType.OWNER, p[4])
def p_verbatim_spec(p):
  """ verbatim_spec : VERBATIM ':' ':' STRING DQUOTEDSTRING
                | VERBATIM ':' ':' STRING ESCAPEDSTRING """
  # Store [platform, text]; strip the surrounding quotes and unescape any
  # embedded \" sequences in the verbatim payload.
  p[0] = VarType(VarType.VERBATIM, [p[4], p[5].strip('"').replace('\\"', '"')])
def p_vpn_spec(p):
  """ vpn_spec : VPN ':' ':' STRING STRING
           | VPN ':' ':' STRING """
  # The second STRING (pair policy) is optional; default it to ''.
  pair_policy = p[5] if len(p) == 6 else ''
  p[0] = VarType(VarType.VPN, [p[4], pair_policy])
def p_qos_spec(p):
  """ qos_spec : QOS ':' ':' STRING """
  # QoS class name kept verbatim as a string.
  p[0] = VarType(VarType.QOS, p[4])
def p_interface_spec(p):
  """ interface_spec : SINTERFACE ':' ':' STRING
                 | DINTERFACE ':' ':' STRING """
  # Keyword text distinguishes the two alternatives sharing this rule.
  if 'source-interface' in p[1]:
    p[0] = VarType(VarType.SINTERFACE, p[4])
  elif 'destination-interface' in p[1]:
    p[0] = VarType(VarType.DINTERFACE, p[4])
def p_platform_spec(p):
  """ platform_spec : PLATFORM ':' ':' one_or_more_strings
                | PLATFORMEXCLUDE ':' ':' one_or_more_strings """
  # Test 'platform-exclude' first: plain 'platform' is a substring of it,
  # so the order of these checks is significant.
  if 'platform-exclude' in p[1]:
    plat_type = VarType.PLATFORMEXCLUDE
  elif 'platform' in p[1]:
    plat_type = VarType.PLATFORM
  else:
    plat_type = None
  p[0] = [] if plat_type is None else [VarType(plat_type, pl) for pl in p[4]]
def p_apply_groups_spec(p):
  """ apply_groups_spec : APPLY_GROUPS ':' ':' one_or_more_strings """
  # One VarType entry per listed group name.
  p[0] = [VarType(VarType.APPLY_GROUPS, group) for group in p[4]]
def p_apply_groups_except_spec(p):
  """ apply_groups_except_spec : APPLY_GROUPS_EXCEPT ':' ':' one_or_more_strings
  """
  # One VarType entry per listed excluded group name.
  p[0] = [VarType(VarType.APPLY_GROUPS_EXCEPT, group) for group in p[4]]
def p_timeout_spec(p):
  """ timeout_spec : TIMEOUT ':' ':' INTEGER """
  # Timeout value kept as the raw INTEGER token.
  p[0] = VarType(VarType.TIMEOUT, p[4])
def p_one_or_more_strings(p):
  """ one_or_more_strings : one_or_more_strings STRING
                      | STRING
                      | """
  # Accumulate STRING tokens into a flat list. The empty production
  # (len(p) == 1) deliberately leaves p[0] unset (None).
  if len(p) > 1:
    # isinstance is the idiomatic form of the old `type(x) == type([])` test.
    if isinstance(p[1], list):
      # Recursive alternative: extend the running list in place.
      p[1].append(p[2])
      p[0] = p[1]
    else:
      # Base alternative: the first STRING starts the list.
      p[0] = [p[1]]
def p_one_or_more_ints(p):
  """ one_or_more_ints : one_or_more_ints INTEGER
                   | INTEGER
                   | """
  # Accumulate INTEGER tokens into a flat list. The empty production
  # (len(p) == 1) deliberately leaves p[0] unset (None).
  if len(p) > 1:
    # isinstance is the idiomatic form of the old `type(x) == type([])` test.
    if isinstance(p[1], list):
      # Recursive alternative: extend the running list in place.
      p[1].append(p[2])
      p[0] = p[1]
    else:
      # Base alternative: the first INTEGER starts the list.
      p[0] = [p[1]]
def p_strings_or_ints(p):
  """ strings_or_ints : strings_or_ints STRING
                  | strings_or_ints INTEGER
                  | STRING
                  | INTEGER
                  | """
  # Build a mixed list of STRING and INTEGER tokens; the empty production
  # is a no-op (p[0] stays None).
  if len(p) <= 1:
    return
  existing = p[1]
  if type(existing) is list:
    # Recursive alternative: append the newest token to the running list.
    existing.append(p[2])
    p[0] = existing
  else:
    # Base alternative: the first token starts a fresh list.
    p[0] = [existing]
def p_error(p):
  """Raise a ParseError describing the offending token and the next token.

  Args:
    p: the PLY token at which the parse failed, or None at end of input.

  Raises:
    ParseError: always; parsing cannot continue.
  """
  # Peek at the following token to make the error message more useful.
  next_token = yacc.token()
  if next_token is None:
    use_token = 'EOF'
  else:
    use_token = repr(next_token.value)
  if p:
    raise ParseError(' ERROR on "%s" (type %s, line %d, Next %s)'
                     % (p.value, p.type, p.lineno, use_token))
  else:
    # Fix: error message previously read "unablanaced".
    raise ParseError(' ERROR you likely have unbalanced "{"\'s')
# pylint: enable=unused-argument,invalid-name,g-short-docstring-punctuation
# pylint: enable=g-docstring-quotes,g-short-docstring-space
# pylint: enable=g-space-before-docstring-summary,g-doc-args
# pylint: enable=g-no-space-after-docstring-summary
# pylint: enable=g-docstring-missing-newline
def _ReadFile(filename):
"""Read data from a file if it exists.
Args:
filename: str - Filename
Returns:
data: str contents of file.
Raises:
FileNotFoundError: if requested file does not exist.
FileReadError: Any error resulting from trying to open/read file.
"""
logging.debug('ReadFile(%s)', filename)
if os.path.exists(filename):
try:
data = open(filename, 'r').read()
return data
except IOError:
raise FileReadError('Unable to open or read file %s' % filename)
else:
raise FileNotFoundError('Unable to open policy file %s' % filename)
def _Preprocess(data, max_depth=5, base_dir=''):
"""Search input for include statements and import specified include file.
Search input for include statements and if found, import specified file
and recursively search included data for includes as well up to max_depth.
Args:
data: A string of Policy file data.
max_depth: Maximum depth of included files
base_dir: Base path string where to look for policy or include files
Returns:
A string containing result of the processed input data
Raises:
RecursionTooDeepError: nested include files exceed maximum
"""
if not max_depth:
raise RecursionTooDeepError('%s' % (
'Included files exceed maximum recursion depth of %s.' % max_depth))
rval = []
for line in [x.rstrip() for x in data.splitlines()]:
words = line.split()
if len(words) > 1 and words[0] == '#include':
# remove any quotes around included filename
include_file = words[1].strip('\'"')
data = _ReadFile(os.path.join(base_dir, include_file))
# recursively handle includes in included data
inc_data = _Preprocess(data, max_depth - 1, base_dir=base_dir)
rval.extend(inc_data)
else:
rval.append(line)
return rval
def ParseFile(filename, definitions=None, optimize=True, base_dir='',
              shade_check=False):
  """Parse the policy contained in file, optionally provide a naming object.

  Thin wrapper: reads the file and delegates to ParsePolicy.

  Args:
    filename: Name of policy file to parse.
    definitions: optional naming library definitions object.
    optimize: bool - whether to summarize networks and services.
    base_dir: base path string to look for acls or include files.
    shade_check: bool - whether to raise an exception when a term is shaded.

  Returns:
    policy object or False (if parse error).
  """
  policy_data = _ReadFile(filename)
  return ParsePolicy(policy_data, definitions, optimize, base_dir=base_dir,
                     shade_check=shade_check)
def ParsePolicy(data, definitions=None, optimize=True, base_dir='',
                shade_check=False):
  """Parse the policy in 'data', optionally provide a naming object.

  Parse a blob of policy text into a policy object.

  Note: this function configures the parser via module-level globals
  (DEFINITIONS, _OPTIMIZE, _SHADE_CHECK), so it is not safe to run
  concurrently with different settings.

  Args:
    data: a string blob of policy data to parse.
    definitions: optional naming library definitions object.
    optimize: bool - whether to summarize networks and services.
    base_dir: base path string to look for acls or include files.
    shade_check: bool - whether to raise an exception when a term is shaded.

  Returns:
    policy object or False (if parse error).
  """
  try:
    if definitions:
      globals()['DEFINITIONS'] = definitions
    else:
      globals()['DEFINITIONS'] = naming.Naming(DEFAULT_DEFINITIONS)
    if not optimize:
      globals()['_OPTIMIZE'] = False
    if shade_check:
      globals()['_SHADE_CHECK'] = True
    # Build the lexer/parser fresh for each call; NullLogger suppresses
    # yacc's table-construction warnings.
    lexer = lex.lex()
    preprocessed_data = '\n'.join(_Preprocess(data, base_dir=base_dir))
    p = yacc.yacc(write_tables=False, debug=0, errorlog=yacc.NullLogger())
    return p.parse(preprocessed_data, lexer=lexer)
  except IndexError:
    # A malformed policy can index past the end of a production; report it
    # as a generic parse failure rather than crashing.
    return False
# if you call this from the command line, you can specify a pol file for it to
# read.
if __name__ == '__main__':
  # Parse the policy file named on the command line, or stdin if none given.
  ret = 0
  if len(sys.argv) > 1:
    try:
      # Context manager fixes the leaked file handle from open().read().
      with open(sys.argv[1], 'r') as policy_file:
        ret = ParsePolicy(policy_file.read())
    except IOError:
      print('ERROR: \'%s\' either does not exist or is not readable' %
            (sys.argv[1]))
      ret = 1
  else:
    # default to reading stdin
    ret = ParsePolicy(sys.stdin.read())
  sys.exit(ret)
| {
"content_hash": "0362dfdabf4dc2732fc5c41d8f9e7276",
"timestamp": "",
"source": "github",
"line_count": 2189,
"max_line_length": 131,
"avg_line_length": 30.97898583828232,
"alnum_prop": 0.6057245660861487,
"repo_name": "pettai/capirca",
"id": "6a8f46fab958ceb687ceea188a1dcf6d8d4de2a9",
"size": "68412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "303850"
},
{
"name": "Shell",
"bytes": "383"
}
],
"symlink_target": ""
} |
"""## Loss operations for use in neural networks.
The loss ops measure error for use in neural networks. These losses
can be used for measuring accuracy of a network in a regression task
or for regularization purposes (e.g., weight decay).
These loss ops are, by design, minimal, enabling flexibility in how
their output can be used.
@@reduce_batch_sum
@@reduce_batch_mean
@@absolute_loss
@@squared_loss
@@sum_squared_loss
@@mean_absolute_loss
@@mean_squared_loss
@@root_mean_squared_loss
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
__all__ = ["reduce_batch_sum", "reduce_batch_mean", "absolute_loss",
"squared_loss", "sum_squared_loss", "mean_absolute_loss",
"mean_squared_loss", "root_mean_squared_loss"]
def _reduce_batch(x, reduce_fn, name=None):
  """Given a tensor `x`, calls reduce_fn to reduce it across dimensions.

  Given a tensor with number of dimensions > 1, _reduce_batch will reduce the
  tensor across all dimensions except for dimension 0. As an example, given a
  tensor of shape [batch_size, d1, d2], this function will reduce across
  dimensions d1 and d2, returning a tensor of shape [batch_size].

  Tensors of dimension 1 are returned as-is, while tensors of dimension 0
  raise a ValueError.

  Args:
    x: A `Tensor` with dimension > 0.
    reduce_fn: A math_ops reduce function that takes arguments of
      `x`, `reduction_indices`, and `name`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with values reduced by reduce_fn across all dimensions > 0.

  Raises:
    ValueError: If `x` has dimension 0.
  """
  x = ops.convert_to_tensor(x, name="x")
  with ops.op_scope([x], name, "reduce_batch"):
    ndims = x.get_shape().ndims
    if ndims == 0:
      raise ValueError("Cannot reduce a scalar into batches.")
    elif ndims == 1:
      return x  # Don't include a useless reduction.
    elif ndims:
      # Static rank known (> 1): reduce over every axis except the batch axis
      # and preserve the statically-known batch dimension in the shape hint.
      reduction_indices = list(range(1, ndims))
      shape = [x.get_shape().dims[0]]
    else:
      # Static rank unknown (ndims is None): compute the reduction axes
      # dynamically from the runtime rank of x.
      reduction_indices = math_ops.range(1, array_ops.size(array_ops.shape(x)))
      shape = [None]  # We don't know much about the shape, but it is rank 1.
    result = reduce_fn(x, reduction_indices=reduction_indices)
    # Give a shape hint in case we have extra information.
    result.set_shape(shape)
    return result
def reduce_batch_sum(x, name=None):
  """Sums `x` across every dimension except the batch (0th) dimension.

  For input of shape [batch_size, d1, ..., dn] the result has shape
  [batch_size]; rank-1 input is returned unchanged. Useful for totalling a
  per-element loss over each example in a batch. Scalar input is rejected.

  Args:
    x: A `Tensor` with dimension > 0.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of per-example sums.

  Raises:
    ValueError: If `x` has dimension 0.
  """
  return _reduce_batch(x, math_ops.reduce_sum, name)
def reduce_batch_mean(x, name=None):
  """Averages `x` across every dimension except the batch (0th) dimension.

  For input of shape [batch_size, d1, ..., dn] the result has shape
  [batch_size]; rank-1 input is returned unchanged. Useful for computing a
  per-example mean loss over a batch. Scalar input is rejected.

  Args:
    x: A `Tensor` with dimension > 0.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of per-example means.

  Raises:
    ValueError: If `x` has dimension 0.
  """
  return _reduce_batch(x, math_ops.reduce_mean, name)
def absolute_loss(predicted, target, name=None):
  """Returns |target - predicted| element-wise.

  Both tensors must have compatible shapes; the result has the same
  `[batch_size, dim_1, ..., dim_n]` shape as the inputs.

  Args:
    predicted: A `Tensor` of predicted values.
    target: A `Tensor` of target values with a shape matching `predicted`.
    name: A name for the operation (optional).

  Returns:
    A tensor of per-example absolute differences.

  Raises:
    ValueError: If `predicted` and `target` shapes do not match.
  """
  with ops.op_scope([predicted, target], name, "absolute_loss") as scope:
    predicted = ops.convert_to_tensor(predicted, name="predicted")
    target = ops.convert_to_tensor(target, name="target")
    # Fail fast at graph-construction time on incompatible shapes.
    predicted.get_shape().assert_is_compatible_with(target.get_shape())
    return math_ops.abs(target - predicted, name=scope)
def squared_loss(predicted, target, name=None):
  """Returns (target - predicted)^2 element-wise.

  Both tensors must have compatible shapes; the result has the same
  `[batch_size, dim_1, ..., dim_n]` shape as the inputs.

  Args:
    predicted: A `Tensor` of predicted values.
    target: A `Tensor` of target values with a shape matching `predicted`.
    name: A name for the operation (optional).

  Returns:
    A tensor of per-example squared differences.

  Raises:
    ValueError: If `predicted` and `target` shapes do not match.
  """
  with ops.op_scope([predicted, target], name, "squared_loss") as scope:
    predicted = ops.convert_to_tensor(predicted, name="predicted")
    target = ops.convert_to_tensor(target, name="target")
    # Fail fast at graph-construction time on incompatible shapes.
    predicted.get_shape().assert_is_compatible_with(target.get_shape())
    return math_ops.square(target - predicted, name=scope)
def sum_squared_loss(predicted, target, name=None):
  """Calculates 1/2 the sum of the squared loss across batches.

  Computes the squared difference between the target and predicted tensors,
  sums across all dimensions except dimension 0, and halves the result:

      losses = reduce_batch_sum(squared_loss(predicted, target)) / 2.0

  where `losses` has shape [batch_size]. The tensors must have the same
  shape. This is the usual L2-loss formulation (similar to TensorFlow's
  l2_loss, but with an explicit target tensor).

  Args:
    predicted: A `Tensor` of predicted values.
    target: A `Tensor` of target values with a shape matching `predicted`.
    name: A name for the operation (optional).

  Returns:
    A `[batch_size]` tensor of halved per-example squared-loss sums.

  Raises:
    ValueError: If `predicted` and `target` shapes do not match.
  """
  with ops.op_scope([predicted, target], name, "sum_squared_loss") as scope:
    per_example_sum = reduce_batch_sum(squared_loss(predicted, target))
    return math_ops.div(per_example_sum, 2.0, name=scope)
def mean_absolute_loss(predicted, target, name=None):
  """Calculates the mean absolute (L1) loss across batches.

  Computes the absolute difference between the target and predicted
  tensors, averaged across all dimensions except dimension 0:

      losses = reduce_batch_mean(absolute_loss(predicted, target))

  where `losses` has shape [batch_size]. The tensors must have the same
  shape.

  Args:
    predicted: A `Tensor` of predicted values.
    target: A `Tensor` of target values with a shape matching `predicted`.
    name: A name for the operation (optional).

  Returns:
    A `[batch_size]` tensor of per-example mean absolute differences.

  Raises:
    ValueError: If `predicted` and `target` shapes do not match.
  """
  with ops.op_scope([predicted, target], name, "mean_absolute_loss") as scope:
    return reduce_batch_mean(absolute_loss(predicted, target), name=scope)
def mean_squared_loss(predicted, target, name=None):
  """Calculates the mean squared loss across batches.

  Computes the squared difference between the target and predicted
  tensors, averaged across all dimensions except dimension 0:

      losses = reduce_batch_mean(squared_loss(predicted, target))

  where `losses` has shape [batch_size]. The tensors must have the same
  shape.

  Args:
    predicted: A `Tensor` of predicted values.
    target: A `Tensor` of target values with a shape matching `predicted`.
    name: A name for the operation (optional).

  Returns:
    A `[batch_size]` tensor of per-example mean squared differences.

  Raises:
    ValueError: If `predicted` and `target` shapes do not match.
  """
  with ops.op_scope([predicted, target], name, "mean_squared_loss") as scope:
    return reduce_batch_mean(squared_loss(predicted, target), name=scope)
def root_mean_squared_loss(predicted, target, name=None):
  """Calculates the root mean squared loss across batches.

  Takes the square root of the per-example mean squared differences:

      losses = sqrt(mean_squared_loss(predicted, target))

  where `losses` has shape [batch_size]. The tensors must have the same
  shape.

  Args:
    predicted: A `Tensor` of predicted values.
    target: A `Tensor` of target values with a shape matching `predicted`.
    name: A name for the operation (optional).

  Returns:
    A `[batch_size]` tensor of root mean squared differences.

  Raises:
    ValueError: If `predicted` and `target` shapes do not match.
  """
  with ops.op_scope([predicted, target],
                    name,
                    "root_mean_squared_loss") as scope:
    return math_ops.sqrt(mean_squared_loss(predicted, target), name=scope)
| {
"content_hash": "2ec0080952d6a8e204be4a65e0b05d1a",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 79,
"avg_line_length": 33.90060240963855,
"alnum_prop": 0.6923145268769436,
"repo_name": "moonboots/tensorflow",
"id": "ae3d6203fe57bd0e505db225e8e321291a478006",
"size": "11933",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/layers/python/ops/loss_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "151546"
},
{
"name": "C++",
"bytes": "7303140"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "678043"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "16098"
},
{
"name": "Jupyter Notebook",
"bytes": "777976"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "101760"
},
{
"name": "Python",
"bytes": "4092357"
},
{
"name": "Shell",
"bytes": "77957"
},
{
"name": "TypeScript",
"bytes": "328860"
}
],
"symlink_target": ""
} |
"""
// The DoTurn function is where your code goes. The PlanetWars object contains
// the state of the game, including information about all planets and fleets
// that currently exist. Inside this function, you issue orders using the
// pw.IssueOrder() function. For example, to send 10 ships from planet 3 to
// planet 8, you would say pw.IssueOrder(3, 8, 10).
//
// There is already a basic strategy in place here. You can use it as a
// starting point, or you can throw it out entirely and replace it with your
// own. Check out the tutorials and articles on the contest website at
// http://www.ai-contest.com/resources.
"""
try:
from utils import Debuggable
except:
import sys
from os import path
sys.path.append(path.join(sys.path[0], ".."))
from utils import Debuggable
import math
from PlanetWars import PlanetWars
class Debug(Debuggable):
    """Debug output channel for this bot, tagged "bot1"."""
    def __init__(self):
        super(Debug, self).__init__()
        self.debug_name = "bot1"  # tag used by the Debuggable helper
debugger = Debug()
def main():
    """Run the engine protocol loop: read map data from stdin, play turns.

    The game server streams map lines; a line starting with "go" marks the
    end of a turn's input, at which point we plan and issue orders.
    """
    map_data = ''
    turn = 0
    try:
        while(True):
            current_line = input()
            #debug("-> %s" % current_line, 'server')
            if len(current_line) >= 2 and current_line.startswith("go"):
                # Full turn state received: plan and answer this turn.
                debugger.debug("# %d" % turn)
                pw = PlanetWars(map_data)
                DoTurn(pw)
                #debug("Answer is sent.", 'server')
                pw.FinishTurn()
                turn += 1
                map_data = ''
            else:
                # Still accumulating this turn's map description.
                map_data += current_line + '\n'
    except Exception as e:
        # EOF from the server also lands here (input() raises) and ends the game.
        debugger.debug(e, "error")
    debugger.debug("The End!")
    turn = 0
def DoTurn(pw):
    """Issue this turn's orders: reinforce/attack planets we can afford to take.

    Strategy: for each of our planets (largest fleet first), consider every
    non-owned planet (smallest first) and estimate how many ships are needed
    to take/hold it, accounting for growth, inbound enemy fleets, and our own
    fleets already en route. Send ships only if this planet can spare them.
    """
    ships_key_getter=lambda a: a.NumShips()
    my_planets = pw.MyPlanets()
    my_planets.sort(key=ships_key_getter, reverse=True)
    other_planets = pw.NotMyPlanets()
    other_planets.sort(key=ships_key_getter)
    for p in my_planets:
        counter = 0
        # Keep ~1/3 of the garrison at home; at most 66% may be sent out.
        can_give = float(p.NumShips() * 0.66)
        #debug("My Planet %d, init can give %f" % (p.PlanetID(), can_give))
        for op in other_planets:
            need = float(op.NumShips())
            owner = op.Owner()
            if owner > 1:
                # Enemy-owned: it keeps growing while our fleet is in flight.
                need += pw.Distance(p.PlanetID(), op.PlanetID()) * op.GrowthRate()
            enemy_fleet_count = [fleet.NumShips() for fleet in pw.EnemyFleets() if fleet.DestinationPlanet() == op.PlanetID()]
            need += sum(enemy_fleet_count)
            my_fleet = [fleet for fleet in pw.MyFleets() if fleet.DestinationPlanet() == op.PlanetID()]
            my_fleet.sort(key=lambda fleet: fleet.TurnsRemaining())
            # Discount ships we already have en route (plus growth until each
            # fleet arrives, if the target is enemy-owned).
            pluss = need
            for fleet in my_fleet:
                accum = 0
                if owner > 1:
                    accum = op.GrowthRate() * fleet.TurnsRemaining()
                pluss += accum
                pluss -= fleet.NumShips()
                if pluss <= 0:
                    break
            if pluss < 0:
                # debug("There will be enought help. Don't need us.")
                continue
            # 10% safety margin on top of the remaining requirement.
            need = pluss * 1.10
            ships_to_send = int(math.ceil(need))
            if ships_to_send > 0 and can_give >= ships_to_send:
                pid, opid, num = p.PlanetID(), op.PlanetID(), ships_to_send
                has_now = pw.GetPlanet(pid).NumShips()
                #debug("src %d, dst %d, ships %d\t %d has %d ships" % (pid, opid, num, pid, has_now))
                pw.IssueOrder(pid, opid, num)
                counter += num
                can_give -= num
        debugger.debug("%d sent from %d" % (counter, p.PlanetID()))
if __name__ == '__main__':
    # psyco is an optional Python 2 JIT; silently skip it when unavailable.
    try:
        import psyco
        psyco.full()
    except ImportError:
        pass
    try:
        debugger.debug("Let's game begin!!")
        main()
    except KeyboardInterrupt:
        print('ctrl-c, leaving ...')
| {
"content_hash": "388a762043b54e5b6eda00e80bae0386",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 126,
"avg_line_length": 33.43478260869565,
"alnum_prop": 0.552925877763329,
"repo_name": "Dremalka/arbitr",
"id": "ef33487b7f31b4eacb89257ca45b1a4887e239c3",
"size": "3869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old/MyBot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40035"
}
],
"symlink_target": ""
} |
from sqlalchemy import (
Column,
Index,
Integer,
Text,
)
from .meta import Base
class MyModel(Base):
    """Minimal example model: a uniquely-named row with an integer value."""
    __tablename__ = 'models'
    id = Column(Integer, primary_key=True)  # surrogate primary key
    name = Column(Text)                     # unique via my_index below
    value = Column(Integer)


# Unique index on name; mysql_length bounds the indexed prefix, which MySQL
# requires when indexing TEXT columns.
Index('my_index', MyModel.name, unique=True, mysql_length=255)
| {
"content_hash": "04d669afb488bacbcfc49d0b2f186e07",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 62,
"avg_line_length": 17.166666666666668,
"alnum_prop": 0.6440129449838188,
"repo_name": "shortaj/pyramid-learning-journal",
"id": "d65a01a42231273ddc84880183a142c734690b21",
"size": "309",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "learning_journal/learning_journal/models/mymodel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4068"
},
{
"name": "HTML",
"bytes": "7444"
},
{
"name": "Python",
"bytes": "9069"
}
],
"symlink_target": ""
} |
'''
Created on Oct 29, 2015
@author: Kagiu
'''
import sys
from PySide import QtGui, QtCore
from signin import TimeLogger
def boxToWidget(layout):
    """Wrap a layout in a bare QWidget so it can be used as a tab page."""
    container = QtGui.QWidget()
    container.setLayout(layout)
    return container
class SignGUI(QtGui.QMainWindow):
    """Main window for the sign-in kiosk: a sign-in tab plus two
    password-gated admin tabs (total time and per-person time)."""
    def __init__(self):
        super(SignGUI, self).__init__()
        self.logger = TimeLogger()  # persistence backend for sign-in records
        self.setWindowTitle("Metrobots Sign-In")
        self.initMain()
        self.initAdmin()
        self.initMenu()
        self.show()
    def initMain(self): #Main sign-in code
        """Build the sign-in tab: a name field with auto-completion."""
        self.lineEdit = QtGui.QLineEdit(self)
        self.lineEdit.setPlaceholderText("Your name here")
        # Pressing Enter / leaving the field registers a sign-in/out.
        self.lineEdit.editingFinished.connect(self.register)
        self.autoComplete()
        hbox = QtGui.QHBoxLayout()
        hbox.addWidget(QtGui.QLabel("Name:", self))
        hbox.addWidget(self.lineEdit)
        vbox = QtGui.QVBoxLayout()
        vbox.addLayout(hbox)
        self.tabs = QtGui.QTabWidget()
        self.tabs.addTab(boxToWidget(vbox), "Sign-in")
        self.setCentralWidget(self.tabs)
    def initAdmin(self): #Admin code
        """Build the two admin tabs (disabled until a password is entered)."""
        self.timelist = QtGui.QTreeWidget(self) #Total time
        self.timelist.setColumnCount(2)
        self.timelist.setHeaderLabels(["Name", "Total time"])
        # Best-effort initial fill; a missing/corrupt log shouldn't block startup.
        try: self.populateTotal()
        except Exception as e: print(e)
        refreshBtn = QtGui.QPushButton("Refresh", self)
        refreshBtn.clicked.connect(self.populateTotal)
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(self.timelist)
        vbox.addWidget(refreshBtn)
        self.tabs.addTab(boxToWidget(vbox), "Total time")
        self.lookup = QtGui.QLineEdit()
        self.timetable = QtGui.QTreeWidget()
        self.timetable.setColumnCount(3)
        self.timetable.setHeaderLabels(["Time in", "Time out", "Total time"])
        vbox2 = QtGui.QVBoxLayout()
        vbox2.addWidget(self.lookup)
        vbox2.addWidget(self.timetable)
        self.tabs.addTab(boxToWidget(vbox2), "Individual time")
        # Tabs 1 and 2 stay disabled until enableAdmin unlocks them.
        self.tabs.setTabEnabled(1, False)
        self.tabs.setTabEnabled(2, False)
    def initMenu(self):
        """Add the File menu with the checkable admin-access toggle."""
        self.admin = QtGui.QAction("&Enable data access", self)
        self.admin.setCheckable(True)
        self.admin.triggered.connect(self.enableAdmin)
        filemenu = self.menuBar().addMenu("&File")
        filemenu.addAction(self.admin)
    def enableAdmin(self):
        """Toggle the admin tabs; unlocking requires the admin password.

        NOTE(review): the password is hard-coded in source, compared in
        plain text, and the entered value is printed to stdout -- this is
        only a deterrent, not real access control.
        """
        if not self.admin.isChecked():
            self.tabs.setTabEnabled(1, False)
            self.tabs.setTabEnabled(2, False)
        else:
            passwd = QtGui.QInputDialog.getText(self, "Password required", "Enter password",
                                                 QtGui.QLineEdit.Password)
            print(passwd)
            # getText returns (text, ok); only the text is checked here.
            if passwd[0] == "3324":
                self.tabs.setTabEnabled(1, True)
                self.tabs.setTabEnabled(2, True)
            else:
                QtGui.QMessageBox.warning(self, "Error", "Wrong password.")
                self.admin.setChecked(False)
    def autoComplete(self):
        """(Re)build the name field's completer from the current roster."""
        completer = QtGui.QCompleter(self.logger.names(), self)
        completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
        # Picking a completion also registers the sign-in.
        completer.activated.connect(self.register)
        self.lineEdit.setCompleter(completer)
    def populateTotal(self):
        """Refill the total-time tree with one row per known name."""
        self.timelist.clear()
        times = []
        for name in self.logger.names():
            times.append(QtGui.QTreeWidgetItem([name, str(self.logger.getTime(name))]))
        self.timelist.addTopLevelItems(times)
    def register(self):
        """Sign the entered name in or out, offering to add unknown names."""
        #print(inspect.stack()[1])
        name = self.lineEdit.text().title()
        if name not in self.logger.names():
            reply = QtGui.QMessageBox.question(self, 'Message', name + " is not in the roster. Would you like to add it?",
                                           QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
                                           QtGui.QMessageBox.Yes)
            if reply == QtGui.QMessageBox.No:
                QtGui.QMessageBox.information(self, "", "Not adding " + name + ".")
                return
        self.autoComplete()
        self.lineEdit.clear()
        self.lineEdit.setText("")
        # register() returns truthy for sign-in, falsy for sign-out.
        status = self.logger.register(name)
        self.statusBar().clear()
        self.statusBar().showMessage(name + " has been signed " + ("in." if status else "out."))
def main():
    """Create the Qt application, show the sign-in window, run the loop."""
    app = QtGui.QApplication(sys.argv)
    window = SignGUI()  # keep a reference so the window isn't collected
    sys.exit(app.exec_())
if __name__ == '__main__':
main() | {
"content_hash": "a0fa2095f0b35fb27e7f97de8898fe20",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 122,
"avg_line_length": 35.47286821705426,
"alnum_prop": 0.5935314685314685,
"repo_name": "frc3324/Timekeeper",
"id": "9e6a03e05eb505ffccb762d1f49c699624e51dc7",
"size": "4576",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Timekeeper/signgui.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9083"
}
],
"symlink_target": ""
} |
import sys, subprocess, os
from subprocess import call
from tempfile import NamedTemporaryFile
def formdamage(sent):
    """Drop any characters of `sent` that cannot be UTF-8 encoded.

    Python 2 helper: the bare except is a deliberate best-effort filter --
    undecodable characters are silently skipped rather than aborting.
    Returns the surviving characters joined as a UTF-8 byte string.
    """
    rectify = []
    for ch in sent:
        try: rectify.append(ch.encode('utf-8'))
        except: pass
    return ''.join(rectify)
def cabocha(sent):
    """Run the external `cabocha` parser on `sent`, returning XML output.

    Writes the UTF-8 encoded sentence to a temp file, feeds it to
    `cabocha -f 3` (XML output format) via stdin, and decodes the result.
    Python 2 only (uses `unicode`). Requires the cabocha binary on PATH.
    """
    # Prefer a lab-specific tmp dir when present (site-specific path).
    if os.path.exists('/home_lab_local/s1010205/tmp/'):
        temp = NamedTemporaryFile(delete=False, dir='/home_lab_local/s1010205/tmp/')
    else:
        temp = NamedTemporaryFile(delete=False)
    # Encode to UTF-8; fall back to dropping undecodable characters.
    try: sent = sent.encode('utf-8')
    except: sent = formdamage(sent)
    temp.write(sent)
    temp.close()
    command = ['cabocha', '-f', '3']
    # NOTE(review): the file object passed as stdin is never explicitly
    # closed -- it is only reclaimed by garbage collection.
    process = subprocess.Popen(command, stdin=open(temp.name,'r'), stdout=subprocess.PIPE)
    output = process.communicate()[0]
    os.unlink(temp.name)
    return unicode(output, 'utf-8')
def main():
    """Unused placeholder entry point."""
    return None
if __name__ == '__main__':
    # Demo: parse one hard-coded Japanese sentence and print the CaboCha XML.
    # NOTE(review): Python 2 print statement -- this module is Python 2 only.
    input_sentence = u'私が五年前にこの団体を仲間たちと結成したのはマルコス疑惑などで日本のODA(政府開発援助)が問題になり、国まかせでなく、民間による国際協力が必要だと痛感したのが大きな理由です。'
    print cabocha(input_sentence).encode('utf-8')
| {
"content_hash": "bcbbad38239f3cc32d0f4803a0bc17d8",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 112,
"avg_line_length": 28.2972972972973,
"alnum_prop": 0.6542502387774594,
"repo_name": "kevincobain2000/jProcessing",
"id": "14ea14310bea38c12862de2fd25e46357bace509",
"size": "1326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/jNlp/jCabocha.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "33450527"
},
{
"name": "Python",
"bytes": "62331"
}
],
"symlink_target": ""
} |
"""
Module to test RO manager minim access functions
See: http://www.wf4ever-project.org/wiki/display/docs/RO+management+tool
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os, os.path
import sys
import re
import shutil
import unittest
import logging
import datetime
import StringIO
try:
# Running Python 2.5 with simplejson?
import simplejson as json
except ImportError:
import json
log = logging.getLogger(__name__)
if __name__ == "__main__":
# Add main project directory at start of python path
sys.path.insert(0, "../..")
import rdflib
from MiscUtils import TestUtils
from rocommand import ro_manifest
from rocommand.ro_metadata import ro_metadata
from rocommand.ro_annotation import annotationTypes, annotationPrefixes
from rocommand.ro_prefixes import make_sparql_prefixes
from rocommand.test import TestROSupport
from rocommand.test import TestConfig
from iaeval import ro_minim
from iaeval.ro_minim import MINIM
from iaeval import ro_eval_minim
# Base directory for RO tests in this module (directory containing this file)
testbase = os.path.dirname(os.path.realpath(__file__))

# Local ro_config for testing: supplies just the annotation type/prefix
# tables that ro_metadata needs, instead of a full user configuration.
ro_config = {
    "annotationTypes": annotationTypes,
    "annotationPrefixes": annotationPrefixes
}
# Test suite
class TestEvalQueryMatch(TestROSupport.TestROSupport):
    """
    Test evaluation of Minim QueryTestRule checklists against test
    research objects, and formatting/RDF rendering of the results.
    """

    def setUp(self):
        super(TestEvalQueryMatch, self).setUp()
        return

    def tearDown(self):
        super(TestEvalQueryMatch, self).tearDown()
        return

    # Setup local config for Minim tests
    def setupConfig(self):
        return self.setupTestBaseConfig(testbase)

    # Actual tests follow

    def testNull(self):
        assert True, 'Null test failed'

    def testEvalQueryTestModelMin(self):
        """
        Evaluate RO against minimal Minim description using just QueryTestRules
        """
        self.setupConfig()
        rodir = self.createTestRo(testbase, "test-data-2", "RO test minim", "ro-testMinim")
        rouri = ro_manifest.getRoUri(rodir)
        self.populateTestRo(testbase, rodir)
        rometa = ro_metadata(ro_config, rodir)
        (g, evalresult) = ro_eval_minim.evaluate(rometa,
            "Minim-UserRequirements2-min.rdf",  # Minim file
            "data/UserRequirements-astro.ods",  # Target resource
            "create")                           # Purpose
        log.debug("ro_eval_minim.evaluate result:\n----\n%s"%(repr(evalresult)))
        # All satisfaction levels present and no missing requirements:
        # the RO fully satisfies this minimal checklist.
        self.assertIn(MINIM.fullySatisfies, evalresult['summary'])
        self.assertIn(MINIM.nominallySatisfies, evalresult['summary'])
        self.assertIn(MINIM.minimallySatisfies, evalresult['summary'])
        self.assertEquals(evalresult['missingMust'], [])
        self.assertEquals(evalresult['missingShould'], [])
        self.assertEquals(evalresult['missingMay'], [])
        # Bookkeeping values returned in the evaluation result:
        self.assertEquals(evalresult['rouri'], rometa.getRoUri())
        self.assertEquals(evalresult['minimuri'], rometa.getComponentUri("Minim-UserRequirements2-min.rdf"))
        self.assertEquals(evalresult['target'], "data/UserRequirements-astro.ods")
        self.assertEquals(evalresult['purpose'], "create")
        self.assertEquals(evalresult['constrainturi'],
            rometa.getComponentUriAbs("Minim-UserRequirements2-min.rdf#create/data/UserRequirements-astro.ods"))
        self.assertEquals(evalresult['modeluri'],
            rometa.getComponentUriAbs("Minim-UserRequirements2-min.rdf#runnableRO"))
        self.deleteTestRo(rodir)
        return

    def testEvalQueryTestModelExists(self):
        """
        Evaluate RO against minimal Minim description using just QueryTestRules
        (variant exercising the 'exists' form of query test rule).
        """
        self.setupConfig()
        rodir = self.createTestRo(testbase, "test-data-2", "RO test minim", "ro-testMinim")
        rouri = ro_manifest.getRoUri(rodir)
        self.populateTestRo(testbase, rodir)
        rometa = ro_metadata(ro_config, rodir)
        resuri = rometa.getComponentUriAbs("data/UserRequirements-astro.ods")
        rometa.addSimpleAnnotation(resuri, "rdfs:label", "Test label")
        # Now run evaluation against test RO
        (g, evalresult) = ro_eval_minim.evaluate(rometa,
            "Minim-UserRequirements2-exists.rdf",  # Minim file
            "data/UserRequirements-astro.ods",     # Target resource
            "create")                              # Purpose
        log.debug("ro_eval_minim.evaluate result:\n----\n%s"%(repr(evalresult)))
        self.assertIn(MINIM.fullySatisfies, evalresult['summary'])
        self.assertIn(MINIM.nominallySatisfies, evalresult['summary'])
        self.assertIn(MINIM.minimallySatisfies, evalresult['summary'])
        self.assertEquals(evalresult['missingMust'], [])
        self.assertEquals(evalresult['missingShould'], [])
        self.assertEquals(evalresult['missingMay'], [])
        self.assertEquals(evalresult['rouri'], rometa.getRoUri())
        self.assertEquals(evalresult['minimuri'], rometa.getComponentUri("Minim-UserRequirements2-exists.rdf"))
        self.assertEquals(evalresult['target'], "data/UserRequirements-astro.ods")
        self.assertEquals(evalresult['purpose'], "create")
        self.assertEquals(evalresult['constrainturi'],
            rometa.getComponentUriAbs("Minim-UserRequirements2-exists.rdf#create/data/UserRequirements-astro.ods"))
        self.assertEquals(evalresult['modeluri'],
            rometa.getComponentUriAbs("Minim-UserRequirements2-exists.rdf#runnableRO"))
        self.deleteTestRo(rodir)
        return

    def testEvalQueryTestModel(self):
        """
        Evaluate RO against Minim description using just QueryTestRules
        """
        self.setupConfig()
        rodir = self.createTestRo(testbase, "test-data-2", "RO test minim", "ro-testMinim")
        rouri = ro_manifest.getRoUri(rodir)
        self.populateTestRo(testbase, rodir)
        rometa = ro_metadata(ro_config, rodir)
        resuri = rometa.getComponentUriAbs("data/UserRequirements-astro.ods")
        rometa.addSimpleAnnotation(resuri, "rdfs:label", "Test label")
        # Now run evaluation against test RO
        (g, evalresult) = ro_eval_minim.evaluate(rometa,
            "Minim-UserRequirements2.rdf",      # Minim file
            "data/UserRequirements-astro.ods",  # Target resource
            "create")                           # Purpose
        log.debug("ro_eval_minim.evaluate result:\n----\n%s"%(repr(evalresult)))
        self.assertIn(MINIM.fullySatisfies, evalresult['summary'])
        self.assertIn(MINIM.nominallySatisfies, evalresult['summary'])
        self.assertIn(MINIM.minimallySatisfies, evalresult['summary'])
        self.assertEquals(evalresult['missingMust'], [])
        self.assertEquals(evalresult['missingShould'], [])
        self.assertEquals(evalresult['missingMay'], [])
        self.assertEquals(evalresult['rouri'], rometa.getRoUri())
        self.assertEquals(evalresult['minimuri'], rometa.getComponentUri("Minim-UserRequirements2.rdf"))
        self.assertEquals(evalresult['target'], "data/UserRequirements-astro.ods")
        self.assertEquals(evalresult['purpose'], "create")
        self.assertEquals(evalresult['constrainturi'],
            rometa.getComponentUriAbs("Minim-UserRequirements2.rdf#create/data/UserRequirements-astro.ods"))
        self.assertEquals(evalresult['modeluri'],
            rometa.getComponentUriAbs("Minim-UserRequirements2.rdf#runnableRO"))
        self.deleteTestRo(rodir)
        return

    def testEvalQueryTestReportList(self):
        """
        Test QueryTestRules reporting list of failed query probes
        """
        self.setupConfig()
        rodir = self.createTestRo(testbase, "test-data-2", "RO test minim", "ro-testMinim")
        rouri = ro_manifest.getRoUri(rodir)
        self.populateTestRo(testbase, rodir)
        rometa = ro_metadata(ro_config, rodir)
        # Annotate a resource that is deliberately NOT aggregated in the RO,
        # so the corresponding SHOULD requirement fails.
        resuri = rometa.getComponentUriAbs("data/NoSuchResource")
        rometa.addSimpleAnnotation(resuri, "rdfs:label", "Test label")
        # Now run evaluation against test RO
        (g, evalresult) = ro_eval_minim.evaluate(rometa,
            "Minim-UserRequirements2.rdf",  # Minim file
            "data/NoSuchResource",          # Target resource
            "report list")                  # Purpose
        log.debug("ro_eval_minim.evaluate result:\n----\n%s"%(repr(evalresult)))
        # A failed SHOULD leaves only minimal satisfaction.
        self.assertNotIn(MINIM.fullySatisfies, evalresult['summary'])
        self.assertNotIn(MINIM.nominallySatisfies, evalresult['summary'])
        self.assertIn(MINIM.minimallySatisfies, evalresult['summary'])
        self.assertEquals(evalresult['missingMust'], [])
        self.assertEquals(len(evalresult['missingShould']), 1)
        self.assertEquals(evalresult['missingShould'][0][0]['seq'], '04 - aggregates data/NoSuchResource')
        self.assertEquals(evalresult['missingMay'], [])
        self.assertEquals(evalresult['rouri'], rometa.getRoUri())
        self.assertEquals(evalresult['minimuri'], rometa.getComponentUri("Minim-UserRequirements2.rdf"))
        self.assertEquals(evalresult['target'], "data/NoSuchResource")
        self.assertEquals(evalresult['purpose'], "report list")
        self.assertEquals(evalresult['constrainturi'],
            rometa.getComponentUriAbs("Minim-UserRequirements2.rdf#report/data/NoSuchResource"))
        self.assertEquals(evalresult['modeluri'],
            rometa.getComponentUriAbs("Minim-UserRequirements2.rdf#reportList"))
        # Check result bindings returned
        # (each missing entry is a (rule, bindings) pair; [0][1] is the
        # query variable bindings of the first failed requirement)
        self.assertEquals(evalresult['missingShould'][0][1]['_count'], 1)
        self.assertEquals(evalresult['missingShould'][0][1]['_fileuri'], rometa.getComponentUri("data/NoSuchResource"))
        self.assertEquals(evalresult['missingShould'][0][1]['targetres'], rometa.getComponentUri("data/NoSuchResource"))
        self.assertEquals(evalresult['missingShould'][0][1]['ro'], str(rometa.getRoUri()))
        self.assertEquals(evalresult['missingShould'][0][1]['ro_list'], [str(rometa.getRoUri())])
        # Clean up
        self.deleteTestRo(rodir)
        return

    def testEvalQueryTestChembox(self):
        """
        Evaluate Chembox data against Minim description using QueryTestRules
        """
        self.setupConfig()
        rodir = self.createTestRo(testbase, "test-chembox", "RO test minim", "ro-testMinim")
        rouri = ro_manifest.getRoUri(rodir)
        self.populateTestRo(testbase, rodir)
        rometa = ro_metadata(ro_config, rodir)
        resuri = rometa.getComponentUriAbs("http://purl.org/net/chembox/Ethane")
        rometa.addGraphAnnotation(resuri, "Ethane.ttl")
        # Now run evaluation against test RO
        (g, evalresult) = ro_eval_minim.evaluate(rometa,
            "Minim-chembox.ttl",  # Minim file
            resuri,               # Target resource
            "complete")           # Purpose
        log.debug("ro_eval_minim.evaluate result:\n%s\n----"%(repr(evalresult)))
        # One MAY requirement fails (no synonym), so the RO nominally --
        # but not fully -- satisfies the checklist.
        self.assertNotIn(MINIM.fullySatisfies, evalresult['summary'])
        self.assertIn(MINIM.nominallySatisfies, evalresult['summary'])
        self.assertIn(MINIM.minimallySatisfies, evalresult['summary'])
        self.assertEquals(evalresult['missingMust'], [])
        self.assertEquals(evalresult['missingShould'], [])
        self.assertEquals(len(evalresult['missingMay']), 1)
        self.assertEquals(evalresult['missingMay'][0][0]['seq'], 'Synonym is present')
        self.assertEquals(evalresult['rouri'], rometa.getRoUri())
        self.assertEquals(evalresult['minimuri'], rometa.getComponentUri("Minim-chembox.ttl"))
        self.assertEquals(evalresult['target'], resuri)
        self.assertEquals(evalresult['purpose'], "complete")
        self.deleteTestRo(rodir)
        return

    def testEvalQueryTestChemboxFail(self):
        """
        Test for failing chembox requirement
        """
        self.setupConfig()
        rodir = self.createTestRo(testbase, "test-chembox", "RO test minim", "ro-testMinim")
        rouri = ro_manifest.getRoUri(rodir)
        self.populateTestRo(testbase, rodir)
        rometa = ro_metadata(ro_config, rodir)
        resuri = rometa.getComponentUriAbs("http://purl.org/net/chembox/Ethane")
        rometa.addGraphAnnotation(resuri, "Ethane.ttl")
        # Now run evaluation against test RO
        (g, evalresult) = ro_eval_minim.evaluate(rometa,
            "Minim-chembox.ttl",  # Minim file
            resuri,               # Target resource
            "fail")               # Purpose
        log.debug("ro_eval_minim.evaluate result:\n----\n%s"%(repr(evalresult)))
        # A failed MUST requirement: no satisfaction level at all.
        self.assertNotIn(MINIM.fullySatisfies, evalresult['summary'])
        self.assertNotIn(MINIM.nominallySatisfies, evalresult['summary'])
        self.assertNotIn(MINIM.minimallySatisfies, evalresult['summary'])
        self.assertEquals(len(evalresult['missingMust']), 1)
        self.assertEquals(evalresult['missingMust'][0][0]['seq'], 'This test should fail')
        self.assertEquals(evalresult['missingShould'], [])
        self.assertEquals(evalresult['missingMay'], [])
        self.assertEquals(evalresult['rouri'], rometa.getRoUri())
        self.assertEquals(evalresult['minimuri'], rometa.getComponentUri("Minim-chembox.ttl"))
        self.assertEquals(evalresult['target'], resuri)
        self.assertEquals(evalresult['purpose'], "fail")
        self.deleteTestRo(rodir)
        return

    def setupEvalFormat(self):
        """
        Build the expected evaluation-result fixtures (self.eval_result
        and its component rule/binding dictionaries) used by the format
        tests, and return the RO directory path.  The RO itself is
        deleted before returning; rodir is only needed by callers to
        construct expected output text.
        """
        self.setupConfig()
        rodir = self.createTestRo(testbase, "test-chembox", "RO test minim", "ro-testMinim")
        rometa = ro_metadata(ro_config, rodir)
        # NOTE(review): minimbase is assigned but never used below.
        minimbase = rometa.getComponentUri("Minim-chembox.ttl")
        modeluri = rdflib.URIRef('http://example.com/chembox-samples/minim_model')
        resuri = rometa.getComponentUriAbs("http://purl.org/net/chembox/Ethane")
        # Expected (rule, bindings) data for a satisfied SHOULD requirement:
        self.satisfied_result_1 = (
            { 'seq': 'ChemSpider identifier is present'
            , 'level': 'SHOULD'
            , 'uri': rdflib.URIRef('http://example.com/chembox-samples/ChemSpider')
            , 'label': None
            , 'model': modeluri
            , 'querytestrule':
              { 'prefixes':
                [ ('chembox', 'http://dbpedia.org/resource/Template:Chembox:')
                , ('default', 'http://example.com/chembox-samples/')
                ]
              , 'query': rdflib.Literal('?targetres chembox:ChemSpiderID ?value . FILTER ( str(xsd:integer(?value)) )')
              , 'resultmod': None
              , 'max': None
              , 'min': 1
              , 'aggregates_t': None
              , 'islive_t': None
              , 'exists': None
              , 'show': None
              , 'showpass': rdflib.Literal('ChemSpider identifier is present')
              , 'showfail': rdflib.Literal('No ChemSpider identifier is present')
              , 'showmiss': None
              }
            })
        self.satisfied_binding_1 = (
            { '_count': 1
            , 'targetro': rometa.getRoUri()
            , 'targetres': resuri
            })
        # Expected data for a satisfied MUST requirement:
        self.satisfied_result_2 = (
            { 'seq': 'InChI identifier is present'
            , 'level': 'MUST'
            , 'uri': rdflib.URIRef('http://example.com/chembox-samples/InChI')
            , 'label': None
            , 'model': modeluri
            , 'querytestrule':
              { 'prefixes':
                [ ('chembox', 'http://dbpedia.org/resource/Template:Chembox:')
                , ('default', 'http://example.com/chembox-samples/')
                ]
              , 'query': rdflib.Literal('?targetres chembox:StdInChI ?value . FILTER ( datatype(?value) = xsd:string )')
              , 'resultmod': None
              , 'min': 1
              , 'max': 1
              , 'aggregates_t': None
              , 'islive_t': None
              , 'exists': None
              , 'show': None
              , 'showpass': rdflib.Literal('InChI identifier is present')
              , 'showfail': rdflib.Literal('No InChI identifier is present')
              , 'showmiss': None
              }
            })
        self.satisfied_binding_2 = (
            { '_count': 1
            , 'targetro': rometa.getRoUri()
            , 'targetres': resuri
            })
        # Expected data for the failed MAY requirement:
        self.missing_may_result = (
            { 'seq': 'Synonym is present'
            , 'level': 'MAY'
            , 'uri': rdflib.URIRef('http://example.com/chembox-samples/Synonym')
            , 'label': None
            , 'model': modeluri
            , 'querytestrule':
              { 'prefixes':
                [ ('chembox', 'http://dbpedia.org/resource/Template:Chembox:')
                , ('default', 'http://example.com/chembox-samples/')
                ]
              , 'query': rdflib.Literal('\n          ?targetres chembox:OtherNames ?value .\n          ')
              , 'resultmod': None
              , 'min': 1
              , 'max': None
              , 'aggregates_t': None
              , 'islive_t': None
              , 'exists': None
              , 'show': None
              , 'showpass': rdflib.Literal('Synonym is present')
              , 'showfail': rdflib.Literal('No synonym is present')
              , 'showmiss': None
              }
            })
        self.missing_may_binding = (
            { '_count': 1
            , 'targetro': rometa.getRoUri()
            , 'targetres': resuri
            })
        # Overall expected evaluation result fed to ro_eval_minim.format:
        self.eval_result = (
            { 'summary': [MINIM.nominallySatisfies, MINIM.minimallySatisfies]
            , 'missingMust': []
            , 'missingShould': []
            , 'missingMay': [(self.missing_may_result, self.missing_may_binding)]
            , 'satisfied': [ (self.satisfied_result_1, self.satisfied_binding_1)
                           , (self.satisfied_result_2, self.satisfied_binding_2)
                           ]
            , 'rouri': rometa.getRoUri()
            , 'roid': rdflib.Literal('ro-testMinim')
            , 'title': rdflib.Literal('RO test minim')
            , 'description': rdflib.Literal('RO test minim')
            , 'target': resuri
            , 'purpose': 'complete'
            , 'minimuri': rometa.getComponentUri("Minim-chembox.ttl")
            , 'constrainturi': rdflib.URIRef('http://example.com/chembox-samples/minim_pass_constraint')
            , 'modeluri': modeluri
            })
        self.deleteTestRo(rodir)
        return rodir

    def testEvalFormatSummary(self):
        """Check the one-line summary rendering of an evaluation result."""
        rodir = self.setupEvalFormat()
        options = { 'detail': "summary" }
        stream = StringIO.StringIO()
        ro_eval_minim.format(self.eval_result, options, stream)
        outtxt = stream.getvalue()
        log.debug("---- Result:\n%s\n----"%(outtxt))
        expect = (
            "Research Object file://%s/:\n"%rodir +
            "Nominally complete for %(purpose)s of resource %(target)s\n"%(self.eval_result)
            )
        self.assertEquals(outtxt, expect)
        return

    # Sample of the full-detail output checked by the next test:
    # Research Object file:///usr/workspace/wf4ever-ro-manager/src/iaeval/test/robase/RO_test_minim/:
    # Nominally complete for complete of resource http://purl.org/net/chembox/Ethane
    # Unsatisfied MAY requirements:
    #   No synonym is present
    # Satisfied requirements:
    #   ChemSpider identifier is present
    #   InChI identifier is present
    # Research object URI: file:///usr/workspace/wf4ever-ro-manager/src/iaeval/test/robase/RO_test_minim/
    # Minimum information URI: file:///usr/workspace/wf4ever-ro-manager/src/iaeval/test/robase/RO_test_minim/Minim-chembox.ttl

    def testEvalFormatDetail(self):
        """Check the full-detail rendering of an evaluation result, line by line."""
        rodir = self.setupEvalFormat()
        options = { 'detail': "full" }
        stream = StringIO.StringIO()
        ro_eval_minim.format(self.eval_result, options, stream)
        outtxt = stream.getvalue()
        log.debug("---- Result:\n%s\n----"%(outtxt))
        expect = (
            [ "Research Object file://%s/:"%rodir
            , "Nominally complete for %(purpose)s of resource %(target)s"%(self.eval_result)
            # , "Unsatisfied MUST requirements:"
            # , "Unsatisfied SHOULD requirements:"
            , "Unsatisfied MAY requirements:"
            , "  No synonym is present"%(self.eval_result['missingMay'][0][0]['querytestrule'])
            , "Satisfied requirements:"
            , "  ChemSpider identifier is present"
            , "  InChI identifier is present"
            , "Research object URI: %(rouri)s"%(self.eval_result)
            , "Minimum information URI: %(minimuri)s"%(self.eval_result)
            ])
        stream.seek(0)
        for expect_line in expect:
            line = stream.readline()
            self.assertEquals(line, expect_line+"\n")
        return

    def testEvaluateRDF(self):
        """Check the RDF graph rendering of an evaluation result via ASK probes."""
        self.setupConfig()
        rodir = self.createTestRo(testbase, "test-chembox", "RO test minim", "ro-testMinim")
        rouri = ro_manifest.getRoUri(rodir)
        self.populateTestRo(testbase, rodir)
        rometa = ro_metadata(ro_config, rodir)
        resuri = rometa.getComponentUriAbs("http://purl.org/net/chembox/Ethane")
        reslabel = "Ethane"
        rometa.addGraphAnnotation(resuri, "Ethane.ttl")
        # Now run evaluation against test RO
        (minimgr, evalresult) = ro_eval_minim.evaluate(rometa,
            "Minim-chembox.ttl",  # Minim file
            resuri,               # Target resource
            "complete")           # Purpose
        resultgr = ro_eval_minim.evalResultGraph(minimgr, evalresult)
        log.debug("------ resultgr:\n%s\n----"%(resultgr.serialize(format='turtle')))  # pretty-xml
        ## print "------ resultgr:\n%s\n----"%(resultgr.serialize(format='turtle'))
        # Check response returned: each SPARQL ASK probe below must hold
        # in the generated result graph.
        modeluri = rdflib.URIRef('http://example.com/chembox-samples/minim_model')
        prefixes = make_sparql_prefixes()
        probequeries = (
            [ '''ASK { _:r minim:testedRO <%s> ; minim:minimUri <%s> }'''%
              (rouri, rometa.getComponentUri("Minim-chembox.ttl"))
            , '''ASK { _:r minim:testedRO <%s> ; minim:testedModel <%s> }'''%
              (rouri, modeluri)
            , '''ASK { _:r minim:testedTarget <%s> ; minim:satisfied [ minim:tryMessage "%s" ] }'''%
              (resuri, "InChI identifier is present")
            , '''ASK { _:r minim:testedTarget <%s> ; minim:satisfied [ minim:tryMessage "%s" ] }'''%
              (resuri, "ChemSpider identifier is present")
            , '''ASK { _:r minim:testedTarget <%s> ; minim:missingMay [ minim:tryMessage "%s" ] }'''%
              # NOTE(review): 'synomym' looks like a typo for 'synonym' --
              # confirm against the tryMessage text in Minim-chembox.ttl.
              (resuri, "No synomym is present")
            , '''ASK { _:r minim:testedTarget <%s> ; minim:nominallySatisfies <%s> }'''%
              (resuri, modeluri)
            , '''ASK { _:r minim:testedTarget <%s> ; minim:minimallySatisfies <%s> }'''%
              (resuri, modeluri)
            , '''ASK { <%s> rdfs:label "%s" }'''%
              (resuri, reslabel)
            ])
        for q in probequeries:
            r = resultgr.query(prefixes+q)
            self.assertEqual(r.type, 'ASK', "Result type %s for: %s"%(r.type, q))
            self.assertTrue(r.askAnswer, "Failed query: %s"%(q))
        self.deleteTestRo(rodir)
        return

    # Sentinel/placeholder tests

    def testUnits(self):
        assert (True)

    def testComponents(self):
        assert (True)

    def testIntegration(self):
        assert (True)

    def testPending(self):
        assert (False), "Pending tests follow"
# Assemble test suite
def getTestSuite(select="unit"):
    """
    Return a test suite for this module.

    select is one of the following:
        "unit"      return suite of unit tests only
        "component" return suite of unit and component tests
        "all"       return suite of unit, component and integration tests
        "pending"   return suite of pending tests
        name        a single named test to be run
    """
    # Names of the individual unit tests defined on TestEvalQueryMatch.
    unit_tests = [
        "testUnits",
        "testNull",
        "testEvalQueryTestModelMin",
        "testEvalQueryTestModelExists",
        "testEvalQueryTestModel",
        "testEvalQueryTestReportList",
        "testEvalQueryTestChembox",
        "testEvalQueryTestChemboxFail",
        "testEvalFormatSummary",
        "testEvalFormatDetail",
        "testEvaluateRDF",
        ]
    # Map each selection category to its member test names; TestUtils
    # combines categories as described in the docstring above.
    suites = {
        "unit": unit_tests,
        "component": ["testComponents"],
        "integration": ["testIntegration"],
        "pending": ["testPending"],
        }
    return TestUtils.getTestSuite(TestEvalQueryMatch, suites, select=select)
if __name__ == "__main__":
    # Run the suite selected on the command line, logging to the named file.
    TestUtils.runTests("TestEvalQueryMatch.log", getTestSuite, sys.argv)

# End.
| {
"content_hash": "335243353b10840bf5cdb16f913b3cd4",
"timestamp": "",
"source": "github",
"line_count": 550,
"max_line_length": 129,
"avg_line_length": 46.49636363636364,
"alnum_prop": 0.5863215109685997,
"repo_name": "wf4ever/ro-manager",
"id": "4e671787b75d40806b39ee269873b0e26663eec9",
"size": "25592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/iaeval/test/TestEvalQueryMatch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7498"
},
{
"name": "HTML",
"bytes": "222435"
},
{
"name": "JavaScript",
"bytes": "528"
},
{
"name": "Python",
"bytes": "964141"
},
{
"name": "Shell",
"bytes": "39373"
},
{
"name": "TeX",
"bytes": "21071"
}
],
"symlink_target": ""
} |
"""Admin dashboard page smoke tests"""
# pylint: disable=no-self-use
# pylint: disable=invalid-name
# pylint: disable=too-few-public-methods
import re
import pytest # pylint: disable=import-error
from lib import base
from lib import constants
from lib.page import dashboard
class TestAdminDashboardPage(base.Test):
    """Smoke tests for the admin dashboard (smoke tests, section 1)."""
    _role_el = constants.element.AdminRolesWidget
    _event_el = constants.element.AdminEventsWidget

    @pytest.fixture(scope="function")
    def admin_dashboard(self, selenium):
        """Open the admin dashboard URL and return its page object."""
        selenium.get(dashboard.AdminDashboard.URL)
        return dashboard.AdminDashboard(selenium)

    @pytest.mark.smoke_tests
    def test_roles_widget(self, admin_dashboard):
        """Check count and content of role scopes"""
        roles_widget = admin_dashboard.select_roles()
        expected_scopes = self._role_el.ROLE_SCOPES_DICT
        actual_scopes = roles_widget.get_role_scopes_text_as_dict()
        # The roles tab counter must agree with the expected scope table.
        assert admin_dashboard.tab_roles.member_count == len(expected_scopes)
        assert expected_scopes == actual_scopes, "Expected '{}', got '{}'".format(
            expected_scopes, actual_scopes)

    @pytest.mark.smoke_tests
    def test_events_widget_tree_view_has_data(self, admin_dashboard):
        """Confirms tree view has at least one data row in valid format"""
        events_tab = admin_dashboard.select_events()
        event_rows = events_tab.get_events()
        assert len(event_rows) > 0
        # Collect rows whose text does not match the expected row format.
        row_pattern = re.compile(self._event_el.TREE_VIEW_ROW_REGEXP)
        badly_formatted = [
            getattr(row, 'text') for row in event_rows
            if not row_pattern.match(getattr(row, 'text'))]
        assert badly_formatted == []
        assert events_tab.widget_header.text == \
            self._event_el.TREE_VIEW_HEADER
| {
"content_hash": "e8fdc423488b8b4b1828cf811181c9af",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 74,
"avg_line_length": 39.86666666666667,
"alnum_prop": 0.7079152731326644,
"repo_name": "josthkko/ggrc-core",
"id": "db5124b41f2b5ba9f5391f225d7b95ca4ac8b86d",
"size": "1907",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "test/selenium/src/tests/test_admin_dashboard_page.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "163629"
},
{
"name": "Cucumber",
"bytes": "136321"
},
{
"name": "HTML",
"bytes": "1057288"
},
{
"name": "JavaScript",
"bytes": "1492054"
},
{
"name": "Makefile",
"bytes": "6161"
},
{
"name": "Mako",
"bytes": "2178"
},
{
"name": "Python",
"bytes": "2148568"
},
{
"name": "Shell",
"bytes": "29929"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.