text stringlengths 4 1.02M | meta dict |
|---|---|
from google.cloud import bigquery_connection_v1 as bq_connection
"""This sample shows how to create a BigQuery connection with a Cloud SQL for MySQL database"""
def main() -> None:
    """Create a BigQuery connection for a Cloud SQL for MySQL database.

    Fills in the sample placeholders, builds the Cloud SQL credential and
    properties messages, and delegates the API call to
    create_mysql_connection().
    """
    # TODO(developer): Set all variables for your Cloud SQL for MySQL connection.
    project_id = "your-project-id"  # set project_id
    location = "US"  # set location
    # See: https://cloud.google.com/bigquery/docs/locations for a list of
    # available locations.
    database = "my-database"  # set database name
    username = "my-username"  # set database username
    password = "my-password"  # set database password
    cloud_sql_conn_name = ""  # set the name of your connection
    credential = bq_connection.CloudSqlCredential(
        {"username": username, "password": password}
    )
    properties = bq_connection.CloudSqlProperties(
        {
            "type_": bq_connection.CloudSqlProperties.DatabaseType.MYSQL,
            "database": database,
            "instance_id": cloud_sql_conn_name,
            "credential": credential,
        }
    )
    create_mysql_connection(project_id, location, properties)
def create_mysql_connection(
    project_id: str,
    location: str,
    cloud_sql_properties: bq_connection.CloudSqlProperties,
) -> None:
    """Create the connection resource via the BigQuery Connection API.

    Prints the full resource name of the created connection.
    """
    client = bq_connection.ConnectionServiceClient()
    # Parent resource path: projects/<project_id>/locations/<location>.
    parent = client.common_location_path(project_id, location)
    conn = bq_connection.types.Connection({"cloud_sql": cloud_sql_properties})
    request = bq_connection.CreateConnectionRequest(
        {"parent": parent, "connection": conn}
    )
    response = client.create_connection(request)
    print(f"Created connection successfully: {response.name}")
# [END bigqueryconnection_create_connection]
| {
"content_hash": "fc1d5dbb03b653d4b1376d1d546ff954",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 95,
"avg_line_length": 36.95918367346939,
"alnum_prop": 0.676974047487576,
"repo_name": "googleapis/python-bigquery-connection",
"id": "a71c9678db0161256de85208608281d7e9161ae6",
"size": "2434",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/create_mysql_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "370142"
},
{
"name": "Shell",
"bytes": "34017"
}
],
"symlink_target": ""
} |
"""Tests for Latex exporter"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os.path
import textwrap
import re
from .base import ExportersTestsBase
from ..latex import LatexExporter
from IPython.nbformat import write
from IPython.nbformat import v4
from IPython.testing.decorators import onlyif_cmds_exist
from IPython.utils.tempdir import TemporaryDirectory
class TestLatexExporter(ExportersTestsBase):
    """Contains test functions for latex.py"""

    # Exporter class under test, and the raw-cell mimetypes it should keep.
    exporter_class = LatexExporter
    should_include_raw = ['latex']

    def test_constructor(self):
        """
        Can a LatexExporter be constructed?
        """
        LatexExporter()

    @onlyif_cmds_exist('pandoc')
    def test_export(self):
        """
        Can a LatexExporter export something?
        """
        (output, resources) = LatexExporter().from_filename(self._get_notebook())
        assert len(output) > 0

    @onlyif_cmds_exist('pandoc')
    def test_export_book(self):
        """
        Can a LatexExporter export using 'report' template?
        """
        (output, resources) = LatexExporter(template_file='report').from_filename(self._get_notebook())
        assert len(output) > 0

    @onlyif_cmds_exist('pandoc')
    def test_export_basic(self):
        """
        Can a LatexExporter export using 'article' template?
        """
        # NOTE(review): identical to test_export_article below — presumably one
        # of the two was meant to exercise a different template; confirm upstream.
        (output, resources) = LatexExporter(template_file='article').from_filename(self._get_notebook())
        assert len(output) > 0

    @onlyif_cmds_exist('pandoc')
    def test_export_article(self):
        """
        Can a LatexExporter export using 'article' template?
        """
        (output, resources) = LatexExporter(template_file='article').from_filename(self._get_notebook())
        assert len(output) > 0

    @onlyif_cmds_exist('pandoc')
    def test_very_long_cells(self):
        """
        Torture test that long cells do not cause issues
        """
        lorem_ipsum_text = textwrap.dedent("""\
            Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec
            dignissim, ipsum non facilisis tempus, dui felis tincidunt metus,
            nec pulvinar neque odio eget risus. Nulla nisi lectus, cursus
            suscipit interdum at, ultrices sit amet orci. Mauris facilisis
            imperdiet elit, vitae scelerisque ipsum dignissim non. Integer
            consequat malesuada neque sit amet pulvinar. Curabitur pretium
            ut turpis eget aliquet. Maecenas sagittis lacus sed lectus
            volutpat, eu adipiscing purus pulvinar. Maecenas consequat
            luctus urna, eget cursus quam mollis a. Aliquam vitae ornare
            erat, non hendrerit urna. Sed eu diam nec massa egestas pharetra
            at nec tellus. Fusce feugiat lacus quis urna sollicitudin volutpat.
            Quisque at sapien non nibh feugiat tempus ac ultricies purus.
            """)

        # Collapse to a single huge paragraph, then repeat it 3000 times.
        lorem_ipsum_text = lorem_ipsum_text.replace("\n"," ") + "\n\n"
        large_lorem_ipsum_text = "".join([lorem_ipsum_text]*3000)

        notebook_name = "lorem_ipsum_long.ipynb"
        nb = v4.new_notebook(
            cells=[
                v4.new_markdown_cell(source=large_lorem_ipsum_text)
            ]
        )

        with TemporaryDirectory() as td:
            nbfile = os.path.join(td, notebook_name)
            with open(nbfile, 'w') as f:
                write(nb, f, 4)

            (output, resources) = LatexExporter(template_file='article').from_filename(nbfile)
            assert len(output) > 0

    @onlyif_cmds_exist('pandoc')
    def test_prompt_number_color(self):
        """
        Does LatexExporter properly format input and output prompts in color?
        """
        (output, resources) = LatexExporter().from_filename(
            self._get_notebook(nb_name="prompt_numbers.ipynb"))
        # Prompts are rendered as e.g. "In [{\color{incolor}2}]:".
        in_regex = r"In \[\{\\color\{incolor\}(.*)\}\]:"
        out_regex = r"Out\[\{\\color\{outcolor\}(.*)\}\]:"

        # Expected prompt numbers from the fixture notebook; blanks and "*"
        # cover unexecuted / in-progress cells.
        ins = ["2", "10", " ", " ", "*", "0"]
        outs = ["10"]

        assert re.findall(in_regex, output) == ins
        assert re.findall(out_regex, output) == outs
| {
"content_hash": "c8a988a7ea1448f84c3d5dd787343b8d",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 104,
"avg_line_length": 35.35897435897436,
"alnum_prop": 0.6236403190717912,
"repo_name": "wolfram74/numerical_methods_iserles_notes",
"id": "28807af392720ae48af425325742faa12c673b60",
"size": "4137",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/IPython/nbconvert/exporters/tests/test_latex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "282435"
},
{
"name": "C++",
"bytes": "59801"
},
{
"name": "CSS",
"bytes": "2038"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "Groff",
"bytes": "6753"
},
{
"name": "HTML",
"bytes": "37522"
},
{
"name": "JavaScript",
"bytes": "1368241"
},
{
"name": "Python",
"bytes": "31296026"
},
{
"name": "Shell",
"bytes": "3869"
},
{
"name": "Smarty",
"bytes": "21425"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from glob import glob
from os import path
def requirements(filename):
    """Read a pip requirements file and return its non-blank lines.

    Each line is stripped of surrounding whitespace and blank lines are
    dropped.  Returns a concrete list rather than a lazy ``filter`` object,
    and closes the file deterministically (the original leaked the open
    file handle and returned an iterator on Python 3).
    """
    with open(filename) as fh:
        return [line.strip() for line in fh if line.strip()]
# Package layout: importable code lives under pylib/ (the "aeon" package).
libdir = 'pylib'
aeondir = 'pylib/aeon'

packages = find_packages(libdir)

setup(
    name="aeon-venos",
    version="0.9.18",
    author="Jeremy Schulman",
    url='https://github.com/Apstra/aeon-venos',
    author_email="jeremy@apstra.com",
    description="Aeon vendor NOS driver library",
    license="Apache 2.0",
    keywords="networking automation vendor-agnostic",
    package_dir={'': libdir},
    packages=packages,
    # Optional per-NOS dependency sets, e.g. ``pip install aeon-venos[eos]``.
    extras_require={
        "eos": ["pyeapi", "pexpect==4.2.1"],
        "nxos": ["lxml", "requests", "pexpect==4.2.1"],
        "cumulus": ["paramiko>2.0.0", "pexpect==4.2.1"],
        "ubuntu": ["paramiko>2.0.0", "pexpect==4.2.1"],
        "centos": ["paramiko>2.0.0", "pexpect==4.2.1"]
    },
    # Ship everything in bin/ as executable scripts.
    scripts=glob('bin/*'),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Telecommunications Industry',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Networking',
    ],
)
| {
"content_hash": "fdabf84f8bda6c78e66b1087403ccbec",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 79,
"avg_line_length": 33.82,
"alnum_prop": 0.6132465996451804,
"repo_name": "Apstra/aeon-venos",
"id": "ee6b1e25cc184945c81615825a38894f8bd7427e",
"size": "1911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "130"
},
{
"name": "Python",
"bytes": "91545"
}
],
"symlink_target": ""
} |
from sys import argv
import feedparser
# Error handlers
# --------------
# Check for incorrect usage: prompt interactively when no URL was given.
if len(argv) == 1:
    print("URL is missing.\n")
    url = input("Enter it: ")
# Default case
else:
    url = argv[1]
# Fix broken links without protocol
if not url.startswith(("http://", "https://")):
    url = "http://" + url
# Configuration
# -------------
# Indent used when printing individual entries.
indent_length = 2  # fixed identifier typo: was "indent_lenght"
indent = " " * indent_length
# Truncation (depends on indent)
feed_trunc = 59
entry_trunc = 56
# Helper functions
# ----------------
# Bold decoration
def bold(string):
    """Wrap *string* in ANSI escape codes for bold terminal output."""
    return "\033[1m{}\033[0m".format(string)
# Truncation
def trunc(trunc, string):
    """Return *string*, cut to *trunc* characters with a "..." suffix."""
    if len(string) <= trunc:
        return string
    return string[:trunc] + "..."
# Display data
# ------------
print()
# Feed data — parsed once at import time; shared by feed() and entries().
data = feedparser.parse(url)
# Display feed properties
def feed():
    """Print the parsed feed's title, optional description, and link."""
    info = data.feed
    print(bold("Feed title: ") + info.title)
    if "description" in info:
        print(bold("Feed description: ") + trunc(feed_trunc, info.description))
    print(bold("Feed link: ") + info.link + "\n")
# Display entries properties
def entries():
    """Print each entry's title, optional description, and link, indented."""
    print(bold("Feed entries:\n"))
    for item in data.entries:
        print(indent + bold("Entry title: ") + item.title)
        if "description" in item:
            print(indent + bold("Entry description: ")
                  + trunc(entry_trunc, item.description))
        print(indent + bold("Entry link: ") + item.link + "\n")
# Display data only when ran as main
def main():
    """Print the feed header followed by all entries."""
    feed()
    entries()


if __name__ == "__main__":
    main()
| {
"content_hash": "b44490f4c904091e8ebd9fbb6910530f",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 80,
"avg_line_length": 20.77027027027027,
"alnum_prop": 0.620039037085231,
"repo_name": "ZDroid/feedstyl",
"id": "580f72836ea05a20362385f92b7874d03ef35083",
"size": "1636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feedstyl.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1636"
}
],
"symlink_target": ""
} |
import asyncio
import datetime
import json
import re
from unittest import mock
import pytest
from multidict import CIMultiDict
from aiohttp import HttpVersion, HttpVersion10, HttpVersion11, hdrs, signals
from aiohttp.payload import BytesPayload
from aiohttp.test_utils import make_mocked_request
from aiohttp.web import ContentCoding, Response, StreamResponse, json_response
def make_request(method, path, headers=None,
                 version=HttpVersion11, **kwargs):
    """Build a mocked aiohttp request for the tests in this module.

    ``app`` and ``protocol`` may be supplied via **kwargs; otherwise plain
    mocks are used.  Remaining kwargs are forwarded to make_mocked_request.
    """
    # Bug fix: the original used a mutable default (``headers=CIMultiDict()``),
    # so every caller relying on the default shared one dict instance and any
    # mutation leaked between tests.  Create a fresh dict per call instead.
    if headers is None:
        headers = CIMultiDict()
    app = kwargs.pop('app', None) or mock.Mock()
    app._debug = False
    app.on_response_prepare = signals.Signal(app)
    protocol = kwargs.pop('protocol', None) or mock.Mock()
    return make_mocked_request(method, path, headers,
                               version=version, protocol=protocol,
                               app=app, **kwargs)
@pytest.yield_fixture
def buf():
    # Bytearray that accumulates everything the mocked writer emits.
    # NOTE(review): ``pytest.yield_fixture`` is deprecated and neither fixture
    # here yields — presumably plain ``pytest.fixture`` was intended; confirm.
    return bytearray()


@pytest.yield_fixture
def writer(buf):
    """Mock payload writer whose write/EOF side effects append into ``buf``."""
    writer = mock.Mock()

    def acquire(cb):
        cb(writer.transport)

    def buffer_data(chunk):
        buf.extend(chunk)

    def write(chunk):
        buf.extend(chunk)

    def write_headers(status_line, headers):
        # Serialize status line + headers the way an HTTP writer would.
        headers = status_line + ''.join(
            [k + ': ' + v + '\r\n' for k, v in headers.items()])
        headers = headers.encode('utf-8') + b'\r\n'
        buf.extend(headers)

    @asyncio.coroutine
    def write_eof(chunk=b''):
        buf.extend(chunk)

    writer.acquire.side_effect = acquire
    writer.transport.write.side_effect = write
    writer.write.side_effect = write
    writer.write_eof.side_effect = write_eof
    writer.write_headers.side_effect = write_headers
    writer.buffer_data.side_effect = buffer_data
    writer.drain.return_value = ()
    return writer
# --- StreamResponse construction and Content-Length handling ----------------

def test_stream_response_ctor():
    resp = StreamResponse()
    assert 200 == resp.status
    assert resp.keep_alive is None
    assert resp.task is None

    req = mock.Mock()
    resp._req = req
    assert resp.task is req.task


def test_content_length():
    resp = StreamResponse()
    assert resp.content_length is None


def test_content_length_setter():
    resp = StreamResponse()

    resp.content_length = 234
    assert 234 == resp.content_length


def test_content_length_setter_with_enable_chunked_encoding():
    resp = StreamResponse()

    # chunked transfer encoding and an explicit length are mutually exclusive
    resp.enable_chunked_encoding()
    with pytest.raises(RuntimeError):
        resp.content_length = 234


def test_drop_content_length_header_on_setting_len_to_None():
    resp = StreamResponse()

    resp.content_length = 1
    assert "1" == resp.headers['Content-Length']
    resp.content_length = None
    assert 'Content-Length' not in resp.headers


def test_set_content_length_to_None_on_non_set():
    resp = StreamResponse()

    # setting None twice must stay a no-op
    resp.content_length = None
    assert 'Content-Length' not in resp.headers
    resp.content_length = None
    assert 'Content-Length' not in resp.headers
# --- Content-Type / charset / Last-Modified ---------------------------------

def test_setting_content_type():
    resp = StreamResponse()

    resp.content_type = 'text/html'
    assert 'text/html' == resp.headers['content-type']


def test_setting_charset():
    resp = StreamResponse()

    resp.content_type = 'text/html'
    resp.charset = 'koi8-r'
    assert 'text/html; charset=koi8-r' == resp.headers['content-type']


def test_default_charset():
    resp = StreamResponse()

    assert resp.charset is None


def test_reset_charset():
    resp = StreamResponse()

    resp.content_type = 'text/html'
    resp.charset = None
    assert resp.charset is None


def test_reset_charset_after_setting():
    resp = StreamResponse()

    resp.content_type = 'text/html'
    resp.charset = 'koi8-r'
    resp.charset = None
    assert resp.charset is None


def test_charset_without_content_type():
    resp = StreamResponse()

    # charset may only be set once a content type exists
    with pytest.raises(RuntimeError):
        resp.charset = 'koi8-r'


def test_last_modified_initial():
    resp = StreamResponse()
    assert resp.last_modified is None


def test_last_modified_string():
    resp = StreamResponse()

    dt = datetime.datetime(1990, 1, 2, 3, 4, 5, 0, datetime.timezone.utc)
    resp.last_modified = 'Mon, 2 Jan 1990 03:04:05 GMT'
    assert resp.last_modified == dt


def test_last_modified_timestamp():
    resp = StreamResponse()

    dt = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, datetime.timezone.utc)

    # both int and float epoch timestamps are accepted
    resp.last_modified = 0
    assert resp.last_modified == dt

    resp.last_modified = 0.0
    assert resp.last_modified == dt


def test_last_modified_datetime():
    resp = StreamResponse()

    dt = datetime.datetime(2001, 2, 3, 4, 5, 6, 0, datetime.timezone.utc)
    resp.last_modified = dt
    assert resp.last_modified == dt


def test_last_modified_reset():
    resp = StreamResponse()

    resp.last_modified = 0
    resp.last_modified = None
    assert resp.last_modified is None
# --- prepare() and chunked transfer encoding --------------------------------

@asyncio.coroutine
def test_start():
    req = make_request('GET', '/', payload_writer=mock.Mock())
    resp = StreamResponse()
    assert resp.keep_alive is None

    msg = yield from resp.prepare(req)

    assert msg.write_headers.called
    # preparing a second time must return the same payload writer
    msg2 = yield from resp.prepare(req)
    assert msg is msg2

    assert resp.keep_alive

    req2 = make_request('GET', '/')
    # with pytest.raises(RuntimeError):
    msg3 = yield from resp.prepare(req2)
    assert msg is msg3


@asyncio.coroutine
def test_chunked_encoding():
    req = make_request('GET', '/')
    resp = StreamResponse()
    assert not resp.chunked

    resp.enable_chunked_encoding()
    assert resp.chunked

    msg = yield from resp.prepare(req)
    assert msg.chunked


def test_enable_chunked_encoding_with_content_length():
    resp = StreamResponse()

    resp.content_length = 234
    with pytest.raises(RuntimeError):
        resp.enable_chunked_encoding()


@asyncio.coroutine
def test_chunk_size():
    req = make_request('GET', '/', payload_writer=mock.Mock())
    resp = StreamResponse()
    assert not resp.chunked

    # passing an explicit chunk_size is deprecated
    with pytest.warns(DeprecationWarning):
        resp.enable_chunked_encoding(chunk_size=8192)
    assert resp.chunked

    msg = yield from resp.prepare(req)
    assert msg.chunked
    assert msg.enable_chunking.called
    assert msg.filter is not None


@asyncio.coroutine
def test_chunked_encoding_forbidden_for_http_10():
    req = make_request('GET', '/', version=HttpVersion10)
    resp = StreamResponse()
    resp.enable_chunked_encoding()

    with pytest.raises(RuntimeError) as ctx:
        yield from resp.prepare(req)
    assert re.match("Using chunked encoding is forbidden for HTTP/1.0",
                    str(ctx.value))
# --- Compression negotiation ------------------------------------------------

@asyncio.coroutine
def test_compression_no_accept():
    # no Accept-Encoding header -> compression stays off at the writer level
    req = make_request('GET', '/', payload_writer=mock.Mock())
    resp = StreamResponse()
    assert not resp.chunked

    assert not resp.compression
    resp.enable_compression()
    assert resp.compression

    msg = yield from resp.prepare(req)
    assert not msg.enable_compression.called


@asyncio.coroutine
def test_force_compression_no_accept_backwards_compat():
    req = make_request('GET', '/', payload_writer=mock.Mock())
    resp = StreamResponse()
    assert not resp.chunked

    assert not resp.compression
    resp.enable_compression(force=True)
    assert resp.compression

    msg = yield from resp.prepare(req)
    assert msg.enable_compression.called
    assert msg.filter is not None


@asyncio.coroutine
def test_force_compression_false_backwards_compat():
    req = make_request('GET', '/', payload_writer=mock.Mock())
    resp = StreamResponse()

    assert not resp.compression
    resp.enable_compression(force=False)
    assert resp.compression

    msg = yield from resp.prepare(req)
    assert not msg.enable_compression.called


@asyncio.coroutine
def test_compression_default_coding():
    req = make_request(
        'GET', '/',
        headers=CIMultiDict({hdrs.ACCEPT_ENCODING: 'gzip, deflate'}))
    resp = StreamResponse()
    assert not resp.chunked

    assert not resp.compression
    resp.enable_compression()
    assert resp.compression

    msg = yield from resp.prepare(req)

    msg.enable_compression.assert_called_with('deflate')
    assert 'deflate' == resp.headers.get(hdrs.CONTENT_ENCODING)
    assert msg.filter is not None


@asyncio.coroutine
def test_force_compression_deflate():
    req = make_request(
        'GET', '/',
        headers=CIMultiDict({hdrs.ACCEPT_ENCODING: 'gzip, deflate'}))
    resp = StreamResponse()

    resp.enable_compression(ContentCoding.deflate)
    assert resp.compression

    msg = yield from resp.prepare(req)
    msg.enable_compression.assert_called_with('deflate')
    assert 'deflate' == resp.headers.get(hdrs.CONTENT_ENCODING)


@asyncio.coroutine
def test_force_compression_no_accept_deflate():
    req = make_request('GET', '/')
    resp = StreamResponse()

    resp.enable_compression(ContentCoding.deflate)
    assert resp.compression

    msg = yield from resp.prepare(req)
    msg.enable_compression.assert_called_with('deflate')
    assert 'deflate' == resp.headers.get(hdrs.CONTENT_ENCODING)


@asyncio.coroutine
def test_force_compression_gzip():
    req = make_request(
        'GET', '/',
        headers=CIMultiDict({hdrs.ACCEPT_ENCODING: 'gzip, deflate'}))
    resp = StreamResponse()

    resp.enable_compression(ContentCoding.gzip)
    assert resp.compression

    msg = yield from resp.prepare(req)
    msg.enable_compression.assert_called_with('gzip')
    assert 'gzip' == resp.headers.get(hdrs.CONTENT_ENCODING)


@asyncio.coroutine
def test_force_compression_no_accept_gzip():
    req = make_request('GET', '/')
    resp = StreamResponse()

    resp.enable_compression(ContentCoding.gzip)
    assert resp.compression

    msg = yield from resp.prepare(req)
    msg.enable_compression.assert_called_with('gzip')
    assert 'gzip' == resp.headers.get(hdrs.CONTENT_ENCODING)
# --- Content-Length vs. compression interplay -------------------------------

@asyncio.coroutine
def test_change_content_length_if_compression_enabled():
    req = make_request('GET', '/')
    resp = Response(body=b'answer')
    resp.enable_compression(ContentCoding.gzip)

    yield from resp.prepare(req)

    # the compressed output length differs from the raw body length
    assert resp.content_length is not None and \
        resp.content_length != len(b'answer')


@asyncio.coroutine
def test_set_content_length_if_compression_enabled():
    writer = mock.Mock()

    def write_headers(status_line, headers):
        assert hdrs.CONTENT_LENGTH in headers
        assert headers[hdrs.CONTENT_LENGTH] == '26'
        assert hdrs.TRANSFER_ENCODING not in headers

    writer.write_headers.side_effect = write_headers

    req = make_request('GET', '/', payload_writer=writer)
    resp = Response(body=b'answer')
    resp.enable_compression(ContentCoding.gzip)

    yield from resp.prepare(req)
    assert resp.content_length == 26
    del resp.headers[hdrs.CONTENT_LENGTH]
    assert resp.content_length == 26


@asyncio.coroutine
def test_remove_content_length_if_compression_enabled_http11():
    writer = mock.Mock()

    def write_headers(status_line, headers):
        # HTTP/1.1 switches to chunked when the final length is unknown
        assert hdrs.CONTENT_LENGTH not in headers
        assert headers.get(hdrs.TRANSFER_ENCODING, '') == 'chunked'

    writer.write_headers.side_effect = write_headers

    req = make_request('GET', '/', payload_writer=writer)
    resp = StreamResponse()
    resp.content_length = 123
    resp.enable_compression(ContentCoding.gzip)

    yield from resp.prepare(req)
    assert resp.content_length is None


@asyncio.coroutine
def test_remove_content_length_if_compression_enabled_http10():
    writer = mock.Mock()

    def write_headers(status_line, headers):
        # HTTP/1.0 has neither header: connection close delimits the body
        assert hdrs.CONTENT_LENGTH not in headers
        assert hdrs.TRANSFER_ENCODING not in headers

    writer.write_headers.side_effect = write_headers

    req = make_request('GET', '/', version=HttpVersion10,
                       payload_writer=writer)
    resp = StreamResponse()
    resp.content_length = 123
    resp.enable_compression(ContentCoding.gzip)

    yield from resp.prepare(req)
    assert resp.content_length is None


@asyncio.coroutine
def test_force_compression_identity():
    writer = mock.Mock()

    def write_headers(status_line, headers):
        assert hdrs.CONTENT_LENGTH in headers
        assert hdrs.TRANSFER_ENCODING not in headers

    writer.write_headers.side_effect = write_headers

    req = make_request('GET', '/',
                       payload_writer=writer)
    resp = StreamResponse()
    resp.content_length = 123
    # identity coding must leave the length untouched
    resp.enable_compression(ContentCoding.identity)

    yield from resp.prepare(req)
    assert resp.content_length == 123


@asyncio.coroutine
def test_force_compression_identity_response():
    writer = mock.Mock()

    def write_headers(status_line, headers):
        assert headers[hdrs.CONTENT_LENGTH] == "6"
        assert hdrs.TRANSFER_ENCODING not in headers

    writer.write_headers.side_effect = write_headers

    req = make_request('GET', '/',
                       payload_writer=writer)
    resp = Response(body=b'answer')
    resp.enable_compression(ContentCoding.identity)

    yield from resp.prepare(req)
    assert resp.content_length == 6


@asyncio.coroutine
def test_remove_content_length_if_compression_enabled_on_payload_http11():
    writer = mock.Mock()

    def write_headers(status_line, headers):
        assert hdrs.CONTENT_LENGTH not in headers
        assert headers.get(hdrs.TRANSFER_ENCODING, '') == 'chunked'

    writer.write_headers.side_effect = write_headers

    req = make_request('GET', '/', payload_writer=writer)
    payload = BytesPayload(b'answer', headers={"X-Test-Header": "test"})
    resp = Response(body=payload)
    assert resp.content_length == 6
    resp.body = payload
    resp.enable_compression(ContentCoding.gzip)

    yield from resp.prepare(req)
    assert resp.content_length is None


@asyncio.coroutine
def test_remove_content_length_if_compression_enabled_on_payload_http10():
    writer = mock.Mock()

    def write_headers(status_line, headers):
        assert hdrs.CONTENT_LENGTH not in headers
        assert hdrs.TRANSFER_ENCODING not in headers

    writer.write_headers.side_effect = write_headers

    req = make_request('GET', '/', version=HttpVersion10,
                       payload_writer=writer)
    resp = Response(body=BytesPayload(b'answer'))
    resp.enable_compression(ContentCoding.gzip)

    yield from resp.prepare(req)
    assert resp.content_length is None
# --- Writing the body and EOF handling --------------------------------------

@asyncio.coroutine
def test_content_length_on_chunked():
    req = make_request('GET', '/')
    resp = Response(body=b'answer')
    assert resp.content_length == 6
    resp.enable_chunked_encoding()
    assert resp.content_length is None
    yield from resp.prepare(req)


@asyncio.coroutine
def test_write_non_byteish():
    resp = StreamResponse()
    yield from resp.prepare(make_request('GET', '/'))

    with pytest.raises(AssertionError):
        resp.write(123)


def test_write_before_start():
    resp = StreamResponse()

    with pytest.raises(RuntimeError):
        resp.write(b'data')


@asyncio.coroutine
def test_cannot_write_after_eof():
    resp = StreamResponse()
    writer = mock.Mock()
    resp_impl = yield from resp.prepare(
        make_request('GET', '/', writer=writer))
    resp_impl.write_eof = mock.Mock()
    resp_impl.write_eof.return_value = ()

    resp.write(b'data')
    yield from resp.write_eof()
    writer.write.reset_mock()

    with pytest.raises(RuntimeError):
        resp.write(b'next data')
    assert not writer.write.called


@asyncio.coroutine
def test___repr___after_eof():
    resp = StreamResponse()
    yield from resp.prepare(make_request('GET', '/'))

    assert resp.prepared

    resp.write(b'data')
    yield from resp.write_eof()
    assert not resp.prepared
    resp_repr = repr(resp)
    assert resp_repr == '<StreamResponse OK eof>'


@asyncio.coroutine
def test_cannot_write_eof_before_headers():
    resp = StreamResponse()

    with pytest.raises(AssertionError):
        yield from resp.write_eof()


@asyncio.coroutine
def test_cannot_write_eof_twice():
    resp = StreamResponse()
    writer = mock.Mock()
    resp_impl = yield from resp.prepare(make_request('GET', '/'))
    resp_impl.write = mock.Mock()
    resp_impl.write_eof = mock.Mock()
    resp_impl.write_eof.return_value = ()

    resp.write(b'data')
    assert resp_impl.write.called

    yield from resp.write_eof()

    # the second EOF must be a no-op
    resp_impl.write.reset_mock()
    yield from resp.write_eof()
    assert not writer.write.called


# NOTE(review): the leading underscore disables the two tests below;
# presumably kept for reference against older writer behaviour — confirm.
@asyncio.coroutine
def _test_write_returns_drain():
    resp = StreamResponse()
    yield from resp.prepare(make_request('GET', '/'))

    with mock.patch('aiohttp.http_writer.noop') as noop:
        assert noop == resp.write(b'data')


@asyncio.coroutine
def _test_write_returns_empty_tuple_on_empty_data():
    resp = StreamResponse()
    yield from resp.prepare(make_request('GET', '/'))

    with mock.patch('aiohttp.http_writer.noop') as noop:
        assert noop.return_value == resp.write(b'')
# --- keep-alive flag, deprecated output_length, cookies, status -------------

def test_force_close():
    resp = StreamResponse()

    assert resp.keep_alive is None
    resp.force_close()
    assert resp.keep_alive is False


@asyncio.coroutine
def test_response_output_length():
    resp = StreamResponse()
    yield from resp.prepare(make_request('GET', '/'))
    # output_length is deprecated but must still report a value
    with pytest.warns(DeprecationWarning):
        assert resp.output_length


def test_response_cookies():
    resp = StreamResponse()

    assert resp.cookies == {}
    assert str(resp.cookies) == ''

    resp.set_cookie('name', 'value')
    assert str(resp.cookies) == 'Set-Cookie: name=value; Path=/'
    resp.set_cookie('name', 'other_value')
    assert str(resp.cookies) == 'Set-Cookie: name=other_value; Path=/'

    resp.cookies['name'] = 'another_other_value'
    resp.cookies['name']['max-age'] = 10
    assert (str(resp.cookies) ==
            'Set-Cookie: name=another_other_value; Max-Age=10; Path=/')

    resp.del_cookie('name')
    expected = ('Set-Cookie: name=("")?; '
                'expires=Thu, 01 Jan 1970 00:00:00 GMT; Max-Age=0; Path=/')
    assert re.match(expected, str(resp.cookies))

    resp.set_cookie('name', 'value', domain='local.host')
    expected = 'Set-Cookie: name=value; Domain=local.host; Path=/'
    assert str(resp.cookies) == expected


def test_response_cookie_path():
    resp = StreamResponse()

    assert resp.cookies == {}

    resp.set_cookie('name', 'value', path='/some/path')
    assert str(resp.cookies) == 'Set-Cookie: name=value; Path=/some/path'
    resp.set_cookie('name', 'value', expires='123')
    assert (str(resp.cookies) ==
            'Set-Cookie: name=value; expires=123; Path=/')
    resp.set_cookie('name', 'value', domain='example.com',
                    path='/home', expires='123', max_age='10',
                    secure=True, httponly=True, version='2.0')
    assert (str(resp.cookies).lower() == 'set-cookie: name=value; '
            'domain=example.com; '
            'expires=123; '
            'httponly; '
            'max-age=10; '
            'path=/home; '
            'secure; '
            'version=2.0')


def test_response_cookie__issue_del_cookie():
    resp = StreamResponse()

    assert resp.cookies == {}
    assert str(resp.cookies) == ''

    resp.del_cookie('name')
    expected = ('Set-Cookie: name=("")?; '
                'expires=Thu, 01 Jan 1970 00:00:00 GMT; Max-Age=0; Path=/')
    assert re.match(expected, str(resp.cookies))


def test_cookie_set_after_del():
    resp = StreamResponse()

    resp.del_cookie('name')
    resp.set_cookie('name', 'val')
    # check for Max-Age dropped
    expected = 'Set-Cookie: name=val; Path=/'
    assert str(resp.cookies) == expected


def test_set_status_with_reason():
    resp = StreamResponse()

    # the reason phrase is echoed back verbatim ("Everithing" misspelling
    # is in the fixture string itself, not a bug in the assertion)
    resp.set_status(200, "Everithing is fine!")
    assert 200 == resp.status
    assert "Everithing is fine!" == resp.reason
# --- keep-alive negotiation, __repr__, prepare signal, TCP socket opts ------

@asyncio.coroutine
def test_start_force_close():
    req = make_request('GET', '/')
    resp = StreamResponse()
    resp.force_close()
    assert not resp.keep_alive

    yield from resp.prepare(req)
    assert not resp.keep_alive


@asyncio.coroutine
def test___repr__():
    req = make_request('GET', '/path/to')
    resp = StreamResponse(reason=301)
    yield from resp.prepare(req)
    assert "<StreamResponse 301 GET /path/to >" == repr(resp)


def test___repr___not_prepared():
    resp = StreamResponse(reason=301)
    assert "<StreamResponse 301 not prepared>" == repr(resp)


@asyncio.coroutine
def test_keep_alive_http10_default():
    # HTTP/1.0 defaults to closing the connection
    req = make_request('GET', '/', version=HttpVersion10)
    resp = StreamResponse()
    yield from resp.prepare(req)
    assert not resp.keep_alive


@asyncio.coroutine
def test_keep_alive_http10_switched_on():
    headers = CIMultiDict(Connection='keep-alive')
    req = make_request('GET', '/', version=HttpVersion10, headers=headers)
    req._message = req._message._replace(should_close=False)
    resp = StreamResponse()
    yield from resp.prepare(req)
    assert resp.keep_alive


@asyncio.coroutine
def test_keep_alive_http09():
    headers = CIMultiDict(Connection='keep-alive')
    req = make_request('GET', '/', version=HttpVersion(0, 9), headers=headers)
    resp = StreamResponse()
    yield from resp.prepare(req)
    assert not resp.keep_alive


@asyncio.coroutine
def test_prepare_twice():
    req = make_request('GET', '/')
    resp = StreamResponse()

    impl1 = yield from resp.prepare(req)
    impl2 = yield from resp.prepare(req)
    assert impl1 is impl2


@asyncio.coroutine
def test_prepare_calls_signal():
    app = mock.Mock()
    req = make_request('GET', '/', app=app)
    resp = StreamResponse()

    sig = mock.Mock()
    app.on_response_prepare.append(sig)
    yield from resp.prepare(req)

    sig.assert_called_with(req, resp)


def test_get_nodelay_unprepared():
    resp = StreamResponse()
    # socket options are only reachable once prepare() has run
    with pytest.raises(AssertionError):
        resp.tcp_nodelay


def test_set_nodelay_unprepared():
    resp = StreamResponse()
    with pytest.raises(AssertionError):
        resp.set_tcp_nodelay(True)


@asyncio.coroutine
def test_get_nodelay_prepared():
    resp = StreamResponse()
    writer = mock.Mock()
    writer.tcp_nodelay = False
    req = make_request('GET', '/', payload_writer=writer)

    yield from resp.prepare(req)
    assert not resp.tcp_nodelay


@asyncio.coroutine
def test_set_nodelay_prepared():
    resp = StreamResponse()
    writer = mock.Mock()
    req = make_request('GET', '/', payload_writer=writer)

    yield from resp.prepare(req)
    resp.set_tcp_nodelay(True)
    writer.set_tcp_nodelay.assert_called_with(True)


def test_get_cork_unprepared():
    resp = StreamResponse()
    with pytest.raises(AssertionError):
        resp.tcp_cork


def test_set_cork_unprepared():
    resp = StreamResponse()
    with pytest.raises(AssertionError):
        resp.set_tcp_cork(True)


@asyncio.coroutine
def test_get_cork_prepared():
    resp = StreamResponse()
    writer = mock.Mock()
    writer.tcp_cork = False
    req = make_request('GET', '/', payload_writer=writer)

    yield from resp.prepare(req)
    assert not resp.tcp_cork


@asyncio.coroutine
def test_set_cork_prepared():
    resp = StreamResponse()
    writer = mock.Mock()
    req = make_request('GET', '/', payload_writer=writer)

    yield from resp.prepare(req)
    resp.set_tcp_cork(True)
    writer.set_tcp_cork.assert_called_with(True)
# Response class


def test_response_ctor():
    resp = Response()

    assert 200 == resp.status
    assert 'OK' == resp.reason
    assert resp.body is None
    assert resp.content_length == 0
    assert 'CONTENT-LENGTH' not in resp.headers


def test_ctor_with_headers_and_status():
    resp = Response(body=b'body', status=201,
                    headers={'Age': '12', 'DATE': 'date'})

    assert 201 == resp.status
    assert b'body' == resp.body
    assert resp.headers['AGE'] == '12'

    resp._start(mock.Mock(version=HttpVersion11))
    assert 4 == resp.content_length
    assert resp.headers['CONTENT-LENGTH'] == '4'


def test_ctor_content_type():
    resp = Response(content_type='application/json')

    assert 200 == resp.status
    assert 'OK' == resp.reason
    assert 0 == resp.content_length
    assert (CIMultiDict([('CONTENT-TYPE', 'application/json')]) ==
            resp.headers)


def test_ctor_text_body_combined():
    # body= and text= are mutually exclusive
    with pytest.raises(ValueError):
        Response(body=b'123', text='test text')


def test_ctor_text():
    resp = Response(text='test text')

    assert 200 == resp.status
    assert 'OK' == resp.reason
    assert 9 == resp.content_length
    assert (CIMultiDict(
        [('CONTENT-TYPE', 'text/plain; charset=utf-8')]) == resp.headers)

    assert resp.body == b'test text'
    assert resp.text == 'test text'

    resp.headers['DATE'] = 'date'
    resp._start(mock.Mock(version=HttpVersion11))
    assert resp.headers['CONTENT-LENGTH'] == '9'


def test_ctor_charset():
    resp = Response(text='текст', charset='koi8-r')

    assert 'текст'.encode('koi8-r') == resp.body
    assert 'koi8-r' == resp.charset


def test_ctor_charset_default_utf8():
    resp = Response(text='test test', charset=None)
    assert 'utf-8' == resp.charset


def test_ctor_charset_in_content_type():
    # charset must not be duplicated inside content_type
    with pytest.raises(ValueError):
        Response(text='test test', content_type='text/plain; charset=utf-8')


def test_ctor_charset_without_text():
    resp = Response(content_type='text/plain', charset='koi8-r')
    assert 'koi8-r' == resp.charset


def test_ctor_both_content_type_param_and_header_with_text():
    with pytest.raises(ValueError):
        Response(headers={'Content-Type': 'application/json'},
                 content_type='text/html', text='text')


def test_ctor_both_charset_param_and_header_with_text():
    with pytest.raises(ValueError):
        Response(headers={'Content-Type': 'application/json'},
                 charset='koi8-r', text='text')


def test_ctor_both_content_type_param_and_header():
    with pytest.raises(ValueError):
        Response(headers={'Content-Type': 'application/json'},
                 content_type='text/html')


def test_ctor_both_charset_param_and_header():
    with pytest.raises(ValueError):
        Response(headers={'Content-Type': 'application/json'},
                 charset='koi8-r')


def test_assign_nonbyteish_body():
    resp = Response(body=b'data')

    # a failed assignment must leave the previous body intact
    with pytest.raises(ValueError):
        resp.body = 123
    assert b'data' == resp.body
    assert 4 == resp.content_length

    resp.headers['DATE'] = 'date'
    resp._start(mock.Mock(version=HttpVersion11))
    assert resp.headers['CONTENT-LENGTH'] == '4'
    assert 4 == resp.content_length
def test_assign_nonstr_text():
resp = Response(text='test')
with pytest.raises(AssertionError):
resp.text = b'123'
assert b'test' == resp.body
assert 4 == resp.content_length
def test_response_set_content_length():
resp = Response()
with pytest.raises(RuntimeError):
resp.content_length = 1
# --- Wire-format rendering via prepare()/write_eof() ------------------------
@asyncio.coroutine
def test_send_headers_for_empty_body(buf, writer):
    """An empty response still emits a full status line and default headers."""
    req = make_request('GET', '/', payload_writer=writer)
    resp = Response()
    yield from resp.prepare(req)
    yield from resp.write_eof()
    txt = buf.decode('utf8')
    assert re.match('HTTP/1.1 200 OK\r\n'
                    'Content-Length: 0\r\n'
                    'Content-Type: application/octet-stream\r\n'
                    'Date: .+\r\n'
                    'Server: .+\r\n\r\n', txt)
@asyncio.coroutine
def test_render_with_body(buf, writer):
    """The body bytes follow the rendered header block."""
    req = make_request('GET', '/', payload_writer=writer)
    resp = Response(body=b'data')
    yield from resp.prepare(req)
    yield from resp.write_eof()
    txt = buf.decode('utf8')
    assert re.match('HTTP/1.1 200 OK\r\n'
                    'Content-Length: 4\r\n'
                    'Content-Type: application/octet-stream\r\n'
                    'Date: .+\r\n'
                    'Server: .+\r\n\r\n'
                    'data', txt)
@asyncio.coroutine
def test_send_set_cookie_header(buf, writer):
    """Cookies set on the response are rendered as Set-Cookie headers."""
    resp = Response()
    resp.cookies['name'] = 'value'
    req = make_request('GET', '/', payload_writer=writer)
    yield from resp.prepare(req)
    yield from resp.write_eof()
    txt = buf.decode('utf8')
    assert re.match('HTTP/1.1 200 OK\r\n'
                    'Content-Length: 0\r\n'
                    'Set-Cookie: name=value\r\n'
                    'Content-Type: application/octet-stream\r\n'
                    'Date: .+\r\n'
                    'Server: .+\r\n\r\n', txt)
@asyncio.coroutine
def test_consecutive_write_eof():
    """A second write_eof() is a no-op: the parent is called only once."""
    req = make_request('GET', '/')
    data = b'data'
    resp = Response(body=data)
    yield from resp.prepare(req)
    with mock.patch('aiohttp.web.StreamResponse.write_eof') as super_write_eof:
        yield from resp.write_eof()
        resp._eof_sent = True
        yield from resp.write_eof()
    super_write_eof.assert_called_once_with(data)
def test_set_text_with_content_type():
    """Assigning text keeps a previously set content type."""
    resp = Response()
    resp.content_type = "text/html"
    resp.text = "text"
    assert "text" == resp.text
    assert b"text" == resp.body
    assert "text/html" == resp.content_type
def test_set_text_with_charset():
    """Assigning text respects a previously set charset (case-folded)."""
    resp = Response()
    resp.content_type = 'text/plain'
    resp.charset = "KOI8-R"
    resp.text = "текст"
    assert "текст" == resp.text
    assert "текст".encode('koi8-r') == resp.body
    assert "koi8-r" == resp.charset
def test_default_content_type_in_stream_response():
    """StreamResponse defaults to application/octet-stream."""
    resp = StreamResponse()
    assert resp.content_type == 'application/octet-stream'
def test_default_content_type_in_response():
    """Response defaults to application/octet-stream."""
    resp = Response()
    assert resp.content_type == 'application/octet-stream'
def test_content_type_with_set_text():
    """text= implies text/plain."""
    resp = Response(text='text')
    assert resp.content_type == 'text/plain'
def test_content_type_with_set_body():
    """body= keeps the octet-stream default."""
    resp = Response(body=b'body')
    assert resp.content_type == 'application/octet-stream'
def test_started_when_not_started():
    """prepared is False before prepare()."""
    resp = StreamResponse()
    assert not resp.prepared
@asyncio.coroutine
def test_started_when_started():
    """prepared is True after prepare()."""
    resp = StreamResponse()
    yield from resp.prepare(make_request('GET', '/'))
    assert resp.prepared
@asyncio.coroutine
def test_drain_before_start():
    """drain() before prepare() is an assertion error."""
    resp = StreamResponse()
    with pytest.raises(AssertionError):
        yield from resp.drain()
@asyncio.coroutine
def test_changing_status_after_prepare_raises():
    """set_status() after prepare() is an assertion error."""
    resp = StreamResponse()
    yield from resp.prepare(make_request('GET', '/'))
    with pytest.raises(AssertionError):
        resp.set_status(400)
def test_nonstr_text_in_ctor():
    """text= must be str, not bytes."""
    with pytest.raises(TypeError):
        Response(text=b'data')
def test_text_in_ctor_with_content_type():
    """text= combines with an explicit content_type argument."""
    resp = Response(text='data', content_type='text/html')
    assert 'data' == resp.text
    assert 'text/html' == resp.content_type
def test_text_in_ctor_with_content_type_header():
    """The charset from a Content-Type header drives the text encoding."""
    resp = Response(text='текст',
                    headers={'Content-Type': 'text/html; charset=koi8-r'})
    assert 'текст'.encode('koi8-r') == resp.body
    assert 'text/html' == resp.content_type
    assert 'koi8-r' == resp.charset
def test_text_in_ctor_with_content_type_header_multidict():
    """Same as above when the headers come in as a CIMultiDict."""
    headers = CIMultiDict({'Content-Type': 'text/html; charset=koi8-r'})
    resp = Response(text='текст',
                    headers=headers)
    assert 'текст'.encode('koi8-r') == resp.body
    assert 'text/html' == resp.content_type
    assert 'koi8-r' == resp.charset
def test_body_in_ctor_with_content_type_header_multidict():
    """A bytes body is stored as-is; the header supplies type and charset."""
    headers = CIMultiDict({'Content-Type': 'text/html; charset=koi8-r'})
    resp = Response(body='текст'.encode('koi8-r'),
                    headers=headers)
    assert 'текст'.encode('koi8-r') == resp.body
    assert 'text/html' == resp.content_type
    assert 'koi8-r' == resp.charset
def test_text_with_empty_payload():
    """Without a payload both body and text are None."""
    resp = Response(status=200)
    assert resp.body is None
    assert resp.text is None
def test_response_with_content_length_header_without_body():
    """An explicit Content-Length header is honoured with no body."""
    resp = Response(headers={'Content-Length': 123})
    assert resp.content_length == 123
class TestJSONResponse:
    """Tests for the json_response() helper."""
    def test_content_type_is_application_json_by_default(self):
        """Default content type is application/json."""
        resp = json_response('')
        assert 'application/json' == resp.content_type
    def test_passing_text_only(self):
        """Pre-serialized JSON may be passed via text=."""
        resp = json_response(text=json.dumps('jaysawn'))
        assert resp.text == json.dumps('jaysawn')
    def test_data_and_text_raises_value_error(self):
        """data= and text= are mutually exclusive."""
        with pytest.raises(ValueError) as excinfo:
            json_response(data='foo', text='bar')
        expected_message = (
            'only one of data, text, or body should be specified'
        )
        assert expected_message == excinfo.value.args[0]
    def test_data_and_body_raises_value_error(self):
        """data= and body= are mutually exclusive."""
        with pytest.raises(ValueError) as excinfo:
            json_response(data='foo', body=b'bar')
        expected_message = (
            'only one of data, text, or body should be specified'
        )
        assert expected_message == excinfo.value.args[0]
    def test_text_is_json_encoded(self):
        """data= is serialized with json.dumps."""
        resp = json_response({'foo': 42})
        assert json.dumps({'foo': 42}) == resp.text
    def test_content_type_is_overrideable(self):
        """content_type= overrides the application/json default."""
        resp = json_response({'foo': 42},
                             content_type='application/vnd.json+api')
        assert 'application/vnd.json+api' == resp.content_type
| {
"content_hash": "dde7d7b3007b3a7b0e436bb4344c1d73",
"timestamp": "",
"source": "github",
"line_count": 1213,
"max_line_length": 79,
"avg_line_length": 27.39653751030503,
"alnum_prop": 0.6554525758305247,
"repo_name": "Eyepea/aiohttp",
"id": "72c0cc754a6d8ccbbfd2be9c3e8887b28459236a",
"size": "33287",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_web_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1738"
},
{
"name": "PowerShell",
"bytes": "3361"
},
{
"name": "Python",
"bytes": "935198"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, redirect, get_object_or_404
from .models import Article, Comment, Poll, NewUser
from .forms import CommmentForm, LoginForm, RegisterForm, SetInfoForm, SearchForm
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import authenticate, login, logout
from django.http import JsonResponse
from django.views.decorators.cache import cache_page
import markdown2, urlparse
def index(request):
    """Render the home page: articles ordered by time plus a login form."""
    articles = Article.objects.query_by_time()
    return render(request, 'home_page.html', {
        'latest_article_list': articles,
        'loginform': LoginForm(),
    })
def article(request, article_id):
    """Render a single article page.

    The article body (stored as Markdown) is converted to HTML; the page
    also carries a comment form, a login form and the article's comments.
    Unknown ids yield a 404 via get_object_or_404.
    """
    article = get_object_or_404(Article, id=article_id)
    rendered = markdown2.markdown(
        article.content,
        extras=["code-friendly", "fenced-code-blocks", "header-ids", "toc",
                "metadata"])
    context = {
        'article': article,
        'loginform': LoginForm(),
        'commentform': CommmentForm(),
        'content': rendered,
        # Note: the bound .all method is passed (not called); the template
        # invokes it when iterating.
        'comments': article.comment_set.all,
    }
    return render(request, 'article_page.html', context)
# product list
def product_list_main(request):
    """Render the product listing landing page."""
    return render(request, 'product_index.html')
@login_required
def comment(request, article_id):
    """Post a comment on an article and bump its comment counter.

    Redirects back to the article page whether or not the form validates
    (an invalid form simply drops the comment).
    """
    form = CommmentForm(request.POST)
    url = urlparse.urljoin('/focus/', article_id)
    if form.is_valid():
        new_comment = Comment(content=form.cleaned_data['comment'],
                              article_id=article_id)
        new_comment.user = request.user
        new_comment.save()
        article = Article.objects.get(id=article_id)
        article.comment_num += 1
        # Bug fix: the incremented counter was never persisted before.
        article.save()
    return redirect(url)
@login_required
def get_keep(request, article_id):
    """Bookmark ("keep") an article for the logged-in user.

    A second keep of the same article is a no-op that returns to the
    article page; a first keep links the user, bumps the counter and
    returns to the home page.
    """
    logged_user = request.user
    article = Article.objects.get(id=article_id)
    if article in logged_user.article_set.all():
        return redirect(urlparse.urljoin('/focus/', article_id))
    article.user.add(logged_user)  # m2m link user <-> article
    article.keep_num += 1
    article.save()
    return redirect('/focus/')
@login_required
def get_poll_article(request, article_id):
    """Register a poll (upvote) by the logged-in user on an article.

    Each user may poll an article at most once; a repeat attempt just
    redirects back to the article page without changing anything.
    """
    logged_user = request.user
    article = Article.objects.get(id=article_id)
    # Model equality compares by primary key, so this matches the original
    # "collect poll.article into a list and test membership" logic.
    already_polled = any(poll.article == article
                         for poll in logged_user.poll_set.all())
    if already_polled:
        return redirect(urlparse.urljoin('/focus/', article_id))
    article.poll_num += 1
    article.save()
    Poll(user=logged_user, article=article).save()
    # Removed an unused local (`data = {}`) left over from a JSON variant.
    return redirect('/focus/')
def log_in(request):
    """Log a user in.

    GET renders the empty login form.  POST authenticates the submitted
    credentials and redirects back to the originating page on success,
    re-rendering the form with an error otherwise.
    """
    if request.method == 'GET':
        return render(request, 'login.html', {'form': LoginForm()})
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if not form.is_valid():
            return render(request, 'login.html', {'form': form})
        user = authenticate(username=form.cleaned_data['uid'],
                            password=form.cleaned_data['pwd'])
        if user is None:
            return render(request, 'login.html',
                          {'form': form,
                           'error': "password or username is not ture!"})
        login(request, user)
        return redirect(request.POST.get('source_url', '/focus'))
@login_required
def log_out(request):
    """Log the current user out and return to the originating page."""
    destination = request.POST.get('source_url', '/focus/')
    logout(request)
    return redirect(destination)
def register(request):
    """Register a new user.

    GET renders the empty form.  POST serves two flows: an AJAX
    availability probe (``raw_username`` present) that only reports whether
    the name is taken, and the real submission, which validates the form,
    checks that the two passwords match and creates the account.
    """
    error1 = "this name is already exist"
    valid = "this name is valid"
    if request.method == 'GET':
        form = RegisterForm()
        return render(request, 'register.html', {'form': form})
    if request.method == 'POST':
        form = RegisterForm(request.POST)
        # The random string is a "parameter absent" sentinel: if the client
        # sent raw_username at all, this is the AJAX availability check.
        if request.POST.get('raw_username', 'erjgiqfv240hqp5668ej23foi') != 'erjgiqfv240hqp5668ej23foi': # if ajax
            try:
                user = NewUser.objects.get(username=request.POST.get('raw_username', ''))
            except ObjectDoesNotExist:
                return render(request, 'register.html', {'form': form, 'msg': valid})
            else:
                return render(request, 'register.html', {'form': form, 'msg': error1})
        else:
            if form.is_valid():
                username = form.cleaned_data['username']
                email = form.cleaned_data['email']
                password1 = form.cleaned_data['password1']
                password2 = form.cleaned_data['password2']
                if password1 != password2:
                    return render(request, 'register.html', {'form': form, 'msg': "two password is not equal"})
                else:
                    # NOTE(review): the raw password is assigned to the model
                    # field directly — confirm NewUser hashes it (set_password
                    # or equivalent); otherwise this stores plaintext.
                    user = NewUser(username=username, email=email, password=password1)
                    user.save()
                    # return render(request, 'login.html', {'success': "you have successfully registered!"})
                    return redirect('/focus/login')
            else:
                return render(request, 'register.html', {'form': form})
| {
"content_hash": "1277a04c81224ea8a60132eace8fe94f",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 109,
"avg_line_length": 32.16339869281046,
"alnum_prop": 0.7037187563503353,
"repo_name": "polegithub/shopping_web_python",
"id": "714bcd7e705d8b1393c26e03617f2c624405ce36",
"size": "4921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shopping_web/focus/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18243"
},
{
"name": "HTML",
"bytes": "69378"
},
{
"name": "JavaScript",
"bytes": "2301"
},
{
"name": "Python",
"bytes": "22098"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from django.apps import AppConfig
class PollsConfig(AppConfig):
    """Django application configuration for the ``polls`` app."""
    name = "polls"
| {
"content_hash": "98698484db874a7ac7694796b9192717",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 34,
"avg_line_length": 17.285714285714285,
"alnum_prop": 0.743801652892562,
"repo_name": "rochacbruno/dynaconf",
"id": "654d697e2c39c6113b53ec9a9baf18fcfa492a10",
"size": "121",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "example/django_pure/polls/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2867"
},
{
"name": "Makefile",
"bytes": "11505"
},
{
"name": "Python",
"bytes": "1438471"
},
{
"name": "Shell",
"bytes": "14740"
}
],
"symlink_target": ""
} |
import logging

# Log INFO and above to a file.
logging.basicConfig(filename="sample.log", level=logging.INFO)

log = logging.getLogger("ex")

try:
    # Bug fix: the original raised ``RunTimeError`` — a NameError, since the
    # builtin is ``RuntimeError`` — and used the Python-2-only
    # ``except Exception, err`` syntax.
    raise RuntimeError
except Exception:
    # Logger.exception records at ERROR level and appends the traceback.
    log.exception("Error!")
| {
"content_hash": "33d483d9d29b443a6c75a3ec62e163ef",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 62,
"avg_line_length": 21,
"alnum_prop": 0.7407407407407407,
"repo_name": "talapus/Ophidian",
"id": "26bbcfa89b6aba11db986e5b35998ff72f8c1839",
"size": "189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Logging/log_traceback.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "154649"
},
{
"name": "JavaScript",
"bytes": "3364"
},
{
"name": "Python",
"bytes": "314611"
},
{
"name": "Shell",
"bytes": "16809"
}
],
"symlink_target": ""
} |
import os
import unittest
import mock
import consul
from rpaas import consul_manager, nginx
class ConsulManagerTestCase(unittest.TestCase):
    def setUp(self):
        """Point the manager at a local Consul and wipe the test KV prefix.

        A sentinel key ``safe_key`` is written so tearDown can verify the
        code under test never deletes keys outside its own scope.
        """
        self.master_token = "rpaas-test"
        os.environ.setdefault("RPAAS_SERVICE_NAME", "test-suite-rpaas")
        os.environ.setdefault("CONSUL_HOST", "127.0.0.1")
        os.environ.setdefault("CONSUL_TOKEN", self.master_token)
        self.consul = consul.Consul(token=self.master_token)
        self.consul.kv.delete("test-suite-rpaas", recurse=True)
        self.consul.kv.put("test-suite-rpaas/myrpaas/safe_key", "x")
        self.ignore_safe_key = False
        self._remove_tokens()
        self.manager = consul_manager.ConsulManager(os.environ)
    def tearDown(self):
        """Verify the sentinel key survived, unless a test opted out."""
        if not self.ignore_safe_key:
            value = self.consul.kv.get("test-suite-rpaas/myrpaas/safe_key")[1]['Value']
            self.assertEqual(value, "x")
    def _remove_tokens(self):
        """Drop every ACL token except the master and anonymous ones."""
        for token in self.consul.acl.list():
            if token["ID"] not in (self.master_token, "anonymous"):
                self.consul.acl.destroy(token["ID"])
    def test_generate_token(self):
        """generate_token creates a client ACL scoped to the instance."""
        token = self.manager.generate_token("myrpaas")
        acl = self.consul.acl.info(token)
        expected_rules = consul_manager.ACL_TEMPLATE.format(service_name="test-suite-rpaas",
                                                            instance_name="myrpaas")
        self.assertEqual("test-suite-rpaas/myrpaas/token", acl["Name"])
        self.assertEqual(expected_rules, acl["Rules"])
        self.assertEqual("client", acl["Type"])
    def test_destroy_token(self):
        """destroy_token removes the ACL from Consul."""
        token = self.manager.generate_token("myrpaas")
        self.manager.destroy_token(token)
        self.assertIsNone(self.consul.acl.info(token))
    def test_destroy_instance(self):
        """destroy_instance wipes the whole instance KV subtree."""
        self.manager.write_healthcheck("myrpaas")
        self.manager.write_location("myrpaas", "/", destination="http://myapp.tsuru.io")
        self.manager.destroy_instance("myrpaas")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/healthcheck")
        self.assertIsNone(item[1])
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/locations/ROOT")
        self.assertIsNone(item[1])
        # destroy_instance legitimately removes safe_key too.
        self.ignore_safe_key = True
    def test_remove_node(self):
        """remove_node clears only the given node's status and certs."""
        self.consul.kv.put("test-suite-rpaas/myrpaas/status/test-server", "service OK")
        self.consul.kv.put("test-suite-rpaas/myrpaas/status/test-server-2", "service OK")
        self.consul.kv.put("test-suite-rpaas/myrpaas/ssl/cert", "cert")
        self.consul.kv.put("test-suite-rpaas/myrpaas/ssl/test-server-id/cert", "cert")
        self.consul.kv.put("test-suite-rpaas/myrpaas/ssl/test-server-2-id/cert", "cert")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/status/test-server")
        self.assertEqual(item[1]["Value"], "service OK")
        self.manager.remove_node("myrpaas", "test-server", "test-server-id")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/status/test-server")
        self.assertIsNone(item[1])
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/ssl/test-server-id/cert")
        self.assertIsNone(item[1])
        # The sibling node and the shared instance cert must be untouched.
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/ssl/test-server-2-id/cert")
        self.assertEqual(item[1]["Value"], "cert")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/ssl/cert")
        self.assertEqual(item[1]["Value"], "cert")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/status/test-server-2")
        self.assertEqual(item[1]["Value"], "service OK")
    def test_node_hostname(self):
        """node_hostname resolves a known address to its Consul node name."""
        host = '127.0.0.1'
        node_hostname = self.manager.node_hostname(host)
        self.assertEqual('rpaas-test', node_hostname)
    def test_node_hostname_not_found(self):
        """An unknown address resolves to None."""
        host = mock.Mock()
        host.dns_name = '10.0.0.1'
        node_hostname = self.manager.node_hostname(host)
        self.assertEqual(None, node_hostname)
    def test_node_status(self):
        """node_status maps each node name to its stored status string."""
        self.consul.kv.put("test-suite-rpaas/myrpaas/status/my-server-1", "service OK")
        self.consul.kv.put("test-suite-rpaas/myrpaas/status/my-server-2", "service DEAD")
        node_status = self.manager.node_status("myrpaas")
        self.assertDictEqual(node_status, {'my-server-1': 'service OK', 'my-server-2': 'service DEAD'})
    def test_write_healthcheck(self):
        """write_healthcheck stores "true" under the healthcheck key."""
        self.manager.write_healthcheck("myrpaas")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/healthcheck")
        self.assertEqual("true", item[1]["Value"])
    def test_remove_healthcheck(self):
        """remove_healthcheck deletes the healthcheck key."""
        self.manager.write_healthcheck("myrpaas")
        self.manager.remove_healthcheck("myrpaas")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/healthcheck")
        self.assertIsNone(item[1])
    def test_write_location_root(self):
        """The root path is stored under key ROOT via the default template."""
        self.manager.write_location("myrpaas", "/", destination="http://myapp.tsuru.io")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/locations/ROOT")
        expected = nginx.NGINX_LOCATION_TEMPLATE_DEFAULT.format(path="/",
                                                                host="http://myapp.tsuru.io",
                                                                upstream="myapp.tsuru.io",
                                                                https_only='')
        self.assertEqual(expected, item[1]["Value"])
        servers = self.manager.list_upstream("myrpaas", "myapp.tsuru.io")
        self.assertEqual(set(["myapp.tsuru.io"]), servers)
    def test_write_location_root_with_https(self):
        """https_only=True injects the NGINX_HTTPS_ONLY snippet."""
        self.manager.write_location("myrpaas", "/", destination="http://myapp.tsuru.io", https_only=True)
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/locations/ROOT")
        expected = nginx.NGINX_LOCATION_TEMPLATE_DEFAULT.format(path="/",
                                                                host="http://myapp.tsuru.io",
                                                                upstream="myapp.tsuru.io",
                                                                https_only=nginx.NGINX_HTTPS_ONLY)
        self.assertEqual(expected, item[1]["Value"])
        servers = self.manager.list_upstream("myrpaas", "myapp.tsuru.io")
        self.assertEqual(set(["myapp.tsuru.io"]), servers)
    def test_write_location_root_bind_mode(self):
        """bind_mode=True routes through the shared rpaas_default_upstream."""
        self.manager.write_location("myrpaas", "/", destination="http://myapp.tsuru.io", bind_mode=True)
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/locations/ROOT")
        expected = nginx.NGINX_LOCATION_TEMPLATE_DEFAULT.format(path="/",
                                                                host="http://myapp.tsuru.io",
                                                                upstream="rpaas_default_upstream",
                                                                https_only='')
        self.assertEqual(expected, item[1]["Value"])
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/upstream/rpaas_default_upstream")
        servers = self.manager.list_upstream("myrpaas", "rpaas_default_upstream")
        self.assertEqual(set(["myapp.tsuru.io"]), servers)
    def test_write_location_root_router_mode(self):
        """router_mode=True uses the router template and writes no upstream."""
        self.manager.write_location("myrpaas", "/", destination="router-myrpaas", router_mode=True)
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/locations/ROOT")
        expected = nginx.NGINX_LOCATION_TEMPLATE_ROUTER.format(path="/",
                                                               host="router-myrpaas",
                                                               upstream="router-myrpaas",
                                                               https_only='')
        self.assertEqual(expected, item[1]["Value"])
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/upstream/router-myrpaas")
        self.assertEqual(None, item[1])
    def test_write_location_non_root(self):
        """Non-root paths are slash-escaped into the KV key name."""
        self.manager.write_location("myrpaas", "/admin/app_sites/",
                                    destination="http://myapp.tsuru.io")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/locations/___admin___app_sites___")
        expected = nginx.NGINX_LOCATION_TEMPLATE_DEFAULT.format(path="/admin/app_sites/",
                                                                host="http://myapp.tsuru.io",
                                                                upstream="myapp.tsuru.io",
                                                                https_only='')
        self.assertEqual(expected, item[1]["Value"])
    def test_write_location_non_root_with_https(self):
        """Slash-escaped key plus the HTTPS-only snippet."""
        self.manager.write_location("myrpaas", "/admin/app_sites/",
                                    destination="http://myapp.tsuru.io", https_only=True)
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/locations/___admin___app_sites___")
        expected = nginx.NGINX_LOCATION_TEMPLATE_DEFAULT.format(path="/admin/app_sites/",
                                                                host="http://myapp.tsuru.io",
                                                                upstream="myapp.tsuru.io",
                                                                https_only=nginx.NGINX_HTTPS_ONLY)
        self.assertEqual(expected, item[1]["Value"])
    def test_write_location_content(self):
        """Explicit content= overrides the rendered template entirely."""
        self.manager.write_location("myrpaas", "/admin/app_sites/",
                                    destination="http://myapp.tsuru.io",
                                    content="something nice")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/locations/___admin___app_sites___")
        self.assertEqual("something nice", item[1]["Value"])
    def test_write_location_content_utf8(self):
        """Non-ASCII content round-trips through the KV store."""
        self.manager.write_location("myrpaas", "/admin/app_sites/",
                                    destination="http://myapp.tsuru.io",
                                    content='my content ☺')
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/locations/___admin___app_sites___")
        self.assertEqual('my content ☺', item[1]["Value"])
    def test_write_location_content_strip(self):
        """Surrounding whitespace in content= is stripped before storing."""
        self.manager.write_location("myrpaas", "/admin/app_sites/",
                                    destination="http://myapp.tsuru.io",
                                    content=" something nice \n")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/locations/___admin___app_sites___")
        self.assertEqual("something nice", item[1]["Value"])
    def test_write_block_http_content(self):
        """http blocks are stripped and wrapped in begin/end markers."""
        self.manager.write_block("myrpaas", "http",
                                 content=" something nice in http \n")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/blocks/http/ROOT")
        expected_block = ("## Begin custom RpaaS http block ##\n"
                          "something nice in http"
                          "\n## End custom RpaaS http block ##")
        self.assertEqual(expected_block, item[1]["Value"])
    def test_write_block_server_content(self):
        """server blocks get the same strip-and-wrap treatment."""
        self.manager.write_block("myrpaas", "server",
                                 content=" something nice in server \n")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/blocks/server/ROOT")
        expected_block = ("## Begin custom RpaaS server block ##\n"
                          "something nice in server"
                          "\n## End custom RpaaS server block ##")
        self.assertEqual(expected_block, item[1]["Value"])
    def test_get_certificate(self):
        """get_certificate returns the instance-wide cert/key pair."""
        origin_cert, origin_key = "cert", "key"
        self.consul.kv.put("test-suite-rpaas/myrpaas/ssl/cert", origin_cert)
        self.consul.kv.put("test-suite-rpaas/myrpaas/ssl/key", origin_key)
        cert, key = self.manager.get_certificate("myrpaas")
        self.assertEqual(origin_cert, cert)
        self.assertEqual(origin_key, key)
    def test_get_host_certificate(self):
        """A host argument selects that host's cert/key under ssl/<host>/."""
        origin_cert, origin_key = "cert", "key"
        self.consul.kv.put("test-suite-rpaas/myrpaas/ssl/host-a/cert", origin_cert)
        self.consul.kv.put("test-suite-rpaas/myrpaas/ssl/host-a/key", origin_key)
        cert, key = self.manager.get_certificate("myrpaas", "host-a")
        self.assertEqual(origin_cert, cert)
        self.assertEqual(origin_key, key)
    def test_get_certificate_undefined(self):
        """A missing certificate raises CertificateNotFoundError."""
        with self.assertRaises(consul_manager.CertificateNotFoundError):
            self.manager.get_certificate("myrpaas")
    def test_delete_certificate(self):
        """delete_certificate removes the stored pair."""
        self.manager.set_certificate("myrpaas", "certificate", "key")
        self.manager.delete_certificate("myrpaas")
        with self.assertRaises(consul_manager.CertificateNotFoundError):
            self.manager.get_certificate("myrpaas")
    def test_delete_certificate_unknow_certificate(self):
        """Deleting another instance's cert leaves this one intact."""
        self.manager.set_certificate("myrpaas", "cert", "key")
        self.manager.delete_certificate("myrpaas2")
        cert, key = self.manager.get_certificate("myrpaas")
        self.assertEqual("cert", cert)
        self.assertEqual("key", key)
    def test_set_certificate(self):
        """set_certificate stores cert and key under the ssl/ prefix."""
        self.manager.set_certificate("myrpaas", "certificate", "key")
        cert_item = self.consul.kv.get("test-suite-rpaas/myrpaas/ssl/cert")
        self.assertEqual("certificate", cert_item[1]["Value"])
        key_item = self.consul.kv.get("test-suite-rpaas/myrpaas/ssl/key")
        self.assertEqual("key", key_item[1]["Value"])
    def test_set_host_certificate(self):
        """A host argument stores the pair under ssl/<host>/."""
        self.manager.set_certificate("myrpaas", "certificate", "key", "host-b")
        cert_item = self.consul.kv.get("test-suite-rpaas/myrpaas/ssl/host-b/cert")
        self.assertEqual("certificate", cert_item[1]["Value"])
        key_item = self.consul.kv.get("test-suite-rpaas/myrpaas/ssl/host-b/key")
        self.assertEqual("key", key_item[1]["Value"])
    def test_set_certificate_crlf(self):
        """CRLF line endings are normalized to LF before storing."""
        self.manager.set_certificate("myrpaas", "certificate\r\nvalid\r\n", "key\r\nvalid\r\n\r\n")
        cert_item = self.consul.kv.get("test-suite-rpaas/myrpaas/ssl/cert")
        self.assertEqual("certificate\nvalid\n", cert_item[1]["Value"])
        key_item = self.consul.kv.get("test-suite-rpaas/myrpaas/ssl/key")
        self.assertEqual("key\nvalid\n\n", key_item[1]["Value"])
    def test_remove_location_root(self):
        """remove_location deletes the ROOT location key."""
        self.manager.write_location("myrpaas", "/",
                                    destination="http://myapp.tsuru.io",
                                    content="something nice")
        self.manager.remove_location("myrpaas", "/")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/locations/ROOT")
        self.assertIsNone(item[1])
    def test_remove_location_non_root(self):
        """remove_location deletes an escaped non-root location key."""
        self.manager.write_location("myrpaas", "/admin/app_sites/",
                                    destination="http://myapp.tsuru.io",
                                    content="something nice")
        self.manager.remove_location("myrpaas", "/admin/app_sites/")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/locations/___admin___app_sites___")
        self.assertIsNone(item[1])
    def test_remove_block_server_root(self):
        """Removing a server block leaves empty begin/end markers behind."""
        self.manager.write_block("myrpaas", "server",
                                 "something nice in server")
        self.manager.remove_block("myrpaas", "server")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/blocks/server/ROOT")
        empty_block_value = '## Begin custom RpaaS server block ##\n## End custom RpaaS server block ##'
        self.assertEqual(item[1]['Value'], empty_block_value)
    def test_remove_block_http_root(self):
        """Removing an http block leaves empty begin/end markers behind."""
        self.manager.write_block("myrpaas", "http", "something nice in http")
        self.manager.remove_block("myrpaas", "http")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/blocks/http/ROOT")
        empty_block_value = '## Begin custom RpaaS http block ##\n## End custom RpaaS http block ##'
        self.assertEqual(item[1]['Value'], empty_block_value)
    def test_list_no_block(self):
        """No blocks written -> empty list."""
        items = self.manager.list_blocks("myrpaas")
        self.assertEqual(items, [])
    def test_list_one_block(self):
        """A single written block is listed with its unwrapped content."""
        self.manager.write_block("myrpaas", "server",
                                 "something nice in server")
        items = self.manager.list_blocks("myrpaas")
        self.assertEqual(1, len(items))
        self.assertEqual("something nice in server", items[0]["content"])
    def test_list_block(self):
        """Multiple blocks are listed; http sorts before server here."""
        self.manager.write_block("myrpaas", "server",
                                 "something nice in server")
        self.manager.write_block("myrpaas", "http", "something nice in http")
        items = self.manager.list_blocks("myrpaas")
        self.assertEqual(2, len(items))
        self.assertEqual("something nice in http", items[0]["content"])
        self.assertEqual("something nice in server", items[1]["content"])
    def test_add_and_remove_block_return_empty(self):
        """A removed block no longer appears in list_blocks."""
        items = self.manager.list_blocks("myrpaas")
        self.assertEqual(items, [])
        self.manager.write_block("myrpaas", "server",
                                 "something nice in server")
        items = self.manager.list_blocks("myrpaas")
        self.assertEqual(1, len(items))
        self.assertEqual("something nice in server", items[0]["content"])
        self.manager.remove_block("myrpaas", "server")
        items = self.manager.list_blocks("myrpaas")
        self.assertEqual(items, [])
    def test_write_lua_content(self):
        """Lua modules are stripped and wrapped in lua begin/end markers."""
        self.manager.write_lua(
            "myrpaas", "some_module", "server",
            content=" something nice in lua \n"
        )
        expected_lua = (
            "-- Begin custom RpaaS some_module lua module --\n"
            "something nice in lua"
            "\n-- End custom RpaaS some_module lua module --"
        )
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/lua_module/server/some_module")
        self.assertEqual(expected_lua, item[1]["Value"])
    def test_remove_lua_module(self):
        """Removing a lua module leaves empty markers behind."""
        self.manager.write_lua("myrpaas", "some_module", "server", "something nice in server")
        self.manager.remove_lua("myrpaas", "some_module", "server")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/lua_module/server/some_module")
        # The triple-quoted literal deliberately keeps this file's 8-space
        # indent and a literal "\n" escape in the expected value.
        empty_block_value = """-- Begin custom RpaaS some_module lua module --
        \n-- End custom RpaaS some_module lua module --"""
        self.assertEqual(item[1]['Value'], empty_block_value)
    def test_remove_lua_module_non_existent_block(self):
        """Removing a never-written lua module still writes empty markers."""
        self.manager.remove_lua("myrpaas", "some_module", "server")
        item = self.consul.kv.get("test-suite-rpaas/myrpaas/lua_module/server/some_module")
        empty_block_value = """-- Begin custom RpaaS some_module lua module --
        \n-- End custom RpaaS some_module lua module --"""
        self.assertEqual(item[1]['Value'], empty_block_value)
def test_upstream_add_to_empty_upstrem(self):
self.manager.add_server_upstream("myrpaas", "upstream1", "server1")
servers = self.manager.list_upstream("myrpaas", "upstream1")
self.assertEqual(set(["server1"]), servers)
item = self.consul.kv.get("test-suite-rpaas/myrpaas/upstream/upstream1")
block = '## Begin custom RpaaS upstream block ##\nserver1\n## End custom RpaaS upstream block ##'
self.assertEqual(item[1]['Value'], block)
def test_upstream_add_existing_server_to_upstream(self):
self.manager.add_server_upstream("myrpaas", "upstream1", "server1")
self.manager.add_server_upstream("myrpaas", "upstream1", "server1")
servers = self.manager.list_upstream("myrpaas", "upstream1")
self.assertEqual(set(["server1"]), servers)
item = self.consul.kv.get("test-suite-rpaas/myrpaas/upstream/upstream1")
block = '## Begin custom RpaaS upstream block ##\nserver1\n## End custom RpaaS upstream block ##'
self.assertEqual(item[1]['Value'], block)
def test_upstream_add_bulk_to_existing_upstream(self):
self.manager.add_server_upstream("myrpaas", "upstream1", "server1")
self.manager.add_server_upstream("myrpaas", "upstream1", ["server1", "server2", "server3"])
servers = self.manager.list_upstream("myrpaas", "upstream1")
self.assertEqual(set(["server1", "server2", "server3"]), servers)
def test_upstream_add_bulk_urls_to_existing_upstream(self):
self.manager.add_server_upstream("myrpaas", "upstream1", "http://server1:123")
self.manager.add_server_upstream("myrpaas", "upstream1", ["http://server1:123", "http://server2:456",
"http://server3:789"])
servers = self.manager.list_upstream("myrpaas", "upstream1")
self.assertEqual(set(["server3:789", "server2:456", "server1:123"]), servers)
def test_upstream_remove_server_from_upstream(self):
    """Removing one server keeps the remaining servers in the block."""
    for server in ("server1", "server2", "server3"):
        self.manager.add_server_upstream("myrpaas", "upstream1", server)
    self.manager.remove_server_upstream("myrpaas", "upstream1", "server2")
    current = self.manager.list_upstream("myrpaas", "upstream1")
    self.assertEqual(current, {"server1", "server3"})
    entry = self.consul.kv.get("test-suite-rpaas/myrpaas/upstream/upstream1")
    expected = ("## Begin custom RpaaS upstream block ##\n"
                "server1,server3\n"
                "## End custom RpaaS upstream block ##")
    self.assertEqual(entry[1]["Value"], expected)
def test_upstream_remove_delete_empty_upstream_after_last_server_removed(self):
    """Removing the last server leaves an empty upstream block in the KV."""
    self.manager.add_server_upstream("myrpaas", "upstream1", "server1")
    self.manager.remove_server_upstream("myrpaas", "upstream1", "server1")
    current = self.manager.list_upstream("myrpaas", "upstream1")
    self.assertEqual(current, set())
    entry = self.consul.kv.get("test-suite-rpaas/myrpaas/upstream/upstream1")
    expected = ("## Begin custom RpaaS upstream block ##\n"
                "## End custom RpaaS upstream block ##")
    self.assertEqual(entry[1]["Value"], expected)
def test_upstream_remove_delete_empty_upstream_and_create_new_one_same_item(self):
    """An upstream emptied by removal can be repopulated in place."""
    self.manager.add_server_upstream("myrpaas", "upstream1", "server1")
    self.manager.remove_server_upstream("myrpaas", "upstream1", "server1")
    self.manager.add_server_upstream("myrpaas", "upstream1", "server1")
    current = self.manager.list_upstream("myrpaas", "upstream1")
    self.assertEqual(current, {"server1"})
    entry = self.consul.kv.get("test-suite-rpaas/myrpaas/upstream/upstream1")
    expected = ("## Begin custom RpaaS upstream block ##\n"
                "server1\n"
                "## End custom RpaaS upstream block ##")
    self.assertEqual(entry[1]["Value"], expected)
def test_upstream_remove_server_not_found_on_upstream(self):
    """Removing a server not in the upstream leaves it unchanged."""
    self.manager.add_server_upstream("myrpaas", "upstream1", "server1")
    self.manager.remove_server_upstream("myrpaas", "upstream1", "server2")
    current = self.manager.list_upstream("myrpaas", "upstream1")
    self.assertEqual(current, {"server1"})
    entry = self.consul.kv.get("test-suite-rpaas/myrpaas/upstream/upstream1")
    expected = ("## Begin custom RpaaS upstream block ##\n"
                "server1\n"
                "## End custom RpaaS upstream block ##")
    self.assertEqual(entry[1]["Value"], expected)
def test_upstream_remove_bulk_to_existing_upstream(self):
    """Bulk removal drops listed members and ignores unknown servers."""
    self.manager.add_server_upstream("myrpaas", "upstream1",
                                     ["server1", "server2", "server3"])
    self.manager.remove_server_upstream("myrpaas", "upstream1",
                                        ["server2", "server3", "server4"])
    self.assertEqual(self.manager.list_upstream("myrpaas", "upstream1"),
                     {"server1"})
def test_upstream_remove_bulk_urls_on_existing_upstream(self):
    """Bulk URL removal matches stored host:port entries."""
    self.manager.add_server_upstream(
        "myrpaas", "upstream1",
        ["http://server1:123", "http://server2:456", "http://server3:789"])
    self.manager.remove_server_upstream(
        "myrpaas", "upstream1",
        ["http://server2:456", "http://server3:789", "http://server4:333"])
    self.assertEqual(self.manager.list_upstream("myrpaas", "upstream1"),
                     {"server1:123"})
def test_find_acl_networks_return_empty(self):
    """Looking up a source with no stored ACLs returns an empty list."""
    self.assertEqual(
        self.manager.find_acl_network("myrpaas", "10.0.0.1/32"), [])
def test_find_acl_networks_return_one_dst(self):
    """A single stored destination is returned for its source."""
    self.consul.kv.put("test-suite-rpaas/myrpaas/acl/10.0.0.1_32",
                       "192.168.0.0/24")
    found = self.manager.find_acl_network("myrpaas", "10.0.0.1/32")
    self.assertEqual(found, [{"source": "10.0.0.1/32",
                              "destination": ["192.168.0.0/24"]}])
def test_find_acl_networks_return_many_dst(self):
    """Only the requested source is returned, with all its destinations."""
    self.consul.kv.put("test-suite-rpaas/myrpaas/acl/10.0.0.1_32",
                       "192.168.0.0/24,10.0.0.0/24")
    self.consul.kv.put("test-suite-rpaas/myrpaas/acl/10.0.0.2_32",
                       "192.168.1.0/24,10.0.1.0/24")
    found = self.manager.find_acl_network("myrpaas", "10.0.0.1/32")
    self.assertEqual(found, [{"source": "10.0.0.1/32",
                              "destination": ["192.168.0.0/24",
                                              "10.0.0.0/24"]}])
def test_find_acl_networks_return_all_acls(self):
    """Without a source filter, every stored ACL entry is returned."""
    self.consul.kv.put("test-suite-rpaas/myrpaas/acl/10.0.0.1_32",
                       "192.168.0.0/24,10.0.0.0/24")
    self.consul.kv.put("test-suite-rpaas/myrpaas/acl/10.0.0.2_32",
                       "192.168.1.0/24,10.0.0.0/24")
    found = self.manager.find_acl_network("myrpaas")
    self.assertEqual(found,
                     [{"source": "10.0.0.1/32",
                       "destination": ["192.168.0.0/24", "10.0.0.0/24"]},
                      {"source": "10.0.0.2/32",
                       "destination": ["192.168.1.0/24", "10.0.0.0/24"]}])
def test_store_acl_network_successfully(self):
    """store_acl_network creates a new source entry."""
    self.assertEqual(self.manager.find_acl_network("myrpaas"), [])
    self.manager.store_acl_network("myrpaas", "10.0.0.1/32", "192.168.0.0/24")
    self.assertEqual(self.manager.find_acl_network("myrpaas"),
                     [{"source": "10.0.0.1/32",
                       "destination": ["192.168.0.0/24"]}])
def test_store_acl_network_already_exist_entry(self):
    """Storing an identical source/destination pair twice is idempotent."""
    self.assertEqual(self.manager.find_acl_network("myrpaas"), [])
    self.manager.store_acl_network("myrpaas", "10.0.0.1/32", "192.168.0.0/24")
    self.manager.store_acl_network("myrpaas", "10.0.0.1/32", "192.168.0.0/24")
    self.assertEqual(self.manager.find_acl_network("myrpaas"),
                     [{"source": "10.0.0.1/32",
                       "destination": ["192.168.0.0/24"]}])
def test_store_acl_network_append_to_already_exist_src_entry(self):
    """A new destination is appended to an existing source entry."""
    self.assertEqual(self.manager.find_acl_network("myrpaas"), [])
    self.manager.store_acl_network("myrpaas", "10.0.0.1/32", "192.168.0.0/24")
    self.manager.store_acl_network("myrpaas", "10.0.0.1/32", "192.168.1.0/24")
    self.assertEqual(self.manager.find_acl_network("myrpaas"),
                     [{"source": "10.0.0.1/32",
                       "destination": ["192.168.0.0/24", "192.168.1.0/24"]}])
def test_remove_acl_network_successfully(self):
    """remove_acl_network drops only the requested source entry."""
    self.assertEqual(self.manager.find_acl_network("myrpaas"), [])
    self.manager.store_acl_network("myrpaas", "10.0.0.1/32", "192.168.0.0/24")
    self.manager.store_acl_network("myrpaas", "10.0.0.2/32", "192.168.1.0/24")
    self.assertEqual(self.manager.find_acl_network("myrpaas"),
                     [{"source": "10.0.0.1/32",
                       "destination": ["192.168.0.0/24"]},
                      {"source": "10.0.0.2/32",
                       "destination": ["192.168.1.0/24"]}])
    self.manager.remove_acl_network("myrpaas", "10.0.0.1/32")
    self.assertEqual(self.manager.find_acl_network("myrpaas"),
                     [{"source": "10.0.0.2/32",
                       "destination": ["192.168.1.0/24"]}])
def test_swap_empty_instances_successfully(self):
    """Swapping two instances records each one as the other's swap peer."""
    self.manager.swap_instances("myrpaas-1", "myrpaas-2")
    for name, peer in (("myrpaas-1", "myrpaas-2"),
                       ("myrpaas-2", "myrpaas-1")):
        value = self.consul.kv.get("test-suite-rpaas/%s/swap" % name)[1]["Value"]
        self.assertEqual(value, peer)
def test_swap_already_swapped_instances_same_order_successfully(self):
    """Swapping the same pair again (same order) undoes the swap."""
    self.manager.swap_instances("myrpaas-1", "myrpaas-2")
    for name, peer in (("myrpaas-1", "myrpaas-2"),
                       ("myrpaas-2", "myrpaas-1")):
        value = self.consul.kv.get("test-suite-rpaas/%s/swap" % name)[1]["Value"]
        self.assertEqual(value, peer)
    # Second swap in the same order removes both swap markers.
    self.manager.swap_instances("myrpaas-1", "myrpaas-2")
    for name in ("myrpaas-1", "myrpaas-2"):
        self.assertIsNone(
            self.consul.kv.get("test-suite-rpaas/%s/swap" % name)[1])
def test_swap_already_swapped_instances_different_order_successfully(self):
    """Swapping the same pair again (reversed order) undoes the swap."""
    self.manager.swap_instances("myrpaas-1", "myrpaas-2")
    for name, peer in (("myrpaas-1", "myrpaas-2"),
                       ("myrpaas-2", "myrpaas-1")):
        value = self.consul.kv.get("test-suite-rpaas/%s/swap" % name)[1]["Value"]
        self.assertEqual(value, peer)
    # Second swap with the arguments reversed removes both swap markers.
    self.manager.swap_instances("myrpaas-2", "myrpaas-1")
    for name in ("myrpaas-1", "myrpaas-2"):
        self.assertIsNone(
            self.consul.kv.get("test-suite-rpaas/%s/swap" % name)[1])
def test_swap_already_swapped_instance_fail(self):
    """Swapping an already-swapped instance with a third one must fail."""
    self.manager.swap_instances("myrpaas-1", "myrpaas-2")
    with self.assertRaises(consul_manager.InstanceAlreadySwappedError):
        self.manager.swap_instances("myrpaas-1", "myrpaas-3")
def test_swap_already_swapped_instances_fail(self):
    """Cross-swapping members of two existing swap pairs must fail."""
    self.manager.swap_instances("myrpaas-1", "myrpaas-2")
    self.manager.swap_instances("myrpaas-3", "myrpaas-4")
    with self.assertRaises(consul_manager.InstanceAlreadySwappedError):
        self.manager.swap_instances("myrpaas-1", "myrpaas-3")
def test_swap_already_swapped_instance_with_not_swapped_fail(self):
    """Swapping a swapped instance with an unswapped one must fail."""
    self.manager.swap_instances("myrpaas-1", "myrpaas-2")
    with self.assertRaises(consul_manager.InstanceAlreadySwappedError):
        self.manager.swap_instances("myrpaas-1", "myrpaas-3")
| {
"content_hash": "65afd192d042eda1356b0f2759221481",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 113,
"avg_line_length": 56.98518518518519,
"alnum_prop": 0.6086052255297023,
"repo_name": "tsuru/rpaas",
"id": "cb1c9a2a301bac9478262423eb7ccf71ae93b25e",
"size": "30957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_consul_manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1723"
},
{
"name": "Procfile",
"bytes": "18"
},
{
"name": "Python",
"bytes": "541935"
},
{
"name": "Shell",
"bytes": "2261"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand, CommandError
from galleries.models import Gallery
class Command(BaseCommand):
    """Management command that generates square thumbnails for galleries.

    Usage: ``manage.py generate_thumbnails <gallery_id> [<gallery_id> ...]``

    Every image in each given gallery is center-cropped to a square and
    scaled down to ``settings.GALLERIES_THUMBNAIL_SIZE``; the result is
    saved next to the original as ``<name> thumbnail.jpg`` / ``.png``.

    Raises CommandError when a gallery id does not exist.
    """
    args = '<gallery_id>'
    help = 'Creates Thumbnails for a Gallery'

    def handle(self, *args, **options):
        for gallery_id in args:
            try:
                gallery = Gallery.objects.get(pk=int(gallery_id))
                from django.conf import settings
                from PIL import Image as Imager
                import os
                for image in gallery.image_set.all():
                    i = Imager.open(image.image)
                    file, ext = os.path.splitext(image.image.path)
                    width, height = i.size
                    # Compute a centered square crop box (crop the longer
                    # dimension symmetrically on both sides).
                    if width > height:
                        delta = width - height
                        left = int(delta / 2)
                        upper = 0
                        right = height + left
                        lower = height
                    else:
                        delta = height - width
                        left = 0
                        upper = int(delta / 2)
                        right = width
                        lower = width + upper
                    i = i.crop((left, upper, right, lower))
                    i.thumbnail(settings.GALLERIES_THUMBNAIL_SIZE,
                                Imager.ANTIALIAS)
                    # BUG FIX: the original read "if ext == in (...)", which
                    # is a SyntaxError; a membership test is just "in".
                    if ext in (".jpg", ".JPG", ".jpeg", ".JPEG"):
                        i.save(file + " thumbnail.jpg")
                    elif ext in (".png", ".PNG"):
                        i.save(file + " thumbnail.png")
            except Gallery.DoesNotExist:
                raise CommandError('Gallery "%s" does not exist' % gallery_id)
            self.stdout.write('Successfully created images for Gallery "%s"' % gallery_id)
| {
"content_hash": "3a03ac79097ab8fcb51cf5f1f1eae61d",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 90,
"avg_line_length": 40.5,
"alnum_prop": 0.46520763187429853,
"repo_name": "caa/django-galleries",
"id": "15ad8f79902e2779b0d012549f2fffddef3bf3be",
"size": "1782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "galleries/management/commands/generate_thumbnails.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "10118"
}
],
"symlink_target": ""
} |
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import shutil
import tempfile
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug import debug_data
from tensorflow.python.debug import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class SessionDebugTestBase(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""
@classmethod
def setUpClass(cls):
  """Set expected device/partition-graph counts from GPU availability."""
  gpu = test.is_gpu_available()
  cls._expected_partition_graph_count = 2 if gpu else 1
  cls._expected_num_devices = 2 if gpu else 1
  device_suffix = "gpu:0" if gpu else "cpu:0"
  cls._main_device = "/job:localhost/replica:0/task:0/" + device_suffix
@classmethod
def tearDownClass(cls):
  # No class-level cleanup is required; per-test cleanup is in tearDown().
  pass
def setUp(self):
  # Fresh temporary dump-root directory for each test case.
  self._dump_root = tempfile.mkdtemp()
def tearDown(self):
  # Reset the default graph so tests do not leak ops into each other.
  ops.reset_default_graph()
  # Tear down temporary dump directory.
  if os.path.isdir(self._dump_root):
    shutil.rmtree(self._dump_root)
def _debug_urls(self, run_number=None):
  """Return the debug URLs to watch; subclasses must override."""
  raise NotImplementedError(
      "_debug_urls() method is not implemented in the base test class.")
def _debug_dump_dir(self, run_number=None):
  """Return the dump directory for a run; subclasses must override."""
  raise NotImplementedError(
      "_debug_dump_dir() method is not implemented in the base test class.")
def _generate_dump_from_simple_addition_graph(self):
  """Run a small w = matmul(u, v) graph with debug watches; load the dump.

  Watches the /read tensors of variables u and v with DebugIdentity via
  file:// URLs under self._dump_root, runs w once, and loads the resulting
  dump together with the partition graphs.

  Returns:
    A SimpleAddResults namedtuple holding the initial values, the ops,
    their names, and the loaded DebugDumpDir.
  """
  with session.Session() as sess:
    u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
    v_init_val = np.array([[2.0], [-1.0]])
    # Use node names with overlapping namespace (i.e., parent directory) to
    # test concurrent, non-racing directory creation.
    u_name = "u"
    v_name = "v"
    w_name = "w"
    u_init = constant_op.constant(u_init_val, shape=[2, 2])
    u = variables.Variable(u_init, name=u_name)
    v_init = constant_op.constant(v_init_val, shape=[2, 1])
    v = variables.Variable(v_init, name=v_name)
    w = math_ops.matmul(u, v, name=w_name)
    u.initializer.run()
    v.initializer.run()
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_urls = "file://%s" % self._dump_root
    # Add debug tensor watch for u.
    debug_utils.add_debug_tensor_watch(
        run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
    # Add debug tensor watch for v.
    debug_utils.add_debug_tensor_watch(
        run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
    run_metadata = config_pb2.RunMetadata()
    # Invoke Session.run().
    sess.run(w, options=run_options, run_metadata=run_metadata)
    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))
    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)
    simple_add_results = collections.namedtuple("SimpleAddResults", [
        "u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name", "w_name",
        "dump"
    ])
    return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,
                              w_name, dump)
def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):
  """Tensors dumped under overlapping parent dirs are all retrievable."""
  results = self._generate_dump_from_simple_addition_graph()
  self.assertTrue(results.dump.loaded_partition_graphs())
  # Verify the dumped tensor values for u and v.
  self.assertEqual(2, results.dump.size)
  self.assertAllClose([results.u_init_val],
                      results.dump.get_tensors("%s/read" % results.u_name, 0,
                                               "DebugIdentity"))
  self.assertAllClose([results.v_init_val],
                      results.dump.get_tensors("%s/read" % results.v_name, 0,
                                               "DebugIdentity"))
  # Relative timestamps must be non-negative and dump sizes positive.
  self.assertGreaterEqual(
      results.dump.get_rel_timestamps("%s/read" % results.u_name, 0,
                                      "DebugIdentity")[0], 0)
  self.assertGreaterEqual(
      results.dump.get_rel_timestamps("%s/read" % results.v_name, 0,
                                      "DebugIdentity")[0], 0)
  self.assertGreater(
      results.dump.get_dump_sizes_bytes("%s/read" % results.u_name, 0,
                                        "DebugIdentity")[0], 0)
  self.assertGreater(
      results.dump.get_dump_sizes_bytes("%s/read" % results.v_name, 0,
                                        "DebugIdentity")[0], 0)
def testGetOpTypeWorks(self):
  """node_op_type() reports op types and raises for unknown node names."""
  res = self._generate_dump_from_simple_addition_graph()
  self.assertEqual(res.u.op.type, res.dump.node_op_type(res.u_name))
  self.assertIn(res.v.op.type, res.dump.node_op_type(res.v_name))
  self.assertIn(res.w.op.type, res.dump.node_op_type(res.w_name))
  with self.assertRaisesRegexp(
      ValueError, "Node 'foo_bar' does not exist in partition graphs."):
    res.dump.node_op_type("foo_bar")
def testDumpStringTensorsWorks(self):
  """String (bytes) tensors can be watched, dumped and read back."""
  with session.Session() as sess:
    str1_init_val = np.array(b"abc")
    str2_init_val = np.array(b"def")
    str1_init = constant_op.constant(str1_init_val)
    str2_init = constant_op.constant(str2_init_val)
    str1_name = "str1"
    str2_name = "str2"
    str1 = variables.Variable(str1_init, name=str1_name)
    str2 = variables.Variable(str2_init, name=str2_name)
    # Concatenate str1 and str2
    str_concat = math_ops.add(str1, str2, name="str_concat")
    str1.initializer.run()
    str2.initializer.run()
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_urls = self._debug_urls()
    # Add debug tensor watch for u.
    debug_utils.add_debug_tensor_watch(
        run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
    # Add debug tensor watch for v.
    debug_utils.add_debug_tensor_watch(
        run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)
    run_metadata = config_pb2.RunMetadata()
    sess.run(str_concat, options=run_options, run_metadata=run_metadata)
    # String ops are located on CPU.
    self.assertEqual(1, len(run_metadata.partition_graphs))
    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)
    self.assertIn(str1_name, dump.nodes())
    self.assertIn(str2_name, dump.nodes())
    self.assertEqual(2, dump.size)
    self.assertEqual([str1_init_val],
                     dump.get_tensors("%s/read" % str1_name, 0,
                                      "DebugIdentity"))
    self.assertEqual([str2_init_val],
                     dump.get_tensors("%s/read" % str2_name, 0,
                                      "DebugIdentity"))
    # Timestamps are non-negative; dump files are non-empty.
    self.assertGreaterEqual(
        dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
        0)
    self.assertGreaterEqual(
        dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
        0)
    self.assertGreater(
        dump.get_dump_sizes_bytes("%s/read" % str1_name, 0,
                                  "DebugIdentity")[0], 0)
    self.assertGreater(
        dump.get_dump_sizes_bytes("%s/read" % str2_name, 0,
                                  "DebugIdentity")[0], 0)
def testDumpUninitializedVariable(self):
  """Watching uninitialized variables dumps None; init still succeeds."""
  op_namespace = "testDumpUninitializedVariable"
  with session.Session() as sess:
    u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
    s_init_val = b"str1"
    u_name = "%s/u" % op_namespace
    s_name = "%s/s" % op_namespace
    u_init = constant_op.constant(u_init_val, shape=[2, 2])
    u = variables.Variable(u_init, name=u_name)
    s_init = constant_op.constant(s_init_val)
    s = variables.Variable(s_init, name=s_name)
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_urls = self._debug_urls()
    # Add debug tensor watch for u.
    debug_utils.add_debug_tensor_watch(
        run_options, "%s" % u_name, 0, debug_urls=debug_urls)
    debug_utils.add_debug_tensor_watch(
        run_options, "%s" % s_name, 0, debug_urls=debug_urls)
    run_metadata = config_pb2.RunMetadata()
    # Initialize u and s.
    sess.run(variables.global_variables_initializer(),
             options=run_options,
             run_metadata=run_metadata)
    # Verify the dump file for the uninitialized value of u.
    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)
    self.assertEqual(2, dump.size)
    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))
    # Verify that the variable is properly initialized by the run() call.
    u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
    s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
    self.assertEqual(1, len(u_vals))
    self.assertIsNone(u_vals[0])
    self.assertEqual(1, len(s_vals))
    self.assertIsNone(s_vals[0])
    # Call run() again, to check that u is initialized properly.
    self.assertAllClose(u_init_val, sess.run(u))
    self.assertEqual(s_init_val, sess.run(s))
def testDebugWhileLoopGeneratesMultipleDumps(self):
  """Each iteration of a watched while-loop tensor yields its own dump."""
  with session.Session() as sess:
    num_iter = 10
    # "u" is the Variable being updated in the loop.
    u_name = "testDumpToFileWhileLoop/u"
    u_namespace = u_name.split("/")[0]
    u_init_val = np.array(11.0)
    u_init = constant_op.constant(u_init_val)
    u = variables.Variable(u_init, name=u_name)
    # "v" is the increment.
    v_name = "testDumpToFileWhileLoop/v"
    v_namespace = v_name.split("/")[0]
    v_init_val = np.array(2.0)
    v_init = constant_op.constant(v_init_val)
    v = variables.Variable(v_init, name=v_name)
    u.initializer.run()
    v.initializer.run()
    i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")

    def cond(i):
      # Continue while i < num_iter.
      return math_ops.less(i, num_iter)

    def body(i):
      # Each iteration adds v to u, then increments the counter i.
      new_u = state_ops.assign_add(u, v)
      new_i = math_ops.add(i, 1)
      op = control_flow_ops.group(new_u)
      new_i = control_flow_ops.with_dependencies([op], new_i)
      return [new_i]

    loop = control_flow_ops.while_loop(cond, body, [i], parallel_iterations=1)
    # Create RunOptions for debug-watching tensors
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_urls = self._debug_urls()
    # Add debug tensor watch for u.
    debug_utils.add_debug_tensor_watch(
        run_options, u_name, 0, debug_urls=debug_urls)
    # Add debug tensor watch for v.
    debug_utils.add_debug_tensor_watch(
        run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
    # Add debug tensor watch for while/Identity.
    debug_utils.add_debug_tensor_watch(
        run_options, "while/Identity", 0, debug_urls=debug_urls)
    # Add debug tensor watch for while/Add/y.
    debug_utils.add_debug_tensor_watch(
        run_options, "while/Add/y", 0, debug_urls=debug_urls)
    run_metadata = config_pb2.RunMetadata()
    r = sess.run(loop, options=run_options, run_metadata=run_metadata)
    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))
    self.assertEqual(num_iter, r)
    u_val_final = sess.run(u)
    self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)
    # Verify dump files
    self.assertTrue(os.path.isdir(self._dump_root))
    self.assertTrue(os.path.isdir(os.path.join(self._dump_root, u_namespace)))
    self.assertTrue(
        os.path.isdir(os.path.join(self._dump_root, v_namespace, "v")))
    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)
    # Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
    # and 10 iterations of while/Add/y.
    self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)
    # Verify tensor values.
    self.assertAllClose([u_init_val],
                        dump.get_tensors(u_name, 0, "DebugIdentity"))
    self.assertAllClose([v_init_val],
                        dump.get_tensors("%s/read" % v_name, 0,
                                         "DebugIdentity"))
    while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
    self.assertEqual(10, len(while_id_tensors))
    for k in xrange(len(while_id_tensors)):
      self.assertAllClose(np.array(k), while_id_tensors[k])
    # Verify ascending timestamps from the while loops.
    while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
                                                      "DebugIdentity")
    while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes("while/Identity", 0,
                                                          "DebugIdentity")
    self.assertEqual(10, len(while_id_rel_timestamps))
    prev_rel_time = 0
    prev_dump_size_bytes = while_id_dump_sizes_bytes[0]
    for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,
                                         while_id_dump_sizes_bytes):
      self.assertGreaterEqual(rel_time, prev_rel_time)
      self.assertEqual(dump_size_bytes, prev_dump_size_bytes)
      prev_rel_time = rel_time
      prev_dump_size_bytes = dump_size_bytes
    # Test querying debug watch keys from node name.
    watch_keys = dump.debug_watch_keys("while/Identity")
    self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)
    # Test querying debug datum instances from debug watch key.
    self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
    self.assertEqual([], dump.watch_key_to_data("foo"))
def testFindNodesWithBadTensorValues(self):
  """dump.find() locates tensors containing inf/nan, in graph order."""
  with session.Session() as sess:
    u_name = "testFindNodesWithBadTensorValues/u"
    v_name = "testFindNodesWithBadTensorValues/v"
    w_name = "testFindNodesWithBadTensorValues/w"
    x_name = "testFindNodesWithBadTensorValues/x"
    y_name = "testFindNodesWithBadTensorValues/y"
    z_name = "testFindNodesWithBadTensorValues/z"
    u_init = constant_op.constant([2.0, 4.0])
    u = variables.Variable(u_init, name=u_name)
    v_init = constant_op.constant([2.0, 1.0])
    v = variables.Variable(v_init, name=v_name)
    # Expected output: [0.0, 3.0]
    w = math_ops.subtract(u, v, name=w_name)
    # Expected output: [inf, 1.3333]
    x = math_ops.div(u, w, name=x_name)
    # Expected output: [nan, 4.0]
    y = math_ops.multiply(w, x, name=y_name)
    z = math_ops.multiply(y, y, name=z_name)
    u.initializer.run()
    v.initializer.run()
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())
    run_metadata = config_pb2.RunMetadata()
    sess.run(z, options=run_options, run_metadata=run_metadata)
    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))
    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    def has_bad_value(_, tensor):
      # True iff the tensor contains any NaN or Inf element.
      return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))

    # Find all "offending tensors".
    bad_data = dump.find(has_bad_value)
    # Verify that the nodes with bad values are caught through running find
    # on the debug dump.
    self.assertEqual(3, len(bad_data))
    self.assertEqual(x_name, bad_data[0].node_name)
    self.assertEqual(y_name, bad_data[1].node_name)
    self.assertEqual(z_name, bad_data[2].node_name)
    # Test first_n kwarg of find(): Find the first offending tensor.
    first_bad_datum = dump.find(has_bad_value, first_n=1)
    self.assertEqual(1, len(first_bad_datum))
    self.assertEqual(x_name, first_bad_datum[0].node_name)
def _session_run_for_graph_structure_lookup(self):
  """Run a small u -> v -> w graph, watching the whole graph.

  Returns:
    (u_name, v_name, w_name, dump): the node names and the loaded
    DebugDumpDir with partition graphs, for graph-structure lookups.
  """
  with session.Session() as sess:
    u_name = "testDumpGraphStructureLookup/u"
    v_name = "testDumpGraphStructureLookup/v"
    w_name = "testDumpGraphStructureLookup/w"
    u_init = constant_op.constant([2.0, 4.0])
    u = variables.Variable(u_init, name=u_name)
    v = math_ops.add(u, u, name=v_name)
    w = math_ops.add(v, v, name=w_name)
    u.initializer.run()
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())
    run_metadata = config_pb2.RunMetadata()
    sess.run(w, options=run_options, run_metadata=run_metadata)
    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))
    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)
  return u_name, v_name, w_name, dump
def testGraphStructureLookupGivesDevicesAndNodesInfo(self):
  """Dump exposes the device list, per-node device, and node existence."""
  u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
  # Device count and u's placement.
  self.assertEqual(self._expected_num_devices, len(dump.devices()))
  self.assertEqual(self._main_device, dump.node_device(u_name))
  with self.assertRaisesRegexp(ValueError,
                               "does not exist in partition graphs"):
    dump.node_device(u_name + "foo")
  # Node existence checks.
  read_name = u_name + "/read"
  self.assertTrue(dump.node_exists(u_name))
  self.assertTrue(dump.node_exists(read_name))
  self.assertFalse(dump.node_exists(read_name + "/foo"))
def testGraphStructureLookupGivesNodesAndAttributes(self):
  """Dump lists node names and exposes per-node attributes."""
  u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
  u_read_name = u_name + "/read"
  # Both the variable node and its /read node must be listed.
  names = dump.nodes()
  self.assertIn(u_name, names)
  self.assertIn(u_read_name, names)
  # Query u's attributes: dtype and shape.
  attrs = dump.node_attributes(u_name)
  self.assertEqual(dtypes.float32, attrs["dtype"].type)
  shape_dims = attrs["shape"].shape.dim
  self.assertEqual(1, len(shape_dims))
  self.assertEqual(2, shape_dims[0].size)
  with self.assertRaisesRegexp(ValueError, "No node named \"foo\" exists"):
    dump.node_attributes("foo")
def testGraphStructureLookupGivesDebugWatchKeys(self):
  """debug_watch_keys() and watch_key_to_data() resolve correctly."""
  u_name, v_name, w_name, dump = (
      self._session_run_for_graph_structure_lookup())
  # Each watched node has exactly one DebugIdentity watch key.
  for name in (u_name, v_name, w_name):
    self.assertEqual(["%s:0:DebugIdentity" % name],
                     dump.debug_watch_keys(name))
  self.assertEqual([], dump.debug_watch_keys("foo"))
  # Query debug datum instances from u's watch key.
  u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
  self.assertEqual(1, len(u_data))
  datum = u_data[0]
  self.assertEqual(u_name, datum.node_name)
  self.assertEqual(0, datum.output_slot)
  self.assertEqual("DebugIdentity", datum.debug_op)
  self.assertGreaterEqual(datum.timestamp, 0)
  self.assertEqual([], dump.watch_key_to_data("foo"))
def testGraphStructureLookupGivesNodeInputsAndRecipients(self):
  """Dump exposes input, recipient and transitive-input lookups."""
  u_name, v_name, w_name, dump = (
      self._session_run_for_graph_structure_lookup())
  u_read_name = u_name + "/read"
  # Test the inputs lookup of the DebugDumpDir object.
  self.assertEqual([], dump.node_inputs(u_name))
  self.assertEqual([u_name], dump.node_inputs(u_read_name))
  self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
  self.assertEqual([v_name] * 2, dump.node_inputs(w_name))
  # None of these nodes has control inputs.
  self.assertEqual([], dump.node_inputs(u_name, is_control=True))
  self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
  self.assertEqual([], dump.node_inputs(v_name, is_control=True))
  self.assertEqual([], dump.node_inputs(w_name, is_control=True))
  # Test the outputs recipient lookup of the DebugDumpDir object.
  self.assertTrue(u_read_name in dump.node_recipients(u_name))
  self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
  self.assertEqual(2, dump.node_recipients(v_name).count(w_name))
  self.assertEqual([], dump.node_recipients(u_name, is_control=True))
  self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
  self.assertEqual([], dump.node_recipients(v_name, is_control=True))
  self.assertEqual([], dump.node_recipients(w_name, is_control=True))
  # Test errors raised on invalid node names.
  with self.assertRaisesRegexp(ValueError,
                               "does not exist in partition graphs"):
    dump.node_inputs(u_name + "foo")
  with self.assertRaisesRegexp(ValueError,
                               "does not exist in partition graphs"):
    dump.node_recipients(u_name + "foo")
  # Test transitive_inputs().
  self.assertEqual([], dump.transitive_inputs(u_name))
  self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
  self.assertEqual(
      set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
  self.assertEqual(
      set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))
  with self.assertRaisesRegexp(ValueError,
                               "does not exist in partition graphs"):
    dump.transitive_inputs(u_name + "foo")
def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):
  """Reloading a dump without partition graphs picks them up from disk."""
  self._session_run_for_graph_structure_lookup()
  # Load the dump again without supplying the partition graphs; they are
  # expected to be read from the dump directory itself without errors.
  reloaded = debug_data.DebugDumpDir(self._dump_root, validate=False)
  self.assertTrue(reloaded.loaded_partition_graphs())
def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):
  """Swapped dump-file timestamps must fail causality validation."""
  with session.Session() as sess:
    u_name = "testDumpCausalityCheck/u"
    v_name = "testDumpCausalityCheck/v"
    w_name = "testDumpCausalityCheck/w"
    u_init = constant_op.constant([2.0, 4.0])
    u = variables.Variable(u_init, name=u_name)
    v = math_ops.add(u, u, name=v_name)
    w = math_ops.add(v, v, name=w_name)
    u.initializer.run()
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())
    run_metadata = config_pb2.RunMetadata()
    sess.run(w, options=run_options, run_metadata=run_metadata)
    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))
    # First, loading the original dump without supplying the
    # partition_graphs should not cause a LookupError, validation occurs
    # only with partition_graphs loaded.
    debug_data.DebugDumpDir(self._dump_root)
    # Now, loading the original dump with partition graphs supplied should
    # succeed. The validation should pass quietly.
    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)
    # Get the dump file names and compute their timestamps.
    self.assertEqual(
        1, len(dump.get_tensor_file_paths(u_name, 0, "DebugIdentity")))
    u_file_path = dump.get_tensor_file_paths(u_name, 0, "DebugIdentity")[0]
    self.assertEqual(
        1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
    v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]
    # The trailing "_<timestamp>" suffix of the dump file name encodes the
    # dump time.
    u_timestamp = int(u_file_path[u_file_path.rindex("_") + 1:])
    v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])
    # Swap the time stamps
    new_u_file_path = u_file_path[:u_file_path.rindex(
        "_")] + "_%d" % v_timestamp
    new_v_file_path = v_file_path[:v_file_path.rindex(
        "_")] + "_%d" % u_timestamp
    os.rename(u_file_path, new_u_file_path)
    os.rename(v_file_path, new_v_file_path)
    # Load the dump directory again. Now a ValueError is expected to be
    # raised due to the timestamp swap.
    with self.assertRaisesRegexp(ValueError, "Causality violated"):
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
    # Loading the dump directory with kwarg "validate" set explicitly to
    # False should get rid of the error.
    dump = debug_data.DebugDumpDir(
        self._dump_root,
        partition_graphs=run_metadata.partition_graphs,
        validate=False)
def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
    """Watching one of a node's two output slots must not trip validation.

    unique_with_counts emits multiple outputs; only slot 0 of u is watched,
    so the dump is intentionally partial.  Loading the dump with
    validate=True must still succeed (no false causality violation).
    """
    with session.Session() as sess:
        x_name = "oneOfTwoSlots/x"
        u_name = "oneOfTwoSlots/u"
        v_name = "oneOfTwoSlots/v"
        w_name = "oneOfTwoSlots/w"
        y_name = "oneOfTwoSlots/y"
        x = variables.Variable([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)
        sess.run(x.initializer)
        # u has two output slots (unique values and indices); both are
        # consumed downstream by v and w respectively.
        unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)
        v = math_ops.add(unique_x, unique_x, name=v_name)
        w = math_ops.add(indices, indices, name=w_name)
        y = math_ops.add(w, w, name=y_name)
        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        # Watch only the first output slot of u, even though it has two output
        # slots.
        debug_utils.add_debug_tensor_watch(
            run_options, u_name, 0, debug_urls=self._debug_urls())
        debug_utils.add_debug_tensor_watch(
            run_options, w_name, 0, debug_urls=self._debug_urls())
        debug_utils.add_debug_tensor_watch(
            run_options, y_name, 0, debug_urls=self._debug_urls())
        run_metadata = config_pb2.RunMetadata()
        sess.run([v, y], options=run_options, run_metadata=run_metadata)
        # validate=True is the point of the test: a partial watch set must
        # still load cleanly.
        dump = debug_data.DebugDumpDir(
            self._dump_root,
            partition_graphs=run_metadata.partition_graphs,
            validate=True)
        self.assertAllClose([1, 3, 7],
                            dump.get_tensors(u_name, 0, "DebugIdentity")[0])
def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):
    """Test watching output slots not attached to any outgoing edges."""
    with session.Session() as sess:
        u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
        u = constant_op.constant(u_init_val, shape=[2, 2], name="u")
        # Create a control edge from a node with an output: From u to z.
        # Node u will get executed only because of the control edge. The output
        # tensor u:0 is not attached to any outgoing edge in the graph. This test
        # checks that the debugger can watch such a tensor.
        with ops.control_dependencies([u]):
            z = control_flow_ops.no_op(name="z")
        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        debug_utils.watch_graph(
            run_options,
            sess.graph,
            debug_ops=["DebugIdentity"],
            debug_urls=self._debug_urls())
        run_metadata = config_pb2.RunMetadata()
        # Running z forces u to execute via the control edge; only u:0 should
        # produce a dump.
        sess.run(z, options=run_options, run_metadata=run_metadata)
        dump = debug_data.DebugDumpDir(
            self._dump_root, partition_graphs=run_metadata.partition_graphs)
        # Assert that the DebugIdentity watch on u works properly.
        self.assertEqual(1, len(dump.dumped_tensor_data))
        datum = dump.dumped_tensor_data[0]
        self.assertEqual("u", datum.node_name)
        self.assertEqual(0, datum.output_slot)
        self.assertEqual("DebugIdentity", datum.debug_op)
        self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
def testWatchingVariableUpdateOpsSeesUpdatedValues(self):
    """Watch output slots on Variable-updating ops, with no emitted edges."""
    with session.Session() as sess:
        u_init = constant_op.constant(10.0)
        u = variables.Variable(u_init, name="gdo/u")
        v_init = constant_op.constant(20.0)
        v = variables.Variable(v_init, name="gdo/v")
        w = math_ops.multiply(u, v, name="gdo/w")
        # gdo stands for GradientDescentOptimizer.
        train_op = gradient_descent.GradientDescentOptimizer(
            learning_rate=0.1).minimize(
                w, name="gdo/train")
        u.initializer.run()
        v.initializer.run()
        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        debug_utils.watch_graph(
            run_options,
            sess.graph,
            debug_ops=["DebugIdentity"],
            debug_urls=self._debug_urls())
        run_metadata = config_pb2.RunMetadata()
        sess.run(train_op, options=run_options, run_metadata=run_metadata)
        dump = debug_data.DebugDumpDir(
            self._dump_root, partition_graphs=run_metadata.partition_graphs)
        # The ApplyGradientDescent output slot has no outgoing edge, yet its
        # dump must reflect the *post-update* variable value.
        update_u_data = dump.watch_key_to_data(
            "gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
        self.assertEqual(1, len(update_u_data))
        # Gradient descent on u: w = u * v, so dw / du = v.
        # Updated value of u should be:
        # 10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
        self.assertAllClose(8.0, update_u_data[0].get_tensor())
        update_v_data = dump.watch_key_to_data(
            "gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
        self.assertEqual(1, len(update_v_data))
        # Gradient descent on v: w = u * v, so dw / dv = u.
        # Updated value of v should be:
        # 20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
        self.assertAllClose(19.0, update_v_data[0].get_tensor())
        # Verify that the Variables u and v are updated properly.
        self.assertAllClose(8.0, sess.run(u))
        self.assertAllClose(19.0, sess.run(v))
def testAllowsWatchingUnconnectedOutputTensor(self):
    """Watch an output slot not emitting any edges.

    (Not even control edges from the node.)
    """
    with session.Session() as sess:
        x_init = constant_op.constant([2, 2, 3, 5, 5])
        x = variables.Variable(x_init, name="unconnected/x")
        # The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
        # graph. Let the debugger watch the unused slot 1.
        unique_x, _ = array_ops.unique(x, name="unconnected/unique_x")
        y = math_ops.add(unique_x, [0, 1, 2], name="unconnected/y")
        x.initializer.run()
        # Verify that only slot 0 of unique_x has recipients, while slot 1 of
        # the same node does not have recipients.
        unique_x_slot_0_recipients = []
        unique_x_slot_1_recipients = []
        for op in sess.graph.get_operations():
            for inp in op.inputs:
                if inp.name == "unconnected/unique_x:0":
                    unique_x_slot_0_recipients.append(op.name)
                elif inp.name == "unconnected/unique_x:1":
                    unique_x_slot_1_recipients.append(op.name)
        self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
        self.assertEqual([], unique_x_slot_1_recipients)
        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        debug_utils.watch_graph(
            run_options,
            sess.graph,
            debug_ops=["DebugIdentity"],
            debug_urls=self._debug_urls())
        run_metadata = config_pb2.RunMetadata()
        result = sess.run(y, options=run_options, run_metadata=run_metadata)
        # unique([2, 2, 3, 5, 5]) -> [2, 3, 5]; plus [0, 1, 2] -> [2, 4, 7].
        self.assertAllClose([2, 4, 7], result)
        dump = debug_data.DebugDumpDir(
            self._dump_root, partition_graphs=run_metadata.partition_graphs)
        # Assert that the connected slot (slot 0) is dumped properly.
        unique_x_slot_0_dumps = dump.watch_key_to_data(
            "unconnected/unique_x:0:DebugIdentity")
        self.assertEqual(1, len(unique_x_slot_0_dumps))
        self.assertEqual("unconnected/unique_x",
                         unique_x_slot_0_dumps[0].node_name)
        self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
        self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())
        # Assert that the unconnected slot (slot 1) is dumped properly.
        unique_x_slot_1_dumps = dump.watch_key_to_data(
            "unconnected/unique_x:1:DebugIdentity")
        self.assertEqual(1, len(unique_x_slot_1_dumps))
        self.assertEqual("unconnected/unique_x",
                         unique_x_slot_1_dumps[0].node_name)
        self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
        self.assertAllClose([0, 0, 1, 2, 2],
                            unique_x_slot_1_dumps[0].get_tensor())
def testDebuggingDuringOpError(self):
    """Test the debug tensor dumping when error occurs in graph runtime."""
    with session.Session() as sess:
        ph = array_ops.placeholder(dtypes.float32, name="mismatch/ph")
        x = array_ops.transpose(ph, name="mismatch/x")
        m = constant_op.constant(
            np.array(
                [[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
        y = math_ops.matmul(m, x, name="mismatch/y")
        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        debug_utils.watch_graph(
            run_options,
            sess.graph,
            debug_ops=["DebugIdentity"],
            debug_urls=self._debug_urls())
        # Feeding a 2x1 value makes x (its transpose) 1x2, so matmul of
        # m (1x2) with x (1x2) fails with a shape error at runtime.
        with self.assertRaises(errors.OpError):
            sess.run(y,
                     options=run_options,
                     feed_dict={ph: np.array([[-3.0], [0.0]])})
        dump = debug_data.DebugDumpDir(self._dump_root)
        # Despite the fact that the run() call errored out and partition_graphs
        # are not available via run_metadata, the partition graphs should still
        # have been loaded from the dump directory.
        self.assertTrue(dump.loaded_partition_graphs())
        # Nodes that executed before the failure must still be dumped.
        m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
        self.assertEqual(1, len(m_dumps))
        self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())
        x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
        self.assertEqual(1, len(x_dumps))
        self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):
    """DebugNumericSummary on an initialized tensor yields correct stats."""
    with session.Session() as sess:
        # 18 elements: 4 NaNs, 2 -Infs, 2 finite negatives, 3 zeros,
        # 2 finite positives, 5 +Infs.
        a = variables.Variable(
            [
                np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,
                -np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan
            ],
            dtype=np.float32,
            name="numeric_summary/a")
        b = variables.Variable(
            [0.0] * 18, dtype=np.float32, name="numeric_summary/b")
        c = math_ops.add(a, b, name="numeric_summary/c")
        sess.run(variables.global_variables_initializer())
        run_metadata = config_pb2.RunMetadata()
        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        debug_utils.watch_graph(
            run_options,
            sess.graph,
            debug_ops=["DebugNumericSummary"],
            debug_urls=self._debug_urls())
        sess.run(c, options=run_options, run_metadata=run_metadata)
        dump = debug_data.DebugDumpDir(
            self._dump_root, partition_graphs=run_metadata.partition_graphs)
        self.assertTrue(dump.loaded_partition_graphs())
        # Expected vector appears to be laid out as [initialized(1), count(18),
        # #-Inf(2), #neg(2), #zero(3), #pos(2), #+Inf(5), #NaN(4), finite min(-3),
        # finite max(7), finite mean(6/7), finite variance] -- inferred from the
        # data above; TODO confirm field order against the DebugNumericSummary
        # op documentation.
        self.assertAllClose([[
            1.0, 18.0, 2.0, 2.0, 3.0, 2.0, 5.0, 4.0, -3.0, 7.0, 0.85714286,
            8.97959184
        ]], dump.get_tensors("numeric_summary/a/read", 0, "DebugNumericSummary"))
def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):
    """DebugNumericSummary must be safe on an uninitialized Variable."""
    with session.Session() as sess:
        a = variables.Variable(
            [42], dtype=np.float32, name="numeric_summary_uninit/a")
        run_metadata = config_pb2.RunMetadata()
        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        debug_utils.watch_graph(
            run_options,
            sess.graph,
            debug_ops=["DebugNumericSummary"],
            debug_urls=self._debug_urls())
        # Only the initializer is run: the watched tensor is observed while
        # still uninitialized.
        sess.run(a.initializer, options=run_options, run_metadata=run_metadata)
        dump = debug_data.DebugDumpDir(
            self._dump_root, partition_graphs=run_metadata.partition_graphs)
        self.assertTrue(dump.loaded_partition_graphs())
        # DebugNumericSummary output should reflect the uninitialized state of
        # the watched tensor.
        numeric_summary = dump.get_tensors("numeric_summary_uninit/a", 0,
                                           "DebugNumericSummary")[0]
        self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                            numeric_summary[0:8])
        # The tail entries carry empty-input sentinels per the assertions
        # below: min=+Inf, max=-Inf, mean and variance NaN.
        self.assertTrue(np.isinf(numeric_summary[8]))
        self.assertGreater(numeric_summary[8], 0.0)
        self.assertTrue(np.isinf(numeric_summary[9]))
        self.assertLess(numeric_summary[9], 0.0)
        self.assertTrue(np.isnan(numeric_summary[10]))
        self.assertTrue(np.isnan(numeric_summary[11]))
def testLookUpNodePythonTracebackWorks(self):
    """node_traceback() works after set_python_graph(), by node or tensor name."""
    with session.Session() as sess:
        u_init = constant_op.constant(10.0)
        u = variables.Variable(u_init, name="traceback/u")
        v_init = constant_op.constant(20.0)
        v = variables.Variable(v_init, name="traceback/v")
        w = math_ops.multiply(u, v, name="traceback/w")
        sess.run(variables.global_variables_initializer())
        run_metadata = config_pb2.RunMetadata()
        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        debug_utils.watch_graph(
            run_options, sess.graph, debug_urls=self._debug_urls())
        sess.run(w, options=run_options, run_metadata=run_metadata)
        dump = debug_data.DebugDumpDir(
            self._dump_root, partition_graphs=run_metadata.partition_graphs)
        # Prior to setting the Python graph, attempts to do traceback lookup
        # should lead to exceptions.
        with self.assertRaisesRegexp(
            LookupError, "Python graph is not available for traceback lookup"):
            dump.node_traceback("traceback/w")
        dump.set_python_graph(sess.graph)
        # After setting the Python graph, attempts to look up nonexistent nodes
        # should lead to exceptions.
        with self.assertRaisesRegexp(KeyError,
                                     r"Cannot find node \"foo\" in Python graph"):
            dump.node_traceback("foo")
        # Lookup should work with node name input; a traceback is a list of
        # stack-frame tuples.
        traceback = dump.node_traceback("traceback/w")
        self.assertIsInstance(traceback, list)
        self.assertGreater(len(traceback), 0)
        for trace in traceback:
            self.assertIsInstance(trace, tuple)
        # Lookup should also work with tensor name input.
        traceback = dump.node_traceback("traceback/w:0")
        self.assertIsInstance(traceback, list)
        self.assertGreater(len(traceback), 0)
        for trace in traceback:
            self.assertIsInstance(trace, tuple)
if __name__ == "__main__":
    # Run all tests in this module under the TF googletest runner.
    googletest.main()
| {
"content_hash": "9022f6d9e8504f421cb3e416dcc6052f",
"timestamp": "",
"source": "github",
"line_count": 1011,
"max_line_length": 80,
"avg_line_length": 39.453016815034616,
"alnum_prop": 0.6364981071527064,
"repo_name": "ryfeus/lambda-packs",
"id": "2b700facd7d66667175cb5cde768edd23e838bb0",
"size": "40576",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "Keras_tensorflow/source/tensorflow/python/debug/session_debug_testlib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
r"""
.. dialect:: mysql+asyncmy
:name: asyncmy
:dbapi: asyncmy
:connectstring: mysql+asyncmy://user:password@host:port/dbname[?key=value&key=value...]
:url: https://github.com/long2ice/asyncmy
.. note:: The asyncmy dialect as of September, 2021 was added to provide
MySQL/MariaDB asyncio compatibility given that the :ref:`aiomysql` database
driver has become unmaintained, however asyncmy is itself very new.
Using a special asyncio mediation layer, the asyncmy dialect is usable
as the backend for the :ref:`SQLAlchemy asyncio <asyncio_toplevel>`
extension package.
This dialect should normally be used only with the
:func:`_asyncio.create_async_engine` engine creation function::
from sqlalchemy.ext.asyncio import create_async_engine
engine = create_async_engine("mysql+asyncmy://user:pass@hostname/dbname?charset=utf8mb4")
""" # noqa
from contextlib import asynccontextmanager
from .pymysql import MySQLDialect_pymysql
from ... import pool
from ... import util
from ...engine import AdaptedConnection
from ...util.concurrency import asyncio
from ...util.concurrency import await_fallback
from ...util.concurrency import await_only
class AsyncAdapt_asyncmy_cursor:
    """Blocking, PEP 249-style cursor facade over an asyncmy async cursor.

    After each execute the full result set is pre-fetched into ``_rows`` so
    the synchronous fetch* methods need no further awaiting.
    """

    # Buffered (client-side) cursor; the server-side subclass flips this.
    server_side = False
    __slots__ = (
        "_adapt_connection",
        "_connection",
        "await_",
        "_cursor",
        "_rows",
    )

    def __init__(self, adapt_connection):
        self._adapt_connection = adapt_connection
        self._connection = adapt_connection._connection
        # await_ is either await_only or await_fallback, as chosen by the
        # connection wrapper class.
        self.await_ = adapt_connection.await_
        cursor = self._connection.cursor()
        # Drive the async context manager synchronously via the mediation layer.
        self._cursor = self.await_(cursor.__aenter__())
        self._rows = []

    @property
    def description(self):
        # Pass-through of the underlying cursor's column description.
        return self._cursor.description

    @property
    def rowcount(self):
        return self._cursor.rowcount

    @property
    def arraysize(self):
        return self._cursor.arraysize

    @arraysize.setter
    def arraysize(self, value):
        self._cursor.arraysize = value

    @property
    def lastrowid(self):
        return self._cursor.lastrowid

    def close(self):
        # note we aren't actually closing the cursor here,
        # we are just letting GC do it. to allow this to be async
        # we would need the Result to change how it does "Safe close cursor".
        # MySQL "cursors" don't actually have state to be "closed" besides
        # exhausting rows, which we already have done for sync cursor.
        # another option would be to emulate aiosqlite dialect and assign
        # cursor only if we are doing server side cursor operation.
        self._rows[:] = []

    def execute(self, operation, parameters=None):
        # Blocking wrapper over the async execute.
        return self.await_(self._execute_async(operation, parameters))

    def executemany(self, operation, seq_of_parameters):
        return self.await_(
            self._executemany_async(operation, seq_of_parameters)
        )

    async def _execute_async(self, operation, parameters):
        # All driver access goes through the connection's mutex so operations
        # on the shared asyncmy connection are serialized.
        async with self._adapt_connection._mutex_and_adapt_errors():
            if parameters is None:
                result = await self._cursor.execute(operation)
            else:
                result = await self._cursor.execute(operation, parameters)
            if not self.server_side:
                # asyncmy has a "fake" async result, so we have to pull it out
                # of that here since our default result is not async.
                # we could just as easily grab "_rows" here and be done with it
                # but this is safer.
                self._rows = list(await self._cursor.fetchall())
            return result

    async def _executemany_async(self, operation, seq_of_parameters):
        async with self._adapt_connection._mutex_and_adapt_errors():
            return await self._cursor.executemany(operation, seq_of_parameters)

    def setinputsizes(self, *inputsizes):
        # No-op; present only to satisfy the DBAPI cursor surface.
        pass

    def __iter__(self):
        # Consume the pre-fetched buffer front-to-back.
        while self._rows:
            yield self._rows.pop(0)

    def fetchone(self):
        if self._rows:
            return self._rows.pop(0)
        else:
            return None

    def fetchmany(self, size=None):
        if size is None:
            size = self.arraysize
        # Slice off up to `size` buffered rows and keep the remainder.
        retval = self._rows[0:size]
        self._rows[:] = self._rows[size:]
        return retval

    def fetchall(self):
        retval = self._rows[:]
        self._rows[:] = []
        return retval
class AsyncAdapt_asyncmy_ss_cursor(AsyncAdapt_asyncmy_cursor):
    """Server-side (unbuffered) cursor variant; rows are fetched on demand
    instead of being pre-fetched into a local buffer."""

    __slots__ = ()
    server_side = True

    def __init__(self, adapt_connection):
        self._adapt_connection = adapt_connection
        self._connection = adapt_connection._connection
        self.await_ = adapt_connection.await_
        # Use asyncmy's SSCursor so rows stream from the server.
        cursor = self._connection.cursor(
            adapt_connection.dbapi.asyncmy.cursors.SSCursor
        )
        self._cursor = self.await_(cursor.__aenter__())
        # NOTE(review): unlike the base class, self._rows is never assigned
        # here, so the inherited __iter__ would raise AttributeError if used
        # on a server-side cursor -- confirm callers only use fetch*().

    def close(self):
        # Server-side cursors hold real state; close explicitly and drop the
        # reference (unlike the buffered base class, which lets GC handle it).
        if self._cursor is not None:
            self.await_(self._cursor.close())
            self._cursor = None

    def fetchone(self):
        return self.await_(self._cursor.fetchone())

    def fetchmany(self, size=None):
        return self.await_(self._cursor.fetchmany(size=size))

    def fetchall(self):
        return self.await_(self._cursor.fetchall())
class AsyncAdapt_asyncmy_connection(AdaptedConnection):
    """Blocking, PEP 249-style connection facade over an asyncmy connection."""

    # Mediation strategy: await_only requires running inside an event loop.
    await_ = staticmethod(await_only)
    __slots__ = ("dbapi", "_execute_mutex")

    def __init__(self, dbapi, connection):
        self.dbapi = dbapi
        self._connection = connection
        # Serializes all operations on the shared asyncmy connection.
        self._execute_mutex = asyncio.Lock()

    @asynccontextmanager
    async def _mutex_and_adapt_errors(self):
        async with self._execute_mutex:
            try:
                yield
            except AttributeError:
                # asyncmy can surface AttributeError internally when the
                # network connection drops; translate to a DBAPI
                # InternalError whose message is matched by the dialect's
                # is_disconnect() check.
                raise self.dbapi.InternalError(
                    "network operation failed due to asyncmy attribute error"
                )

    def ping(self, reconnect):
        # SQLAlchemy's pre-ping never asks the driver to reconnect itself.
        assert not reconnect
        return self.await_(self._do_ping())

    async def _do_ping(self):
        async with self._mutex_and_adapt_errors():
            return await self._connection.ping(False)

    def character_set_name(self):
        return self._connection.character_set_name()

    def autocommit(self, value):
        self.await_(self._connection.autocommit(value))

    def cursor(self, server_side=False):
        if server_side:
            return AsyncAdapt_asyncmy_ss_cursor(self)
        else:
            return AsyncAdapt_asyncmy_cursor(self)

    def rollback(self):
        self.await_(self._connection.rollback())

    def commit(self):
        self.await_(self._connection.commit())

    def close(self):
        # it's not awaitable.
        self._connection.close()
class AsyncAdaptFallback_asyncmy_connection(AsyncAdapt_asyncmy_connection):
    """Connection variant selected when async_fallback=true; uses
    await_fallback so the engine can be driven from non-async code."""

    __slots__ = ()

    await_ = staticmethod(await_fallback)
def _Binary(x):
"""Return x as a binary type."""
return bytes(x)
class AsyncAdapt_asyncmy_dbapi:
    """PEP 249-style module facade over the ``asyncmy`` package.

    Exposes the standard DBAPI exception hierarchy, type symbols, and a
    ``connect()`` that returns a sync-adapted connection wrapper.
    """

    def __init__(self, asyncmy):
        self.asyncmy = asyncmy
        self.paramstyle = "format"
        self._init_dbapi_attributes()

    def _init_dbapi_attributes(self):
        # Re-export the PEP 249 exception classes from asyncmy.errors as
        # attributes of this facade.  Fix: "InterfaceError" was previously
        # listed twice in this tuple; the redundant duplicate is removed
        # (behavior is unchanged -- the second setattr was a no-op).
        for name in (
            "Warning",
            "Error",
            "InterfaceError",
            "DataError",
            "DatabaseError",
            "OperationalError",
            "IntegrityError",
            "ProgrammingError",
            "InternalError",
            "NotSupportedError",
        ):
            setattr(self, name, getattr(self.asyncmy.errors, name))

    # DBAPI type indicator symbols (compared by identity only).
    STRING = util.symbol("STRING")
    NUMBER = util.symbol("NUMBER")
    BINARY = util.symbol("BINARY")
    DATETIME = util.symbol("DATETIME")
    TIMESTAMP = util.symbol("TIMESTAMP")
    Binary = staticmethod(_Binary)

    def connect(self, *arg, **kw):
        """Open an asyncmy connection wrapped for synchronous use.

        The "async_fallback" keyword is popped here (never forwarded to
        asyncmy) and selects the fallback wrapper usable outside an
        already-running event loop.
        """
        async_fallback = kw.pop("async_fallback", False)
        if util.asbool(async_fallback):
            return AsyncAdaptFallback_asyncmy_connection(
                self,
                await_fallback(self.asyncmy.connect(*arg, **kw)),
            )
        else:
            return AsyncAdapt_asyncmy_connection(
                self,
                await_only(self.asyncmy.connect(*arg, **kw)),
            )
class MySQLDialect_asyncmy(MySQLDialect_pymysql):
    """MySQL dialect speaking through the asyncmy asyncio driver."""

    driver = "asyncmy"
    supports_statement_cache = True
    supports_server_side_cursors = True
    _sscursor = AsyncAdapt_asyncmy_ss_cursor
    is_async = True

    @classmethod
    def import_dbapi(cls):
        # Import lazily so the dialect class can exist without asyncmy
        # being installed.
        asyncmy_module = __import__("asyncmy")
        return AsyncAdapt_asyncmy_dbapi(asyncmy_module)

    @classmethod
    def get_pool_class(cls, url):
        # ?async_fallback=true selects the pool usable from blocking code.
        if util.asbool(url.query.get("async_fallback", False)):
            return pool.FallbackAsyncAdaptedQueuePool
        return pool.AsyncAdaptedQueuePool

    def create_connect_args(self, url):
        # asyncmy spells these two arguments differently than the URL does.
        translated = dict(username="user", database="db")
        return super().create_connect_args(url, _translate_args=translated)

    def is_disconnect(self, e, connection, cursor):
        # Defer to pymysql's detection first, then check for asyncmy's own
        # disconnect indications (including the message injected by
        # AsyncAdapt_asyncmy_connection._mutex_and_adapt_errors).
        if super().is_disconnect(e, connection, cursor):
            return True
        message = str(e).lower()
        return "not connected" in message or "network operation failed" in message

    def _found_rows_client_flag(self):
        from asyncmy.constants import CLIENT

        return CLIENT.FOUND_ROWS

    def get_driver_connection(self, connection):
        return connection._connection
# Module-level name looked up by SQLAlchemy's dialect loader for
# "mysql+asyncmy://" URLs (see the connectstring in the module docstring).
dialect = MySQLDialect_asyncmy
| {
"content_hash": "d829492a000db155a57d5d5db92d8971",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 93,
"avg_line_length": 29.77570093457944,
"alnum_prop": 0.6166562042268257,
"repo_name": "sqlalchemy/sqlalchemy",
"id": "df8965cbbd8de7921a1af36b816d2abef2650ec3",
"size": "9818",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "lib/sqlalchemy/dialects/mysql/asyncmy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "21698"
},
{
"name": "Python",
"bytes": "16838583"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the creature template for the old female Twi'lek rebel
    high general (generated SWG object template)."""
    result = Creature()
    result.template = (
        "object/mobile/shared_dressed_rebel_high_general_old_twilek_female_01.iff"
    )
    result.stfName("npc_name", "twilek_base_female")
    result.attribute_template_id = 9
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
"content_hash": "15a40bde1c7ad7113755d208e7c9891c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 93,
"avg_line_length": 25.384615384615383,
"alnum_prop": 0.706060606060606,
"repo_name": "anhstudios/swganh",
"id": "dc14708e5b2b30254eb3c8b44a82ecce103d89d3",
"size": "475",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_dressed_rebel_high_general_old_twilek_female_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
"""
WSGI config for blog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
from __future__ import absolute_import, unicode_literals
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the dev settings module unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blog.settings.dev")

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
| {
"content_hash": "4b713381ee7c229e09b27bc36a4fb7f1",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 24.88888888888889,
"alnum_prop": 0.7678571428571429,
"repo_name": "rocity/wagtail-blog",
"id": "04da6fc614f4fc45b9b18c5cfc2a28e55b887076",
"size": "448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6675"
},
{
"name": "Python",
"bytes": "20788"
}
],
"symlink_target": ""
} |
import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_browse as h2b
class Basic(unittest.TestCase):
    """Smoke test: build an RF model on iris, generate predictions, and
    round-trip/inspect the prediction frame on a 1-node H2O cloud."""

    def tearDown(self):
        # Fail the test if anything wrote errors into the sandbox logs.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # Bring up a single-node H2O cloud shared by all tests in the class.
        h2o.init(1)

    @classmethod
    def tearDownClass(cls):
        # h2o.sleep(3600)
        h2o.tear_down_cloud()

    def test_rf_predict_fvec(self):
        h2b.browseTheCloud()
        SYNDATASETS_DIR = h2o.make_syn_dir()
        trees = 6
        timeoutSecs = 20
        hex_key = 'iris2.csv.hex'
        # Import/parse iris into the cloud, then train a 6-tree RF model.
        parseResult = h2i.import_parse(bucket='smalldata', path='iris/iris2.csv', schema='put', hex_key=hex_key)
        h2o_cmd.runRF(parseResult=parseResult, ntrees=trees, destination_key="iris_rf_model", timeoutSecs=timeoutSecs)
        print "Use H2O GeneratePredictionsPage with a H2O generated model and the same data key. Inspect/Summary result"
        start = time.time()
        # Score the training frame with the model just built.
        predict = h2o.nodes[0].generate_predictions(model_key="iris_rf_model", data_key=hex_key,
            prediction='predict.hex')
        print "generate_predictions end on ", hex_key, " took", time.time() - start, 'seconds'
        print "predict:", h2o.dump_json(predict)
        # Round-trip the prediction frame back out as CSV.
        csvPredictPathname = SYNDATASETS_DIR + "/" + "iris2.predict.csv"
        h2o.nodes[0].csv_download(src_key='predict.hex', csvPathname=csvPredictPathname)
        inspect = h2o_cmd.runInspect(key='predict.hex')
        print "inspect:", h2o.dump_json(inspect)
        # print h2o.dump_json(predict)
        # no min/max any more with enums?
        expectedCols = {
            # "max": 2.0,
            # "mean": 1.0,
            # "min": 0.0,
            "naCnt": 0,
            # "name": 0,
            # Enum or real?
            # "type": "Real",
        }
        # Only the keys pinned in expectedCols are compared.
        predictCols = inspect['cols'][0]
        diffKeys = [k for k in expectedCols if predictCols[k] != expectedCols[k]]
        for k in diffKeys:
            raise Exception ("Checking H2O summary results, wrong %s: %s, should be: %s" % (k, predictCols[k], expectedCols[k]))
        # Frame-level expectations: iris has 150 rows; the prediction frame
        # here is expected to have 4 columns.
        expected = {
            "numRows": 150,
            "numCols": 4,
            # "byteSize": 2843,
        }
        diffKeys = [k for k in expected if inspect[k] != expected[k]]
        print "diffKeys", diffKeys
        for k in diffKeys:
            raise Exception ("%s : %s != %s" % (k, inspect[k], expected[k]))
if __name__ == '__main__':
    # Delegate to h2o's test entry point when executed directly.
    h2o.unit_main()
| {
"content_hash": "53507ef8a3494641aad90dac51bb20c2",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 128,
"avg_line_length": 34.985915492957744,
"alnum_prop": 0.5700483091787439,
"repo_name": "eg-zhang/h2o-2",
"id": "44c28a6eaceae54c40ee94ad5603c3ab5eca692e",
"size": "2484",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "py/testdir_single_jvm/test_rf_predict_fvec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7065"
},
{
"name": "C",
"bytes": "2461"
},
{
"name": "CSS",
"bytes": "216906"
},
{
"name": "CoffeeScript",
"bytes": "205094"
},
{
"name": "Emacs Lisp",
"bytes": "7446"
},
{
"name": "Groovy",
"bytes": "518"
},
{
"name": "HTML",
"bytes": "177967"
},
{
"name": "Java",
"bytes": "5177683"
},
{
"name": "JavaScript",
"bytes": "42958"
},
{
"name": "Makefile",
"bytes": "50927"
},
{
"name": "PHP",
"bytes": "8490"
},
{
"name": "Perl",
"bytes": "22594"
},
{
"name": "Python",
"bytes": "3244626"
},
{
"name": "R",
"bytes": "1631216"
},
{
"name": "Ruby",
"bytes": "299"
},
{
"name": "Scala",
"bytes": "39365"
},
{
"name": "Shell",
"bytes": "189829"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from nova.tests.functional.v3 import test_servers
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class PauseServerSamplesJsonTest(test_servers.ServersSampleBase):
    """API sample tests for the os-pause-server actions (pause/unpause)."""

    extension_name = "os-pause-server"
    extra_extensions_to_load = ["os-access-ips"]
    _api_version = 'v2'

    def _get_flags(self):
        # Extend the base flag set with the admin_actions legacy extension,
        # which provides the pause/unpause server actions.
        flags = super(PauseServerSamplesJsonTest, self)._get_flags()
        flags['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        flags['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.admin_actions.Admin_actions')
        return flags

    def setUp(self):
        """setUp Method for PauseServer api samples extension

        This method creates the server that will be used in each test
        """
        super(PauseServerSamplesJsonTest, self).setUp()
        self.uuid = self._post_server()

    def test_post_pause(self):
        # Get api samples to pause server request.
        response = self._do_post('servers/%s/action' % self.uuid,
                                 'pause-server', {})
        self.assertEqual(202, response.status_code)

    def test_post_unpause(self):
        # Unpause is only meaningful on a paused server, so pause it first.
        self.test_post_pause()
        response = self._do_post('servers/%s/action' % self.uuid,
                                 'unpause-server', {})
        self.assertEqual(202, response.status_code)
| {
"content_hash": "8fec054c1c7a073b07d59306c29d1659",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 77,
"avg_line_length": 36.51219512195122,
"alnum_prop": 0.6319305277221109,
"repo_name": "takeshineshiro/nova",
"id": "c7b1d5f1159e36515a68285c5a77a7068f89fdbb",
"size": "2129",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/tests/functional/v3/test_pause_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16467436"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "285755"
}
],
"symlink_target": ""
} |
import tests.periodicities.period_test as per

# Exercise the periodicity model for a business-hour ('BH') frequency;
# the tuple is presumably (cycle_length=12, freq='BH', rows=100) -- confirm
# against period_test.buildModel.
per.buildModel((12 , 'BH' , 100));
| {
"content_hash": "6f059b4dd89ee767e01297c370364a02",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 45,
"avg_line_length": 20.75,
"alnum_prop": 0.7108433734939759,
"repo_name": "antoinecarme/pyaf",
"id": "742e79ed955bde72b1ac368d57e0db151815a140",
"size": "83",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/periodicities/Business_Hour/Cycle_Business_Hour_100_BH_12.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""
.. warning :: This module will eventually be deprecated in favor of `brmp <https://github.com/pyro-ppl/brmp/>`_
The :mod:`pyro.contrib.oed.glmm` module provides models and guides for
generalised linear mixed models (GLMM). It also includes the
Normal-inverse-gamma family.
To create a classical Bayesian linear model, use::
from pyro.contrib.oed.glmm import known_covariance_linear_model
# Note: coef is a p-vector, observation_sd is a scalar
# Here, p=1 (one feature)
model = known_covariance_linear_model(coef_mean=torch.tensor([0.]),
coef_sd=torch.tensor([10.]),
observation_sd=torch.tensor(2.))
# An n x p design tensor
# Here, n=2 (two observations)
design = torch.tensor(torch.tensor([[1.], [-1.]]))
model(design)
A non-linear link function may be introduced, for instance::
from pyro.contrib.oed.glmm import logistic_regression_model
# No observation_sd is needed for logistic models
model = logistic_regression_model(coef_mean=torch.tensor([0.]),
coef_sd=torch.tensor([10.]))
Random effects may be incorporated as regular Bayesian regression coefficients.
For random effects with a shared covariance matrix, see :meth:`pyro.contrib.oed.glmm.lmer_model`.
"""
from pyro.contrib.oed.glmm import guides # noqa: F401
from pyro.contrib.oed.glmm.glmm import * # noqa: F403,F401
| {
"content_hash": "9fd0011c4810fd46086725f5011ed3d5",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 111,
"avg_line_length": 39.513513513513516,
"alnum_prop": 0.66484268125855,
"repo_name": "uber/pyro",
"id": "c17c2212134628f35109c002106f6de8c227868d",
"size": "1551",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pyro/contrib/oed/glmm/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "6121"
},
{
"name": "CSS",
"bytes": "478"
},
{
"name": "Dockerfile",
"bytes": "1635"
},
{
"name": "Makefile",
"bytes": "6857"
},
{
"name": "Python",
"bytes": "3388193"
},
{
"name": "Shell",
"bytes": "6465"
},
{
"name": "TeX",
"bytes": "3649"
}
],
"symlink_target": ""
} |
from urllib.parse import parse_qs, urlencode, urlparse
from loguru import logger
from requests.exceptions import RequestException
from flexget import plugin
from flexget.components.sites.urlrewriting import UrlRewritingError
from flexget.event import event
logger = logger.bind(name='rutracker')
class SiteRutracker:
    """URL rewriter turning rutracker.org forum topic links into magnet URIs
    via the public t-ru.org topic-data API."""

    schema = {'type': 'boolean'}

    base_url = 'https://api.t-ru.org'

    # urlrewriter API
    def url_rewritable(self, task, entry):
        """Return True for rutracker forum topic URLs."""
        url = entry['url']
        return url.startswith('https://rutracker.org/forum/viewtopic.php?t=')

    @plugin.internet(logger)
    def url_rewrite(self, task, entry):
        """
        Gets torrent information for topic from rutracker api and rewrites
        the entry URL to a magnet URI.

        Raises UrlRewritingError on a malformed URL, a failed API request,
        or an unexpected API response.
        """
        url = entry['url']
        logger.info('rewriting download url: {}', url)
        query = parse_qs(urlparse(url).query)
        # Robustness: a URL without a usable 't' parameter previously died
        # with a bare KeyError; surface it as a rewriter error instead.
        if not query.get('t'):
            raise UrlRewritingError(f'no topic id found in rutracker url {url}')
        topic_id = query['t'][0]
        api_url = f"{self.base_url}/v1/get_tor_topic_data"
        api_params = {
            'by': 'topic_id',
            'val': topic_id,
        }
        try:
            topic_request = task.requests.get(api_url, params=api_params)
        except RequestException as e:
            raise UrlRewritingError(f'rutracker request failed: {e}')
        # Robustness: guard against non-JSON bodies or responses missing the
        # expected result structure (e.g. unknown topic id -> null result).
        try:
            topic = topic_request.json()['result'][topic_id]
            info_hash = topic['info_hash']
            topic_title = topic['topic_title']
        except (ValueError, KeyError, TypeError) as e:
            raise UrlRewritingError(f'unexpected rutracker api response: {e}')
        # Assemble a magnet URI: info-hash, the four t-ru.org trackers, and
        # the display name; ':' must stay unescaped inside the urn.
        magnet = {
            'xt': f"urn:btih:{info_hash}",
            'tr': [f'http://bt{i}.t-ru.org/ann?magnet' for i in ['', '2', '3', '4']],
            'dn': topic_title,
        }
        magnet_qs = urlencode(magnet, doseq=True, safe=':')
        magnet_uri = f"magnet:?{magnet_qs}"
        entry['url'] = magnet_uri
@event('plugin.register')
def register_plugin():
    """Register SiteRutracker with FlexGet as the 'rutracker' URL rewriter."""
    plugin.register(SiteRutracker, 'rutracker', interfaces=['urlrewriter'], api_ver=2)
| {
"content_hash": "15c4c26c7886f1f1933f429d6c4c88d4",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 86,
"avg_line_length": 30.189655172413794,
"alnum_prop": 0.604797258709309,
"repo_name": "Flexget/Flexget",
"id": "510c48cb1b2715b4d62b5b870e1025bfb691d9a9",
"size": "1776",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/components/sites/sites/site_rutracker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1237"
},
{
"name": "HTML",
"bytes": "82565"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3797883"
},
{
"name": "SCSS",
"bytes": "11875"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1568"
}
],
"symlink_target": ""
} |
__all__ = ['create_subprocess_exec', 'create_subprocess_shell']
import subprocess
from . import events
from . import protocols
from . import streams
from . import tasks
from .coroutines import coroutine
from .log import logger
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
DEVNULL = subprocess.DEVNULL
class SubprocessStreamProtocol(streams.FlowControlMixin,
                               protocols.SubprocessProtocol):
    """Like StreamReaderProtocol, but for a subprocess.

    Adapts the low-level SubprocessProtocol pipe callbacks to the stream
    API: fd 0 is exposed as ``stdin`` (a StreamWriter), fd 1 as ``stdout``
    and fd 2 as ``stderr`` (StreamReaders). Any of them is None when the
    corresponding pipe was not redirected.
    """

    def __init__(self, limit, loop):
        super().__init__(loop=loop)
        self._limit = limit  # buffer limit handed to the stdout/stderr readers
        self.stdin = self.stdout = self.stderr = None
        self._transport = None

    def __repr__(self):
        info = [self.__class__.__name__]
        if self.stdin is not None:
            info.append('stdin=%r' % self.stdin)
        if self.stdout is not None:
            info.append('stdout=%r' % self.stdout)
        if self.stderr is not None:
            info.append('stderr=%r' % self.stderr)
        return '<%s>' % ' '.join(info)

    def connection_made(self, transport):
        """Create stream wrappers for whichever pipes the transport has."""
        self._transport = transport
        stdout_transport = transport.get_pipe_transport(1)
        if stdout_transport is not None:
            self.stdout = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stdout.set_transport(stdout_transport)
        stderr_transport = transport.get_pipe_transport(2)
        if stderr_transport is not None:
            self.stderr = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stderr.set_transport(stderr_transport)
        stdin_transport = transport.get_pipe_transport(0)
        if stdin_transport is not None:
            self.stdin = streams.StreamWriter(stdin_transport,
                                              protocol=self,
                                              reader=None,
                                              loop=self._loop)

    def _reader_for_fd(self, fd):
        """Return the StreamReader for fd 1 (stdout) or 2 (stderr), else None."""
        if fd == 1:
            return self.stdout
        if fd == 2:
            return self.stderr
        return None

    def pipe_data_received(self, fd, data):
        """Feed data read from a stdout/stderr pipe into its reader."""
        reader = self._reader_for_fd(fd)
        if reader is not None:
            reader.feed_data(data)

    def pipe_connection_lost(self, fd, exc):
        """Propagate pipe closure: close stdin, or feed EOF/exception to a reader."""
        if fd == 0:
            pipe = self.stdin
            if pipe is not None:
                pipe.close()
            self.connection_lost(exc)
            return
        reader = self._reader_for_fd(fd)
        # Fixed: was ``reader != None`` -- PEP 8 requires an identity test
        # against the None singleton.
        if reader is not None:
            if exc is None:
                reader.feed_eof()
            else:
                reader.set_exception(exc)

    def process_exited(self):
        self._transport.close()
        self._transport = None
class Process:
    """High-level handle for a spawned subprocess.

    Wraps the subprocess transport plus its SubprocessStreamProtocol,
    exposing stdin/stdout/stderr streams, signal helpers and the
    communicate() coroutine.
    """
    def __init__(self, transport, protocol, loop):
        self._transport = transport
        self._protocol = protocol
        self._loop = loop
        # Streams are created by the protocol in connection_made(); each may
        # be None when that pipe was not redirected.
        self.stdin = protocol.stdin
        self.stdout = protocol.stdout
        self.stderr = protocol.stderr
        self.pid = transport.get_pid()
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.pid)
    @property
    def returncode(self):
        # Delegates to the transport; value comes from get_returncode().
        return self._transport.get_returncode()
    @coroutine
    def wait(self):
        """Wait until the process exit and return the process return code.
        This method is a coroutine."""
        return (yield from self._transport._wait())
    def send_signal(self, signal):
        """Send *signal* to the child process."""
        self._transport.send_signal(signal)
    def terminate(self):
        """Ask the child process to terminate."""
        self._transport.terminate()
    def kill(self):
        """Forcibly kill the child process."""
        self._transport.kill()
    @coroutine
    def _feed_stdin(self, input):
        """Coroutine: write *input* to stdin (honouring flow control via
        drain()), then close stdin."""
        debug = self._loop.get_debug()
        self.stdin.write(input)
        if debug:
            logger.debug('%r communicate: feed stdin (%s bytes)',
                         self, len(input))
        try:
            yield from self.stdin.drain()
        except (BrokenPipeError, ConnectionResetError) as exc:
            # communicate() ignores BrokenPipeError and ConnectionResetError
            if debug:
                logger.debug('%r communicate: stdin got %r', self, exc)
        if debug:
            logger.debug('%r communicate: close stdin', self)
        self.stdin.close()
    @coroutine
    def _noop(self):
        # Placeholder coroutine used by communicate() for missing pipes;
        # always yields None.
        return None
    @coroutine
    def _read_stream(self, fd):
        """Coroutine: read the whole stdout (fd 1) or stderr (fd 2) stream to
        EOF, close its pipe transport and return the collected bytes."""
        transport = self._transport.get_pipe_transport(fd)
        if fd == 2:
            stream = self.stderr
        else:
            assert fd == 1
            stream = self.stdout
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: read %s', self, name)
        output = yield from stream.read()
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: close %s', self, name)
        transport.close()
        return output
    @coroutine
    def communicate(self, input=None):
        """Coroutine: optionally feed *input* to stdin, read stdout/stderr to
        EOF, wait for process exit and return (stdout_data, stderr_data).
        Either item is None when the corresponding pipe was not redirected."""
        if input is not None:
            stdin = self._feed_stdin(input)
        else:
            stdin = self._noop()
        if self.stdout is not None:
            stdout = self._read_stream(1)
        else:
            stdout = self._noop()
        if self.stderr is not None:
            stderr = self._read_stream(2)
        else:
            stderr = self._noop()
        # Run all three pipe operations concurrently so a full OS pipe
        # buffer on one stream cannot deadlock the others.
        stdin, stdout, stderr = yield from tasks.gather(stdin, stdout, stderr,
                                                        loop=self._loop)
        yield from self.wait()
        return (stdout, stderr)
@coroutine
def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
                            loop=None, limit=streams._DEFAULT_LIMIT, **kwds):
    """Coroutine: run *cmd* through the shell and return a Process wrapper.

    *stdin*/*stdout*/*stderr* and **kwds are forwarded to
    loop.subprocess_shell(); *limit* is the stream buffer limit.
    """
    if loop is None:
        loop = events.get_event_loop()

    # A proper def instead of a lambda assignment (PEP 8 E731); one new
    # protocol instance is created per subprocess.
    def protocol_factory():
        return SubprocessStreamProtocol(limit=limit, loop=loop)

    transport, protocol = yield from loop.subprocess_shell(
        protocol_factory,
        cmd, stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
@coroutine
def create_subprocess_exec(program, *args, stdin=None, stdout=None,
                           stderr=None, loop=None,
                           limit=streams._DEFAULT_LIMIT, **kwds):
    """Coroutine: execute *program* with *args* and return a Process wrapper.

    *stdin*/*stdout*/*stderr* and **kwds are forwarded to
    loop.subprocess_exec(); *limit* is the stream buffer limit.
    """
    if loop is None:
        loop = events.get_event_loop()

    # A proper def instead of a lambda assignment (PEP 8 E731); one new
    # protocol instance is created per subprocess.
    def protocol_factory():
        return SubprocessStreamProtocol(limit=limit, loop=loop)

    transport, protocol = yield from loop.subprocess_exec(
        protocol_factory,
        program, *args,
        stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
| {
"content_hash": "32e33f495eeede727d9650fe81b34394",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 78,
"avg_line_length": 33.774647887323944,
"alnum_prop": 0.526827912149013,
"repo_name": "lunixbochs/actualvim",
"id": "b2f5304f772121de9c60691a9cc5499cfd07a168",
"size": "7194",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "lib/asyncio/subprocess.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "798712"
}
],
"symlink_target": ""
} |
import sys
if sys.platform == 'win32':
import wmi
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import vmutilsv2
from nova.virt.hyperv import volumeutilsv2
LOG = logging.getLogger(__name__)
class LiveMigrationUtils(object):
    """Live migration of Hyper-V VMs through the WMI virtualization v2
    namespace.

    When a VM has iSCSI passthrough disks, a "planned VM" is first created
    on the destination host so the disk paths can be remapped to how the
    destination sees them before the actual migration starts.
    """
    def __init__(self):
        self._vmutils = vmutilsv2.VMUtilsV2()
        self._volutils = volumeutilsv2.VolumeUtilsV2()
    def _get_conn_v2(self, host='localhost'):
        """Open a WMI connection to root/virtualization/v2 on *host*,
        translating well-known COM errors into HyperVException."""
        try:
            return wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
        except wmi.x_wmi as ex:
            LOG.exception(ex)
            # -2147217394 == 0x8004100E -- presumably WBEM_E_INVALID_NAMESPACE:
            # the v2 namespace is missing, i.e. the host is too old.
            if ex.com_error.hresult == -2147217394:
                msg = (_('Live migration is not supported on target host "%s"')
                       % host)
            # -2147023174 == 0x800706BA -- presumably RPC server unavailable.
            elif ex.com_error.hresult == -2147023174:
                msg = (_('Target live migration host "%s" is unreachable')
                       % host)
            else:
                msg = _('Live migration failed: %s') % ex.message
            raise vmutils.HyperVException(msg)
    def check_live_migration_config(self):
        """Raise HyperVException unless live migration is enabled and at
        least one migration network is configured on the local host."""
        conn_v2 = self._get_conn_v2()
        migration_svc = conn_v2.Msvm_VirtualSystemMigrationService()[0]
        vsmssds = migration_svc.associators(
            wmi_association_class='Msvm_ElementSettingData',
            wmi_result_class='Msvm_VirtualSystemMigrationServiceSettingData')
        vsmssd = vsmssds[0]
        if not vsmssd.EnableVirtualSystemMigration:
            raise vmutils.HyperVException(
                _('Live migration is not enabled on this host'))
        if not migration_svc.MigrationServiceListenerIPAddressList:
            raise vmutils.HyperVException(
                _('Live migration networks are not configured on this host'))
    def _get_vm(self, conn_v2, vm_name):
        """Return the single Msvm_ComputerSystem named *vm_name*; raise
        NotFound when missing, HyperVException on duplicate names."""
        vms = conn_v2.Msvm_ComputerSystem(ElementName=vm_name)
        n = len(vms)
        if not n:
            raise exception.NotFound(_('VM not found: %s') % vm_name)
        elif n > 1:
            raise vmutils.HyperVException(_('Duplicate VM name found: %s')
                                          % vm_name)
        return vms[0]
    def _destroy_planned_vm(self, conn_v2_remote, planned_vm):
        """Delete a leftover planned VM on the remote host."""
        LOG.debug("Destroying existing remote planned VM: %s",
                  planned_vm.ElementName)
        vs_man_svc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0]
        (job_path, ret_val) = vs_man_svc.DestroySystem(planned_vm.path_())
        self._vmutils.check_ret_val(ret_val, job_path)
    def _check_existing_planned_vm(self, conn_v2_remote, vm):
        # Make sure that there's not yet a remote planned VM on the target
        # host for this VM
        planned_vms = conn_v2_remote.Msvm_PlannedComputerSystem(Name=vm.Name)
        if planned_vms:
            self._destroy_planned_vm(conn_v2_remote, planned_vms[0])
    def _create_remote_planned_vm(self, conn_v2_local, conn_v2_remote,
                                  vm, rmt_ip_addr_list, dest_host):
        """Run a "staged" migration (MigrationType 32770) that creates a
        planned VM on *dest_host*, and return that planned VM object."""
        # Staged
        vsmsd = conn_v2_local.query("select * from "
                                    "Msvm_VirtualSystemMigrationSettingData "
                                    "where MigrationType = 32770")[0]
        vsmsd.DestinationIPAddressList = rmt_ip_addr_list
        migration_setting_data = vsmsd.GetText_(1)
        LOG.debug("Creating remote planned VM for VM: %s",
                  vm.ElementName)
        migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]
        (job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost(
            ComputerSystem=vm.path_(),
            DestinationHost=dest_host,
            MigrationSettingData=migration_setting_data)
        self._vmutils.check_ret_val(ret_val, job_path)
        return conn_v2_remote.Msvm_PlannedComputerSystem(Name=vm.Name)[0]
    def _get_physical_disk_paths(self, vm_name):
        """Map controller RASD relative paths to the attached physical disk
        paths for both the IDE and SCSI controllers of *vm_name*."""
        ide_ctrl_path = self._vmutils.get_vm_ide_controller(vm_name, 0)
        ide_paths = self._vmutils.get_controller_volume_paths(ide_ctrl_path)
        scsi_ctrl_path = self._vmutils.get_vm_scsi_controller(vm_name)
        scsi_paths = self._vmutils.get_controller_volume_paths(scsi_ctrl_path)
        # NOTE: dict items() concatenation is Python 2 only syntax.
        return dict(ide_paths.items() + scsi_paths.items())
    def _get_remote_disk_data(self, vmutils_remote, disk_paths, dest_host):
        """Translate local iSCSI disk paths into their equivalents as seen
        from *dest_host*, keyed by the same RASD relative path."""
        volutils_remote = volumeutilsv2.VolumeUtilsV2(dest_host)
        disk_paths_remote = {}
        for (rasd_rel_path, disk_path) in disk_paths.items():
            target = self._volutils.get_target_from_disk_path(disk_path)
            if target:
                (target_iqn, target_lun) = target
                dev_num = volutils_remote.get_device_number_for_target(
                    target_iqn, target_lun)
                disk_path_remote = (
                    vmutils_remote.get_mounted_disk_by_drive_number(dev_num))
                disk_paths_remote[rasd_rel_path] = disk_path_remote
            else:
                # Best effort: non-iSCSI disks are skipped, not fatal.
                LOG.debug("Could not retrieve iSCSI target "
                          "from disk path: %s", disk_path)
        return disk_paths_remote
    def _update_planned_vm_disk_resources(self, vmutils_remote, conn_v2_remote,
                                          planned_vm, vm_name,
                                          disk_paths_remote):
        """Rewrite the planned VM's physical disk drive resources so they
        point at the disks as mounted on the remote host."""
        vm_settings = planned_vm.associators(
            wmi_association_class='Msvm_SettingsDefineState',
            wmi_result_class='Msvm_VirtualSystemSettingData')[0]
        updated_resource_setting_data = []
        sasds = vm_settings.associators(
            wmi_association_class='Msvm_VirtualSystemSettingDataComponent')
        for sasd in sasds:
            # Only physical (passthrough) disk drives carry a host resource
            # that needs remapping to the remote mount point.
            if (sasd.ResourceType == 17 and sasd.ResourceSubType ==
                "Microsoft:Hyper-V:Physical Disk Drive" and
                sasd.HostResource):
                # Replace the local disk target with the correct remote one
                old_disk_path = sasd.HostResource[0]
                new_disk_path = disk_paths_remote.pop(sasd.path().RelPath)
                LOG.debug("Replacing host resource "
                          "%(old_disk_path)s with "
                          "%(new_disk_path)s on planned VM %(vm_name)s",
                          {'old_disk_path': old_disk_path,
                           'new_disk_path': new_disk_path,
                           'vm_name': vm_name})
                sasd.HostResource = [new_disk_path]
                updated_resource_setting_data.append(sasd.GetText_(1))
        LOG.debug("Updating remote planned VM disk paths for VM: %s",
                  vm_name)
        vsmsvc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0]
        (res_settings, job_path, ret_val) = vsmsvc.ModifyResourceSettings(
            ResourceSettings=updated_resource_setting_data)
        vmutils_remote.check_ret_val(ret_val, job_path)
    def _get_vhd_setting_data(self, vm):
        """Collect serialized storage allocation settings for every VHD of
        *vm*; these are handed to the migration call as the new resource
        setting data."""
        vm_settings = vm.associators(
            wmi_association_class='Msvm_SettingsDefineState',
            wmi_result_class='Msvm_VirtualSystemSettingData')[0]
        new_resource_setting_data = []
        sasds = vm_settings.associators(
            wmi_association_class='Msvm_VirtualSystemSettingDataComponent',
            wmi_result_class='Msvm_StorageAllocationSettingData')
        for sasd in sasds:
            if (sasd.ResourceType == 31 and sasd.ResourceSubType ==
                "Microsoft:Hyper-V:Virtual Hard Disk"):
                new_resource_setting_data.append(sasd.GetText_(1))
        return new_resource_setting_data
    def _live_migrate_vm(self, conn_v2_local, vm, planned_vm, rmt_ip_addr_list,
                         new_resource_setting_data, dest_host):
        """Start the actual migration (MigrationType 32771, i.e. virtual
        system and storage), optionally targeting a pre-created planned VM."""
        # VirtualSystemAndStorage
        vsmsd = conn_v2_local.query("select * from "
                                    "Msvm_VirtualSystemMigrationSettingData "
                                    "where MigrationType = 32771")[0]
        vsmsd.DestinationIPAddressList = rmt_ip_addr_list
        if planned_vm:
            vsmsd.DestinationPlannedVirtualSystemId = planned_vm.Name
        migration_setting_data = vsmsd.GetText_(1)
        migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]
        LOG.debug("Starting live migration for VM: %s", vm.ElementName)
        (job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost(
            ComputerSystem=vm.path_(),
            DestinationHost=dest_host,
            MigrationSettingData=migration_setting_data,
            NewResourceSettingData=new_resource_setting_data)
        self._vmutils.check_ret_val(ret_val, job_path)
    def _get_remote_ip_address_list(self, conn_v2_remote, dest_host):
        """Return the migration listener IP addresses of *dest_host*."""
        LOG.debug("Getting live migration networks for remote host: %s",
                  dest_host)
        migr_svc_rmt = conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
        return migr_svc_rmt.MigrationServiceListenerIPAddressList
    def live_migrate_vm(self, vm_name, dest_host):
        """Live migrate *vm_name* to *dest_host*.

        Validates the local configuration, cleans up any stale planned VM on
        the target, remaps passthrough disk paths via a planned VM when
        needed, then starts the migration."""
        self.check_live_migration_config()
        conn_v2_local = self._get_conn_v2()
        conn_v2_remote = self._get_conn_v2(dest_host)
        vm = self._get_vm(conn_v2_local, vm_name)
        self._check_existing_planned_vm(conn_v2_remote, vm)
        rmt_ip_addr_list = self._get_remote_ip_address_list(conn_v2_remote,
                                                            dest_host)
        planned_vm = None
        disk_paths = self._get_physical_disk_paths(vm_name)
        if disk_paths:
            vmutils_remote = vmutilsv2.VMUtilsV2(dest_host)
            disk_paths_remote = self._get_remote_disk_data(vmutils_remote,
                                                           disk_paths,
                                                           dest_host)
            planned_vm = self._create_remote_planned_vm(conn_v2_local,
                                                        conn_v2_remote,
                                                        vm, rmt_ip_addr_list,
                                                        dest_host)
            self._update_planned_vm_disk_resources(vmutils_remote,
                                                   conn_v2_remote, planned_vm,
                                                   vm_name, disk_paths_remote)
        new_resource_setting_data = self._get_vhd_setting_data(vm)
        self._live_migrate_vm(conn_v2_local, vm, planned_vm, rmt_ip_addr_list,
                              new_resource_setting_data, dest_host)
| {
"content_hash": "04c4b444e9030f31ea3581a6d8bba0f3",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 79,
"avg_line_length": 45.53448275862069,
"alnum_prop": 0.5777167739492617,
"repo_name": "projectcalico/calico-nova",
"id": "8d5300bf4012a6a600644d41e0dbb16c55a618c9",
"size": "11203",
"binary": false,
"copies": "4",
"ref": "refs/heads/calico-readme",
"path": "nova/virt/hyperv/livemigrationutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15232446"
},
{
"name": "Shell",
"bytes": "20717"
},
{
"name": "Smarty",
"bytes": "489680"
}
],
"symlink_target": ""
} |
import json
import test_utils
from nose.tools import ok_
from django.contrib.auth.models import AnonymousUser
import amo
from addons.models import Category
from mkt import regions
from mkt.api.tests.test_oauth import BaseOAuth
from mkt.regions import set_region
from mkt.reviewers.forms import ApiReviewersSearchForm
from mkt.search.forms import ApiSearchForm, DEVICE_CHOICES_IDS
from mkt.search.views import _filter_search
from mkt.site.fixtures import fixture
from mkt.webapps.models import Webapp
class TestSearchFilters(BaseOAuth):
    """Exercise ``_filter_search``: feed raw form input through the search
    form and assert on the resulting Elasticsearch filter/query structure."""

    fixtures = fixture('webapp_337141', 'user_2519')

    def setUp(self):
        super(TestSearchFilters, self).setUp()
        self.req = test_utils.RequestFactory().get('/')
        self.req.user = AnonymousUser()
        self.category = Category.objects.create(name='games', slug='games',
                                                type=amo.ADDON_WEBAPP)
        # Pick a region that has relatively few filters.
        set_region(regions.UK.slug)
        self.form_class = ApiSearchForm

    def _grant(self, rules):
        """Give the test profile the given permission rules."""
        self.grant_permission(self.profile, rules)
        self.req.groups = self.profile.groups.all()

    def _filter(self, req, filters, sorting=None, **kwargs):
        """Validate *filters* through the form; return the built ES query
        dict on success, or a copy of the form errors on failure."""
        form = self.form_class(filters)
        if form.is_valid():
            qs = Webapp.from_search(self.req, **kwargs)
            return _filter_search(
                self.req, qs, form.cleaned_data, sorting)._build_query()
        else:
            return form.errors.copy()

    def test_q(self):
        qs = self._filter(self.req, {'q': 'search terms'})
        qs_str = json.dumps(qs)
        ok_('"query": "search terms"' in qs_str)
        # TODO: Could do more checking here.

    def _addon_type_check(self, query, expected=amo.ADDON_WEBAPP):
        qs = self._filter(self.req, query)
        ok_({'term': {'type': expected}} in qs['filter']['and'],
            'Unexpected type. Expected: %s.' % expected)

    def test_addon_type(self):
        # Test all that should end up being ADDON_WEBAPP.
        # Note: Addon type permission can't be checked here b/c the acl check
        # happens in the view, not the _filter_search call.
        self._addon_type_check({})
        self._addon_type_check({'type': 'app'})
        self._addon_type_check({'type': 'theme'})
        # Test a bad value.
        qs = self._filter(self.req, {'type': 'vindaloo'})
        ok_(u'Select a valid choice' in qs['type'][0])

    def _status_check(self, query, expected=amo.STATUS_PUBLIC):
        qs = self._filter(self.req, query)
        ok_({'term': {'status': expected}} in qs['filter']['and'],
            'Unexpected status. Expected: %s.' % expected)

    def test_status(self):
        self.form_class = ApiReviewersSearchForm
        # Test all that should end up being public.
        # Note: Status permission can't be checked here b/c the acl check
        # happens in the view, not the _filter_search call.
        self._status_check({})
        self._status_check({'status': 'public'})
        self._status_check({'status': 'rejected'})
        # Test a bad value.
        qs = self._filter(self.req, {'status': 'vindaloo'})
        ok_(u'Select a valid choice' in qs['status'][0])

    def test_category(self):
        qs = self._filter(self.req, {'cat': self.category.slug})
        ok_({'term': {'category': self.category.slug}} in qs['filter']['and'])

    def test_device(self):
        qs = self._filter(self.req, {'device': 'desktop'})
        ok_({'term': {
            'device': DEVICE_CHOICES_IDS['desktop']}} in qs['filter']['and'])

    def test_premium_types(self):
        # Fixed: was a lambda assigned to a name (PEP 8 E731).
        def ptype(p):
            # Map an API premium-type slug (e.g. 'free') to its internal ID.
            return amo.ADDON_PREMIUM_API_LOOKUP.get(p)
        # Test a single premium type.
        qs = self._filter(self.req, {'premium_types': ['free']})
        ok_({'in': {'premium_type': [ptype('free')]}} in qs['filter']['and'])
        # Test many premium types.
        qs = self._filter(self.req, {'premium_types': ['free', 'free-inapp']})
        ok_({'in': {'premium_type': [ptype('free'), ptype('free-inapp')]}}
            in qs['filter']['and'])
        # Test a non-existent premium type.
        qs = self._filter(self.req, {'premium_types': ['free', 'platinum']})
        ok_(u'Select a valid choice' in qs['premium_types'][0])

    def test_app_type(self):
        qs = self._filter(self.req, {'app_type': ['hosted']})
        ok_({'in': {'app_type': [1]}} in qs['filter']['and'])

    def test_manifest_url(self):
        url = 'http://hy.fr/manifest.webapp'
        qs = self._filter(self.req, {'manifest_url': url})
        ok_({'term': {'manifest_url': url}} in qs['filter']['and'])

    def test_languages(self):
        qs = self._filter(self.req, {'languages': 'fr'})
        ok_({'in': {'supported_locales': ['fr']}} in qs['filter']['and'])
        qs = self._filter(self.req, {'languages': 'ar,en-US'})
        ok_({'in': {'supported_locales': ['ar', 'en-US']}}
            in qs['filter']['and'])

    def test_region_exclusions(self):
        qs = self._filter(self.req, {'q': 'search terms'}, region=regions.CO)
        ok_({'not': {'filter': {'term': {'region_exclusions': regions.CO.id}}}}
            in qs['filter']['and'])

    def test_region_exclusions_override(self):
        self.create_flag('override-region-exclusion')
        qs = self._filter(self.req, {'q': 'search terms'}, region=regions.CO)
        ok_({'not': {'filter': {'term': {'region_exclusions': regions.CO.id}}}}
            not in qs['filter']['and'])
| {
"content_hash": "10fd19b91d7d046b82e70dd30afb20f2",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 79,
"avg_line_length": 40.4485294117647,
"alnum_prop": 0.5891656062534085,
"repo_name": "Joergen/zamboni",
"id": "ea68baea05b4b8400aaccd1c3ebe48f52771c37e",
"size": "5501",
"binary": false,
"copies": "1",
"ref": "refs/heads/uge43",
"path": "mkt/search/tests/test_filters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "608838"
},
{
"name": "JavaScript",
"bytes": "1750529"
},
{
"name": "Perl",
"bytes": "565"
},
{
"name": "Puppet",
"bytes": "13808"
},
{
"name": "Python",
"bytes": "6063534"
},
{
"name": "Ruby",
"bytes": "1865"
},
{
"name": "Shell",
"bytes": "19774"
}
],
"symlink_target": ""
} |
from django.db import models
from django.dispatch import receiver
from django.contrib.comments.signals import comment_was_posted
from django.contrib.sites.models import Site
from django.template import loader, Context
from django.utils.translation import ugettext_lazy as _
from django_comments_xtd.conf import settings
from django_comments_xtd.utils import send_mail
from .models import Job
from .signals import job_was_approved, job_was_rejected
### Globals
# Python job board team email address
EMAIL_JOBS_BOARD = 'jobs@python.org'
###
@receiver(comment_was_posted)
def on_comment_was_posted(sender, comment, request, **kwargs):
"""
Notify the author of the post when the first comment has been posted.
Further comments subscribe automatically because our custom forms at
`.forms.JobCommentForm` forces `follow_up` to `True` and Django-comments-xtd
will already notify followers.
"""
# Skip if this is not a 'first comment'
if comment.level > 0 or comment.order > 1:
return False
# Skip if we're not commenting on a Job
Job = models.get_model('jobs', 'Job')
model = comment.content_type.model_class()
if model != Job:
return False
job = model._default_manager.get(pk=comment.object_pk)
email = job.email
name = job.contact or 'Job Submitter'
reviewer_name = comment.name or 'Community Reviewer'
subject = _("Python Job Board: Review comment for: {}").format(
job.display_name)
text_message_template = loader.get_template("django_comments_xtd/email_job_added_comment.txt")
html_message_template = loader.get_template("django_comments_xtd/email_job_added_comment.html")
message_context = Context({ 'user_name': name,
'reviewer_name': reviewer_name,
'comment': comment,
'content_object': job,
'site': Site.objects.get_current() })
text_message = text_message_template.render(message_context)
html_message = html_message_template.render(message_context)
send_mail(subject, text_message, settings.JOB_FROM_EMAIL,
[email, EMAIL_JOBS_BOARD], html=html_message)
def send_job_review_message(job, user, subject_template_path,
message_template_path):
"""Helper function wrapping logic of sending the review message concerning
a job.
`user` param holds user that performed the review action.
"""
subject_template = loader.get_template(subject_template_path)
message_template = loader.get_template(message_template_path)
if user.first_name or user.last_name:
reviewer_name = '{} {}'.format(user.first_name, user.last_name)
else:
reviewer_name = 'Community Reviewer'
message_context = Context({'reviewer_name': reviewer_name,
'content_object': job,
'site': Site.objects.get_current(),
})
# subject can't contain newlines, thus strip() call
subject = subject_template.render(message_context).strip()
message = message_template.render(message_context)
send_mail(subject, message, settings.JOB_FROM_EMAIL,
[job.email, EMAIL_JOBS_BOARD])
@receiver(job_was_approved)
def on_job_was_approved(sender, job, approving_user, **kwargs):
"""Handle approving job offer. Currently an email should be sent to the
person that sent the offer.
"""
send_job_review_message(job, approving_user,
'jobs/email/job_was_approved_subject.txt',
'jobs/email/job_was_approved.txt')
@receiver(job_was_rejected)
def on_job_was_rejected(sender, job, rejecting_user, **kwargs):
"""Handle rejecting job offer. Currently an email should be sent to the
person that sent the offer.
"""
send_job_review_message(job, rejecting_user,
'jobs/email/job_was_rejected_subject.txt',
'jobs/email/job_was_rejected.txt')
@receiver(models.signals.post_save, sender=Job, dispatch_uid="job_was_submitted")
def on_job_was_submitted(sender, instance, created=False, **kwargs):
"""
Notify the jobs board when a new job has been submitted for approval
"""
# Only send emails for newly created Jobs
if not created:
return
# Only new Jobs in review status should trigger the email
Job = models.get_model('jobs', 'Job')
if instance.status != Job.STATUS_REVIEW:
return
subject_template = loader.get_template('jobs/email/job_was_submitted_subject.txt')
message_template = loader.get_template('jobs/email/job_was_submitted.txt')
message_context = Context({'content_object': instance,
'site': Site.objects.get_current()})
subject = subject_template.render(message_context)
message = message_template.render(message_context)
send_mail(subject, message, settings.JOB_FROM_EMAIL,
[EMAIL_JOBS_BOARD])
| {
"content_hash": "800166c160e0bc67e4393d1158818adb",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 99,
"avg_line_length": 39.59375,
"alnum_prop": 0.6558800315706393,
"repo_name": "demvher/pythondotorg",
"id": "dc1a017226492449cba1e9b6d965761cb92c958a",
"size": "5068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jobs/listeners.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "707898"
},
{
"name": "HTML",
"bytes": "374831"
},
{
"name": "JavaScript",
"bytes": "306579"
},
{
"name": "PostScript",
"bytes": "19072"
},
{
"name": "Python",
"bytes": "912240"
},
{
"name": "Ruby",
"bytes": "218314"
},
{
"name": "Shell",
"bytes": "696"
}
],
"symlink_target": ""
} |
import msgpack
import sys
import socket
import uuid as uuidlib
class Client:
    """Minimal TCP client for the broker: publishes msgpack-encoded
    ``[uuid, metadata-delta, value]`` messages and sends subscription
    queries over a single persistent socket."""

    def __init__(self, host, port, uuid=None):
        self.uuid = uuid if uuid is not None else uuidlib.uuid4()
        self.uuid = str(self.uuid)  # coerce to string
        self.host = str(host)
        self.port = int(port)
        self.metadata = {}
        # Metadata pairs added since the last publish; sent incrementally.
        self._dirty_metadata = {}
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((self.host, self.port))

    def subscribe(self, query):
        """Send a subscription *query* string to the broker."""
        self.s.send(msgpack.packb(query))

    def add_metadata(self, d):
        """Merge *d* into the client metadata (keys and values coerced to
        str); the new pairs ride along with the next publish()."""
        strd = {str(k): str(v) for k, v in d.items()}
        self.metadata.update(strd)
        self._dirty_metadata = strd

    def publish(self, value):
        """Publish *value* plus any metadata changed since the last publish."""
        message = [self.uuid, self._dirty_metadata, value]
        # BUG FIX: `print map(...)` is a Python 2 print *statement* and a
        # SyntaxError under Python 3; the function-call form prints the same
        # single argument on both.  (Debug output -- consider removing or
        # switching to the logging module.)
        print(list(map(hex, map(ord, msgpack.packb(message)))))
        self.s.send(msgpack.packb(message))
        self._dirty_metadata = {}
if __name__ == '__main__':
    # Demo entry point: connect to a local broker and subscribe with the
    # query string given as the first command-line argument.
    c = Client("localhost", "4444")
    #c.add_metadata({"Room": "410", "Building": "Soda", "Device": "Temperature Sensor"})
    #import time
    #i = 0
    #while True:
    #    i += 1
    #    c.publish(i)
    #    time.sleep(5)
    c.subscribe(sys.argv[1])
| {
"content_hash": "20bb07bb6e9b380ea9d4421bb35dfd07",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 88,
"avg_line_length": 27.90909090909091,
"alnum_prop": 0.5814332247557004,
"repo_name": "gtfierro/cs262-project",
"id": "0c6b2ffd5717dbbc31b84f466b77c9270e42a720",
"size": "1228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_client/old/query.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Go",
"bytes": "468218"
},
{
"name": "Jupyter Notebook",
"bytes": "850256"
},
{
"name": "Makefile",
"bytes": "872"
},
{
"name": "Python",
"bytes": "14913"
},
{
"name": "Shell",
"bytes": "832"
},
{
"name": "TeX",
"bytes": "81256"
}
],
"symlink_target": ""
} |
from nagare.i18n import _
from nagare import ajax, presentation
from .comp import CardWeightEditor
def answer(editor, comp):
    """Persist the editor's value; on success, close the dialog via *comp*."""
    committed = editor.commit()
    if not committed:
        return
    comp.answer()
@presentation.render_for(CardWeightEditor, 'action')
def render_cardweighteditor(self, h, comp, *args):
    """Render the weight action button, only when weighting is enabled."""
    if self.weighting_switch:
        h << self.action_button
    return h.root
@presentation.render_for(CardWeightEditor, 'action_button')
def render_CardWeightEditor_action_button(self, h, comp, *args):
    """Render a button showing the current weight; clicking it opens the
    'edit' view of this component."""
    # Sync the editable property with the persisted value before rendering.
    self.weight(str(self.data.weight))
    h << h.a(h.i(class_='icon-star-empty'), self.data.weight, class_='btn').action(
        comp.call, self, model='edit')
    return h.root
@presentation.render_for(CardWeightEditor, 'edit')
def render_CardWeightEditor_edit(self, h, comp, *args):
    """Render the weight editing form.

    Two modes: a free text input (WEIGHTING_FREE) or a drop-down of the
    configured allowed weights (WEIGHTING_LIST). Both save via ``answer``,
    which commits the editor and answers the component call.
    """
    if self.weighting_switch == self.WEIGHTING_FREE:
        id_ = h.generate_id('weight')
        with h.form:
            h << h.input(
                value=self.weight(),
                type='text',
                id_=id_).action(self.weight).error(self.weight.error)
            h << h.button(_('Save'), class_='btn btn-primary').action(answer, self, comp)
            # Give focus to the input as soon as the form is rendered.
            h << h.script("""document.getElementById(%s).focus(); """ % ajax.py2js(id_))
    elif self.weighting_switch == self.WEIGHTING_LIST:
        with h.form:
            with h.div(class_='btn select'):
                with h.select.action(self.weight):
                    for value in self.allowed_weights.split(','):
                        h << h.option(value, value=value.strip()).selected(self.weight())
            h << h.button(_('Save'), class_='btn btn-primary').action(answer, self, comp)
    return h.root
@presentation.render_for(CardWeightEditor, 'badge')
def render_CardWeightEditor_badge(self, h, comp, *args):
    """Render the card's weight badge; renders nothing when no weight set."""
    if self.data.weight:
        with h.span(class_='badge'):
            h << h.span(h.i(class_='icon-star-empty'), ' ',
                        self.data.weight, class_='label', title=_(u'Weight'))
    return h.root
| {
"content_hash": "8fbeca16f8f80b41d56096a054449a48",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 89,
"avg_line_length": 36.36363636363637,
"alnum_prop": 0.6055,
"repo_name": "Net-ng/kansha",
"id": "316e171abf9db38408bfba415ed6e0ebfc77b06f",
"size": "2222",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kansha/card_addons/weight/view.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "90511"
},
{
"name": "HTML",
"bytes": "25077"
},
{
"name": "JavaScript",
"bytes": "342746"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "585691"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
} |
from copy import deepcopy
from hashlib import sha256
from common.serializers.serialization import domain_state_serializer
from indy_common.constants import ATTRIB, GET_ATTR, SIGNATURE_TYPE, REVOC_TYPE, TAG, CRED_DEF_ID, REVOC_REG_DEF_ID, \
CLAIM_DEF_SCHEMA_REF, CLAIM_DEF_PUBLIC_KEYS, SCHEMA_ATTR_NAMES, CLAIM_DEF_FROM, CLAIM_DEF_TAG, \
CLAIM_DEF_TAG_DEFAULT, CLAIM_DEF_CL
from indy_common.req_utils import get_txn_schema_name, get_txn_claim_def_schema_ref, \
get_txn_claim_def_public_keys, get_txn_claim_def_signature_type, get_txn_claim_def_tag, get_txn_schema_version, \
get_txn_schema_attr_names, get_reply_schema_from, get_reply_schema_name, get_reply_schema_version, \
get_reply_schema_attr_names
from indy_common.serialization import attrib_raw_data_serializer
from plenum.common.constants import RAW, ENC, HASH, TXN_TIME, \
TARGET_NYM, DATA, TYPE
from plenum.common.txn_util import get_type, get_payload_data, get_seq_no, get_txn_time, get_from
from plenum.common.types import f
# Single-character markers namespacing the state trie: they keep the keys of
# different txn types from colliding under the same DID prefix.
MARKER_ATTR = "1"
MARKER_SCHEMA = "2"
MARKER_CLAIM_DEF = "3"
# TODO: change previous markers in "request refactoring" sprint
MARKER_REVOC_DEF = "4"
MARKER_REVOC_REG_ENTRY = "5"
MARKER_REVOC_REG_ENTRY_ACCUM = "6"
# Field names used inside serialized state values.
LAST_SEQ_NO = "lsn"
VALUE = "val"
LAST_UPDATE_TIME = "lut"
# The keys under which an ATTRIB payload may be supplied (raw/enc/hash).
ALL_ATR_KEYS = [RAW, ENC, HASH]
def make_state_path_for_nym(did) -> bytes:
    """Return the state-trie key for a NYM: the SHA-256 digest of the DID."""
    # TODO: duplicated in plenum's DomainRequestHandler
    encoded_did = did.encode()
    return sha256(encoded_did).digest()
def make_state_path_for_attr(did, attr_name, attr_is_hash=False) -> bytes:
    """Return the state-trie key ``DID:1:<attr hash>`` for an ATTRIB.

    The attribute name is stored hashed; when the caller already supplies a
    hash (``attr_is_hash`` is True) it is embedded verbatim.
    """
    if attr_is_hash:
        name_hash = attr_name
    else:
        name_hash = sha256(attr_name.encode()).hexdigest()
    return ("%s:%s:%s" % (did, MARKER_ATTR, name_hash)).encode()
def make_state_path_for_schema(authors_did, schema_name, schema_version) -> bytes:
    """Return the state-trie key ``DID:2:name:version`` for a SCHEMA."""
    return ("%s:%s:%s:%s" % (authors_did,
                             MARKER_SCHEMA,
                             schema_name,
                             schema_version)).encode()
def make_state_path_for_claim_def(authors_did, schema_seq_no, signature_type, tag) -> bytes:
    """Return the state-trie key ``DID:3:sig_type:schema_seq_no:tag`` for a CLAIM_DEF."""
    return ("%s:%s:%s:%s:%s" % (authors_did,
                                MARKER_CLAIM_DEF,
                                signature_type,
                                schema_seq_no,
                                tag)).encode()
def make_state_path_for_revoc_def(authors_did, cred_def_id, revoc_type, revoc_tag) -> bytes:
    """Return the state-trie key ``DID:4:cred_def_id:type:tag`` for a REVOC_REG_DEF."""
    return ("%s:%s:%s:%s:%s" % (authors_did,
                                MARKER_REVOC_DEF,
                                cred_def_id,
                                revoc_type,
                                revoc_tag)).encode()
def make_state_path_for_revoc_reg_entry(revoc_reg_def_id) -> bytes:
    """Return the state-trie key ``5:<revoc reg def id>`` for a REVOC_REG_ENTRY."""
    return ("%s:%s" % (MARKER_REVOC_REG_ENTRY, revoc_reg_def_id)).encode()
def make_state_path_for_revoc_reg_entry_accum(revoc_reg_def_id) -> bytes:
    """Return the state-trie key ``6:<revoc reg def id>`` for an entry accumulator."""
    return ("%s:%s" % (MARKER_REVOC_REG_ENTRY_ACCUM, revoc_reg_def_id)).encode()
def prepare_get_nym_for_state(reply):
    """Turn a GET_NYM reply into a ``(state key, state value)`` pair.

    The serialized value mirrors the reply's data with the target NYM field
    stripped out; replies that carry no data yield a ``None`` value.
    """
    value = None
    raw = reply.get(DATA)
    if raw is not None:
        parsed = domain_state_serializer.deserialize(raw)
        parsed.pop(TARGET_NYM, None)
        value = domain_state_serializer.serialize(parsed)
    return make_state_path_for_nym(reply[TARGET_NYM]), value
def prepare_attr_for_state(txn, path_only=False):
    """Derive state data from an ATTRIB transaction.

    :param txn: the ATTRIB transaction
    :param path_only: when True only the state path is computed
    :return: the state path, or the tuple (attr type, path, raw value,
        hashed value, serialized state value)
    """
    assert get_type(txn) == ATTRIB
    payload = get_payload_data(txn)
    nym = payload[TARGET_NYM]
    kind, key, raw_value = parse_attr_txn(payload)
    path = make_state_path_for_attr(nym, key, kind == HASH)
    if path_only:
        return path
    # Only the hash of the attribute value is stored in the state trie.
    hashed = hash_of(raw_value) if raw_value else ''
    encoded = encode_state_value(hashed, get_seq_no(txn), get_txn_time(txn))
    return kind, path, raw_value, hashed, encoded
def prepare_claim_def_for_state(txn, path_only=False):
    """Derive state data from a CLAIM_DEF transaction.

    :param path_only: when True only the state path is returned
    :raises ValueError: when the schema reference or the public keys are absent
    """
    origin = get_from(txn)
    schema_ref = get_txn_claim_def_schema_ref(txn)
    if schema_ref is None:
        raise ValueError("'{}' field is absent, "
                         "but it must contain schema seq no".format(CLAIM_DEF_SCHEMA_REF))
    public_keys = get_txn_claim_def_public_keys(txn)
    if public_keys is None:
        raise ValueError("'{}' field is absent, "
                         "but it must contain components of keys"
                         .format(CLAIM_DEF_PUBLIC_KEYS))
    path = make_state_path_for_claim_def(origin,
                                         schema_ref,
                                         get_txn_claim_def_signature_type(txn),
                                         get_txn_claim_def_tag(txn))
    if path_only:
        return path
    return path, encode_state_value(public_keys, get_seq_no(txn), get_txn_time(txn))
def prepare_revoc_def_for_state(txn, path_only=False):
    """Derive state data from a REVOC_REG_DEF transaction.

    All identifying fields are required; like the neighbouring helpers this
    enforces them with ``assert``.
    """
    author = get_from(txn)
    payload = get_payload_data(txn)
    cred_def_id = payload.get(CRED_DEF_ID)
    def_type = payload.get(REVOC_TYPE)
    def_tag = payload.get(TAG)
    assert author
    assert cred_def_id
    assert def_type
    assert def_tag
    path = make_state_path_for_revoc_def(author, cred_def_id, def_type, def_tag)
    if path_only:
        return path
    seq_no = get_seq_no(txn)
    txn_time = get_txn_time(txn)
    assert seq_no
    assert txn_time
    return path, encode_state_value(payload, seq_no, txn_time)
def prepare_revoc_reg_entry_for_state(txn, path_only=False):
    """Derive state data from a REVOC_REG_ENTRY transaction."""
    author = get_from(txn)
    payload = get_payload_data(txn)
    reg_def_id = payload.get(REVOC_REG_DEF_ID)
    assert author
    assert reg_def_id
    path = make_state_path_for_revoc_reg_entry(revoc_reg_def_id=reg_def_id)
    if path_only:
        return path
    seq_no = get_seq_no(txn)
    txn_time = get_txn_time(txn)
    assert seq_no
    assert txn_time
    # TODO: do not duplicate seqNo here
    # doing this now just for backward-compatibility
    enriched = deepcopy(payload)
    enriched[f.SEQ_NO.nm] = seq_no
    enriched[TXN_TIME] = txn_time
    return path, encode_state_value(enriched, seq_no, txn_time)
def prepare_revoc_reg_entry_accum_for_state(txn):
    """Derive (state path, value) for the accumulator of a REVOC_REG_ENTRY."""
    author = get_from(txn)
    payload = get_payload_data(txn)
    reg_def_id = payload.get(REVOC_REG_DEF_ID)
    seq_no = get_seq_no(txn)
    txn_time = get_txn_time(txn)
    assert author
    assert reg_def_id
    assert seq_no
    assert txn_time
    path = make_state_path_for_revoc_reg_entry_accum(revoc_reg_def_id=reg_def_id)
    # TODO: do not duplicate seqNo here
    # doing this now just for backward-compatibility
    enriched = deepcopy(payload)
    enriched[f.SEQ_NO.nm] = seq_no
    enriched[TXN_TIME] = txn_time
    return path, encode_state_value(enriched, seq_no, txn_time)
def prepare_get_claim_def_for_state(reply):
    """Turn a GET_CLAIM_DEF reply into a ``(state key, state value)`` pair.

    :raises ValueError: when the schema reference is missing from the reply
    """
    origin = reply.get(CLAIM_DEF_FROM)
    schema_ref = reply.get(CLAIM_DEF_SCHEMA_REF)
    if schema_ref is None:
        raise ValueError("'{}' field is absent, "
                         "but it must contain schema seq no".format(CLAIM_DEF_SCHEMA_REF))
    path = make_state_path_for_claim_def(origin,
                                         schema_ref,
                                         reply.get(SIGNATURE_TYPE, CLAIM_DEF_CL),
                                         reply.get(CLAIM_DEF_TAG, CLAIM_DEF_TAG_DEFAULT))
    seq_no = reply[f.SEQ_NO.nm]
    value_bytes = None
    public_keys = reply.get(CLAIM_DEF_PUBLIC_KEYS)
    if public_keys is not None:
        value_bytes = encode_state_value(public_keys, seq_no, reply[TXN_TIME])
    return path, value_bytes
def prepare_get_revoc_def_for_state(reply):
    """Turn a GET_REVOC_REG_DEF reply into a ``(state key, state value)`` pair."""
    author = reply.get(f.IDENTIFIER.nm)
    # Read the data section once instead of once per field.
    data = reply.get(DATA)
    cred_def_id = data.get(CRED_DEF_ID)
    def_type = data.get(REVOC_TYPE)
    def_tag = data.get(TAG)
    assert author
    assert cred_def_id
    assert def_type
    assert def_tag
    path = make_state_path_for_revoc_def(author, cred_def_id, def_type, def_tag)
    seq_no = reply[f.SEQ_NO.nm]
    txn_time = reply[TXN_TIME]
    assert seq_no
    assert txn_time
    return path, encode_state_value(data, seq_no, txn_time)
def prepare_get_revoc_reg_entry_for_state(reply):
    """Turn a GET_REVOC_REG reply into a ``(state key, state value)`` pair."""
    data = reply.get(DATA)
    reg_def_id = data.get(REVOC_REG_DEF_ID)
    assert reg_def_id
    path = make_state_path_for_revoc_reg_entry(revoc_reg_def_id=reg_def_id)
    seq_no = reply[f.SEQ_NO.nm]
    txn_time = reply[TXN_TIME]
    assert seq_no
    assert txn_time
    return path, encode_state_value(data, seq_no, txn_time)
def prepare_get_revoc_reg_entry_accum_for_state(reply):
    """Turn a GET_REVOC_REG_DELTA accumulator reply into a ``(key, value)`` pair."""
    data = reply.get(DATA)
    reg_def_id = data.get(REVOC_REG_DEF_ID)
    seq_no = reply[f.SEQ_NO.nm]
    txn_time = reply[TXN_TIME]
    assert reg_def_id
    assert seq_no
    assert txn_time
    path = make_state_path_for_revoc_reg_entry_accum(revoc_reg_def_id=reg_def_id)
    return path, encode_state_value(data, seq_no, txn_time)
def prepare_schema_for_state(txn, path_only=False):
    """Derive state data from a SCHEMA transaction.

    Only the attribute names are stored as the state value.
    """
    value = {SCHEMA_ATTR_NAMES: get_txn_schema_attr_names(txn)}
    path = make_state_path_for_schema(get_from(txn),
                                      get_txn_schema_name(txn),
                                      get_txn_schema_version(txn))
    if path_only:
        return path
    return path, encode_state_value(value, get_seq_no(txn), get_txn_time(txn))
def prepare_get_schema_for_state(reply):
    """Turn a GET_SCHEMA reply into a ``(state key, state value)`` pair.

    Replies without attribute names yield a ``None`` value.
    """
    path = make_state_path_for_schema(get_reply_schema_from(reply),
                                      get_reply_schema_name(reply),
                                      get_reply_schema_version(reply))
    value_bytes = None
    attr_names = get_reply_schema_attr_names(reply)
    if attr_names:
        value_bytes = encode_state_value({SCHEMA_ATTR_NAMES: attr_names},
                                         reply[f.SEQ_NO.nm],
                                         reply[TXN_TIME])
    return path, value_bytes
def encode_state_value(value, seqNo, txnTime):
    """Serialize ``value`` together with its ledger seq no and txn time.

    Inverse of :func:`decode_state_value`.
    """
    envelope = {LAST_SEQ_NO: seqNo,
                LAST_UPDATE_TIME: txnTime,
                VALUE: value}
    return domain_state_serializer.serialize(envelope)
def decode_state_value(ecnoded_value):
    """Inverse of :func:`encode_state_value`.

    NOTE(review): the parameter name is misspelled ("ecnoded"); it is kept
    as-is because renaming would break keyword callers.

    :return: tuple of (value, last seq no, last update time)
    """
    decoded = domain_state_serializer.deserialize(ecnoded_value)
    return (decoded.get(VALUE),
            decoded.get(LAST_SEQ_NO),
            decoded.get(LAST_UPDATE_TIME))
def hash_of(text) -> str:
    """Return the SHA-256 hex digest of ``text``.

    Non-string inputs are serialized first; ``str`` inputs are UTF-8 encoded.
    """
    if not isinstance(text, (str, bytes)):
        text = domain_state_serializer.serialize(text)
    data = text if isinstance(text, bytes) else text.encode()
    return sha256(data).hexdigest()
def parse_attr_txn(txn_data):
    """Split an ATTRIB payload into (attr type, attr key, stored value).

    For RAW attributes the payload is re-serialized to normalize formatting
    and the key is the attribute's top-level name; for ENC the encrypted blob
    doubles as both key and value; for HASH only the key is kept.
    """
    attr_type, attr = _extract_attr_typed_value(txn_data)
    if attr_type == RAW:
        data = attrib_raw_data_serializer.deserialize(attr)
        # To exclude user-side formatting issues
        re_raw = attrib_raw_data_serializer.serialize(data,
                                                      toBytes=False)
        # popitem() grabs the attribute name -- assumes the raw payload is a
        # single-key mapping; TODO confirm multi-key payloads are rejected
        # upstream.
        key, _ = data.popitem()
        return attr_type, key, re_raw
    if attr_type == ENC:
        return attr_type, attr, attr
    if attr_type == HASH:
        return attr_type, attr, None
def prepare_get_attr_for_state(reply):
    """Turn a GET_ATTR reply into the same state tuple as prepare_attr_for_state.

    The reply's DATA field is moved under its attribute-type key
    (raw/enc/hash) so the reply itself can be fed through parse_attr_txn.
    """
    nym = reply[TARGET_NYM]
    attr_type, attr_key = _extract_attr_typed_value(reply)
    data = reply.get(DATA)
    if data:
        # Shallow-copy so the caller's reply dict is not mutated.
        reply = reply.copy()
        data = reply.pop(DATA)
        reply[attr_type] = data
        assert reply[TYPE] == GET_ATTR
        # attr_type/attr_key are deliberately re-bound: for RAW replies
        # parse_attr_txn extracts the real attribute name from the payload.
        attr_type, attr_key, value = parse_attr_txn(reply)
        hashed_value = hash_of(value) if value else ''
        seq_no = reply[f.SEQ_NO.nm]
        txn_time = reply[TXN_TIME]
        value_bytes = encode_state_value(hashed_value, seq_no, txn_time)
        path = make_state_path_for_attr(nym, attr_key, attr_type == HASH)
        return attr_type, path, value, hashed_value, value_bytes
    # No data in the reply: only the path can be built. ENC keys are stored
    # hashed inside the state path.
    if attr_type == ENC:
        attr_key = hash_of(attr_key)
    path = make_state_path_for_attr(nym, attr_key,
                                    attr_type == HASH or attr_type == ENC)
    return attr_type, path, None, None, None
def _extract_attr_typed_value(txn_data):
    """Return ``(field name, field value)`` for the single attribute payload field.

    ATTR and GET_ATTR must carry exactly one of 'raw', 'enc' and 'hash';
    anything else is a malformed request.

    :raises ValueError: when none or more than one of the fields is present
    """
    present = [key for key in ALL_ATR_KEYS if key in txn_data]
    if not present:
        raise ValueError("ATTR should have one of the following fields: {}"
                         .format(ALL_ATR_KEYS))
    if len(present) > 1:
        raise ValueError("ATTR should have only one of the following fields: {}"
                         .format(ALL_ATR_KEYS))
    field = present[0]
    return field, txn_data[field]
| {
"content_hash": "cb5b69131b5514f97427b5c3d6f15cb5",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 117,
"avg_line_length": 36.04935064935065,
"alnum_prop": 0.635636573240147,
"repo_name": "spivachuk/sovrin-node",
"id": "7e01907bc7b8263268741b33dae9a88bf5eb06ba",
"size": "13879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indy_common/state/domain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3329"
},
{
"name": "Dockerfile",
"bytes": "7269"
},
{
"name": "Groovy",
"bytes": "8984"
},
{
"name": "Makefile",
"bytes": "11151"
},
{
"name": "Python",
"bytes": "1681637"
},
{
"name": "Ruby",
"bytes": "65393"
},
{
"name": "Rust",
"bytes": "25532"
},
{
"name": "Shell",
"bytes": "132633"
}
],
"symlink_target": ""
} |
import os
import sys
# Prefer setuptools; fall back to distutils on minimal Python installs.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# The package is imported at build time only to read its version string.
import spoton
version = spoton.__version__
setup(
    name='spoton',
    version=version,
    author='',
    author_email='thung@me.com',
    packages=[
        'spoton',
    ],
    include_package_data=True,
    install_requires=[
        'Django>=1.6.5',
    ],
    zip_safe=False,
    # Installs the Django management entry point onto the PATH.
    scripts=['spoton/manage.py'],
) | {
"content_hash": "11b3803ec23f55d91a7c55c2c23d65de",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 36,
"avg_line_length": 16.25925925925926,
"alnum_prop": 0.6173120728929385,
"repo_name": "masterfung/Spoton",
"id": "528fb4252a15c1dab7afc88d220b1cb2c5ac40fa",
"size": "486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1210"
},
{
"name": "JavaScript",
"bytes": "2385"
},
{
"name": "Python",
"bytes": "44010"
},
{
"name": "Shell",
"bytes": "5095"
}
],
"symlink_target": ""
} |
from neutron_lib.api.definitions import uplink_status_propagation as usp
from neutron_lib.plugins.ml2 import api
from oslo_log import log as logging
from neutron.db import uplink_status_propagation_db as usp_db
LOG = logging.getLogger(__name__)
class UplinkStatusPropagationExtensionDriver(
        api.ExtensionDriver, usp_db.UplinkStatusPropagationMixin):
    """ML2 extension driver wiring the uplink-status-propagation port field."""

    _supported_extension_alias = 'uplink-status-propagation'

    def initialize(self):
        """Log readiness; no other setup is needed for this driver."""
        LOG.info("UplinkStatusPropagationExtensionDriver initialization "
                 "complete")

    @property
    def extension_alias(self):
        """Alias of the API extension this driver implements."""
        return self._supported_extension_alias

    def process_create_port(self, context, data, result):
        """Persist the propagate-uplink-status attribute on port create."""
        if usp.PROPAGATE_UPLINK_STATUS not in data:
            return
        self._process_create_port(context, data, result)

    def extend_port_dict(self, session, db_data, result):
        """Copy the stored attribute from the DB row into the port dict."""
        self._extend_port_dict(result, db_data)
| {
"content_hash": "41c99f0d76487eb2e0a0d309e70c43bd",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 73,
"avg_line_length": 32.36666666666667,
"alnum_prop": 0.717816683831102,
"repo_name": "mahak/neutron",
"id": "e77335e248e95bd03a7ecd10b0b6377972a67d7d",
"size": "1544",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/plugins/ml2/extensions/uplink_status_propagation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15942116"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
} |
import json
import pickle
import tornado.escape
__author__ = 'kollad'
class FlashMessageMixin(object):
    """Tornado handler mixin storing one-shot ("flash") messages in cookies.

    Messages are JSON-serialized into a per-key cookie and cleared on first
    successful read.
    """
    def _cookie_name(self, key):
        # Namespaced cookie name for the given message key.
        return '{}_message_cookie'.format(key)
    def _get_message_cookie(self, key):
        # Raw (still URL-escaped) cookie value, or None when unset.
        return self.get_cookie(self._cookie_name(key))
    def has_message(self, key):
        """Return True when a message cookie exists for ``key``."""
        return self._get_message_cookie(key) is not None
    def get_message(self, key):
        """Pop and return the message stored under ``key``, or None.

        NOTE(review): the cookie is fetched twice (has_message plus the read
        below), and any JSON decoding failure is silently mapped to None
        without clearing the bad cookie -- presumably deliberate best-effort
        behaviour; confirm before tightening.
        """
        if not self.has_message(key):
            return None
        message = tornado.escape.url_unescape(self._get_message_cookie(key))
        try:
            message_data = json.loads(message)
            # Flash semantics: the message is consumed on successful read.
            self.clear_cookie(self._cookie_name(key))
            return message_data
        except Exception as e:
            return None
    def set_message(self, message, key='error'):
        """JSON-encode ``message`` and store it in the cookie for ``key``."""
        message = json.dumps(message)
        self.set_cookie(self._cookie_name(key), tornado.escape.url_escape(message)) | {
"content_hash": "b1ad3103ca4fe0ce43d8903138561698",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 83,
"avg_line_length": 29.548387096774192,
"alnum_prop": 0.62882096069869,
"repo_name": "kollad/turbo-ninja",
"id": "7542491c115f4bb8383b66fa90dc283ccef5df1d",
"size": "916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tornado_messages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "413"
},
{
"name": "Python",
"bytes": "170897"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial placeholder migration: no dependencies and no operations."""
    dependencies = [
    ]
    operations = [
        # NOOP
    ]
| {
"content_hash": "4ea5d422b5cecac264efbcec91d06cb8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 40,
"avg_line_length": 14.76923076923077,
"alnum_prop": 0.6458333333333334,
"repo_name": "staranjeet/fjord",
"id": "2b3a83959bb52bd554579202037f2df0522e00d7",
"size": "216",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "fjord/analytics/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "158619"
},
{
"name": "HTML",
"bytes": "127302"
},
{
"name": "JavaScript",
"bytes": "296754"
},
{
"name": "Python",
"bytes": "853569"
},
{
"name": "Shell",
"bytes": "11673"
},
{
"name": "Smarty",
"bytes": "780"
}
],
"symlink_target": ""
} |
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class CustomerProfilesEntityAssignmentsList(ListResource):

    def __init__(self, version, customer_profile_sid):
        """
        Initialize the CustomerProfilesEntityAssignmentsList

        :param Version version: Version that contains the resource
        :param customer_profile_sid: The unique string that identifies the CustomerProfile resource.

        :returns: twilio.rest.trusthub.v1.customer_profiles.customer_profiles_entity_assignments.CustomerProfilesEntityAssignmentsList
        :rtype: twilio.rest.trusthub.v1.customer_profiles.customer_profiles_entity_assignments.CustomerProfilesEntityAssignmentsList
        """
        super(CustomerProfilesEntityAssignmentsList, self).__init__(version)

        # Path Solution
        self._solution = {'customer_profile_sid': customer_profile_sid, }
        self._uri = '/CustomerProfiles/{customer_profile_sid}/EntityAssignments'.format(**self._solution)

    def create(self, object_sid):
        """
        Create the CustomerProfilesEntityAssignmentsInstance

        :param unicode object_sid: The sid of an object bag

        :returns: The created CustomerProfilesEntityAssignmentsInstance
        :rtype: twilio.rest.trusthub.v1.customer_profiles.customer_profiles_entity_assignments.CustomerProfilesEntityAssignmentsInstance
        """
        data = values.of({'ObjectSid': object_sid, })

        payload = self._version.create(method='POST', uri=self._uri, data=data, )

        return CustomerProfilesEntityAssignmentsInstance(
            self._version,
            payload,
            customer_profile_sid=self._solution['customer_profile_sid'],
        )

    def stream(self, limit=None, page_size=None):
        """
        Streams CustomerProfilesEntityAssignmentsInstance records from the API as a generator stream.
        This operation lazily loads records as efficiently as possible until the limit
        is reached.
        The results are returned as a generator, so this operation is memory efficient.

        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.trusthub.v1.customer_profiles.customer_profiles_entity_assignments.CustomerProfilesEntityAssignmentsInstance]
        """
        limits = self._version.read_limits(limit, page_size)

        page = self.page(page_size=limits['page_size'], )

        return self._version.stream(page, limits['limit'])

    def list(self, limit=None, page_size=None):
        """
        Lists CustomerProfilesEntityAssignmentsInstance records from the API as a list.
        Unlike stream(), this operation is eager and will load `limit` records into
        memory before returning.

        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.trusthub.v1.customer_profiles.customer_profiles_entity_assignments.CustomerProfilesEntityAssignmentsInstance]
        """
        return list(self.stream(limit=limit, page_size=page_size, ))

    def page(self, page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Retrieve a single page of CustomerProfilesEntityAssignmentsInstance records from the API.
        Request is executed immediately

        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of CustomerProfilesEntityAssignmentsInstance
        :rtype: twilio.rest.trusthub.v1.customer_profiles.customer_profiles_entity_assignments.CustomerProfilesEntityAssignmentsPage
        """
        data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })

        response = self._version.page(method='GET', uri=self._uri, params=data, )

        return CustomerProfilesEntityAssignmentsPage(self._version, response, self._solution)

    def get_page(self, target_url):
        """
        Retrieve a specific page of CustomerProfilesEntityAssignmentsInstance records from the API.
        Request is executed immediately

        :param str target_url: API-generated URL for the requested results page

        :returns: Page of CustomerProfilesEntityAssignmentsInstance
        :rtype: twilio.rest.trusthub.v1.customer_profiles.customer_profiles_entity_assignments.CustomerProfilesEntityAssignmentsPage
        """
        response = self._version.domain.twilio.request(
            'GET',
            target_url,
        )

        return CustomerProfilesEntityAssignmentsPage(self._version, response, self._solution)

    def get(self, sid):
        """
        Constructs a CustomerProfilesEntityAssignmentsContext

        :param sid: The unique string that identifies the resource

        :returns: twilio.rest.trusthub.v1.customer_profiles.customer_profiles_entity_assignments.CustomerProfilesEntityAssignmentsContext
        :rtype: twilio.rest.trusthub.v1.customer_profiles.customer_profiles_entity_assignments.CustomerProfilesEntityAssignmentsContext
        """
        return CustomerProfilesEntityAssignmentsContext(
            self._version,
            customer_profile_sid=self._solution['customer_profile_sid'],
            sid=sid,
        )

    def __call__(self, sid):
        """
        Constructs a CustomerProfilesEntityAssignmentsContext

        :param sid: The unique string that identifies the resource

        :returns: twilio.rest.trusthub.v1.customer_profiles.customer_profiles_entity_assignments.CustomerProfilesEntityAssignmentsContext
        :rtype: twilio.rest.trusthub.v1.customer_profiles.customer_profiles_entity_assignments.CustomerProfilesEntityAssignmentsContext
        """
        # __call__ previously duplicated get() verbatim; delegate so the two
        # entry points cannot drift apart.
        return self.get(sid)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Trusthub.V1.CustomerProfilesEntityAssignmentsList>'
class CustomerProfilesEntityAssignmentsPage(Page):
    """One page of entity-assignment results for a CustomerProfile."""

    def __init__(self, version, response, solution):
        """
        Initialize the page.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param solution: path solution carrying the customer_profile_sid
        """
        super(CustomerProfilesEntityAssignmentsPage, self).__init__(version, response)
        self._solution = solution

    def get_instance(self, payload):
        """
        Wrap one payload dict from this page in a
        CustomerProfilesEntityAssignmentsInstance.

        :param dict payload: Payload response from the API
        """
        return CustomerProfilesEntityAssignmentsInstance(
            self._version,
            payload,
            customer_profile_sid=self._solution['customer_profile_sid'],
        )

    def __repr__(self):
        """Machine friendly representation."""
        return '<Twilio.Trusthub.V1.CustomerProfilesEntityAssignmentsPage>'
class CustomerProfilesEntityAssignmentsContext(InstanceContext):
    """Context addressing one entity assignment by profile sid and assignment sid."""

    def __init__(self, version, customer_profile_sid, sid):
        """
        Initialize the context.

        :param Version version: Version that contains the resource
        :param customer_profile_sid: The unique string that identifies the resource.
        :param sid: The unique string that identifies the resource
        """
        super(CustomerProfilesEntityAssignmentsContext, self).__init__(version)
        self._solution = {'customer_profile_sid': customer_profile_sid, 'sid': sid, }
        self._uri = '/CustomerProfiles/{customer_profile_sid}/EntityAssignments/{sid}'.format(**self._solution)

    def fetch(self):
        """
        Fetch the resource from the API and wrap the payload in an instance.

        :returns: The fetched CustomerProfilesEntityAssignmentsInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri, )
        return CustomerProfilesEntityAssignmentsInstance(
            self._version,
            payload,
            customer_profile_sid=self._solution['customer_profile_sid'],
            sid=self._solution['sid'],
        )

    def delete(self):
        """
        Delete the resource.

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._version.delete(method='DELETE', uri=self._uri, )

    def __repr__(self):
        """Machine friendly representation."""
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Trusthub.V1.CustomerProfilesEntityAssignmentsContext {}>'.format(details)
class CustomerProfilesEntityAssignmentsInstance(InstanceResource):
    """One entity assignment attached to a CustomerProfile."""

    def __init__(self, version, payload, customer_profile_sid, sid=None):
        """
        Initialize the instance from an API payload.

        :param Version version: Version that contains the resource
        :param dict payload: API payload describing the assignment
        :param customer_profile_sid: sid of the owning CustomerProfile
        :param sid: assignment sid; taken from the payload when omitted
        """
        super(CustomerProfilesEntityAssignmentsInstance, self).__init__(version)

        # Marshal the raw payload; date_created is parsed into a datetime.
        props = {name: payload.get(name)
                 for name in ('sid', 'customer_profile_sid', 'account_sid',
                              'object_sid', 'url')}
        props['date_created'] = deserialize.iso8601_datetime(payload.get('date_created'))
        self._properties = props

        # The context is created lazily by the _proxy property.
        self._context = None
        self._solution = {
            'customer_profile_sid': customer_profile_sid,
            'sid': sid or self._properties['sid'],
        }

    @property
    def _proxy(self):
        """
        Lazily build (and cache) the context that all instance actions
        delegate to.
        """
        if self._context is None:
            self._context = CustomerProfilesEntityAssignmentsContext(
                self._version,
                customer_profile_sid=self._solution['customer_profile_sid'],
                sid=self._solution['sid'],
            )
        return self._context

    @property
    def sid(self):
        """Unique string that identifies the resource (unicode)."""
        return self._properties['sid']

    @property
    def customer_profile_sid(self):
        """Unique string identifying the owning CustomerProfile (unicode)."""
        return self._properties['customer_profile_sid']

    @property
    def account_sid(self):
        """Sid of the Account that created the resource (unicode)."""
        return self._properties['account_sid']

    @property
    def object_sid(self):
        """Sid of the assigned object bag (unicode)."""
        return self._properties['object_sid']

    @property
    def date_created(self):
        """ISO 8601 date and time in GMT when the resource was created (datetime)."""
        return self._properties['date_created']

    @property
    def url(self):
        """Absolute URL of the resource (unicode)."""
        return self._properties['url']

    def fetch(self):
        """
        Fetch the resource.

        :returns: The fetched CustomerProfilesEntityAssignmentsInstance
        """
        return self._proxy.fetch()

    def delete(self):
        """
        Delete the resource.

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._proxy.delete()

    def __repr__(self):
        """Machine friendly representation."""
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Trusthub.V1.CustomerProfilesEntityAssignmentsInstance {}>'.format(details)
| {
"content_hash": "da4a412da145d1d41447cf2296fe99c0",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 142,
"avg_line_length": 40.44702842377261,
"alnum_prop": 0.6702229604548648,
"repo_name": "twilio/twilio-python",
"id": "a942f377dc41d50ca82c3f73fd9ff1c0381d4a5e",
"size": "15668",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "twilio/rest/trusthub/v1/customer_profiles/customer_profiles_entity_assignments.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
} |
"""Tests for `ged4py.detail.io` module."""
from contextlib import contextmanager
import io
import tempfile
import os
import unittest
from ged4py.detail.io import check_bom, guess_lineno, BinaryFileCR
@contextmanager
def _temp_file(data):
"""Create file with unique name and store some data in it.
Returns file name.
"""
fd, fname = tempfile.mkstemp()
os.write(fd, data)
os.close(fd)
yield fname
os.unlink(fname)
class TestDetailIo(unittest.TestCase):
    """Tests for `ged4py.detail.io` module."""
    def setUp(self):
        """Set up test fixtures, if any."""
    def tearDown(self):
        """Tear down test fixtures, if any."""
    def test_001_guess_bom_codec(self):
        """Test detail.io.check_bom()."""
        # No BOM: no codec reported, stream position left at 0.
        file = io.BytesIO(b"0 HEAD")
        codec = check_bom(file)
        self.assertTrue(codec is None)
        self.assertEqual(file.tell(), 0)
        # Stream shorter than any BOM: same result.
        file = io.BytesIO(b"0")
        codec = check_bom(file)
        self.assertTrue(codec is None)
        self.assertEqual(file.tell(), 0)
        # UTF-8 BOM (3 bytes) is detected and consumed.
        file = io.BytesIO(b"\xef\xbb\xbf0 HEAD")
        codec = check_bom(file)
        self.assertEqual(codec, "utf-8")
        self.assertEqual(file.tell(), 3)
        # UTF-16 little-endian BOM (2 bytes) is detected and consumed.
        file = io.BytesIO(b"\xff\xfe0 HEAD")
        codec = check_bom(file)
        self.assertEqual(codec, "utf-16-le")
        self.assertEqual(file.tell(), 2)
        # UTF-16 big-endian BOM (2 bytes) is detected and consumed.
        file = io.BytesIO(b"\xfe\xff0 HEAD")
        codec = check_bom(file)
        self.assertEqual(codec, "utf-16-be")
        self.assertEqual(file.tell(), 2)
        # A stream that is nothing but a BOM still reports its codec.
        file = io.BytesIO(b"\xfe\xff")
        codec = check_bom(file)
        self.assertEqual(codec, "utf-16-be")
        self.assertEqual(file.tell(), 2)
    def test_002_guess_lineno(self):
        """Test detail.io.guess_lineno()."""
        # Five 6-byte lines; guess_lineno must report the line containing the
        # current offset and must leave the file position untouched.
        file = io.BytesIO(b"line1\nline2\nline3\nline4\nline5\n")
        file.readline()
        self.assertEqual(file.tell(), 6)
        self.assertEqual(guess_lineno(file), 2)
        self.assertEqual(file.tell(), 6)
        file.readline()
        self.assertEqual(file.tell(), 12)
        self.assertEqual(guess_lineno(file), 3)
        self.assertEqual(file.tell(), 12)
        file.readline()
        self.assertEqual(file.tell(), 18)
        self.assertEqual(guess_lineno(file), 4)
        self.assertEqual(file.tell(), 18)
        file.readline()
        self.assertEqual(file.tell(), 24)
        self.assertEqual(guess_lineno(file), 5)
        self.assertEqual(file.tell(), 24)
        file.readline()
        self.assertEqual(file.tell(), 30)
        self.assertEqual(guess_lineno(file), 6)
        self.assertEqual(file.tell(), 30)
        # At EOF further reads move neither the position nor the line number.
        file.readline()
        self.assertEqual(file.tell(), 30)
        self.assertEqual(guess_lineno(file), 6)
        self.assertEqual(file.tell(), 30)
        # Seeking back to the start restarts the count at line 1.
        file.seek(0)
        self.assertEqual(file.tell(), 0)
        self.assertEqual(guess_lineno(file), 1)
        self.assertEqual(file.tell(), 0)
    def test_003_BinaryFileCR(self):
        """Test BinaryFileCR.readline() with LF, CR and CRLF terminators."""
        # Empty stream yields an empty line.
        file = BinaryFileCR(io.BytesIO(b""))
        line = file.readline()
        self.assertEqual(len(line), 0)
        # LF terminators; last line unterminated.
        file = BinaryFileCR(io.BytesIO(b"line1\nline2"))
        line = file.readline()
        self.assertEqual(line, b"line1\n")
        self.assertEqual(file.tell(), 6)
        line = file.readline()
        self.assertEqual(line, b"line2")
        self.assertEqual(file.tell(), 11)
        # LF terminators; seek() replays the second line.
        file = BinaryFileCR(io.BytesIO(b"line1\nline2\n"))
        line = file.readline()
        self.assertEqual(line, b"line1\n")
        self.assertEqual(file.tell(), 6)
        line = file.readline()
        self.assertEqual(line, b"line2\n")
        self.assertEqual(file.tell(), 12)
        file.seek(6)
        line = file.readline()
        self.assertEqual(line, b"line2\n")
        self.assertEqual(file.tell(), 12)
        # Bare CR terminators (classic MacOS) are honored as line ends.
        file = BinaryFileCR(io.BytesIO(b"line1\rline2\r"))
        line = file.readline()
        self.assertEqual(line, b"line1\r")
        self.assertEqual(file.tell(), 6)
        line = file.readline()
        self.assertEqual(line, b"line2\r")
        self.assertEqual(file.tell(), 12)
        file.seek(6)
        line = file.readline()
        self.assertEqual(line, b"line2\r")
        self.assertEqual(file.tell(), 12)
        # CRLF terminators are kept intact on each returned line.
        file = BinaryFileCR(io.BytesIO(b"line1\r\nline2\r\n"))
        line = file.readline()
        self.assertEqual(line, b"line1\r\n")
        self.assertEqual(file.tell(), 7)
        line = file.readline()
        self.assertEqual(line, b"line2\r\n")
        self.assertEqual(file.tell(), 14)
        file.seek(7)
        line = file.readline()
        self.assertEqual(line, b"line2\r\n")
        self.assertEqual(file.tell(), 14)
        # The size argument limits a read, possibly splitting mid-line.
        file = BinaryFileCR(io.BytesIO(b"abc\r\ndef\r\n"))
        line = file.readline(0)
        self.assertEqual(len(line), 0)
        line = file.readline(1)
        self.assertEqual(line, b"a")
        line = file.readline(3)
        self.assertEqual(line, b"bc\r")
        line = file.readline()
        self.assertEqual(line, b"\n")
        line = file.readline(5)
        self.assertEqual(line, b"def\r\n")
        line = file.readline(100)
        self.assertEqual(len(line), 0)
        line = file.readline(0)
        self.assertEqual(len(line), 0)
| {
"content_hash": "ce86cb40934a5dde829b2a3f309a54d8",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 66,
"avg_line_length": 30.81764705882353,
"alnum_prop": 0.5894254628745944,
"repo_name": "andy-z/ged4py",
"id": "829115a1e889626bccfbfc661574019425ed875f",
"size": "5286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_detail_io.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2283"
},
{
"name": "Python",
"bytes": "118486"
}
],
"symlink_target": ""
} |
import argparse
import urllib, urllib2
import cStringIO
import sys
import tempfile
import h5py
def main():
    """Bulk-upload a fixed range of HDF5 annotation files to the EMCA service.

    NOTE(review): the positional ``h5file`` argument is parsed but never
    used -- the loop below iterates a hard-coded file-name range instead;
    confirm whether this is leftover debugging code.
    """
    parser = argparse.ArgumentParser(description='Post an HDF5 file to the service.')
    parser.add_argument('baseurl', action="store" )
    parser.add_argument('token', action="store" )
    parser.add_argument('h5file', action="store" )
    parser.add_argument('--update', action='store_true')
    parser.add_argument('--dataonly', action='store_true')
    parser.add_argument('--preserve', action='store_true', help='Preserve exisiting annotations in the database. Default is overwrite.')
    parser.add_argument('--exception', action='store_true', help='Store multiple nnotations at the same voxel in the database. Default is overwrite.')
    result = parser.parse_args()
    # Hard-coded range of annotation ids to upload.
    for i in range(4277,6075):
        # load the HDF5 file
        # result.h5file = "anno" + str(i)+".h5"
        fname = "/home/priya/kat11hdf5/anno" + str(i) +".h5"
        # NOTE(review): tmpfile and h5fh are created but never used below.
        tmpfile = tempfile.NamedTemporaryFile()
        h5fh = h5py.File (fname )
        # Pick the endpoint that matches the requested write policy.
        if result.preserve:
            url = 'http://%s/emca/%s/preserve/' % ( result.baseurl, result.token )
        elif result.exception:
            url = 'http://%s/emca/%s/exception/' % ( result.baseurl, result.token )
        else:
            url = 'http://%s/emca/%s/' % ( result.baseurl, result.token )
        if result.update:
            url+='update/'
        if result.dataonly:
            url+='dataonly/'
        print url
        # POST the raw HDF5 bytes; abort the whole run on the first failure.
        try:
            req = urllib2.Request ( url, open(fname).read() )
            response = urllib2.urlopen(req)
        except urllib2.URLError, e:
            print "Failed URL", url
            print "Error %s" % (e.read())
            sys.exit(0)
        the_page = response.read()
        print "Success with id %s" % the_page
if __name__ == "__main__":
    main()
| {
"content_hash": "c5fd95d773834139e75f530263d840d6",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 149,
"avg_line_length": 27.6984126984127,
"alnum_prop": 0.6401146131805158,
"repo_name": "openconnectome/open-connectome",
"id": "fd74ef6889bc81a8de29fd58685e8b6dc15d80e2",
"size": "2344",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/h5posttest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "43100"
},
{
"name": "C++",
"bytes": "23724"
},
{
"name": "CSS",
"bytes": "53255"
},
{
"name": "HTML",
"bytes": "142332"
},
{
"name": "JavaScript",
"bytes": "303249"
},
{
"name": "Makefile",
"bytes": "2273"
},
{
"name": "Python",
"bytes": "1409968"
},
{
"name": "Shell",
"bytes": "5637"
}
],
"symlink_target": ""
} |
"""Version-independent api tests"""
import httplib2
from oslo.serialization import jsonutils
from glance.tests import functional
class TestApiVersions(functional.FunctionalTest):
    """Exercise the version document the API root returns for each
    combination of enabled v1/v2 APIs."""

    # Version ids per major API version, newest first; the first entry of
    # each group is advertised as CURRENT, the rest as SUPPORTED.
    V2_IDS = ('v2.3', 'v2.2', 'v2.1', 'v2.0')
    V1_IDS = ('v1.1', 'v1.0')

    def _expected_versions_json(self, id_groups):
        """Build the JSON version document the server should emit for the
        given groups of version ids."""
        url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
        entries = []
        for ids in id_groups:
            for position, version_id in enumerate(ids):
                entries.append({
                    'id': version_id,
                    'status': 'CURRENT' if position == 0 else 'SUPPORTED',
                    # href only carries the major version, e.g. '2' of 'v2.3'
                    'links': [{'rel': 'self', 'href': url % version_id[1]}],
                })
        return jsonutils.dumps({'versions': entries})

    def _assert_version_choices(self, versions_json):
        """GET the server root and verify a 300 with the given document."""
        path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(300, response.status)
        self.assertEqual(versions_json, content)

    def test_version_configurations(self):
        """Test that versioning is handled properly through all channels"""
        # v1 and v2 api enabled
        self.start_servers(**self.__dict__.copy())
        self._assert_version_choices(
            self._expected_versions_json((self.V2_IDS, self.V1_IDS)))

    def test_v2_api_configuration(self):
        """Only the v2 versions are advertised when v1 is disabled."""
        self.api_server.enable_v1_api = False
        self.api_server.enable_v2_api = True
        self.start_servers(**self.__dict__.copy())
        self._assert_version_choices(
            self._expected_versions_json((self.V2_IDS,)))

    def test_v1_api_configuration(self):
        """Only the v1 versions are advertised when v2 is disabled."""
        self.api_server.enable_v1_api = True
        self.api_server.enable_v2_api = False
        self.start_servers(**self.__dict__.copy())
        self._assert_version_choices(
            self._expected_versions_json((self.V1_IDS,)))
class TestApiPaths(functional.FunctionalTest):
    """Verify how request paths and Accept headers are routed to the
    versioned APIs: known versioned paths are served, everything else
    gets a 300 Multiple Choices with the version document."""

    def setUp(self):
        super(TestApiPaths, self).setUp()
        self.start_servers(**self.__dict__.copy())
        url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
        # Expected version document: first id of each major version is
        # CURRENT, the rest SUPPORTED; href carries the major version only.
        entries = []
        for ids in (('v2.3', 'v2.2', 'v2.1', 'v2.0'), ('v1.1', 'v1.0')):
            for position, version_id in enumerate(ids):
                entries.append({
                    'id': version_id,
                    'status': 'CURRENT' if position == 0 else 'SUPPORTED',
                    'links': [{'rel': 'self', 'href': url % version_id[1]}],
                })
        self.versions_json = jsonutils.dumps({'versions': entries})
        images = {'images': []}
        self.images_json = jsonutils.dumps(images)

    def _get(self, suffix, headers=None):
        """Issue a GET for *suffix* against the API server and return the
        (response, content) pair."""
        path = 'http://%s:%d%s' % ('127.0.0.1', self.api_port, suffix)
        http = httplib2.Http()
        return http.request(path, 'GET', headers=headers)

    def test_get_root_path(self):
        """Assert GET / with `no Accept:` header.
        Verify version choices returned.
        Bug lp:803260 no Accept header causes a 500 in glance-api
        """
        response, content = self._get('')
        self.assertEqual(300, response.status)
        self.assertEqual(self.versions_json, content)

    def test_get_images_path(self):
        """Assert GET /images with `no Accept:` header.
        Verify version choices returned.
        """
        response, content = self._get('/images')
        self.assertEqual(300, response.status)
        self.assertEqual(self.versions_json, content)

    def test_get_v1_images_path(self):
        """GET /v1/images with `no Accept:` header.
        Verify empty images list returned.
        """
        response, content = self._get('/v1/images')
        self.assertEqual(200, response.status)

    def test_get_root_path_with_unknown_header(self):
        """Assert GET / with Accept: unknown header
        Verify version choices returned. Verify message in API log about
        unknown accept header.
        """
        response, content = self._get('/', headers={'Accept': 'unknown'})
        self.assertEqual(300, response.status)
        self.assertEqual(self.versions_json, content)

    def test_get_root_path_with_openstack_header(self):
        """Assert GET /images with an Accept: application/vnd.openstack.images-v1
        Verify empty image list returned
        """
        response, content = self._get(
            '/images',
            headers={'Accept': 'application/vnd.openstack.images-v1'})
        self.assertEqual(200, response.status)
        self.assertEqual(self.images_json, content)

    def test_get_images_path_with_openstack_header(self):
        """Assert GET /images with a
        `Accept: application/vnd.openstack.compute-v1` header.
        Verify version choices returned. Verify message in API log
        about unknown accept header.
        """
        response, content = self._get(
            '/images',
            headers={'Accept': 'application/vnd.openstack.compute-v1'})
        self.assertEqual(300, response.status)
        self.assertEqual(self.versions_json, content)

    def test_get_v10_images_path(self):
        """Assert GET /v1.0/images with no Accept: header
        Verify version choices returned
        """
        # Fixed: this test used the '/v1.a/images' path, an exact copy of
        # test_get_v1a_images_path below; the name and docstring call for
        # the '/v1.0/images' (unknown minor version) path.
        response, content = self._get('/v1.0/images')
        self.assertEqual(300, response.status)

    def test_get_v1a_images_path(self):
        """Assert GET /v1.a/images with no Accept: header
        Verify version choices returned
        """
        response, content = self._get('/v1.a/images')
        self.assertEqual(300, response.status)

    def test_get_va1_images_path(self):
        """Assert GET /va.1/images with no Accept: header
        Verify version choices returned
        """
        response, content = self._get('/va.1/images')
        self.assertEqual(300, response.status)
        self.assertEqual(self.versions_json, content)

    def test_get_versions_path(self):
        """Assert GET /versions with no Accept: header
        Verify version choices returned
        """
        response, content = self._get('/versions')
        self.assertEqual(300, response.status)
        self.assertEqual(self.versions_json, content)

    def test_get_versions_path_with_openstack_header(self):
        """Assert GET /versions with the
        `Accept: application/vnd.openstack.images-v1` header.
        Verify version choices returned.
        """
        response, content = self._get(
            '/versions',
            headers={'Accept': 'application/vnd.openstack.images-v1'})
        self.assertEqual(300, response.status)
        self.assertEqual(self.versions_json, content)

    def test_get_v1_versions_path(self):
        """Assert GET /v1/versions with `no Accept:` header
        Verify 404 returned
        """
        response, content = self._get('/v1/versions')
        self.assertEqual(404, response.status)

    def test_get_versions_choices(self):
        """Verify version choices returned"""
        response, content = self._get('/v10')
        self.assertEqual(300, response.status)
        self.assertEqual(self.versions_json, content)

    def test_get_images_path_with_openstack_v2_header(self):
        """Assert GET /images with a
        `Accept: application/vnd.openstack.compute-v2` header.
        Verify version choices returned. Verify message in API log
        about unknown version in accept header.
        """
        response, content = self._get(
            '/images',
            headers={'Accept': 'application/vnd.openstack.images-v10'})
        self.assertEqual(300, response.status)
        self.assertEqual(self.versions_json, content)

    def test_get_v12_images_path(self):
        """Assert GET /v1.2/images with `no Accept:` header
        Verify version choices returned
        """
        response, content = self._get('/v1.2/images')
        self.assertEqual(300, response.status)
        self.assertEqual(self.versions_json, content)
| {
"content_hash": "fbb39da33a29d35bb6a9a8c35c7eee32",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 75,
"avg_line_length": 37.634375,
"alnum_prop": 0.5192227850203438,
"repo_name": "yanheven/glance",
"id": "860df8fd8e5e09c4c2fe5081a8b4a4dfc5d9d93c",
"size": "12679",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "glance/tests/functional/test_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3838949"
},
{
"name": "Shell",
"bytes": "7860"
}
],
"symlink_target": ""
} |
"""
Load geopotential heights and orography cube to get lat/lon cross-section
13/08/2014
"""
import os, sys
import pdb
import iris
import iris.analysis.cartography
from update_pp_cube_coords import update_coords
#import h5py
import numpy as np
#c_section_lon=74.
# Longitude range (degrees) over which cross-sections are extracted, and
# the step between successive sections.
c_lon_min=75.
c_lon_max=85.
gap=1.
c_section_lat=0
# Selects the diagnostic file <diagnostic>.pp to load.
# NOTE(review): confirm which physical quantity diagnostic 4 corresponds to.
diagnostic=4
#experiment_ids = ['djznw', 'djzny', 'djznq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'djzns' ]
#experiment_ids = ['djznq', 'dklyu', 'dkmbq', 'dklzq', 'djzns' ] #djznw and dklwu missing
#experiment_ids = ['dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
#experiment_ids = ['dkbhu']
experiment_ids = ['djznw']
for experiment_id in experiment_ids:
    # Data files live under a directory named after the id minus its
    # final character.
    expmin1 = experiment_id[:-1]
    diag = iris.load_cube('/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s.pp' % (expmin1,experiment_id, diagnostic))
    #diag = iris.load(f_diag)
    print diag
    cs = diag.coord_system('CoordSystem')
    print cs
    csur=cs.ellipsoid
    # Unrotate the rotated-pole grid to true latitudes/longitudes.
    lat = diag.coord('grid_latitude').points
    lon = diag.coord('grid_longitude').points
    lons, lats = np.meshgrid(lon, lat)
    lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
    # The unrotated grid is treated as regular: keep the first row and
    # first column as 1-D coordinate arrays.
    lon=lons[0]
    lat=lats[:,0]
    # Find which cube dimensions hold latitude and longitude.
    for i, coord in enumerate (diag.coords()):
        if coord.standard_name=='grid_latitude':
            lat_dim_coord_diag = i
        if coord.standard_name=='grid_longitude':
            lon_dim_coord_diag = i
    # Replace the rotated coordinates with the unrotated equivalents.
    diag.remove_coord('grid_latitude')
    diag.remove_coord('grid_longitude')
    diag.add_dim_coord(iris.coords.DimCoord(points=lat, standard_name='grid_latitude', units='degrees', coord_system=csur), lat_dim_coord_diag)
    diag.add_dim_coord(iris.coords.DimCoord(points=lon, standard_name='grid_longitude', units='degrees', coord_system=csur), lon_dim_coord_diag)
    # Extract and save one cross-section per longitude in the range.
    for c_section_lon in np.arange(c_lon_min,c_lon_max+1, gap):
        if (c_section_lon != 0):
            print c_section_lon
            l=diag.coord('grid_longitude').nearest_neighbour_index(c_section_lon)
            # Slice along whichever axis holds longitude.
            if lon_dim_coord_diag==0:
                xc=diag[l,:]
            if lon_dim_coord_diag==1:
                xc=diag[:,l,:]
            if lon_dim_coord_diag==2:
                xc=diag[:,:,l,:]
            if lon_dim_coord_diag==3:
                xc=diag[:,:,:,l,:]
            iris.save(xc, '/nfs/a90/eepdw/Figures/EMBRACE/Cross_Sections/%s_%s_height_XC_Longitude_%s.pp' % (experiment_id, diagnostic, str(c_section_lon).replace(".", "")))
            #THESE METHODS MIGHT WORK BUT TAKE A LONG TIME - I THINK BECAUSE THEY LOAD THE WHOLD CUBE IN TO INDEX
            #xc = iris.analysis.interpolate.extract_nearest_neighbour(diag, [('grid_longitude', c_section_lon)]).data
            #lon_slice = iris.analysis.interpolate.linear(diag, [('grid_longitude', l), ('grid_latitude', np.linspace(20, 30, 50))])
            #print lon_slice
            #pdb.set_trace
            #iris.save(lon_slice, '/nfs/a90/eepdwCross_Sections/%s_%s_height_XC_Longitude_%s.pp' % (experiment_id, diagnostic, str(c_section_lon).replace(".", "")))
            #iris.save(iris.analysis.interpolate.extract_nearest_neighbour(diag, [('grid_longitude', c_section_lon)]),
            #          '/nfs/a90/eepdwCross_Sections/%s_%s_height_XC_Longitude_%s.pp'
            #          % (experiment_id, diagnostic, str(c_section_lon).replace(".", "")))
            #xc=lon_slice.data
            #np.savez('/nfs/a90/eepdwCross_Sections/%s_%s_height_XC_Longitude_%s' % (experiment_id, diag, c_section_lon), xc=xc, coord=diag.coord('grid_latitude').points)
| {
"content_hash": "bc1f84aeecb7c756a383d4c14957f617",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 165,
"avg_line_length": 33.04950495049505,
"alnum_prop": 0.6698621929298981,
"repo_name": "peterwilletts24/Python-Scripts",
"id": "839e771fd9da3c5db511e0bb9e709ee56d38087d",
"size": "3338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vertical_cross_sections/pressure_heights_save_multiple.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2242925"
},
{
"name": "Shell",
"bytes": "140"
}
],
"symlink_target": ""
} |
import os
import re
from docutils.parsers.rst import Directive, directives
from docutils.transforms import Transform
from docutils.utils.error_reporting import SafeString, ErrorString
from docutils import io, nodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.roles import XRefRole
from sphinx.util.nodes import make_refnode
from sphinx import addnodes
class CMakeModule(Directive):
    """ Declare the cmake-module directive

    Reads the referenced CMake file and feeds the reStructuredText
    embedded in it into the current document.  Documentation may appear
    either as line comments starting with ``#.rst:`` or as bracket
    comments opened with ``#[=[.rst:`` (any number of ``=``) and closed
    by the matching ``]=]`` bracket.
    """
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'encoding': directives.encoding}

    def __init__(self, *args, **keys):
        # Matches the opening of a .rst bracket comment, e.g. "#[==[.rst:".
        # The "eq" group captures the run of '=' so the matching closing
        # bracket ("]==]") can be derived from it.
        self.re_start = re.compile(r'^#\[(?P<eq>=*)\[\.rst:$')
        Directive.__init__(self, *args, **keys)

    def run(self):
        """Extract the .rst lines from the CMake file and insert them
        into the current document at this directive's position."""
        settings = self.state.document.settings
        if not settings.file_insertion_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)
        env = self.state.document.settings.env
        rel_path, path = env.relfn2path(self.arguments[0])
        path = os.path.normpath(path)
        encoding = self.options.get('encoding', settings.input_encoding)
        e_handler = settings.input_encoding_error_handler
        try:
            # Register the file so Sphinx rebuilds when it changes.
            settings.record_dependencies.add(path)
            f = io.FileInput(source_path=path, encoding=encoding,
                             error_handler=e_handler)
        except UnicodeEncodeError as error:
            raise self.severe('Problems with "%s" directive path:\n'
                              'Cannot encode input file path "%s" '
                              '(wrong locale?).' %
                              (self.name, SafeString(path)))
        except IOError as error:
            raise self.severe('Problems with "%s" directive path:\n%s.' %
                              (self.name, ErrorString(error)))
        raw_lines = f.read().splitlines()
        f.close()
        # Parser state: None outside documentation, '#' inside a
        # line-comment block, otherwise the closing bracket string
        # (e.g. "]==]") while inside a bracket comment.
        rst = None
        # Non-documentation lines become empty strings so that reported
        # line numbers still match the original file.
        lines = []
        for line in raw_lines:
            if rst is not None and rst != '#':
                # Bracket mode: check for end bracket
                pos = line.find(rst)
                if pos >= 0:
                    if line[0] == '#':
                        line = ''
                    else:
                        # Keep any payload that precedes the end bracket.
                        line = line[0:pos]
                    rst = None
            else:
                # Line mode: check for .rst start (bracket or line)
                m = self.re_start.match(line)
                if m:
                    rst = ']%s]' % m.group('eq')
                    line = ''
                elif line == '#.rst:':
                    rst = '#'
                    line = ''
                elif rst == '#':
                    # Inside a line-comment block: strip the leading "# ";
                    # any other prefix ends the block.
                    if line == '#' or line[:2] == '# ':
                        line = line[2:]
                    else:
                        rst = None
                        line = ''
                elif rst is None:
                    line = ''
            lines.append(line)
        if rst is not None and rst != '#':
            raise self.warning('"%s" found unclosed bracket "#[%s[.rst:" in %s' %
                              (self.name, rst[1:-1], path))
        self.state_machine.insert_input(lines, path)
        return []
def setup(app):
    """Sphinx extension entry point: register the cmake-module directive."""
    app.add_directive('cmake-module', CMakeModule)
"content_hash": "8008df0ae95823fd2a8c033d8b18eadf",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 81,
"avg_line_length": 36.69230769230769,
"alnum_prop": 0.5094339622641509,
"repo_name": "thfabian/sequoia",
"id": "3a21e4d30f4a23c9df599b4aedebbc99e01b418d",
"size": "4176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sequoia-docs/src/cmake.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1313867"
},
{
"name": "CMake",
"bytes": "328740"
},
{
"name": "GLSL",
"bytes": "4544"
},
{
"name": "HTML",
"bytes": "16126"
},
{
"name": "JavaScript",
"bytes": "6477"
},
{
"name": "Python",
"bytes": "22670"
},
{
"name": "Shell",
"bytes": "50717"
}
],
"symlink_target": ""
} |
"""
===============
bright_analysis
===============
This package is a template for other DESI_ Python_ packages.
You should use ``python setup.py version`` to set the version. This requires
the desiutil package.
.. _DESI: http://desi.lbl.gov
.. _Python: http://python.org
"""
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# The line above will help with 2to3 support.
#
# Set version string.
#
from ._version import __version__
| {
"content_hash": "1d8b90ce3c63bc52b8c3e1876089809e",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.6378269617706237,
"repo_name": "apcooper/bright_analysis",
"id": "81878b2f653518a37bdf9c18e33987a5c8ff97be",
"size": "585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/bright_analysis/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "70213"
}
],
"symlink_target": ""
} |
import foauth.providers
from foauth import OAuthDenied, OAuthError
class Venmo(foauth.providers.OAuth2):
    """OAuth2 provider definition for Venmo."""
    # General info about the provider
    provider_url = 'https://venmo.com/'
    docs_url = 'https://developer.venmo.com/docs/oauth'
    favicon_url = provider_url + 'favicon.ico'
    category = 'Money'

    # URLs to interact with the API
    authorize_url = 'https://api.venmo.com/v1/oauth/authorize'
    access_token_url = 'https://api.venmo.com/v1/oauth/access_token'
    api_domain = 'api.venmo.com'
    # Venmo expects the bearer token as a URI parameter, not a header.
    bearer_type = foauth.providers.BEARER_URI

    # (scope-name, human-readable description) pairs; None is the base
    # scope that is always granted.
    available_permissions = [
        (None, 'read your account details and current balance'),
        ('access_email', 'read your email address'),
        ('access_phone', 'read your phone number'),
        ('access_balance', 'read your current balance'),
        ('access_friends', 'access your list of friends'),
        ('access_feed', 'read your payment history and activity feed'),
    ]

    def get_authorize_params(self, redirect_uri, scopes):
        # The profile scope is always requested on top of whatever the
        # user selected.
        scopes = ['access_profile'] + scopes
        return super(Venmo, self).get_authorize_params(redirect_uri, scopes)

    def get_user_id(self, key):
        # Fetch the authenticated user's record and return its id as text.
        r = self.api(key, self.api_domain, u'/v1/me')
        return unicode(r.json()[u'data'][u'user'][u'id'])
| {
"content_hash": "54bdb92dee4b03a649436d59c3a0197d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 76,
"avg_line_length": 37.6764705882353,
"alnum_prop": 0.6518345042935206,
"repo_name": "foauth/foauth.org",
"id": "d5d78084999d322be2269e380e480be4b8c41d04",
"size": "1281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "services/venmo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8695"
},
{
"name": "HTML",
"bytes": "31228"
},
{
"name": "Python",
"bytes": "124340"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the contents of *fname* (relative to this setup.py), or ''.

    Missing or unreadable files yield an empty string so setup() still
    works from contexts where the README is absent (e.g. sdist-less
    installs).
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    try:
        # Context manager guarantees the handle is closed; the original
        # left the file open until garbage collection.
        with open(path) as handle:
            return handle.read()
    except IOError:
        return ''
setup(
    name='redsolutioncms.django-generic-ratings',
    # The version string is maintained in the ratings package itself.
    version=__import__('ratings').__version__,
    description=read('DESCRIPTION'),
    author='Francesco Banconi',
    author_email='francesco.banconi@gmail.com',
    url='https://bitbucket.org/frankban/django-generic-ratings/downloads',
    zip_safe=False,
    packages=find_packages(),
    include_package_data=True,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Utilities'
    ],
    # Hook that lets the RedsolutionCMS setup wizard discover this app.
    entry_points={
        'redsolutioncms': ['ratings = ratings.redsolution_setup', ],
    }
)
| {
"content_hash": "57a5b80677448d5b4e0e8bd11f6a29b5",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 79,
"avg_line_length": 33.972972972972975,
"alnum_prop": 0.6459824980111376,
"repo_name": "redsolution/django-generic-ratings",
"id": "afd4d2d4b83748df35ddec4b472cb01836006a60",
"size": "1281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6714"
},
{
"name": "Python",
"bytes": "115064"
}
],
"symlink_target": ""
} |
import pygame
import pygame.locals as GAME_GLOBALS
import pygame.event as GAME_EVENTS
# knihovna zakladnich funkci
import sys
# knihovna nahodnych cisel
import random
# knihovna casu
import time
# vyctove typy
from enum import Enum
# objekty #####################################################################################
class Smer(Enum):
    """Movement/facing direction: left, right, up, down."""
    VLEVO = 1
    VPRAVO = 2
    NAHORU = 3
    DOLU = 4
# Object representing the pug (the player character).
class Mopslik:
    """Player sprite: tracks its position, the direction it is facing,
    and the image to draw for each movement direction."""

    def __init__(self):
        self.barva = (255, 255, 255)
        self.x = 0
        self.y = 0
        self.bezim = Smer.VPRAVO
        # Per-instance direction -> sprite-image mapping.  This used to be
        # a class attribute, so nastavJakVypadam() mutated state shared by
        # every Mopslik instance; each pug now owns its own dict.
        self.meObrazky = {
            Smer.VLEVO: "zatim zadny",
            Smer.VPRAVO: "zatim zadny",
            Smer.NAHORU: "zatim zadny",
            Smer.DOLU: "zatim zadny",
        }

    def udelejKrokVpravo(self):
        """Take a step to the right.

        NOTE(review): the right step is 5 px while every other direction
        moves 10 px -- confirm whether this asymmetry is intentional.
        """
        self.x += 5
        self.bezim = Smer.VPRAVO

    def udelejKrokVlevo(self):
        """Take a step to the left."""
        self.x -= 10
        self.bezim = Smer.VLEVO

    def udelejKrokDolu(self):
        """Take a step down."""
        self.y += 10
        self.bezim = Smer.DOLU

    def udelejKrokNahoru(self):
        """Take a step up."""
        self.y -= 10
        self.bezim = Smer.NAHORU

    def nastavJakVypadam(self, kdyzJduVlevo, kdyzJduVpravo, kdyzJduNahoru, kdyzJduDolu):
        """Assign the sprite image used for each movement direction."""
        self.meObrazky[Smer.VLEVO] = kdyzJduVlevo
        self.meObrazky[Smer.VPRAVO] = kdyzJduVpravo
        self.meObrazky[Smer.NAHORU] = kdyzJduNahoru
        self.meObrazky[Smer.DOLU] = kdyzJduDolu

    def nakresliSe(self):
        """Blit the image matching the current direction at (x, y)."""
        obrazovka.blit(self.meObrazky[self.bezim], (self.x, self.y))
# Object representing a treat the pug can collect.
class Dobrota:
    """A collectible treat: red by default, drawn at an assigned position."""

    def __init__(self):
        # Red placeholder colour until an image is assigned.
        self.barva = (255, 0, 0)

    def nastavJakVypadam(self, obrazekDobroty):
        """Remember the image used to draw this treat."""
        self.obrazek = obrazekDobroty

    def nastavPozici(self, x, y):
        """Place the treat at the given screen coordinates."""
        self.x, self.y = x, y

    def nakresliSe(self):
        """Blit the treat's image at its current position."""
        obrazovka.blit(self.obrazek, (self.x, self.y))
# Object representing the pug thief (the antagonist).
class Zlodej:
    """Placeholder for the pug thief; currently only stores a colour."""

    def __init__(self):
        # Red placeholder colour until the thief gets real behaviour.
        self.barva = (255, 0, 0)
# Object representing the keyboard state.
class Klavesnice:
    """Tracks which of the four arrow keys are currently held down."""

    def __init__(self):
        # No key is pressed when the game starts.
        self.vlevoStisknuta = self.vpravoStisknuta = False
        self.nahoruStisknuta = self.doluStisknuta = False
# global variables ############################################################
# screen size in pixels
sirkaObrazovky = 1000
vyskaObrazovky = 600
# the player's pug
mopslik = Mopslik()
# keyboard state
klavesnice = Klavesnice()
# the treat to collect
dobrota = Dobrota();
# functions ###################################################################
# Shut down pygame and terminate the process.
def ukonciHru():
    pygame.quit()
    sys.exit()
# Process every keyboard event that occurred since the last redraw and
# store the resulting key state on the given Klavesnice object.
def zpracujUdalosti(klavesnice):
    for udalost in GAME_EVENTS.get():
        # a key went down
        if udalost.type == pygame.KEYDOWN:
            # LEFT arrow pressed
            if udalost.key == pygame.K_LEFT:
                klavesnice.vlevoStisknuta = True
            # RIGHT arrow pressed
            if udalost.key == pygame.K_RIGHT:
                klavesnice.vpravoStisknuta = True
            # UP arrow pressed
            if udalost.key == pygame.K_UP:
                klavesnice.nahoruStisknuta = True
            # DOWN arrow pressed
            if udalost.key == pygame.K_DOWN:
                klavesnice.doluStisknuta = True
            # ESCAPE pressed: quit the game
            if udalost.key == pygame.K_ESCAPE:
                ukonciHru()
        # a previously pressed key was released
        if udalost.type == pygame.KEYUP:
            # LEFT arrow released
            if udalost.key == pygame.K_LEFT:
                klavesnice.vlevoStisknuta = False
            # RIGHT arrow released
            if udalost.key == pygame.K_RIGHT:
                klavesnice.vpravoStisknuta = False
            # UP arrow released
            if udalost.key == pygame.K_UP:
                klavesnice.nahoruStisknuta = False
            # DOWN arrow released
            if udalost.key == pygame.K_DOWN:
                klavesnice.doluStisknuta = False
        # the window-close event arrived
        if udalost.type == GAME_GLOBALS.QUIT:
            ukonciHru()
# Move the sprites (pug, treats, catcher) according to the key state and
# draw them for this frame.
def pohniMopslikemAOstatnimyObjekty(klavesnice):
    if klavesnice.vpravoStisknuta:
        mopslik.udelejKrokVpravo()
    if klavesnice.vlevoStisknuta:
        mopslik.udelejKrokVlevo()
    if klavesnice.doluStisknuta:
        mopslik.udelejKrokDolu()
    if klavesnice.nahoruStisknuta:
        mopslik.udelejKrokNahoru()
    # draw the treat
    dobrota.nakresliSe()
    # draw the pug last so it is rendered on top of the other sprites
    mopslik.nakresliSe()
# program #####################################################################
# initialise pygame
pygame.init()
# set the screen width and height
obrazovka = pygame.display.set_mode((sirkaObrazovky, vyskaObrazovky))
# set the window title
pygame.display.set_caption('Pug Run')
# load the sprite images
# NOTE(review): the up/down sprites reuse the left-facing image -- there
# are presumably no dedicated up/down images yet.
obrazekMopslikBeziVlevo = pygame.image.load("obrazky/mopslik-vlevo.png")
obrazekMopslikBeziVpravo = pygame.image.load("obrazky/mopslik-vpravo.png")
obrazekMopslikBeziNahoru = pygame.image.load("obrazky/mopslik-vlevo.png")
obrazekMopslikBeziDolu = pygame.image.load("obrazky/mopslik-vlevo.png")
obrazekDobrota = pygame.image.load("obrazky/kosticka.png")
# load and play the bark sound
zvukStek = pygame.mixer.Sound("zvuky/haf.ogg")
zvukStek.set_volume(50)
zvukStek.play()
# hand the loaded images to the sprites and place the treat randomly
mopslik.nastavJakVypadam(obrazekMopslikBeziVlevo, obrazekMopslikBeziVpravo, obrazekMopslikBeziNahoru, obrazekMopslikBeziDolu)
dobrota.nastavJakVypadam(obrazekDobrota)
dobrota.nastavPozici(random.randint(10,sirkaObrazovky), random.randint(10,vyskaObrazovky))
# the game's endless main loop
while True:
    # start fresh: fill the background (white)
    obrazovka.fill((255,255,255))
    # process keyboard/mouse/... events and update the key-state object
    zpracujUdalosti(klavesnice)
    # move the pug & friends and redraw them
    pohniMopslikemAOstatnimyObjekty(klavesnice)
    # flip the finished frame onto the screen
    pygame.display.update()
    # ... and wait a moment (roughly 33 frames per second)
    time.sleep(0.03)
| {
"content_hash": "7f26670abb5c948101cf0cc95074c400",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 125,
"avg_line_length": 32.85024154589372,
"alnum_prop": 0.6316176470588235,
"repo_name": "dvorka/kids-coding-crash-course",
"id": "0569a3cfa85a8d967cd95409ac323ee08c6756d8",
"size": "6949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "50-game-pug-run/game-pug-run.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7240"
},
{
"name": "Shell",
"bytes": "27"
}
],
"symlink_target": ""
} |
from vizdoom import *
from doom_instance import DoomInstance
import numpy as np
class DoomInstanceCig(DoomInstance):
    """ViZDoom instance configured for CIG-style deathmatch play against bots.

    Fixes versus the previous revision:
    - inherits from ``DoomInstance`` (the class previously had no base, so the
      ``super()`` calls and ``self.game``/``self.variables`` accesses would
      fail at runtime),
    - the ``+colorset`` argument now actually interpolates ``color`` (the
      format string was missing its ``{}`` placeholder),
    - removed a duplicated ``+sv_nocrouch 1`` argument.
    """
    def __init__(self, config, wad, skiprate, visible=False, mode=Mode.PLAYER,
                 actions=None, id=None, color=0, bot_num=10):
        # Number of bots (re)spawned at the start of every episode.
        self.bot_num = bot_num
        # Extra ViZDoom engine arguments for a deathmatch session.
        args = (
            "+name DoomNet +colorset {}".format(color) +
            " -deathmatch"
            " +sv_forcerespawn 0"
            " +sv_noautoaim 1"
            " +sv_respawnprotect 1"
            " +sv_spawnfarthest 1"
            " +sv_nocrouch 1"
            " +viz_respawn_delay 0"
            #" +viz_nocheat 1"
            #" +viz_debug 0"
            " +timelimit 10.0"
        )
        super().__init__(config, wad, skiprate, visible, mode, actions, id, args)

    def step_normalized(self, action):
        """Step the game and shape the raw reward with game-variable deltas."""
        state, reward, finished = super().step_normalized(action)
        # comment this for basic and rocket configs
        if state.variables is not None:
            # Per-variable change since the previous step.
            # Presumed layout: [ammo, health, hit/frag counter] -- TODO
            # confirm against the scenario config.
            diff = state.variables - self.variables
            # Weight the deltas: ammo gains count 10x more than losses,
            # health changes are scaled down, hits are rewarded fully.
            diff = np.multiply(diff, [100 * 0.5 * (0.2 if diff[0] > 0 else 0.02), 100 * 0.5 * 0.01, 100 * 1 * 1])
            if diff[2] > 0:
                print('HIT!!!', self.id)
            # penalize shots attempted with zero ammo
            if self.variables[0] == 0 and self.actions[action][2] == 1:
                diff[0] -= 10
            # Constant -3 per step pushes the agent to act, not idle.
            reward += diff.sum() - 3
            self.variables = state.variables.copy()
            # Reset the hit counter fed to the learner each step.
            state.variables[2] = 0
        return state, reward, finished

    def new_episode(self):
        """Start a fresh episode and (re)spawn the configured bots."""
        super().new_episode()
        self.game.send_game_command("removebots")
        if self.id is not None:
            for i in range(self.bot_num):
                self.game.send_game_command("addbot")
| {
"content_hash": "1fca8630db91634d4468251f6754882c",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 123,
"avg_line_length": 35.407407407407405,
"alnum_prop": 0.5135983263598326,
"repo_name": "akolishchak/doom-net-pytorch",
"id": "49ced0ec5803195509c4715c3090e720f45a3f34",
"size": "1995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/doom_instance_cig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "303488"
},
{
"name": "Shell",
"bytes": "38833"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory."""
    # Use a context manager so the handle is closed promptly (the previous
    # version leaked the open file until garbage collection).
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Package metadata; executed when this setup.py is run by pip/setuptools.
setup(
    name = "pyNIBE",
    version = "0.1",
    author = "Rickard Ostman",
    author_email = "rickard@ostman.net",
    description = ("library for interaction with the NIBE UPLINK service"),
    keywords = "nibe heating geothermal home-automation monitoring",
    url = "https://github.com/hypokondrickard/pyNIBE",
    packages=['pyNIBE', 'tests'],
    # Reuse the README as the long description shown on PyPI.
    long_description=read('README.md'),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Utilities",
    ],
)
| {
"content_hash": "25e3f228d28d3b6183ec1a9807a70a23",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 79,
"avg_line_length": 33.92,
"alnum_prop": 0.6650943396226415,
"repo_name": "hypokondrickard/pyNIBE",
"id": "0e1cbc6b9b48ed24308983bbdef8c727e1d60a75",
"size": "848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5213"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Package metadata; executed when this setup.py is run by pip/setuptools.
setup(
    name="TrebuchetTrigger",
    version="0.5.6",
    packages=find_packages(),
    install_requires=['GitPython>=0.3.1', 'PyYAML>=3.10', 'redis>=2.4.9', 'salt'],
    author="Ryan Lane",
    author_email="ryan@ryandlane.com",
    description="An extendable git interface to trebuchet.",
    license="apache2",
    url="https://github.com/trebuchet-deploy/trigger",
    # Console entry points: both git subcommand aliases map to the same CLI.
    entry_points={
        'console_scripts': [
            'git-trigger = trigger.shell:main',
            'git-deploy = trigger.shell:main',
            'trigger-submodule-update = trigger.utils.submodule_update:main',
        ],
    },
)
| {
"content_hash": "3a9e43b9587c31d2405b4ef462a61c9e",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 82,
"avg_line_length": 30,
"alnum_prop": 0.6196969696969697,
"repo_name": "wikimedia/operations-software-deployment-trebuchet-trigger",
"id": "db3fe6e159001ead00dbbef4dba58054a749a988",
"size": "1206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46356"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.utils.importlib import import_module
def merge_dicts(d1, d2):
    """Update ``d1`` with ``d2`` recursively, in place.

    If a key exists in both dictionaries and both values are dict-like, they
    are merged; otherwise the value from ``d2`` overwrites the one in ``d1``.

    Uses ``.items()`` instead of the Python-2-only ``.iteritems()`` so the
    helper works under both Python 2 and Python 3.
    """
    for k, v in d2.items():
        # EAFP: try to merge the values as if they were dicts; a non-dict
        # value raises AttributeError, a missing key raises KeyError.
        try:
            merge_dicts(d1[k], v)
        # Otherwise just overwrite the original value (if any).
        except (AttributeError, KeyError):
            d1[k] = v
class DatabaseOperations(object):
    """Mixin that routes SQL compiler lookups through dbindexer's compilers.

    Intended to be mixed in front of a backend's operations class (see
    ``BaseDatabaseWrapper`` below): ``compiler()`` builds, and caches, a class
    that inherits from both the dbindexer compiler and the target backend's
    compiler of the same name.
    """
    # Module holding the dbindexer compiler classes ('<package>.compiler').
    dbindexer_compiler_module = __name__.rsplit('.', 1)[0] + '.compiler'
    def __init__(self):
        # Cache of dynamically created compiler classes, keyed by name.
        self._dbindexer_cache = {}
    def compiler(self, compiler_name):
        if compiler_name not in self._dbindexer_cache:
            # Target backend's compiler class (via the MRO past this mixin).
            target = super(DatabaseOperations, self).compiler(compiler_name)
            base = getattr(
                import_module(self.dbindexer_compiler_module), compiler_name)
            # Combine the dbindexer compiler with the backend's compiler;
            # the dbindexer base comes first so its methods take precedence.
            class Compiler(base, target):
                pass
            self._dbindexer_cache[compiler_name] = Compiler
        return self._dbindexer_cache[compiler_name]
class BaseDatabaseWrapper(object):
    """Wrapper mixin that swaps the backend's ops object for a dbindexer-aware one.

    After the target backend initializes, the ops instance's class is replaced
    in place by a subclass that also inherits ``DatabaseOperations`` above, so
    compiler lookups go through dbindexer.
    """
    def __init__(self, *args, **kwargs):
        super(BaseDatabaseWrapper, self).__init__(*args, **kwargs)
        class Operations(DatabaseOperations, self.ops.__class__):
            pass
        self.ops.__class__ = Operations
        # Re-run __init__ so DatabaseOperations sets up its compiler cache.
        self.ops.__init__()
def DatabaseWrapper(settings_dict, *args, **kwargs):
    """Factory posing as a backend DatabaseWrapper class.

    Loads the real backend named by ``settings.DBINDEXER_TARGET_ENGINE`` and
    returns an instance of a dynamically built subclass that mixes in
    ``BaseDatabaseWrapper`` so dbindexer's compilers are used.
    """
    engine = settings.DBINDEXER_TARGET_ENGINE + ".base"
    target = import_module(engine).DatabaseWrapper
    class Wrapper(BaseDatabaseWrapper, target):
        pass
    return Wrapper(settings_dict, *args, **kwargs)
| {
"content_hash": "382a416ea783d0a5acc6bc770cfe032c",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 128,
"avg_line_length": 34.63265306122449,
"alnum_prop": 0.6364172068355922,
"repo_name": "potatolondon/dbindexer-1-4",
"id": "931a41b80f179f752ec07bf514388ea3599f8f8c",
"size": "1697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbindexer/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "45894"
}
],
"symlink_target": ""
} |
"""
Simplified Chinese language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Mapping from localized (Simplified Chinese) directive names to the canonical
# reStructuredText directive names registered in directives/__init__.py.
# Entries still carrying "(translation required)" have no translation yet;
# commented-out entries are directives not exposed in this locale.
directives = {
    # language-dependent: fixed
    u'注意': 'attention',
    u'小心': 'caution',
    u'危险': 'danger',
    u'错误': 'error',
    u'提示': 'hint',
    u'重要': 'important',
    u'注解': 'note',
    u'技巧': 'tip',
    u'警告': 'warning',
    u'忠告': 'admonition',
    u'侧框': 'sidebar',
    u'主题': 'topic',
    u'line-block (translation required)': 'line-block',
    u'parsed-literal (translation required)': 'parsed-literal',
    u'醒目': 'rubric',
    u'铭文': 'epigraph',
    u'要点': 'highlights',
    u'pull-quote (translation required)': 'pull-quote',
    u'复合': 'compound',
    u'容器': 'container',
    #u'questions (translation required)': 'questions',
    u'表格': 'table',
    u'csv表格': 'csv-table',
    u'列表表格': 'list-table',
    #u'qa (translation required)': 'questions',
    #u'faq (translation required)': 'questions',
    u'元数据': 'meta',
    #u'imagemap (translation required)': 'imagemap',
    u'图片': 'image',
    u'图例': 'figure',
    u'包含': 'include',
    u'原文': 'raw',
    u'代替': 'replace',
    u'统一码': 'unicode',
    u'日期': 'date',
    u'类型': 'class',
    u'角色': 'role',
    u'默认角色': 'default-role',
    u'标题': 'title',
    u'目录': 'contents',
    u'章节序号': 'sectnum',
    u'题头': 'header',
    u'页脚': 'footer',
    #u'footnotes (translation required)': 'footnotes',
    #u'citations (translation required)': 'citations',
    u'target-notes (translation required)': 'target-notes',
    u'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Simplified Chinese name to registered (in directives/__init__.py)
directive name mapping."""
# Mapping from localized (Simplified Chinese) interpreted-text role names to
# the canonical role names. Entries marked "(translation required)" still use
# the English names.
roles = {
    # language-dependent: fixed
    u'缩写': 'abbreviation',
    u'简称': 'acronym',
    u'index (translation required)': 'index',
    u'i (translation required)': 'index',
    u'下标': 'subscript',
    u'上标': 'superscript',
    u'title-reference (translation required)': 'title-reference',
    u'title (translation required)': 'title-reference',
    u't (translation required)': 'title-reference',
    u'pep-reference (translation required)': 'pep-reference',
    u'pep (translation required)': 'pep-reference',
    u'rfc-reference (translation required)': 'rfc-reference',
    u'rfc (translation required)': 'rfc-reference',
    u'强调': 'emphasis',
    u'加粗': 'strong',
    u'字面': 'literal',
    u'named-reference (translation required)': 'named-reference',
    u'anonymous-reference (translation required)': 'anonymous-reference',
    u'footnote-reference (translation required)': 'footnote-reference',
    u'citation-reference (translation required)': 'citation-reference',
    u'substitution-reference (translation required)': 'substitution-reference',
    u'target (translation required)': 'target',
    u'uri-reference (translation required)': 'uri-reference',
    u'uri (translation required)': 'uri-reference',
    u'url (translation required)': 'uri-reference',
    u'raw (translation required)': 'raw',}
"""Mapping of Simplified Chinese role names to canonical role names
for interpreted text."""
| {
"content_hash": "6a9fb4499843d62aec0fbd545b93122e",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 35.56666666666667,
"alnum_prop": 0.6119962511715089,
"repo_name": "santisiri/popego",
"id": "13e050a01ba2fda25df1875629f7e413d81a778f",
"size": "3878",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/docutils-0.4-py2.5.egg/docutils/parsers/rst/languages/zh_cn.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1246"
},
{
"name": "C",
"bytes": "504141"
},
{
"name": "C++",
"bytes": "26125"
},
{
"name": "CSS",
"bytes": "342653"
},
{
"name": "FORTRAN",
"bytes": "4872"
},
{
"name": "GAP",
"bytes": "13267"
},
{
"name": "Genshi",
"bytes": "407"
},
{
"name": "Groff",
"bytes": "17116"
},
{
"name": "HTML",
"bytes": "383181"
},
{
"name": "JavaScript",
"bytes": "1090769"
},
{
"name": "Makefile",
"bytes": "2441"
},
{
"name": "Mako",
"bytes": "376944"
},
{
"name": "Python",
"bytes": "20895618"
},
{
"name": "Ruby",
"bytes": "3380"
},
{
"name": "Shell",
"bytes": "23581"
},
{
"name": "Smarty",
"bytes": "522"
},
{
"name": "TeX",
"bytes": "35712"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: adds OrderedFlatPage, a FlatPage subclass with an order field."""
    dependencies = [
        ('flatpages', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='OrderedFlatPage',
            fields=[
                # Multi-table-inheritance link back to the stock FlatPage row.
                ('flatpage_ptr', models.OneToOneField(serialize=False, auto_created=True, parent_link=True, primary_key=True, to='flatpages.FlatPage')),
                # Ordering position; indexed and not editable in forms.
                ('order', models.PositiveIntegerField(db_index=True, editable=False)),
            ],
            options={
                'abstract': False,
                # Default queryset ordering follows the order field.
                'ordering': ('order',),
            },
            bases=('flatpages.flatpage', models.Model),
        ),
    ]
| {
"content_hash": "6a01eadb5fe085a93b098809aa1bc5a4",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 152,
"avg_line_length": 29.56,
"alnum_prop": 0.5588633288227334,
"repo_name": "sreidy/roboticsclub.org",
"id": "1cf1678349f27cc53427b71b6dd77e5ab715bf9d",
"size": "763",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ordered_flatpages/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "87807"
},
{
"name": "HTML",
"bytes": "32573"
},
{
"name": "JavaScript",
"bytes": "5052"
},
{
"name": "Python",
"bytes": "239652"
}
],
"symlink_target": ""
} |
from django import template
from rollyourown.seo.base import get_metadata, get_linked_metadata
from django.template import VariableDoesNotExist
register = template.Library()
class MetadataNode(template.Node):
    """Template node that resolves a target into a path and renders its metadata.

    NOTE(review): this module uses Python 2 constructs (``basestring``,
    ``unicode``, ``except Exception, e``) and is not Python 3 compatible.
    """
    def __init__(self, metadata_name, variable_name, target, site, language):
        # metadata_name: optional name of the metadata class to look up.
        # variable_name: if set, render() stores the metadata in the context
        # under this name instead of returning its string form.
        self.metadata_name = metadata_name
        self.variable_name = variable_name
        # Default target is the current request's path.
        self.target = template.Variable(target or 'request.path')
        # site/language are optional template variables.
        self.site = site and template.Variable(site) or None
        self.language = language and template.Variable(language) or None
    def render(self, context):
        try:
            target = self.target.resolve(context)
        except VariableDoesNotExist:
            msg = (u"{% get_metadata %} needs some path information.\n"
                        u"Please use RequestContext with the django.core.context_processors.request context processor.\n"
                        "Or provide a path or object explicitly, eg {% get_metadata for path %} or {% get_metadata for object %}")
            raise template.TemplateSyntaxError(msg)
        else:
            # Normalise the resolved target into a path string where possible:
            # a plain string, an object with get_absolute_url, or a dict-like
            # value exposing a 'get_absolute_url' callable.
            if callable(target):
                target = target()
            if isinstance(target, basestring):
                path = target
            elif hasattr(target, 'get_absolute_url'):
                path = target.get_absolute_url()
            elif hasattr(target, "__iter__") and 'get_absolute_url' in target:
                path = target['get_absolute_url']()
            else:
                path = None
        kwargs = {}
        # If a site is given, pass that on
        if self.site:
            kwargs['site'] = self.site.resolve(context)
        # If a language is given, pass that on
        if self.language:
            kwargs['language'] = self.language.resolve(context)
        metadata = None
        # If the target is a django model object
        if hasattr(target, 'pk'):
            metadata = get_linked_metadata(target, self.metadata_name, context, **kwargs)
        if not isinstance(path, basestring):
            path = None
        if not metadata:
            # Fetch the metadata
            try:
                metadata = get_metadata(path, self.metadata_name, context, **kwargs)
            except Exception, e:
                raise template.TemplateSyntaxError(e)
        # If a variable name is given, store the result there
        if self.variable_name is not None:
            context[self.variable_name] = metadata
            return ""
        else:
            return unicode(metadata)
def do_get_metadata(parser, token):
    """
    Retrieve an object which can produce (and format) metadata.

        {% get_metadata [for my_path] [in my_language] [on my_site] [as my_variable] %}

    or if you have multiple metadata classes:

        {% get_metadata MyClass [for my_path] [in my_language] [on my_site] [as my_variable] %}
    """
    pieces = token.split_contents()
    tag_name = pieces[0]
    remainder = list(pieces[1:])
    options = {'as': None, 'for': None, 'in': None, 'on': None}
    metadata_name = None
    # An odd number of remaining tokens means a leading metadata class name
    # precedes the key/value option pairs.
    if len(remainder) % 2 == 1:
        metadata_name = remainder.pop(0)
    # Consume the rest as "key value key value ..." pairs; only the keys in
    # `options` are valid.
    while remainder:
        if len(remainder) < 2 or remainder[0] not in options:
            raise template.TemplateSyntaxError("expected format is '%r [as <variable_name>]'" % tag_name)
        key = remainder.pop(0)
        options[key] = remainder.pop(0)
    return MetadataNode(metadata_name,
                        variable_name=options['as'],
                        target=options['for'],
                        site=options['on'],
                        language=options['in'])
register.tag('get_metadata', do_get_metadata)
| {
"content_hash": "2511404a1087fab22c33c02d56bda027",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 130,
"avg_line_length": 37.00961538461539,
"alnum_prop": 0.5863860743050143,
"repo_name": "AlexLSB/django-seo",
"id": "34688130524372d59345fefe6e24dab3f44c2934",
"size": "3896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rollyourown/seo/templatetags/seo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "81"
},
{
"name": "Python",
"bytes": "139736"
}
],
"symlink_target": ""
} |
from py_utils.refactor.annotated_symbol.class_definition import *
from py_utils.refactor.annotated_symbol.function_definition import *
from py_utils.refactor.annotated_symbol.import_statement import *
from py_utils.refactor.annotated_symbol.reference import *
from py_utils.refactor import snippet
# Public API of this package.
__all__ = [
    'Annotate',
    'Class',
    'Function',
    'Import',
    'Reference',
]
# Specific symbol types with extra methods for manipulating them.
# Python's full grammar is here:
# https://docs.python.org/2/reference/grammar.html
# Annotated Symbols have an Annotate classmethod that takes a symbol type and
# list of children, and returns an instance of that annotated Symbol.
ANNOTATED_SYMBOLS = (
    AsName,
    Class,
    DottedName,
    ImportFrom,
    ImportName,
    Function,
)
# Unfortunately, some logical groupings are not represented by a node in the
# parse tree. To work around this, some annotated Symbols have an Annotate
# classmethod that takes and returns a list of Snippets instead.
ANNOTATED_GROUPINGS = (
    Reference,
)
def Annotate(f):
  """Return the annotated syntax tree of the given file object."""
  return _AnnotateNode(snippet.Snippetize(f))
def _AnnotateNode(node):
  """Recursively replace Snippet symbols with their annotated equivalents.

  Non-Symbol nodes (tokens) are returned unchanged. Otherwise the children are
  annotated bottom-up, grouping annotators are applied first, and the node
  itself is replaced by an annotated symbol class if one matches.
  """
  if not isinstance(node, snippet.Symbol):
    return node
  # Materialize the children as a list. Under Python 3, map() returns a
  # one-shot iterator, which would be exhausted by the first Annotate()
  # attempt below and leave the later attempts (and the Symbol fallback)
  # with no children.
  children = [_AnnotateNode(child) for child in node.children]
  for symbol_type in ANNOTATED_GROUPINGS:
    annotated_grouping = symbol_type.Annotate(children)
    if annotated_grouping:
      children = annotated_grouping
      break
  for symbol_type in ANNOTATED_SYMBOLS:
    annotated_symbol = symbol_type.Annotate(node.type, children)
    if annotated_symbol:
      return annotated_symbol
  return snippet.Symbol(node.type, children)
| {
"content_hash": "a2f6a10f75cdbafeb10d53bac9f119a1",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 77,
"avg_line_length": 25.651515151515152,
"alnum_prop": 0.7347903130537508,
"repo_name": "sahiljain/catapult",
"id": "610bc15cff919b856d2e9ec2f927c33aa6ab47bc",
"size": "1890",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "common/py_utils/py_utils/refactor/annotated_symbol/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3598"
},
{
"name": "C++",
"bytes": "6390"
},
{
"name": "CSS",
"bytes": "24751"
},
{
"name": "HTML",
"bytes": "14570791"
},
{
"name": "JavaScript",
"bytes": "511007"
},
{
"name": "Python",
"bytes": "5842419"
},
{
"name": "Shell",
"bytes": "2834"
}
],
"symlink_target": ""
} |
import itertools
import os
import os.path
import logging
from nltk.tag.stanford import NERTagger
import wget
from iepy.preprocess.ner.base import BaseNERRunner
from iepy.utils import DIRS, unzip_file
logger = logging.getLogger(__name__)
stanford_ner_name = 'stanford-ner-2014-01-04'
download_url_base = 'http://nlp.stanford.edu/software/'
class NonTokenizingNERTagger(NERTagger):
    """NERTagger variant that makes Stanford NER split on whitespace only.

    The input is already tokenized upstream, so the whitespace tokenizer
    factory prevents Stanford from re-tokenizing it.
    """
    @property
    def _cmd(self):
        base_cmd = super(NonTokenizingNERTagger, self)._cmd
        extra = ["-tokenizerFactory",
                 "edu.stanford.nlp.process.WhitespaceTokenizer"]
        return base_cmd + extra
class NERRunner(BaseNERRunner):
    """Wrapper to insert a generic callable sentence NER tagger into the pipeline.
    """
    def __init__(self, ner, override=False):
        super(NERRunner, self).__init__(override=override)
        # ner: callable taking a list of sentences and returning, per
        # sentence, a list of (token, kind) pairs.
        self.ner = ner
    def run_ner(self, doc):
        """Tag *doc* and return the entity occurrences found in it.

        Groups consecutive tokens that share a non-'O' NER kind into one
        occurrence spanning those token offsets.
        """
        entities = []
        # Apply the ner algorithm which takes a list of sentences and returns
        # a list of sentences, each being a list of NER-tokens, each of which is
        # a pairs (tokenstring, class)
        ner_sentences = self.ner(doc.get_sentences())
        # Flatten the nested list above into just a list of kinds
        ner_kinds = (k for s in ner_sentences for (_, k) in s)
        # We build a large iterator z that goes over tuples like the following:
        #   (offset, (token, kind))
        # offset just goes incrementally from 0
        z = itertools.chain(
            enumerate(zip(doc.tokens, ner_kinds)),
            # Add a sentinel last token to simplify last iteration of loop below
            [(len(doc.tokens), (None, 'INVALID'))]
        )
        # Traverse z, looking for changes in the kind field. If there is a
        # change of kind, we have a new set of contiguous tokens; if the kind
        # of those isn't "O" (which means "other"), record the occurrence
        #
        # offset keeps the start of the current token run; last_kind keeps the kind.
        last_kind = 'O'
        offset = 0
        for i, (token, kind) in z:
            if kind != last_kind:
                if last_kind != 'O':
                    # Found a new entity in offset:i
                    name = ' '.join(doc.tokens[offset:i])
                    entities.append(
                        self.build_occurrence(name, last_kind.lower(), name, offset, i)
                    )
                # Restart offset counter at each change of entity type
                offset = i
            last_kind = kind
        # Just a sanity check: verify that all NER tokens were consumed
        # (zip() above stops at the shorter input, so leftovers here would
        # mean the tagger returned more tokens than the document has).
        try:
            next(ner_kinds)
            assert False, "ner_kinds should have been completely consumed"
        except StopIteration:
            # Actually the stop iteration is the expected result here
            pass
        return entities
class StanfordNERRunner(NERRunner):
    """NERRunner backed by the Stanford NER English 3-class CRF model.

    Raises LookupError if the Stanford NER package has not been downloaded
    into the per-user data directory yet (see download() below).
    """
    def __init__(self, override=False):
        ner_path = os.path.join(DIRS.user_data_dir, stanford_ner_name)
        if not os.path.exists(ner_path):
            raise LookupError("Stanford NER not found. Try running the "
                              "command download_third_party_data.py")
        # Tagger over pre-tokenized sentences (whitespace tokenizer).
        ner = NonTokenizingNERTagger(
            os.path.join(ner_path, 'classifiers', 'english.all.3class.distsim.crf.ser.gz'),
            os.path.join(ner_path, 'stanford-ner.jar'),
            encoding='utf8')
        super(StanfordNERRunner, self).__init__(ner.tag_sents, override)
def download():
    """Ensure the Stanford NER package is installed in the user data dir.

    Probes for an existing installation by constructing StanfordNERRunner;
    on LookupError, downloads the zip from the Stanford site and unpacks it.
    """
    logger.info("Downloading Stanford NER...")
    try:
        StanfordNERRunner()
    except LookupError:
        # Package not found, lets download and install it.
        # Use makedirs (not mkdir) so missing parent directories of the
        # per-user data dir don't make the bootstrap fail.
        if not os.path.exists(DIRS.user_data_dir):
            os.makedirs(DIRS.user_data_dir)
        # wget saves into the current working directory, so move there first.
        os.chdir(DIRS.user_data_dir)
        package_filename = '{0}.zip'.format(stanford_ner_name)
        zip_path = os.path.join(DIRS.user_data_dir, package_filename)
        wget.download(download_url_base + package_filename)
        unzip_file(zip_path, DIRS.user_data_dir)
    else:
        logger.info("Stanford NER is already downloaded and functional.")
| {
"content_hash": "470bfb3f399014bde51c82a276df6762",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 91,
"avg_line_length": 36.660714285714285,
"alnum_prop": 0.6134924500730639,
"repo_name": "mrshu/iepy",
"id": "2bc2773595b0c3a9fc57d847ed6b22f0dd15532a",
"size": "4106",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "iepy/preprocess/ner/stanford.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "25531"
},
{
"name": "HTML",
"bytes": "26374"
},
{
"name": "JavaScript",
"bytes": "26234"
},
{
"name": "Python",
"bytes": "400269"
}
],
"symlink_target": ""
} |
'''
File name: example1.py
Author: Galaad Couillec
Date created: 10/02/2016
Date last modified: 10/06/2016
'''
from MongoManager import Mongod, MongoReplicaSet, MongoCluster
import argparse
import sys
# Command-line interface: each flag selects one cluster management action.
parser = argparse.ArgumentParser(description='MongoDB cluster manager')
parser.add_argument('--initialize', dest='initialize', action='store_true', default=False, help='Initialize the environment of the cluster')
parser.add_argument('--start', dest='start', action='store_true', default=False, help='Start the cluster')
parser.add_argument('--restart', dest='restart', action='store_true', default=False, help='Restart the cluster')
parser.add_argument('--stop', dest='stop', action='store_true', default=False, help='Stop the cluster')
parser.add_argument('--clean', dest='clean', action='store_true', default=False, help='Clean the environment of the cluster')
args = vars(parser.parse_args())
initialize = args["initialize"]
start = args["start"]
restart = args["restart"]
stop = args["stop"]
clean = args["clean"]
# Single cluster instance all actions operate on.
cluster = MongoCluster(hostname="azimut", username="casket")
if start:
    # --start implies initialization of the environment first.
    cluster.initialize()
    cluster.start()
elif restart:
    cluster.restart()
elif stop:
    cluster.stop()
elif clean:
    cluster.clean()
elif initialize:
    # Fix: --initialize was previously parsed but never acted upon.
    cluster.initialize()
| {
"content_hash": "2cc115459e9be395acfd6b35300f464e",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 140,
"avg_line_length": 33.891891891891895,
"alnum_prop": 0.7232854864433812,
"repo_name": "afissegalaad/MongoManager",
"id": "09351a12821f6e9d0be3e271d73d39bb858db76c",
"size": "1277",
"binary": false,
"copies": "1",
"ref": "refs/heads/release",
"path": "example1.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19804"
}
],
"symlink_target": ""
} |
from flask import Blueprint
# Blueprint grouping all API endpoints; registered by the app factory.
api = Blueprint('api', __name__)
# Imported at the bottom so views/errors can import ``api`` from this
# module without a circular import.
from . import views, errors # noqa
| {
"content_hash": "68377a34bb4e6a6867309bba443f0795",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 34,
"avg_line_length": 19.6,
"alnum_prop": 0.6938775510204082,
"repo_name": "jatindhankhar/aslo-v3",
"id": "a7b4fbb5826afc0633cff5be7078f90ccc134918",
"size": "98",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aslo/api/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "84716"
},
{
"name": "Dockerfile",
"bytes": "186"
},
{
"name": "HTML",
"bytes": "29940"
},
{
"name": "JavaScript",
"bytes": "198"
},
{
"name": "Python",
"bytes": "37199"
},
{
"name": "Shell",
"bytes": "37"
}
],
"symlink_target": ""
} |
# Parameters overriding the defaults of the Migliore 2014 bulb model.
filename = ''
# Network size (reduced from the full network for faster runs).
customMitralCount = 2 # Full net 635=5*127
customGranulesPerMitralCount = 10 # Full net has total of 122166 GCs
# connectivity switches
makeSynConns = True
enableFIsyn = True
enableAmpaNmdasyn = True
enableOdorInput = True
"content_hash": "39b5342fbca6491315c9291d6acac086",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.771551724137931,
"repo_name": "JustasB/MitralSuite",
"id": "87c5cebc1ebbe23c84a037732944f7bdbfd69e46",
"size": "232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Models/Migliore2014/custom_params.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "171209"
},
{
"name": "C",
"bytes": "1392"
},
{
"name": "Gnuplot",
"bytes": "3690"
},
{
"name": "Jupyter Notebook",
"bytes": "1128050"
},
{
"name": "Matlab",
"bytes": "28779"
},
{
"name": "Python",
"bytes": "85873"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import sys
import os
import errno
import unittest
from array import array
from weakref import proxy
from functools import wraps
from test.test_support import TESTFN, check_warnings, run_unittest, make_bad_fd
from test.test_support import py3k_bytes as bytes
from test.test_support import gc_collect
from test.script_helper import run_python
from _io import FileIO as _FileIO
class AutoFileTests(unittest.TestCase):
    """Tests of _io.FileIO that operate on an automatically created test file.

    setUp opens TESTFN for writing into self.f; tearDown closes it (if still
    open) and removes the file.
    """
    # file tests for which a test file is automatically set up
    def setUp(self):
        self.f = _FileIO(TESTFN, 'w')
    def tearDown(self):
        if self.f:
            self.f.close()
        os.remove(TESTFN)
    def testWeakRefs(self):
        # verify weak references: after the file is closed and dropped,
        # the proxy must be dead
        p = proxy(self.f)
        p.write(bytes(range(10)))
        self.assertEqual(self.f.tell(), p.tell())
        self.f.close()
        self.f = None
        gc_collect()
        self.assertRaises(ReferenceError, getattr, p, 'tell')
    def testSeekTell(self):
        # absolute (whence=0), relative (whence=1) and end-relative
        # (whence=2) seeks, checked via tell()
        self.f.write(bytes(range(20)))
        self.assertEqual(self.f.tell(), 20)
        self.f.seek(0)
        self.assertEqual(self.f.tell(), 0)
        self.f.seek(10)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(5, 1)
        self.assertEqual(self.f.tell(), 15)
        self.f.seek(-5, 1)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(-5, 2)
        self.assertEqual(self.f.tell(), 15)
    def testAttributes(self):
        # verify expected attributes exist
        f = self.f
        self.assertEqual(f.mode, "wb")
        self.assertEqual(f.closed, False)
        # verify the attributes are readonly
        for attr in 'mode', 'closed':
            self.assertRaises((AttributeError, TypeError),
                              setattr, f, attr, 'oops')
    def testReadinto(self):
        # verify readinto fills a preallocated buffer and returns the count
        self.f.write(b"\x01\x02")
        self.f.close()
        a = array(b'b', b'x'*10)
        self.f = _FileIO(TESTFN, 'r')
        n = self.f.readinto(a)
        self.assertEqual(array(b'b', [1, 2]), a[:n])
    def test_none_args(self):
        # passing None as the size argument must mean "read everything"
        self.f.write(b"hi\nbye\nabc")
        self.f.close()
        self.f = _FileIO(TESTFN, 'r')
        self.assertEqual(self.f.read(None), b"hi\nbye\nabc")
        self.f.seek(0)
        self.assertEqual(self.f.readline(None), b"hi\n")
        self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"])
    def testRepr(self):
        # repr shows name when available, fd after the name is deleted,
        # and "[closed]" once closed
        self.assertEqual(repr(self.f), "<_io.FileIO name=%r mode='%s'>"
                                       % (self.f.name, self.f.mode))
        del self.f.name
        self.assertEqual(repr(self.f), "<_io.FileIO fd=%r mode='%s'>"
                                       % (self.f.fileno(), self.f.mode))
        self.f.close()
        self.assertEqual(repr(self.f), "<_io.FileIO [closed]>")
    def testErrors(self):
        f = self.f
        self.assertTrue(not f.isatty())
        self.assertTrue(not f.closed)
        #self.assertEqual(f.name, TESTFN)
        self.assertRaises(ValueError, f.read, 10) # f is open for writing
        f.close()
        self.assertTrue(f.closed)
        f = _FileIO(TESTFN, 'r')
        self.assertRaises(TypeError, f.readinto, "")
        self.assertTrue(not f.closed)
        f.close()
        self.assertTrue(f.closed)
    def testMethods(self):
        # all of these must raise ValueError once the file is closed
        methods = ['fileno', 'isatty', 'read',
                   'tell', 'truncate', 'seekable',
                   'readable', 'writable']
        if sys.platform.startswith('atheos'):
            methods.remove('truncate')
        self.f.close()
        self.assertTrue(self.f.closed)
        for methodname in methods:
            method = getattr(self.f, methodname)
            # should raise on closed file
            self.assertRaises(ValueError, method)
        # methods with one argument
        self.assertRaises(ValueError, self.f.readinto, 0)
        self.assertRaises(ValueError, self.f.write, 0)
        self.assertRaises(ValueError, self.f.seek, 0)
    def testOpendir(self):
        # Issue 3703: opening a directory should fill the errno
        # Windows always returns "[Errno 13]: Permission denied
        # Unix calls dircheck() and returns "[Errno 21]: Is a directory"
        try:
            _FileIO('.', 'r')
        except IOError as e:
            self.assertNotEqual(e.errno, 0)
            self.assertEqual(e.filename, ".")
        else:
            self.fail("Should have raised IOError")
    #A set of functions testing that we get expected behaviour if someone has
    #manually closed the internal file descriptor. First, a decorator:
    def ClosedFD(func):
        # decorator: close the underlying fd, run the test body, then try to
        # close self.f, swallowing the resulting IOError
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            finally:
                try:
                    self.f.close()
                except IOError:
                    pass
        return wrapper
    def ClosedFDRaises(func):
        # like ClosedFD, but additionally asserts the test body raises
        # IOError with errno EBADF (bad file descriptor)
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            except IOError as e:
                self.assertEqual(e.errno, errno.EBADF)
            else:
                self.fail("Should have raised IOError")
            finally:
                try:
                    self.f.close()
                except IOError:
                    pass
        return wrapper
    @ClosedFDRaises
    def testErrnoOnClose(self, f):
        f.close()
    @ClosedFDRaises
    def testErrnoOnClosedWrite(self, f):
        f.write('a')
    @ClosedFDRaises
    def testErrnoOnClosedSeek(self, f):
        f.seek(0)
    @ClosedFDRaises
    def testErrnoOnClosedTell(self, f):
        f.tell()
    @ClosedFDRaises
    def testErrnoOnClosedTruncate(self, f):
        f.truncate(0)
    @ClosedFD
    def testErrnoOnClosedSeekable(self, f):
        f.seekable()
    @ClosedFD
    def testErrnoOnClosedReadable(self, f):
        f.readable()
    @ClosedFD
    def testErrnoOnClosedWritable(self, f):
        f.writable()
    @ClosedFD
    def testErrnoOnClosedFileno(self, f):
        f.fileno()
    @ClosedFD
    def testErrnoOnClosedIsatty(self, f):
        self.assertEqual(f.isatty(), False)
    def ReopenForRead(self):
        # helper: reopen TESTFN for reading and immediately close its fd,
        # leaving a FileIO object whose descriptor is stale
        try:
            self.f.close()
        except IOError:
            pass
        self.f = _FileIO(TESTFN, 'r')
        os.close(self.f.fileno())
        return self.f
    @ClosedFDRaises
    def testErrnoOnClosedRead(self, f):
        f = self.ReopenForRead()
        f.read(1)
    @ClosedFDRaises
    def testErrnoOnClosedReadall(self, f):
        f = self.ReopenForRead()
        f.readall()
    @ClosedFDRaises
    def testErrnoOnClosedReadinto(self, f):
        f = self.ReopenForRead()
        a = array(b'b', b'x'*10)
        f.readinto(a)
class OtherFileTests(unittest.TestCase):
    """_FileIO tests that create and clean up their own files."""

    def testAbles(self):
        # readable()/writable()/seekable() must reflect the open mode.
        try:
            f = _FileIO(TESTFN, "w")
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            f.close()
            f = _FileIO(TESTFN, "r")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            f.close()
            f = _FileIO(TESTFN, "a+")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.assertEqual(f.isatty(), False)
            f.close()
            if sys.platform != "win32":
                try:
                    f = _FileIO("/dev/tty", "a")
                except EnvironmentError:
                    # When run in a cron job there just aren't any
                    # ttys, so skip the test.  This also handles other
                    # OS'es that don't support /dev/tty.
                    pass
                else:
                    self.assertEqual(f.readable(), False)
                    self.assertEqual(f.writable(), True)
                    if sys.platform != "darwin" and \
                       'bsd' not in sys.platform and \
                       not sys.platform.startswith('sunos'):
                        # Somehow /dev/tty appears seekable on some BSDs
                        self.assertEqual(f.seekable(), False)
                    self.assertEqual(f.isatty(), True)
                    f.close()
        finally:
            os.unlink(TESTFN)

    def testModeStrings(self):
        # check invalid mode strings: each must raise ValueError at open time
        for mode in ("", "aU", "wU+", "rw", "rt"):
            try:
                f = _FileIO(TESTFN, mode)
            except ValueError:
                pass
            else:
                f.close()
                self.fail('%r is an invalid file mode' % mode)

    def testUnicodeOpen(self):
        # verify opening works for a unicode filename too
        f = _FileIO(str(TESTFN), "w")
        f.close()
        os.unlink(TESTFN)

    def testBytesOpen(self):
        # Opening a bytes filename
        try:
            fn = TESTFN.encode("ascii")
        except UnicodeEncodeError:
            # Skip test: TESTFN not representable in ASCII
            return
        f = _FileIO(fn, "w")
        try:
            f.write(b"abc")
            f.close()
            with open(TESTFN, "rb") as f:
                self.assertEqual(f.read(), b"abc")
        finally:
            os.unlink(TESTFN)

    def testInvalidFd(self):
        # negative fds raise ValueError; valid-range-but-closed fds raise OSError
        self.assertRaises(ValueError, _FileIO, -10)
        self.assertRaises(OSError, _FileIO, make_bad_fd())
        if sys.platform == 'win32':
            import msvcrt
            self.assertRaises(IOError, msvcrt.get_osfhandle, make_bad_fd())

    def testBadModeArgument(self):
        # verify that we get a sensible error message for bad mode argument
        bad_mode = "qwerty"
        try:
            f = _FileIO(TESTFN, bad_mode)
        except ValueError as msg:
            if msg.args[0] != 0:
                s = str(msg)
                if TESTFN in s or bad_mode not in s:
                    self.fail("bad error message for invalid mode: %s" % s)
            # if msg.args[0] == 0, we're probably on Windows where there may be
            # no obvious way to discover why open() failed.
        else:
            f.close()
            self.fail("no error for invalid mode: %s" % bad_mode)

    def testTruncate(self):
        # truncate() must not move the file position, in either direction
        f = _FileIO(TESTFN, 'w')
        f.write(bytes(bytearray(range(10))))
        self.assertEqual(f.tell(), 10)
        f.truncate(5)
        self.assertEqual(f.tell(), 10)
        self.assertEqual(f.seek(0, os.SEEK_END), 5)
        f.truncate(15)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.seek(0, os.SEEK_END), 15)
        f.close()

    def testTruncateOnWindows(self):
        def bug801631():
            # SF bug <http://www.python.org/sf/801631>
            # "file.truncate fault on windows"
            f = _FileIO(TESTFN, 'w')
            f.write(bytes(range(11)))
            f.close()
            f = _FileIO(TESTFN,'r+')
            data = f.read(5)
            if data != bytes(range(5)):
                self.fail("Read on file opened for update failed %r" % data)
            if f.tell() != 5:
                self.fail("File pos after read wrong %d" % f.tell())
            f.truncate()
            if f.tell() != 5:
                self.fail("File pos after ftruncate wrong %d" % f.tell())
            f.close()
            size = os.path.getsize(TESTFN)
            if size != 5:
                self.fail("File size after ftruncate wrong %d" % size)
        try:
            bug801631()
        finally:
            os.unlink(TESTFN)

    def testAppend(self):
        # 'ab' mode must append to existing content, not overwrite it
        try:
            f = open(TESTFN, 'wb')
            f.write(b'spam')
            f.close()
            f = open(TESTFN, 'ab')
            f.write(b'eggs')
            f.close()
            f = open(TESTFN, 'rb')
            d = f.read()
            f.close()
            self.assertEqual(d, b'spameggs')
        finally:
            try:
                os.unlink(TESTFN)
            except:
                pass

    def testInvalidInit(self):
        # extra positional args of the wrong type must raise TypeError
        self.assertRaises(TypeError, _FileIO, "1", 0, 0)

    def testWarnings(self):
        # constructor failures must not emit warnings
        with check_warnings(quiet=True) as w:
            self.assertEqual(w.warnings, [])
            self.assertRaises(TypeError, _FileIO, [])
            self.assertEqual(w.warnings, [])
            self.assertRaises(ValueError, _FileIO, "/some/invalid/name", "rt")
            self.assertEqual(w.warnings, [])

    def test_surrogates(self):
        # Issue #8438: try to open a filename containing surrogates.
        # It should either fail because the file doesn't exist or the filename
        # can't be represented using the filesystem encoding, but not because
        # of a LookupError for the error handler "surrogateescape".
        filename = u'\udc80.txt'
        try:
            with _FileIO(filename):
                pass
        except (UnicodeEncodeError, IOError):
            pass
        # Spawn a separate Python process with a different "file system
        # default encoding", to exercise this further.
        env = dict(os.environ)
        env[b'LC_CTYPE'] = b'C'
        _, out = run_python('-c', 'import _io; _io.FileIO(%r)' % filename, env=env)
        if ('UnicodeEncodeError' not in out and
            'IOError: [Errno 2] No such file or directory' not in out):
            self.fail('Bad output: %r' % out)
def test_main():
    """Run both FileIO test suites, always removing TESTFN afterwards."""
    # Historically, these tests have been sloppy about cleaning up TESTFN,
    # so delete it unconditionally once the suites finish.
    try:
        run_unittest(AutoFileTests, OtherFileTests)
    finally:
        if os.path.exists(TESTFN):
            os.unlink(TESTFN)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    test_main()
| {
"content_hash": "96d0d7971844a268215403124ffdabac",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 83,
"avg_line_length": 31.938356164383563,
"alnum_prop": 0.5370648366573737,
"repo_name": "ArneBab/pypyjs",
"id": "aecea18b4e7a6e68f313263c8da469207aebb916",
"size": "14038",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "website/demo/home/rfk/repos/pypy/lib-python/2.7/test/test_fileio.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import re
import pandas as pd
import numpy as np
import glob
import os
import nltk
from nltk import word_tokenize
##########################################################
#Preliminary Functions
##########################################################
def group_text(text, group_size):
    """
    Split ``text`` on whitespace and return a list with every run of
    ``group_size`` consecutive words, each joined back into one string.
    """
    words = text.split()
    groups = []
    for start in range(len(words)):
        chunk = words[start:start + group_size]
        # keep only full-length groups (slices near the tail come up short)
        if len(chunk) == group_size:
            groups.append(" ".join(chunk))
    return groups
def remove_non_ascii_2(text):
    """Return *text* with every non-ASCII character removed."""
    # `re` is already imported at module level; the original re-imported it
    # on every call, which was redundant.
    return re.sub(r'[^\x00-\x7F]+', "", text)
def read_speech(speechfile):
    """
    Read a speech text file and return its contents with periods replaced
    by spaces and non-ASCII characters stripped.
    """
    # `with` guarantees the handle is closed (the original leaked it), and
    # mode 'rU' was removed in Python 3.11 — universal newlines are the
    # default in text mode anyway.
    with open(str(speechfile)) as f:
        raw = f.read()
    return remove_non_ascii_2(raw.replace('.', ' '))
def make_text(file, path=''):
    """
    Read the text file ``file`` (optionally under directory ``path``) and
    return it as an ``nltk.Text`` of word tokens, non-ASCII stripped.
    """
    filepath = file if path == '' else path + "/" + file
    # `with` guarantees the handle is closed (the original leaked it).
    with open(filepath) as f:
        raw = f.read()
    tokens = word_tokenize(remove_non_ascii_2(raw))
    return nltk.Text(tokens)
def get_url(speechfile):
    """
    Return the source URL of a speech file.  The parser that produced the
    files stores the URL on the second line, so this returns line index 1.
    """
    # `with` guarantees the handle is closed (the original leaked it), and
    # mode 'rU' was removed in Python 3.11.
    with open(str(speechfile)) as f:
        raw = f.read()
    return remove_non_ascii_2(raw).split('\n')[1]
def get_group_set(group_size, text):
    """Return the set of distinct ``group_size``-word groups in ``text``."""
    return set(group_text(text, group_size))
def ngram(n, data):
    # Thin alias for get_group_set(): the set of n-word groups in ``data``.
    ngram = get_group_set(n, data)
    return ngram
##########################################################
#Speech Phrase Counter Functions
##########################################################
def find_time(text):
    """
    Return the first time-of-day string found in *text*, or None.

    Tries an "HH:MM XM ..." pattern first (e.g. "12:30 P.M. EST", matched
    greedily to the end of the line), then a bare "H(AM|PM)" /
    "H:MM(AM|PM)" form.

    BUG FIX: the original body referenced the global name ``sent`` instead
    of the ``text`` parameter, so every lookup raised NameError internally
    and the function always returned None.
    """
    try:
        found = re.findall(r'\d{1,2}:\d{1,2}\s[A-Z].[A-Z].+', text)
        if found:
            return found[0]
        found = re.findall(r'\d{1,2}(?:(?:AM|PM)|(?::\d{1,2})(?:AM|PM)?)', text)
        if found:
            return found[0]
    except TypeError:
        # non-string input: preserve the original "return None" behaviour
        pass
    return None
def return_time(text):
    """
    Return the first time-of-day string found in *text*, normalising the
    spellings "P M " -> "PM" and "A M " -> "AM"; None if no time is found.

    BUG FIX: like find_time(), the original body referenced the global name
    ``sent`` instead of the ``text`` parameter, so it always raised
    NameError internally and returned None.
    """
    try:
        found = re.findall(r'\d{1,2}:\d{1,2}\s[A-Z].[A-Z].+', text)
        if found:
            # normalise spaced-out meridiem markers, as the original did
            return found[0].replace('P M ', 'PM').replace('A M ', 'AM')
        found = re.findall(r'\d{1,2}(?:(?:AM|PM)|(?::\d{1,2})(?:AM|PM)?)', text)
        if found:
            return found[0]
    except TypeError:
        # non-string input: preserve the original "return None" behaviour
        pass
    return None
def speech_phrase_counter(ngram1, ngram2, ngram3, ngram4, terms, df, n, sent):
    """
    Count occurrences of each phrase in ``terms`` within ``sent`` and store
    the counts in row ``n`` of the pandas DataFrame ``df``.

    ngram1..ngram4 are the sets of 1- to 4-word groups present in ``sent``
    (as returned by get_group_set); a term is only counted when it appears
    in one of them, i.e. when it actually occurs as a whole word group.

    ``sent`` is the processed speech text from read_speech().  Designed to
    be called from speech_classifier().
    """
    for term in terms:
        # Set membership replaces the original O(len(ngram)) linear scans,
        # and df.loc replaces the df.ix indexer removed from pandas.
        if term in ngram1 or term in ngram2 or term in ngram3 or term in ngram4:
            df.loc[n, term] = sent.count(term)
##########################################################
#Setup Data Frame
##########################################################
def speech_classifier(folder_name, ds1, ds2, output_file, terms, metric=0, addtime=0, addloc=0, addcite=0):
    """
    Scan every ``*.txt`` speech file in ``folder_name``, count the given
    ``terms`` in each, and write the per-speech results to a CSV.

    ---------------------------------------------------------------
    Variables
    - folder_name = path/name of folder where speeches are found.
      NOTE(review): the function chdir()s into it and back out with
      ``os.chdir("..")``, so it must be a single-level relative path.
    ---------------------------------------------------------------
    - ds1:ds2 = date slices of filenames.
      E.g. the filename "2011-09-17_ID1.txt" with ds1 = 0 and ds2 = 10
      takes the string slice 0:10 and yields date = 2011-09-17.
    ---------------------------------------------------------------
    - output_file = the name of the desired CSV
    - terms = the list of 1- to 4-word phrases to count in the speeches
    - metric = 1 adds token/word/vocabulary metrics via NLTK
    - addtime = 1 tries to extract a time-of-day string
    - addloc = 1 tries to extract a location string
    - addcite = 1 records the source URL (line 2 of each speech file)
    ---------------------------------------------------------------
    Returns the assembled pandas DataFrame (also written to output_file).
    """
    # Result frame: metadata columns followed by one column per term.
    # (df.loc replaces the df.ix indexer removed from pandas.)
    header = ["DATE", "TIME", "LOCATION", "URL", "TOKENS", "WORDS", "UNIQUE_WORDS"] + terms
    df = pd.DataFrame(columns=header, index=np.arange(0))
    # Get files in folder
    folder = str(folder_name)
    outfile = str(output_file)
    os.chdir(folder)
    speech_files = glob.glob("*.txt")
    for speech in speech_files:
        date = speech[ds1:ds2]
        print ("Analyzing speech file {} ... {}".format(speech, date))
        n = len(df.index)
        # Append a zero-filled row for this speech.
        df.loc[n] = 0
        df.loc[n, "DATE"] = date
        sent = read_speech(speech)
        # Optionally extract a time-of-day string.
        if addtime == 1:
            time = return_time(sent)
            if len(str(time)) > 15:
                time = str(time)[0:12]
            df.loc[n, "TIME"] = time
        # Optionally extract a location string (the text preceding the time).
        if addloc == 1:
            try:
                time_ = find_time(sent)
                location1 = sent.replace(time_, '|').split('|', 1)[0]
                location2 = location1.replace('\n\n', '|').replace('|\n', '|').replace('| ', '').split('|')
                location3 = location2[len(location2) - 2]
                location = location3.replace('\n', ', ').replace('\t', '')
            except (TypeError, IndexError):
                # TypeError: find_time() found no time (returned None);
                # IndexError: the text did not split as expected.
                location = ''
            if len(str(location)) > 25:
                location = str(location)[0:35]
                print ("Exception: {} ...".format(location))
            df.loc[n, "LOCATION"] = location
        # Optionally record the source URL.
        if addcite == 1:
            df.loc[n, "URL"] = get_url(speech)
        # Optionally add token, word, and vocabulary metrics via NLTK.
        if metric != 0:
            text = make_text(speech)
            words = [w.lower() for w in text if w.isalpha()]
            df.loc[n, "TOKENS"] = len(text)
            df.loc[n, "WORDS"] = len(words)
            df.loc[n, "UNIQUE_WORDS"] = len(set(words))
        # Build the n-gram sets once per speech, then count the keywords.
        ngram1 = get_group_set(1, sent)
        ngram2 = get_group_set(2, sent)
        ngram3 = get_group_set(3, sent)
        ngram4 = get_group_set(4, sent)
        speech_phrase_counter(ngram1, ngram2, ngram3, ngram4, terms, df, n, sent)
    os.chdir("..")
    print (df)
    df.to_csv(outfile, encoding='utf-8')
    return df
| {
"content_hash": "b23950dda5cd87801a11982059ecb118",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 107,
"avg_line_length": 26.38305084745763,
"alnum_prop": 0.5129127585763844,
"repo_name": "jmausolf/Python_Tutorials",
"id": "b3f17bad419edf5b64ba1dd39c1afef655be5a6f",
"size": "8036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Text_Keyword_Counter/text_keyword_classifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "538082"
},
{
"name": "Python",
"bytes": "45125"
},
{
"name": "Stata",
"bytes": "1415"
}
],
"symlink_target": ""
} |
"""Tests the tdb data store - in memory implementation."""
import shutil
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import data_store_test
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.data_stores import tdb_data_store
# pylint: mode=test
class TDBTestMixin(object):
  """Mixin that points the shared data store at a TDB-backed instance."""

  def InitDatastore(self):
    # Tests run under a mock ACL token / security manager.
    self.token = access_control.ACLToken(username="test",
                                         reason="Running tests")
    config_lib.CONFIG.Set("Datastore.location", "%s/tdb_test/" % self.temp_dir)
    # Start from a clean directory so earlier runs cannot leak state.
    self.DestroyDatastore()
    data_store.DB = tdb_data_store.TDBDataStore()
    data_store.DB.security_manager = test_lib.MockSecurityManager()

  def testCorrectDataStore(self):
    # Sanity check: the fixture really installed the TDB implementation.
    self.assertTrue(isinstance(data_store.DB, tdb_data_store.TDBDataStore))

  def DestroyDatastore(self):
    # Best-effort removal; the directory may not exist on the first run.
    try:
      shutil.rmtree(config_lib.CONFIG.Get("Datastore.location"))
    except (OSError, IOError):
      pass
class TDBDataStoreTest(TDBTestMixin, data_store_test.DataStoreTest):
  """Run the generic data store test suite against the TDB data store."""
class TDBDataStoreBenchmarks(TDBTestMixin,
                             data_store_test.DataStoreBenchmarks):
  """Run the generic data store benchmarks against the TDB data store."""
class TDBDataStoreCSVBenchmarks(TDBTestMixin,
                                data_store_test.DataStoreCSVBenchmarks):
  """Run the CSV-emitting data store benchmarks against the TDB store."""
def main(args):
  """Program entry point used by flags.StartMain()."""
  test_lib.main(args)
# Entry point: delegate flag parsing and startup to GRR's flags helper.
if __name__ == "__main__":
  flags.StartMain(main)
| {
"content_hash": "f5ffc03a09a0e310238680c4bc098269",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 79,
"avg_line_length": 27.295081967213115,
"alnum_prop": 0.6984984984984985,
"repo_name": "ForensicTools/GRREAT-475_2141-Chaigon-Failey-Siebert",
"id": "3ea25ae3b2f418bbc4bd7b9f5ea249e000e2f7b0",
"size": "1687",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/data_stores/tdb_data_store_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "36573"
},
{
"name": "JavaScript",
"bytes": "831111"
},
{
"name": "Makefile",
"bytes": "5482"
},
{
"name": "Perl",
"bytes": "483"
},
{
"name": "Python",
"bytes": "4517593"
},
{
"name": "Shell",
"bytes": "31210"
}
],
"symlink_target": ""
} |
"""
==============================================================
Time-frequency representations on topographies for MEG sensors
==============================================================

Both average power and intertrial coherence are displayed.
"""
print(__doc__)

# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#          Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)

import numpy as np

import mne
from mne import io
from mne.time_frequency import tfr_morlet
from mne.datasets import somato

###############################################################################
# Set parameters
data_path = somato.data_path()
raw_fname = data_path + '/MEG/somato/sef_raw_sss.fif'
event_id, tmin, tmax = 1, -1., 3.

# Setup for reading the raw data
raw = io.Raw(raw_fname)
baseline = (None, 0)
events = mne.find_events(raw, stim_channel='STI 014')

# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)

# Epoch around each event, rejecting trials with large gradiometer/EOG peaks
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6))

###############################################################################
# Calculate power and intertrial coherence

freqs = np.arange(6, 30, 3)  # define frequencies of interest
n_cycles = freqs / 2.  # different number of cycle per frequency

# Morlet wavelet transform; decim=3 downsamples the TFR in time by 3x
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=False,
                        return_itc=True, decim=3, n_jobs=1)

# Baseline correction can be applied to power or done in plots
# To illustrate the baseline correction in plots the next line is commented
# power.apply_baseline(baseline=(-0.5, 0), mode='logratio')

# Inspect power
power.plot_topo(baseline=(-0.5, 0), mode='logratio', title='Average power')
# NOTE(review): channel index 82 is hard-coded — assumes the somato dataset
# picks at least 83 gradiometer channels; confirm if the dataset changes.
power.plot([82], baseline=(-0.5, 0), mode='logratio')

import matplotlib.pyplot as plt
fig, axis = plt.subplots(1, 2, figsize=(7, 4))
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=8, fmax=12,
                   baseline=(-0.5, 0), mode='logratio', axes=axis[0],
                   title='Alpha', vmin=-0.45, vmax=0.45)
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=13, fmax=25,
                   baseline=(-0.5, 0), mode='logratio', axes=axis[1],
                   title='Beta', vmin=-0.45, vmax=0.45)
mne.viz.tight_layout()

# Inspect ITC
itc.plot_topo(title='Inter-Trial coherence', vmin=0., vmax=1., cmap='Reds')
| {
"content_hash": "bb4d68ab3a2d44e42f8900758ca40b4b",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 38.184615384615384,
"alnum_prop": 0.5979049153908138,
"repo_name": "effigies/mne-python",
"id": "39a643187ee00d6639ce6c607c67b2e1a723ab4c",
"size": "2482",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/time_frequency/plot_time_frequency_sensors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16734"
},
{
"name": "Makefile",
"bytes": "3645"
},
{
"name": "PowerShell",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "3718090"
},
{
"name": "Shell",
"bytes": "4057"
}
],
"symlink_target": ""
} |
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Tighten DataType fields: length limits plus a name-format validator."""

    dependencies = [("data_import", "0022_auto_20191105_1924")]

    operations = [
        migrations.AlterField(
            model_name="datatype",
            name="description",
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name="datatype",
            name="name",
            field=models.CharField(
                max_length=40,
                unique=True,
                validators=[
                    django.core.validators.RegexValidator(
                        "^[\\w\\-\\s]+$",
                        "Only alphanumeric characters, space, dash, and underscore are allowed.",
                    )
                ],
            ),
        ),
    ]
| {
"content_hash": "c721740393f5617cdbc1dd180f3b7832",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 97,
"avg_line_length": 29.17241379310345,
"alnum_prop": 0.49527186761229314,
"repo_name": "PersonalGenomesOrg/open-humans",
"id": "cd8f0c943497f2396975497cc6fbda1da6479ac4",
"size": "895",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data_import/migrations/0023_auto_20191106_2330.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19829"
},
{
"name": "HTML",
"bytes": "296839"
},
{
"name": "JavaScript",
"bytes": "25622"
},
{
"name": "Python",
"bytes": "435909"
},
{
"name": "Shell",
"bytes": "721"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class HoaxConfig(AppConfig):
    """Django application configuration for the ``hoax`` app."""
    name = 'hoax'
| {
"content_hash": "5d3535c7839a42a1a7016cb64637615c",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 16.6,
"alnum_prop": 0.7349397590361446,
"repo_name": "barliant/fnc-id",
"id": "7cae59a87806563e74ebbc94dfa93cac08b3ae76",
"size": "83",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_project/old/hoaxdetector/hoax/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "690138"
},
{
"name": "HTML",
"bytes": "35222"
},
{
"name": "JavaScript",
"bytes": "18906"
},
{
"name": "Jupyter Notebook",
"bytes": "1743756"
},
{
"name": "Python",
"bytes": "89660"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
import numpy as np
import netCDF4 as nc
from .base_grid import BaseGrid
class DaitrenRunoffGrid(BaseGrid):
    """Arakawa A grid read from a Daitren runoff netCDF grid-definition file."""

    def __init__(self, h_grid_def, description='Daitren runoff regular grid'):
        # h_grid_def: path to a netCDF file with xc/yc cell centres,
        # xv/yv cell corners, a land-sea mask and per-cell areas.
        self.type = 'Arakawa A'
        self.full_name = 'Daitren_runoff'
        try:
            with nc.Dataset(h_grid_def) as f:
                x_t = f.variables['xc'][:]
                y_t = f.variables['yc'][:]
                clon_t = f.variables['xv'][:]
                clat_t = f.variables['yv'][:]
                mask_t = f.variables['mask'][:]
                area_t = f.variables['area'][:]
        except IOError:
            # NOTE(review): printing and exiting from a constructor makes the
            # class hard to reuse as a library; consider raising instead.
            print('Error opening {}'.format(h_grid_def), file=sys.stderr)
            sys.exit(1)
        super(DaitrenRunoffGrid, self).__init__(x_t=x_t, y_t=y_t,
                                                clon_t=clon_t, clat_t=clat_t,
                                                mask_t=mask_t, area_t=area_t,
                                                description=description)
| {
"content_hash": "fbd20243bdac7cac04ba8ede11bd6db6",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 78,
"avg_line_length": 35.5,
"alnum_prop": 0.48169014084507045,
"repo_name": "DoublePrecision/esmgrids",
"id": "bc3fdc74f661a41bcc9b0ce25a369a0bab34239f",
"size": "1066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "esmgrids/daitren_runoff_grid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "71990"
}
],
"symlink_target": ""
} |
from synapse.exc import *
def chopurl(url):
    '''
    A sane "stand alone" url parser.

    Returns a dict with scheme, host, path and (when present) port,
    user, passwd and query keys.

    Example:

        info = chopurl(url)

    '''
    info = {}

    scheme, sep, rest = url.partition('://')
    if not sep:
        raise BadUrl(':// not found!')

    info['scheme'] = scheme.lower()

    # carve query params from the end
    if '?' in rest:
        rest, _, qstr = rest.partition('?')
        params = {}
        for pair in qstr.split('&'):
            name, eq, valu = pair.partition('=')
            params[name] = valu if eq else None
        info['query'] = params

    # carve the path off (everything from the first slash)
    path = ''
    off = rest.find('/')
    if off != -1:
        path = rest[off:]
        rest = rest[:off]

    # detect user[:passwd]@netloc syntax
    if '@' in rest:
        auth, rest = rest.rsplit('@', 1)
        if ':' in auth:
            auth, passwd = auth.split(':', 1)
            info['passwd'] = passwd
        info['user'] = auth

    # rest should be down to host[:port]
    if rest.startswith('['):
        # ipv6 [addr]:port syntax
        hostpart, portstr = rest.rsplit(':', 1)
        info['port'] = int(portstr)
        info['host'] = hostpart[1:-1]
    elif rest.count(':') > 1:
        # ipv6 without port syntax
        info['host'] = rest
    elif ':' in rest:
        # regular old host:port syntax
        rest, portstr = rest.split(':', 1)
        info['port'] = int(portstr)
        info['host'] = rest
    else:
        info['host'] = rest

    info['path'] = path
    return info
| {
"content_hash": "53afee8d87e6bd89a0ce7c901f28b383",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 48,
"avg_line_length": 23.285714285714285,
"alnum_prop": 0.5,
"repo_name": "imjonsnooow/synapse",
"id": "8e2b97b3a0b691f604fc18745183d5c6ffd60f44",
"size": "1631",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "synapse/lib/urlhelp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "162309"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
    """Deserialize a hex-encoded transaction string into a CTransaction."""
    tx = CTransaction()
    stream = BytesIO(hex_str_to_bytes(hexstring))
    tx.deserialize(stream)
    return tx
class ListTransactionsTest(BitcoinTestFramework):
    """Exercise listtransactions across categories, accounts, sendmany,
    watch-only addresses, and (disabled here) BIP125 opt-in RBF flags."""

    def setup_nodes(self):
        #This test requires mocktime
        enable_mocktime()
        return start_nodes(4, self.options.tmpdir)

    def run_test(self):
        # Simple send, 0 to 1:
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        assert_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
        assert_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
        # mine a block, confirmations should change:
        self.nodes[0].generate(1)
        self.sync_all()
        assert_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
        assert_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
        # send-to-self: appears as both a send and a receive entry
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        assert_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"send"},
                           {"amount":Decimal("-0.2")})
        assert_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"receive"},
                           {"amount":Decimal("0.2")})
        # sendmany from node1: twice to self, twice to node2:
        send_to = { self.nodes[0].getnewaddress() : 0.11,
                    self.nodes[1].getnewaddress() : 0.22,
                    self.nodes[0].getaccountaddress("from1") : 0.33,
                    self.nodes[1].getaccountaddress("toself") : 0.44 }
        txid = self.nodes[1].sendmany("", send_to)
        self.sync_all()
        assert_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.11")},
                           {"txid":txid} )
        assert_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.11")},
                           {"txid":txid} )
        assert_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.22")},
                           {"txid":txid} )
        assert_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.22")},
                           {"txid":txid} )
        assert_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.33")},
                           {"txid":txid} )
        assert_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.33")},
                           {"txid":txid, "account" : "from1"} )
        assert_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.44")},
                           {"txid":txid, "account" : ""} )
        assert_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.44")},
                           {"txid":txid, "account" : "toself"} )
        # watch-only: imported multisig script shows up only when
        # includeWatchonly is requested
        multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
        self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
        txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
        self.nodes[1].generate(1)
        self.sync_all()
        assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
        assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
                           {"category":"receive","amount":Decimal("0.1")},
                           {"txid":txid, "account" : "watchonly"} )

        # rbf is disabled in Pura Core
        # self.run_rbf_opt_in_test()

    # Check that the opt-in-rbf flag works properly, for sent and received
    # transactions.
    def run_rbf_opt_in_test(self):
        # Check whether a transaction signals opt-in RBF itself
        def is_opt_in(node, txid):
            rawtx = node.getrawtransaction(txid, 1)
            for x in rawtx["vin"]:
                if x["sequence"] < 0xfffffffe:
                    return True
            return False

        # Find an unconfirmed output matching a certain txid
        def get_unconfirmed_utxo_entry(node, txid_to_match):
            utxo = node.listunspent(0, 0)
            for i in utxo:
                if i["txid"] == txid_to_match:
                    return i
            return None

        # 1. Chain a few transactions that don't opt-in.
        txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        assert(not is_opt_in(self.nodes[0], txid_1))
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})

        # Tx2 will build off txid_1, still not opting in to RBF.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)

        # Create tx2 using createrawtransaction
        inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.999}
        tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
        txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)

        # ...and check the result
        assert(not is_opt_in(self.nodes[1], txid_2))
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})

        # Tx3 will opt-in to RBF (nSequence = 0 on its input)
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
        inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[1].getnewaddress(): 0.998}
        tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
        tx3_modified = txFromHex(tx3)
        tx3_modified.vin[0].nSequence = 0
        tx3 = bytes_to_hex_str(tx3_modified.serialize())
        tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
        txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)

        assert(is_opt_in(self.nodes[0], txid_3))
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})

        # Tx4 will chain off tx3.  Doesn't signal itself, but depends on one
        # that does.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
        inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.997}
        tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
        txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)

        assert(not is_opt_in(self.nodes[1], txid_4))
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})

        # Replace tx3, and check that tx4 becomes unknown
        tx3_b = tx3_modified
        tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
        tx3_b = bytes_to_hex_str(tx3_b.serialize())
        tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
        txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
        assert(is_opt_in(self.nodes[0], txid_3b))

        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})

        # Check gettransaction as well:
        for n in self.nodes[0:2]:
            assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")

        # After mining a transaction, it's no longer BIP125-replaceable
        self.nodes[0].generate(1)
        assert(txid_3b not in self.nodes[0].getrawmempool())
        assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
        assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
# Run the functional test directly when executed as a script.
if __name__ == '__main__':
    ListTransactionsTest().main()
| {
"content_hash": "d760457f7797791f058c5bbaedae249d",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 113,
"avg_line_length": 50.91752577319588,
"alnum_prop": 0.5739015995140717,
"repo_name": "amiller87/puracore",
"id": "f761c4162e919a662217e7dc3c2d1962d81fa2d4",
"size": "10130",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/listtransactions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1338637"
},
{
"name": "C++",
"bytes": "5452158"
},
{
"name": "CSS",
"bytes": "122189"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "158630"
},
{
"name": "Makefile",
"bytes": "97785"
},
{
"name": "Objective-C",
"bytes": "4937"
},
{
"name": "Objective-C++",
"bytes": "7224"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "715136"
},
{
"name": "QMake",
"bytes": "2055"
},
{
"name": "Roff",
"bytes": "3688"
},
{
"name": "Shell",
"bytes": "35621"
}
],
"symlink_target": ""
} |
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from math import log
from statistics import variance
import time
from random import randint
# Create a time stamp for data logging
def timestamp():
    """Return the current local time formatted as 'YYYYMMDD_HHMMSS'."""
    # strftime zero-pads each field, matching the hand-built original.
    return time.strftime('%Y%m%d_%H%M%S', time.localtime())
# Create (append to) a log file
# V1.00 - initial version of a data log for thesis project (each image run gets own log)
# V1.01 - changed extension from .log to .csv; included log name in log header; Fixed Column Headers
# V1.02 - added SEED placeholder to hold seed value used to create results
# V1.03 - added SEED data
# V1.04 - added 0.0001% to noise case, and added commas to make into clean csv
def datalog( D, filename ):
    """Append one run's results to the CSV log *filename*.

    Creates the file with a commented header block on first use; later
    calls append only a data row.  D is a dict with keys: 'logname',
    'image-name', 'imagepath', 'resolution', 'base-signal',
    'noise-model', 'seed', 'results' (list of (boxes counted, box width)
    tuples), 'noise', 'signalnoise', 'dimension' and 'variance'.
    """
    LOG_VERSION = "1.04" # keep track of changes to log format with this value
    # Probe whether the log already exists so the header is only written
    # once.  (Narrowed from a bare `except: pass`, which also swallowed
    # KeyboardInterrupt/SystemExit; handles are now context-managed so
    # they close even on a write error.)
    try:
        with open(filename, 'r'):
            exists = True
    except OSError:
        exists = False
    with open(filename, 'a') as log:
        if not exists:
            # Create Log Header.  The trailing commas pad every line to
            # the same column count so the file parses as a clean CSV.
            log.write("# Datalog Version: " + LOG_VERSION + ',,,,,,,,,,,,,,\n')
            log.write("# Log Name: " + D['logname'] + ',,,,,,,,,,,,,,\n')
            log.write("# Image Name: " + D['image-name'] + ',,,,,,,,,,,,,,\n')
            log.write("# Image Path: " + D['imagepath'] + ',,,,,,,,,,,,,,\n')
            log.write("# Image Resolution: " + D['resolution'] + ',,,,,,,,,,,,,,\n')
            log.write("# Base Signal: " + str(D['base-signal']) + ',,,,,,,,,,,,,,\n')
            log.write("# Noise Model: " + D['noise-model'] + ',,,,,,,,,,,,,,\n')
            log.write("# Noise Seed: " + str(D['seed']) + ',,,,,,,,,,,,,,\n')
            # -- List grid sizes: width1|width2|... --
            log.write("# Grid Widths: ")
            for result in D['results']:
                log.write(str(result[1]) + '|')
            log.write(',,,,,,,,,,,,,,\n')
            # -- Column headers --
            log.write("# Timestamp,Noise%,Signal,Gridcount,")
            for result in D['results']:
                log.write(str(result[1]) + ',')
            log.write("Dimension,Variance\n")
        # Append one data row for this run.
        log.write(timestamp() + ',')
        log.write(str(D['noise']) + ',')
        log.write(str(D['signalnoise']) + ',')
        log.write('R' + str(len(D['results'])) + ',')
        for result in D['results']:
            log.write(str(result[0]) + ',')
        log.write(str(D['dimension']) + ',')
        log.write(str(D['variance']))
        log.write('\n')
    return
# Small helper function: imports the png image file,
# strips the alpha channel, and returns a numpy array
# containing the red-green-blue channel for each pixel
def get_rgb_from(filename):
    """Load a PNG, strip its alpha channel, and binarize it.

    Returns a 2-D int array with 0 where the pixel is pure black
    (r+g+b == 0, the signal) and 1 everywhere else.
    """
    img3D = mpimg.imread(filename, ) # read png img file
    img3D = np.delete(img3D,np.s_[3:],axis=2) # strips alpha channel
    # Vectorized binarization replaces the original per-pixel Python
    # loop, which was interpreter-level O(rows*cols) work on
    # multi-megapixel images.
    return np.where(img3D.sum(axis=2) == 0, 0, 1)
# create a Uniform noise model with percent as parameter
# Make sure to seed the noise!
def addUniformNoise(inputArray, percent, seed):
    """Corrupt a binary image with seeded uniform noise.

    Each pixel independently becomes noisy with probability percent/100.
    A noisy white pixel (1) loses its value (-1 -> 0) while a noisy
    black pixel (0) gains a spurious count (+1 -> 1), so the noise both
    attenuates and pollutes the signal.  Returns a new array.
    """
    np.random.seed(seed)  # reproducible noise field
    draws = np.random.uniform(size=(len(inputArray), len(inputArray[0])))
    # Mark the selected fraction of pixels with -1 (candidate noise).
    mask = np.where(draws < percent / 100, -1, 0)
    # Flip the sign wherever the image is black so noise adds signal there.
    mask = np.where(inputArray < 1, -mask, mask)
    return inputArray + mask
# add Noise (-1's in our case) to our image array)
def addNoise( inputArray, threshold, maxThreshold ):
    """Apply integer-threshold dropout noise to a binary image.

    One random int in [0, maxThreshold) is drawn per pixel; any draw
    below *threshold* subtracts 1 from that pixel.  The result is then
    re-clipped to {0, 1}, so noise can only erase signal.
    """
    shape = (len(inputArray), len(inputArray[0]))
    draws = np.random.randint(maxThreshold, size=shape)
    # -1 marks a noisy pixel (knocks a 1 down to 0).
    dropped = inputArray + np.where(draws < threshold, -1, 0)
    # Re-normalize back to a strict 0/1 image.
    return np.where(dropped > 0, 1, 0)
# count number of black pixels ([0,0,0])
def countSignal( img ):
    """Count the black (0) pixels — the signal — in a binary image array."""
    total_pixels = len(img) * len(img[0])
    return total_pixels - img.sum()
# test a small box of the image --
# return True of the box contains a black pixel ([0,0,0])
def testBox( img, x, y, d ):
height = len(img)
width = len(img[0])
for h in range(y, y+d):
if img[h][x:x+d].sum() < d:
return True
#for w in range(x, x+d):
# if( h < height ) and (w < width):
# if img[h][w] == 0:
# return True
return False
# pass the img array and a box width
# returns number of boxes counted at particular width
def boxCount( img, d ):
    """Slide a d-by-d grid across *img* and count how many grid cells
    contain at least one black pixel.

    Returns (boxes counted, d); prints an error and returns (0, 0) when
    d exceeds either image dimension.
    """
    height, width = len(img), len(img[0])
    # verify d size is smaller than image dimensions
    if d > min(height, width):
        print("[ERROR] boxCount box width exceeds image dimensions")
        return (0, 0)
    occupied = 0
    for top in range(0, height, d):
        occupied += sum(
            1 for left in range(0, width, d) if testBox(img, left, top, d))
    return (occupied, d)
# Convert an array of tuples of the form (Boxes Counted, Box Width) into a
# Fractal Dimension Estimate
# Pack data into a dictionary that can store multiple results data
# D['key'] => value
# D['dimension'] => Fractal Dimension
# D['results'] => List of tuples ( Boxes Counted, Box Width )
# D['l_results'] => List of tuples ( log(Boxes Counted), log(1/Box Width) )
# D['slopes'] => Array of log/log slopes
def bcaConvertResults( results ):
    """Convert box-count results into a fractal-dimension estimate.

    *results* is a list of (boxes counted, box width) tuples.  Returns a
    dict with keys:
      'results'   - the input tuples
      'l_results' - (log(count), log(1/width)) pairs
      'slopes'    - slope between each consecutive log-log pair
      'variance'  - sample variance of the slopes
      'dimension' - mean slope, the box-counting dimension estimate
    """
    D = {'results': results}
    # log-log transform; a zero count is clamped to log(1) to avoid -inf.
    log_results = [
        (log(count) if count else log(1), log(1 / width))
        for count, width in results
    ]
    D['l_results'] = log_results
    # Slope between each consecutive pair of log-log points.
    slopes = [
        (log_results[i][0] - log_results[i - 1][0]) /
        (log_results[i][1] - log_results[i - 1][1])
        for i in range(1, len(log_results))
    ]
    D['slopes'] = slopes
    D['variance'] = variance(slopes)
    # The average of the slopes is the fractal dimension estimate.
    D['dimension'] = sum(slopes) / len(slopes)
    return D
# Box Count Algorithm takes an img array (3D array containing pixel data in the
# form [r g b] each ranging from 0-1, arranged in a 2D matrix.
# [ [ [1,1,1], [1,1,1], ...
# [ [ [1,0,1], ...
# [ [ ...
# and takes a grid containing all the box widths we will test against
def boxCountAlgorithm( img, grid ):
    """Run a full box count over *img* for every box width in *grid*.

    Returns the dictionary built by bcaConvertResults: raw counts,
    log-log points, slopes, their variance and the fractal dimension
    estimate.
    """
    results = [] # (boxes counted, d)
    for d in grid:
        r = boxCount(img,d)
        results.append(r)
    D = bcaConvertResults( results )
    return D
# Print nicely formatted timestamp
def timeit( seconds ):
    """Format an elapsed time in seconds as 'MM:SS.mmm'.

    The fractional part is scaled by 1000 (true milliseconds) to match
    the zero-padded 3-digit field; the original scaled by 100 and so
    rendered 61.5 s as '01:01.050' instead of '01:01.500'.
    """
    m = int(seconds // 60)
    s = int(seconds - m * 60)
    ms = int((seconds - m * 60 - s) * 1000)  # milliseconds, 0-999
    return format(m, '02d') + ':' + format(s, '02d') + '.' + format(ms, '03d')
# Create a pre-formated 2x2 plot of the output data.
# a, b, c, and d are arrays of tuples containing (Dimension, Noise%) data
def plot2x2( a, b, c, d ):
    """Show a 2x2 grid of dimension-vs-noise plots for four test images.

    a, b, c and d are lists of (dimension, noise%) tuples for the
    Circle, KochSnowflake, Canopy and Checkers images respectively; all
    four are assumed to be sampled at the same noise values as *a*.
    """
    # Shared x axis: the noise percentages from the first data set.
    noise = []
    for tup in a:
        noise.append(tup[1])
    # y series: the dimension estimates of each data set.
    ay = []
    by = []
    cy = []
    dy = []
    for i in range(len(a)):
        ay.append(a[i][0])
        by.append(b[i][0])
        cy.append(c[i][0])
        dy.append(d[i][0])
    fig = plt.figure(1)
    fig.suptitle('Fractal Dimension Results', fontsize = 20)
    ax = fig.add_subplot(221)
    ax.set_title('Circle')
    ax.set_xlabel('Noise')
    ax.set_ylabel('Dimension')
    plt.plot(noise,ay,linestyle='-', linewidth=1.0)
    ax = fig.add_subplot(222)
    ax.set_title('KochSnowflake')
    ax.set_xlabel('Noise')
    ax.set_ylabel('Dimension')
    plt.plot(noise,by,linestyle='-', linewidth=1.0)
    ax = fig.add_subplot(223)
    ax.set_title('Canopy')
    ax.set_xlabel('Noise')
    ax.set_ylabel('Dimension')
    plt.plot(noise,cy,linestyle='-', linewidth=1.0)
    ax = fig.add_subplot(224)
    ax.set_title('Checkers')
    ax.set_xlabel('Noise')
    ax.set_ylabel('Dimension')
    plt.plot(noise,dy,linestyle='-', linewidth=1.0)
    plt.show()
# Create a pre-formated 2x2 plot of the output data.
# a, b, c, and d are arrays of tuples containing (Dimension, Noise%) data
def plot2( a, b ):
    """Show stacked dimension-vs-noise and variance-vs-noise plots.

    a is a list of (dimension, noise%) tuples and b a list of
    (variance, noise%) tuples sampled at the same noise values.
    """
    # Shared x axis: the noise percentages from the first data set.
    noise = []
    for tup in a:
        noise.append(tup[1])
    ay = []
    by = []
    for i in range(len(a)):
        ay.append(a[i][0])
        by.append(b[i][0])
    plt.figure(num=1, figsize=(12,8))
    fig = plt.figure(1)
    fig.suptitle('Fractal Dimension Results', fontsize = 20)
    ax = fig.add_subplot(2,1,1)
    ax.set_title('Fractal Dimension vs. Noise')
    #ax.set_xlabel('Noise')
    # Hide the upper plot's x tick labels; it shares x with the lower plot.
    plt.setp(ax.get_xticklabels(), visible=False)
    ax.set_ylabel('Dimension')
    plt.plot(noise,ay,linestyle='-', linewidth=1.0)
    bx = fig.add_subplot(2,1,2, sharex=ax)
    bx.set_title('Variance vs. Noise')
    bx.set_xlabel('Noise')
    bx.set_ylabel('Variance')
    plt.plot(noise,by,linestyle='-', linewidth=1.0)
    #plt.plot(noise,ay,linestyle='-', linewidth=2.0)
    plt.show()
# Noise generator increases resolution every factor of 10
def noise_generator( maximum ):
    """Yield noise percentages from 0 up to *maximum*.

    The step grows by a factor of 10 each decade (internally the level
    is an integer in units of 0.0001%), so low-noise regions are sampled
    much more densely than high-noise ones.
    """
    limit = maximum * 10000
    level = 0
    while level <= limit:
        yield level / 10000.0
        if level == 0:
            level = 1        # 0.0001% noise case
        elif level == 1:
            level = 10       # jump to the 0.001% noise case
        else:
            # Step size for the current decade.
            for bound, step in ((100, 10), (1000, 100),
                                (10000, 1000), (100000, 10000)):
                if level < bound:
                    level += step
                    break
            else:
                level += 50000
def main():
    """CLI driver: estimate an image's fractal dimension under rising noise.

    Expects three argv parameters: image path, noise model flag
    (-u uniform / -g gaussian), and an integer RNG seed.  For each noise
    level produced by noise_generator() the image is corrupted, the
    box-count algorithm is run, and the result is appended to a per-run
    CSV log via datalog().

    Raises SystemExit on bad arguments or the unimplemented gaussian model.
    """
    if len(sys.argv) != 4:
        sys.exit("[ERROR] Insufficient Arguments. [Image Path] [Noise Model] [Seed]")
    if sys.argv[2] == "-u":
        NOISE_MODEL = "uniform"
    elif sys.argv[2] == "-g":
        NOISE_MODEL = "gaussian"
    else:
        sys.exit("[ERROR] Noise Model: -u (uniform) or -g (gaussian)")
    try:
        SEED = int(sys.argv[3])
    except ValueError:  # was a bare except, which also hid Ctrl-C/SystemExit
        sys.exit("[ERROR] Invalid SEED Argument")
    image = sys.argv[1]
    print("@=========================================@")
    print("| Simulation Setup")
    print("| Image:", image)
    print("| Noise:", NOISE_MODEL)
    print("| Seed:", SEED)
    print("@=========================================@")
    start_time = time.time()
    # Box widths swept by the box-count algorithm.
    GRID = [3, 5, 10, 20, 30, 50, 100, 150, 300]
    # Log file id: <image name>_<model><seed>-<timestamp>.csv
    split_path = image.split('/')
    image_prefix = split_path[-1].split('.')
    logName = image_prefix[0]
    if NOISE_MODEL == "uniform":
        logName += "_u"
    else:
        logName += "_g"
    logName += format(SEED, '03.0f') + "-" + timestamp() + ".csv"
    # Open the image and convert to a binary 2D nparray (0 = signal).
    img = get_rgb_from(image)
    print("Image imported")
    print("---", timeit(time.time() - start_time), "---")
    print("Array is", len(img), "x", len(img[0]))
    height = len(img)
    width = len(img[0])
    base_signal = countSignal(img)
    dim_data = []  # (dimension, noise%) per noise level
    var_data = []  # (variance, noise%) per noise level
    for noise in noise_generator(50):
        print(" > Noise: " + str(noise) + "%", end=" ")
        if NOISE_MODEL == "uniform":
            newimg = addUniformNoise(img, noise, SEED)
        else:
            sys.exit("[ERROR] Gaussian Noise Model Not Yet Implemented")
        c = countSignal(newimg)
        D = boxCountAlgorithm(newimg, GRID)
        # Attach run metadata so datalog() can build the header and row.
        D['noise-model'] = NOISE_MODEL
        D['seed'] = SEED
        D['image-name'] = split_path[-1]
        D['resolution'] = str(width) + 'x' + str(height)
        D['base-signal'] = base_signal
        D['logname'] = logName
        D['signalnoise'] = c  # signal + noise pixels remaining in image
        D['imagepath'] = image
        D['noise'] = noise
        datalog(D, logName)
        print(" > Dimension (" + format(D['dimension'], '.3f') + "); BCA completed: ", timeit(time.time() - start_time))
        dim_data.append((D['dimension'], noise))
        var_data.append((D['variance'], noise))
    print(">> Complete. Log file:", logName)
main()
print("main loop complete. check for log")
sys.exit()
###''' START CODE BELOW '''
###start_time = time.time()
'''
# perfecting uniform noise test
img = get_rgb_from("test_images/Larger/checkers.png")
plt.imshow(img, cmap="Greys_r")
plt.show()
for i in range(10,110,10):
newimg = addUniformNoise(img, i,101)
plt.imshow(newimg, cmap="Greys_r")
plt.show()
input("pause")
'''
# Test new dimensional function
# Now let's import an image
# Image is 1800 pixels tall and 2100 pixels wide. We will use box sizes of
# 2, 5, 10, 20, 30, 50, 100, 150, 300
# NOTE: 9 iterations of 2100x1800 pixels means we could check up to
# 34 million pixels per image
'''
a = [(1, 0), (1.2, 1), (1.3, 2), (1.4, 3)]
b = [(1.2, 0), (1.3, 1), (1.4, 2), (1.5, 3)]
c = [(1.6, 0), (1.7, 1), (1.8, 2), (1.9, 3)]
d = [(2, 0), (1.9, 1), (2, 2), (2, 3)]
plot6330( a, b, c, d )
input("stopped")
'''
# Test setup: box width list and list of images
#GRID = [3, 5, 10, 20, 30, 50, 100, 150, 300]
#NOISE_MODEL = "uniform"
#SEED = 101
#image_library = ["test_images/Larger/norwaymap.png"]
image_library = []
image_library.append("test_images/Larger/kochSnowflake.png")
#image_library = []
#for i in range(5,55,5):
# image_library.append("test_images/Larger/fallleaf/"+str(i)+"fallleaf.png")
#image_library.append("test_images/Larger/blank.png")
dim_test = [] # Holds the results of each images noise vs. dimension test
var_test = []
# dim_test.append(dim_data)
#var_test.append(var_data)
#print("--- Finished in: ", timeit(time.time()-start_time), "---")
# When we are done processing, let's plot the results:
#plot2( dim_test[0], var_test[0] )
| {
"content_hash": "24ad33b154454c85c1b2a0683644a49b",
"timestamp": "",
"source": "github",
"line_count": 519,
"max_line_length": 118,
"avg_line_length": 30.341040462427745,
"alnum_prop": 0.5717914523401283,
"repo_name": "mcm7f/thesis",
"id": "72436ea04b0f8847b0ca43578dafa8ebdcd87701",
"size": "15878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fractal_dimension_streamlined.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "9697"
},
{
"name": "Python",
"bytes": "150162"
}
],
"symlink_target": ""
} |
"""Python source file include Penguin pipeline functions and necessary utils.
The utilities in this file are used to build a model with scikit-learn.
This module file will be used in Transform and generic Trainer.
"""
import os
import pickle
from typing import Tuple
import absl
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx.dsl.io import fileio
from tfx.utils import io_utils
from tfx_bsl.tfxio import dataset_options
from tensorflow_metadata.proto.v0 import schema_pb2
_FEATURE_KEYS = [
'culmen_length_mm', 'culmen_depth_mm', 'flipper_length_mm', 'body_mass_g'
]
_LABEL_KEY = 'species'
# The Penguin dataset has 342 records, and is divided into train and eval
# splits in a 2:1 ratio.
_TRAIN_DATA_SIZE = 228
_TRAIN_BATCH_SIZE = 20
def _input_fn(
    file_pattern: str,
    data_accessor: DataAccessor,
    schema: schema_pb2.Schema,
    batch_size: int = 20,
) -> Tuple[np.ndarray, np.ndarray]:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: input tfrecord file pattern.
    data_accessor: DataAccessor for converting input to RecordBatch.
    schema: schema of the input data.
    batch_size: An int representing the number of records to combine in a single
      batch.

  Returns:
    A (features, indices) tuple where features is a matrix of features, and
    indices is a single vector of label indices.
  """
  record_batch_iterator = data_accessor.record_batch_factory(
      file_pattern,
      dataset_options.RecordBatchesOptions(batch_size=batch_size, num_epochs=1),
      schema)
  feature_list = []
  label_list = []
  for record_batch in record_batch_iterator:
    # Flatten each record-batch column into {column name: 1-D array}.
    record_dict = {}
    for column, field in zip(record_batch, record_batch.schema):
      record_dict[field.name] = column.flatten()
    label_list.append(record_dict[_LABEL_KEY])
    # Stack the feature columns into one (batch, n_features) matrix,
    # ordered as in _FEATURE_KEYS.
    features = [record_dict[key] for key in _FEATURE_KEYS]
    feature_list.append(np.stack(features, axis=-1))
  return np.concatenate(feature_list), np.concatenate(label_list)
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
  """Train the model based on given args.

  Builds a StandardScaler + MLPClassifier sklearn pipeline, fits it on
  the train split, logs eval accuracy, and exports the fitted pipeline
  as a pickle under fn_args.serving_model_dir.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  schema = io_utils.parse_pbtxt_file(fn_args.schema_file, schema_pb2.Schema())
  x_train, y_train = _input_fn(fn_args.train_files, fn_args.data_accessor,
                               schema)
  x_eval, y_eval = _input_fn(fn_args.eval_files, fn_args.data_accessor, schema)
  # Convert TFX's step budget into sklearn's epoch count (max_iter).
  steps_per_epoch = _TRAIN_DATA_SIZE / _TRAIN_BATCH_SIZE
  estimator = MLPClassifier(
      hidden_layer_sizes=[8, 8, 8],
      activation='relu',
      solver='adam',
      batch_size=_TRAIN_BATCH_SIZE,
      learning_rate_init=0.0005,
      max_iter=int(fn_args.train_steps / steps_per_epoch),
      verbose=True)
  # Create a pipeline that standardizes the input data before passing it to an
  # estimator. Once the scaler is fit, it will use the same mean and stdev to
  # transform inputs at both training and serving time.
  model = Pipeline([
      ('scaler', StandardScaler()),
      ('estimator', estimator),
  ])
  # Record the feature/label column names on the model object itself so
  # downstream consumers of the pickle can recover them.
  model.feature_keys = _FEATURE_KEYS
  model.label_key = _LABEL_KEY
  model.fit(x_train, y_train)
  absl.logging.info(model)
  score = model.score(x_eval, y_eval)
  absl.logging.info('Accuracy: %f', score)
  # Export the model as a pickle named model.pkl. AI Platform Prediction expects
  # sklearn model artifacts to follow this naming convention.
  os.makedirs(fn_args.serving_model_dir)
  model_path = os.path.join(fn_args.serving_model_dir, 'model.pkl')
  with fileio.open(model_path, 'wb+') as f:
    pickle.dump(model, f)
| {
"content_hash": "ead2d1985f1b9e24d9c832785ec4539a",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 80,
"avg_line_length": 32.378151260504204,
"alnum_prop": 0.7093174150012976,
"repo_name": "tensorflow/tfx",
"id": "6bd421128605f8e5f927463d7dca92b15a16b6f0",
"size": "4450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tfx/examples/penguin/experimental/penguin_utils_sklearn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7405"
},
{
"name": "Jupyter Notebook",
"bytes": "38579"
},
{
"name": "Python",
"bytes": "6009050"
},
{
"name": "Shell",
"bytes": "34056"
},
{
"name": "Starlark",
"bytes": "20324"
}
],
"symlink_target": ""
} |
import mock
from oslo_config import cfg
from neutron.common import exceptions as n_exc
from neutron import manager
from neutron.plugins.common import constants
from neutron.services import provider_configuration as provconf
from neutron.tests import base
class ParseServiceProviderConfigurationTestCase(base.BaseTestCase):
    """Tests for parse_service_provider_opt(), which turns raw
    '<service_type>:<name>:<driver>[:default]' strings into dicts."""

    def setUp(self):
        super(ParseServiceProviderConfigurationTestCase, self).setUp()
        # Stub NeutronModule.service_providers so each test can feed its
        # own raw provider strings.
        self.service_providers = mock.patch.object(
            provconf.NeutronModule, 'service_providers').start()

    def _set_override(self, service_providers):
        # Helper: make the stub return the given raw provider strings.
        self.service_providers.return_value = service_providers

    def test_default_service_provider_configuration(self):
        providers = cfg.CONF.service_providers.service_provider
        self.assertEqual(providers, [])

    def test_parse_single_service_provider_opt(self):
        self._set_override([constants.LOADBALANCER +
                            ':lbaas:driver_path'])
        expected = {'service_type': constants.LOADBALANCER,
                    'name': 'lbaas',
                    'driver': 'driver_path',
                    'default': False}
        res = provconf.parse_service_provider_opt()
        self.assertEqual(len(res), 1)
        self.assertEqual(res, [expected])

    def test_parse_single_default_service_provider_opt(self):
        # Trailing ':default' flips the 'default' flag on.
        self._set_override([constants.LOADBALANCER +
                            ':lbaas:driver_path:default'])
        expected = {'service_type': constants.LOADBALANCER,
                    'name': 'lbaas',
                    'driver': 'driver_path',
                    'default': True}
        res = provconf.parse_service_provider_opt()
        self.assertEqual(len(res), 1)
        self.assertEqual(res, [expected])

    def test_parse_multi_service_provider_opt(self):
        self._set_override([constants.LOADBALANCER +
                            ':lbaas:driver_path',
                            constants.LOADBALANCER + ':name1:path1',
                            constants.LOADBALANCER +
                            ':name2:path2:default'])
        res = provconf.parse_service_provider_opt()
        # This parsing crosses repos if additional projects are installed,
        # so check that at least what we expect is there; there may be more.
        self.assertTrue(len(res) >= 3)

    def test_parse_service_provider_invalid_format(self):
        # A 4th field other than 'default', or a missing name/driver,
        # must be rejected.
        self._set_override([constants.LOADBALANCER +
                            ':lbaas:driver_path',
                            'svc_type:name1:path1:def'])
        self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
        self._set_override([constants.LOADBALANCER +
                            ':',
                            'svc_type:name1:path1:def'])
        self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)

    def test_parse_service_provider_name_too_long(self):
        # Provider names of 256 characters must be rejected.
        name = 'a' * 256
        self._set_override([constants.LOADBALANCER +
                            ':' + name + ':driver_path',
                            'svc_type:name1:path1:def'])
        self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
class ProviderConfigurationTestCase(base.BaseTestCase):
    """Tests for the ProviderConfiguration container: uniqueness
    invariants, provider registration and filtered queries."""

    def setUp(self):
        super(ProviderConfigurationTestCase, self).setUp()
        self.service_providers = mock.patch.object(
            provconf.NeutronModule, 'service_providers').start()

    def _set_override(self, service_providers):
        # Helper: make the stubbed NeutronModule return these raw strings.
        self.service_providers.return_value = service_providers

    def test_ensure_driver_unique(self):
        # The same driver path may only be registered once.
        pconf = provconf.ProviderConfiguration()
        pconf.providers[('svctype', 'name')] = {'driver': 'driver',
                                                'default': True}
        self.assertRaises(n_exc.Invalid,
                          pconf._ensure_driver_unique, 'driver')
        self.assertIsNone(pconf._ensure_driver_unique('another_driver1'))

    def test_ensure_default_unique(self):
        # Only one default provider is allowed per service type.
        pconf = provconf.ProviderConfiguration()
        pconf.providers[('svctype', 'name')] = {'driver': 'driver',
                                                'default': True}
        self.assertRaises(n_exc.Invalid,
                          pconf._ensure_default_unique,
                          'svctype', True)
        self.assertIsNone(pconf._ensure_default_unique('svctype', False))
        self.assertIsNone(pconf._ensure_default_unique('svctype1', True))
        self.assertIsNone(pconf._ensure_default_unique('svctype1', False))

    def test_add_provider(self):
        pconf = provconf.ProviderConfiguration()
        prov = {'service_type': constants.LOADBALANCER,
                'name': 'name',
                'driver': 'path',
                'default': False}
        pconf.add_provider(prov)
        # Providers are keyed by (service_type, name).
        self.assertEqual(len(pconf.providers), 1)
        self.assertEqual(list(pconf.providers.keys()),
                         [(constants.LOADBALANCER, 'name')])
        self.assertEqual(list(pconf.providers.values()),
                         [{'driver': 'path', 'default': False}])

    def test_add_duplicate_provider(self):
        pconf = provconf.ProviderConfiguration()
        prov = {'service_type': constants.LOADBALANCER,
                'name': 'name',
                'driver': 'path',
                'default': False}
        pconf.add_provider(prov)
        self.assertRaises(n_exc.Invalid, pconf.add_provider, prov)
        self.assertEqual(len(pconf.providers), 1)

    def test_get_service_providers(self):
        self._set_override([constants.LOADBALANCER + ':name:path',
                            constants.LOADBALANCER + ':name2:path2',
                            'st2:name:driver:default',
                            'st3:name2:driver2:default'])
        provs = [{'service_type': constants.LOADBALANCER,
                  'name': 'name',
                  'driver': 'path',
                  'default': False},
                 {'service_type': constants.LOADBALANCER,
                  'name': 'name2',
                  'driver': 'path2',
                  'default': False},
                 {'service_type': 'st2',
                  'name': 'name',
                  'driver': 'driver',
                  'default': True
                  },
                 {'service_type': 'st3',
                  'name': 'name2',
                  'driver': 'driver2',
                  'default': True}]
        pconf = provconf.ProviderConfiguration()
        for prov in provs:
            # Each filtered lookup returns exactly the matching record.
            p = pconf.get_service_providers(
                filters={'name': [prov['name']],
                         'service_type': prov['service_type']}
            )
            self.assertEqual(p, [prov])

    def test_get_service_providers_with_fields(self):
        self._set_override([constants.LOADBALANCER + ":name:path",
                            constants.LOADBALANCER + ":name2:path2"])
        provs = [{'service_type': constants.LOADBALANCER,
                  'name': 'name',
                  'driver': 'path',
                  'default': False},
                 {'service_type': constants.LOADBALANCER,
                  'name': 'name2',
                  'driver': 'path2',
                  'default': False}]
        pconf = provconf.ProviderConfiguration()
        for prov in provs:
            # With fields=['name'] only the requested attribute is returned.
            p = pconf.get_service_providers(
                filters={'name': [prov['name']],
                         'service_type': prov['service_type']},
                fields=['name']
            )
            self.assertEqual(p, [{'name': prov['name']}])
class GetProviderDriverClassTestCase(base.BaseTestCase):
    """Tests for get_provider_driver_class() alias resolution."""

    def test_get_provider_driver_class_hit(self):
        # A known alias in the core-plugins namespace resolves to its
        # fully qualified class path.
        driver = 'ml2'
        expected = 'neutron.plugins.ml2.plugin.Ml2Plugin'
        actual = provconf.get_provider_driver_class(
            driver,
            namespace=manager.CORE_PLUGINS_NAMESPACE)
        self.assertEqual(expected, actual)

    def test_get_provider_driver_class_miss(self):
        # An unknown alias falls through unchanged.
        retval = provconf.get_provider_driver_class('foo')
        self.assertEqual('foo', retval)
class NeutronModuleTestCase(base.BaseTestCase):
    """Tests for NeutronModule reading service providers from an ini file."""

    def test_can_parse_multi_opt_service_provider_from_conf_file(self):
        # The neutron_test fixture ini under base.ETCDIR declares two
        # service_provider entries; both must be read back.
        mod = provconf.NeutronModule('neutron_test')
        mod.ini(base.ETCDIR)
        self.assertEqual(['foo', 'bar'], mod.service_providers(),
                         'Expected two providers, only one read')
| {
"content_hash": "70875740cba31991b19ec1bf6b595f5e",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 77,
"avg_line_length": 42.22,
"alnum_prop": 0.5605163429654192,
"repo_name": "chitr/neutron",
"id": "21531a41a90d5856d1df87d48f127197e65cb707",
"size": "9070",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/services/test_provider_configuration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "7647002"
},
{
"name": "Shell",
"bytes": "13342"
}
],
"symlink_target": ""
} |
from kivy.uix.widget import Widget
from kivy.uix.image import Image
from app.MainController import MainController
WINDOW_SIZE = 400, 768
BUTTON_SPACING = 16
class MainUI(Widget):
    """Root widget of the time-tracker app.

    Composes the static background image, the MainController's buttons,
    labels and time-row form into a single absolutely-positioned screen
    of WINDOW_SIZE.
    """

    def __init__(self, **kwargs):
        super(MainUI, self).__init__(**kwargs)
        self.controller = MainController(window_size=WINDOW_SIZE)
        self.build_ui()

    @staticmethod
    def relative_top(relative_object):
        # Top edge for a widget stacked directly below *relative_object*,
        # separated by BUTTON_SPACING.
        return relative_object.top - relative_object.height - BUTTON_SPACING

    def build_ui(self):
        """Size the root widget and position every child widget absolutely."""
        # Set MainUI Attributes
        self.size = WINDOW_SIZE
        # UI Background
        self.add_widget(Image(source='./assets/PyStalkerBackground_v2.gif', size=WINDOW_SIZE))
        # Add Timer Label
        self.add_widget(self.controller.timer_label)
        # Add load day before button
        self.controller.submit_button.size = 39, 39
        self.controller.submit_button.background_normal = './assets/button_add_row.png'
        self.controller.submit_button.top = WINDOW_SIZE[1] - 96
        self.controller.submit_button.right = 383
        self.add_widget(self.controller.submit_button)
        # Add load day before button
        self.controller.button_previous_day.size = 40, 42
        self.controller.button_previous_day.background_normal = './assets/button_prev_day.png'
        self.controller.button_previous_day.top = self.relative_top(self.controller.submit_button)
        self.controller.button_previous_day.right = 383
        self.add_widget(self.controller.button_previous_day)
        # Add load day before button
        self.controller.button_next_day.size = 39, 42
        self.controller.button_next_day.background_normal = './assets/button_next_day.png'
        self.controller.button_next_day.top = self.relative_top(self.controller.button_previous_day)
        self.controller.button_next_day.right = 383
        self.add_widget(self.controller.button_next_day)
        # Add Button down
        self.controller.button_report.size = 39, 39
        self.controller.button_report.background_normal = './assets/button_record.png'
        self.controller.button_report.top = self.relative_top(self.controller.button_next_day)
        self.controller.button_report.right = 383
        self.add_widget(self.controller.button_report)
        # Add Time Row Date label
        self.controller.current_day_display.font_size = 16
        self.controller.current_day_display.size = 295, 38
        self.controller.current_day_display.top = WINDOW_SIZE[1] - 80
        self.controller.current_day_display.right = 222
        self.add_widget(self.controller.current_day_display)
        # Add Time Row Form Project Text
        self.controller.row_form.text_project.size = 295, 38
        self.controller.row_form.text_project.top = WINDOW_SIZE[1] - 120
        self.controller.row_form.text_project.right = 310
        self.controller.row_form.text_project.background_color = MainController.rgba2float(58, 58, 58)
        self.controller.row_form.text_project.font_size = 17
        self.controller.row_form.text_project.foreground_color = MainController.rgba2float(163, 152, 51)
        # Add Time Row Form Project Description
        self.controller.row_form.text_description.size = 295, 110
        self.controller.row_form.text_description.top = self.relative_top(self.controller.row_form.text_project)
        self.controller.row_form.text_description.right = 310
        self.controller.row_form.text_description.background_color = MainController.rgba2float(58, 58, 58)
        self.controller.row_form.text_description.font_size = 17
        self.controller.row_form.text_description.foreground_color = MainController.rgba2float(163, 152, 51)
        # Add Form widget
        self.add_widget(self.controller.row_form)
        # Add button up
        self.controller.button_up.size = 27, 16
        self.controller.button_up.background_normal = './assets/button_go_up.png'
        self.controller.button_up.top = self.relative_top(self.controller.row_form.text_description)
        self.controller.button_up.right = 267
        self.add_widget(self.controller.button_up)
        # Add Button down
        self.controller.button_down.size = 28, 16
        self.controller.button_down.background_normal = './assets/button_go_down.png'
        self.controller.button_down.top = self.controller.button_up.top
        self.controller.button_down.right = self.controller.button_up.right + self.controller.button_up.width + BUTTON_SPACING
        self.add_widget(self.controller.button_down)
        # Set UI Stack Layout
        self.controller.stack_layout.orientation = 'tb-lr'
        self.controller.stack_layout.size = (WINDOW_SIZE[0], 430)
        self.controller.stack_layout.padding = 10
        self.controller.stack_layout.spacing = 5
        self.add_widget(self.controller.stack_layout)
| {
"content_hash": "9eab616da552479c711edd5a0792f1bb",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 126,
"avg_line_length": 47.627450980392155,
"alnum_prop": 0.6904075751337999,
"repo_name": "dandro/kivy_timetracker",
"id": "80d024e0490fa6774c8d721deac5358d1fa228b8",
"size": "4858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/MainUI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17767"
}
],
"symlink_target": ""
} |
from django.db import models
from model_utils.managers import InheritanceManager
# Create your models here.
class User(models.Model):
    """Application user account: login credentials, display info, avatar."""

    class Meta:
        verbose_name = "使用者(User)"

    account = models.CharField("登入帳號", max_length=30, unique=True)
    name = models.CharField("姓名", max_length=10)
    nickname = models.CharField("暱稱", max_length=40, blank=True)
    member = models.CharField("身分", max_length=20, default="User")
    # NOTE(review): default looks like a pre-computed password hash — confirm.
    password = models.CharField("密碼", max_length=56, default="71454996db126e238e278a202a7dbc49dda187ec4f8c9dfc95584900")
    photo_path = models.URLField("大頭照", default="/static/img/user/supportmale-128.png")

    def __str__(self):
        return self.name
class Ticket(models.Model):
    """An issue/case: opened by one user, assignable to many."""

    class Meta:
        verbose_name = "案件(Ticket)"

    ticket_title = models.CharField("案件主題", max_length=60)
    status = models.BooleanField("結案", default=False)  # True once closed
    time = models.DateTimeField("建立時間", auto_now_add=True)
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0 — confirm.
    opened_user = models.ForeignKey(User, related_name="OpenedUser")
    assignee = models.ManyToManyField(User, blank=True)

    def __str__(self):
        return "#%s %s" % (self.id, self.ticket_title)
class Label(models.Model):
    """Colored tag attachable to many tickets."""

    class Meta:
        verbose_name = "標籤(Label)"

    # Background color palette: 6-digit hex without the leading '#'.
    RED = 'e11d21'
    ORANGE = 'eb6420'
    YELLOW = 'fbca04'
    GREEN = '009800'
    DARK_SEA_GREEN = '006b75'
    BLUE = '207de5'
    DARK_BLUE = '0052cc'
    PURPLE = '5319e7'
    LIGHT_RED = 'f7c6c7'
    LIGHT_ORANGE = 'fad8c7'
    LIGHT_YELLOW = 'fef2c0'
    LIGHT_GREEN = 'bfe5bf'
    LIGHT_DARK_SEA_GREEN = 'bfdadc'
    LIGHT_BLUE = 'c7edf8'
    LIGHT_DARK_BLUE = 'bfd4f2'
    LIGHT_PURPLE = 'd4c5f9'
    # Grouped choices: strong colors under 'MAIN', pastels under 'LIGHT'.
    COLOR_CHOICES = (
        ('MAIN', (
            (RED, '紅色'),
            (ORANGE, '橙色'),
            (YELLOW, '黃色'),
            (GREEN, '綠色'),
            (DARK_SEA_GREEN, '暗綠色'),
            (BLUE, '藍色'),
            (DARK_BLUE, '暗藍色'),
            (PURPLE, '紫色'),
        )),
        ('LIGHT', (
            (LIGHT_RED, '紅色'),
            (LIGHT_ORANGE, '橙色'),
            (LIGHT_YELLOW, '黃色'),
            (LIGHT_GREEN, '綠色'),
            (LIGHT_DARK_SEA_GREEN, '暗綠色'),
            (LIGHT_BLUE, '藍色'),
            (LIGHT_DARK_BLUE, '暗藍色'),
            (LIGHT_PURPLE, '紫色'),
        )),
    )
    color = models.CharField("顏色", max_length=6, choices=COLOR_CHOICES, default=RED)
    # Text color: white or black, chosen for contrast with the background.
    WHITE = 'ffffff'
    BLACK = '000000'
    FONTCOLOR_CHOICES = (
        (WHITE, '白'),
        (BLACK, '黑'),
    )
    fontcolor = models.CharField("字的顏色", max_length=6, choices=FONTCOLOR_CHOICES, default=WHITE)
    label_name = models.CharField("標籤名稱", max_length=20)
    tickets = models.ManyToManyField(Ticket, blank=True)

    def __str__(self):
        return self.label_name
class Comment(models.Model):
    """A registered user's comment on a ticket."""

    class Meta:
        verbose_name = "留言(Comment)"

    user = models.ForeignKey(User, related_name='Users')
    ticket = models.ForeignKey(Ticket, related_name="Tickets")
    content = models.CharField("留言內容", max_length=140, blank=True)
    time = models.DateTimeField("建立時間", auto_now_add=True)
class GuestComment(models.Model):
    """A comment left by a guest, identified only by a free-form name."""

    class Meta:
        verbose_name = "訪客留言(Comment)"

    user = models.CharField("姓名", max_length=30)  # guest name, not a User FK
    ticket = models.ForeignKey(Ticket, related_name="commentofguest")
    content = models.CharField("留言內容", max_length=140, blank=True)
    time = models.DateTimeField("建立時間", auto_now_add=True)
class TicketStatus(models.Model):
    """Base model for events in a ticket's history; concrete event types
    subclass this (label add/remove, assign/unassign, close/reopen)."""
    class Meta:
        verbose_name = "狀態(TicketStatus)"
    category = models.CharField("類別", max_length=30)
    # User who performed the status change.
    maker = models.ForeignKey(User, related_name="userchangestatus")
    ticket = models.ForeignKey(Ticket, related_name="ticketofstatus")
    time = models.DateTimeField("建立時間", auto_now_add=True)
    # Presumably django-model-utils' InheritanceManager, so queries can
    # return the concrete subclass instances — confirm against imports.
    objects = InheritanceManager()
class AddLabel(TicketStatus):
    """History event: a label was attached to the ticket."""
    class Meta:
        verbose_name = "添加標籤(AddLabel)"
    labels = models.ForeignKey(Label, related_name="addtoticket")
class RemoveLabel(TicketStatus):
    """History event: a label was removed from the ticket."""
    class Meta:
        verbose_name = "取消標籤(RemoveLabel)"
    labels = models.ForeignKey(Label, related_name="removefromticket")
class UserAssign(TicketStatus):
    """History event: a user was assigned to the ticket."""
    class Meta:
        verbose_name = "人員指派(UserAssign)"
    user = models.ForeignKey(User, related_name="userassigned")
class UserUnassign(TicketStatus):
    """History event: a user was unassigned from the ticket."""
    class Meta:
        verbose_name = "取消人員指派(UserUnassign)"
    user = models.ForeignKey(User, related_name="userunassigned")
class CloseIssue(TicketStatus):
    """History event: the ticket was closed."""
    class Meta:
        verbose_name = "結束案件"
class ReopenIssue(TicketStatus):
    """History event: the ticket was reopened."""
    class Meta:
        verbose_name = "重啟案件"
| {
"content_hash": "3c9805e30a52ada1b60344e2462411d7",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 120,
"avg_line_length": 33.90298507462686,
"alnum_prop": 0.6266784063394233,
"repo_name": "noracami/track-it",
"id": "881a78271e3d6a4d660d0a306cf690dccb3bc293",
"size": "4819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trackit/issues/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2208"
},
{
"name": "HTML",
"bytes": "40482"
},
{
"name": "Python",
"bytes": "41386"
}
],
"symlink_target": ""
} |
import sys
### smithwaterman.py ###
# A very basic Smith-Waterman alignment
# Input: two sequences
# Output: Print a local alignment with the starting nucleotide of each sequence before each sequence
# Modifies: STDOUT
# Alignment scoring parameters.
maxgap = 10  # maximum gap length examined when scoring row/column gaps
gapopen = -5  # penalty for opening a gap
gapextend = -5  # penalty for each additional gapped position
match = 10  # score for a matching pair of nucleotides
mismatch = -5  # penalty for a mismatching pair
#### print_matrix ###
# Input: A 2D array
# Output: Print matrix to STDOUT
# Modifies: STDOUT
def print_matrix(M):
    """Print a 2D matrix to STDOUT, one row per line, each value preceded
    by a single space. Does nothing for an empty matrix.

    Uses the single-argument print() form, which behaves identically under
    Python 2 and Python 3 (the old `print oline` statement was py2-only).
    """
    if not M:
        return
    for row in M:
        print(''.join(' ' + str(value) for value in row))
#### print_alignment_matrix ###
# Input: Two sequences, and a 2D array
# Output: Print alignment matrix to STDOUT
# Modifies: STDOUT
def print_alignment_matrix(M, s1, s2):
    """Print score matrix M with s1 as the column header row and one
    character of s2 labeling each row.

    Silently returns when M is empty or its shape is not
    len(s2) rows x len(s1) columns.

    Uses single-argument print() (py2/py3-compatible) instead of the
    original py2-only print statement.
    """
    if not M:
        return
    if len(M) != len(s2) or len(M[0]) != len(s1):
        return
    print('  ' + ''.join(' ' + str(c) for c in s1))
    for i, row in enumerate(M):
        print(s2[i] + ''.join(' ' + str(v) for v in row))
#### diag_score ####
# Fetch the diagonal H value from the current coordinate
# Input: Matrix H and current coordinate row i and column j
# Output: The H[i-1][j-1] value, or 0 on the top row / left column
# Modifies: None
def diag_score(H, i, j):
    """Return H[i-1][j-1]; 0 when (i, j) is on the top row or left column."""
    if i < 1 or j < 1:
        return 0
    return H[i - 1][j - 1]
#### match_score ####
# Return the score given the current characters
# Input: Two characters c1 c2
# Output: The score
# Modifies: None
def match_score(c1, c2):
    """Score a single aligned pair: module-level `match` when the
    characters are equal, `mismatch` otherwise."""
    return match if c1 == c2 else mismatch
#### row_scores ####
# Return the scores for the gap going up the row
# Input: Score matrix H and current position i j
# Output: an array of scores
# Modifies: None
def row_scores(H, i, j):
    """Candidate scores for reaching (i, j) via a vertical gap.

    Considers gaps of length 1..maxgap ending at row i, charging `gapopen`
    plus `gapextend` per additional position. Returns [0] on the top row.
    """
    if i == 0:
        return [0]
    lo = max(0, i - maxgap)
    return [H[m][j] + gapopen + (i - m - 1) * gapextend
            for m in range(lo, i)]
#### col_scores ####
# Return the scores for the gap going across the columnb
# Input: Score matrix H and current position i j
# Output: an array of scores
# Modifies: None
def col_scores(H, i, j):
    """Candidate scores for reaching (i, j) via a horizontal gap.

    Considers gaps of length 1..maxgap ending at column j, charging
    `gapopen` plus `gapextend` per additional position. Returns [0] in the
    leftmost column.
    """
    if j == 0:
        return [0]
    lo = max(0, j - maxgap)
    return [H[i][n] + gapopen + (j - n - 1) * gapextend
            for n in range(lo, j)]
#### score_matrix ###
# Make the H scoring matrix for the alignment
# Input: Two sequences
# Output: H, a matrix with the scores computed
# Modifies: None
def score_matrix(s1, s2):
    """Build the Smith-Waterman scoring matrix H for sequences s1 and s2.

    H has len(s2) rows and len(s1) columns; each cell is the maximum of the
    diagonal (match/mismatch) move, the best vertical gap, the best
    horizontal gap, and 0 (local alignment floor).

    Removed the commented-out debug calls to print_alignment_matrix that
    the original carried.
    """
    H = [[0 for _ in range(len(s1))] for _ in range(len(s2))]
    for m in range(len(s2)):
        for n in range(len(s1)):
            H[m][n] = max(diag_score(H, m, n) + match_score(s1[n], s2[m]),
                          max(row_scores(H, m, n)),
                          max(col_scores(H, m, n)),
                          0)
    return H
#### matrix_max #####
# return the coordinate of the max value
# Input: takes a matrix H
# Output: list [i,j] with the best coordinate
def matrix_max(H):
    """Return [i, j] of the highest value in H.

    Ties keep the earliest coordinate (only a strictly greater value
    replaces the current best); an all-zero matrix yields [0, 0].
    """
    best_i, best_j, best_val = 0, 0, 0
    for i, row in enumerate(H):
        for j, val in enumerate(row):
            if val > best_val:
                best_i, best_j, best_val = i, j, val
    return [best_i, best_j]
#### next_coord ###
# Compute the next coordinate to go to during traceback, and its score
# Input: H scoring matrix, and current coordinate i j
# Output: the next score and coordinate inext jnext
# Modifies: None
def next_coord(H, i, j):
    """Traceback step from (i, j): return [score, inext, jnext] of the
    predecessor cell, preferring diagonal, then up, then left. Off-matrix
    neighbors score 0.
    """
    up = H[i - 1][j] if i > 0 else 0
    left = H[i][j - 1] if j > 0 else 0
    diag = H[i - 1][j - 1] if (i > 0 and j > 0) else 0
    if diag >= up and diag >= left:
        return [diag, i - 1, j - 1]
    if up >= left:
        return [up, i - 1, j]
    return [left, i, j - 1]
#### get_local_alignment ###
# Compute the best local alignment given the scoring matrix
# Input: H scoring matrix, sequences s1 and s2
# Output: the max alignment score, two strings that are the alignment lines, and the 1-based start indices for the two sequences
# Modifies: None
def get_local_alignment(H, s1, s2):
    """Trace back through H to recover the best local alignment.

    Starts at the matrix maximum and follows next_coord() until the score
    drops to 0. Returns [maxscore, a1, a2, s1start, s2start] where a1/a2
    are the aligned strings (with '-' for gaps) and s1start/s2start are the
    1-based starting positions of the alignment in s1 and s2.

    Removed dead locals from the original (mlen/nlen and the initial
    a1 = ''/a2 = '' assignments were never used).
    """
    [i, j] = matrix_max(H)
    currentscore = H[i][j]
    maxscore = currentscore
    s1o = list()
    s2o = list()
    [isave, jsave] = [0, 0]
    while currentscore > 0 and i >= 0 and j >= 0:
        # Remember the last cell visited: it is the alignment start.
        [isave, jsave] = [i, j]
        [currentscore, inext, jnext] = next_coord(H, i, j)
        if inext == i:
            # Horizontal move: consume s1, gap in s2.
            s1o.insert(0, s1[j])
            s2o.insert(0, '-')
        elif jnext == j:
            # Vertical move: gap in s1, consume s2.
            s1o.insert(0, '-')
            s2o.insert(0, s2[i])
        else:
            # Diagonal move: align both characters.
            s1o.insert(0, s1[j])
            s2o.insert(0, s2[i])
        [i, j] = [inext, jnext]
    s1start = jsave + 1
    s2start = isave + 1
    return [maxscore, ''.join(s1o), ''.join(s2o), s1start, s2start]
# Command-line entry point: align the two sequences given as arguments and
# print the score, aligned strings, and start positions.
# Fixes vs. original: removed the unused matrix M (score_matrix builds its
# own), removed a commented-out debug call, and switched to single-argument
# print() which behaves identically under Python 2 and 3.
s1 = sys.argv[1]
s2 = sys.argv[2]
H = score_matrix(s1, s2)
[maxscore, s1align, s2align, s1coord, s2coord] = get_local_alignment(H, s1, s2)
print(maxscore)
print(s1coord)
print(s1align)
print(s2align)
print(s2coord)
| {
"content_hash": "23ceef20a2e14290ffc0ea8924335e91",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 228,
"avg_line_length": 26.386138613861387,
"alnum_prop": 0.6307692307692307,
"repo_name": "jason-weirather/Au-public",
"id": "fc06b9535c4203a85408847f6eed3fba14a8aa04",
"size": "5348",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "iron/utilities/smithwaterman.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "131627"
},
{
"name": "Python",
"bytes": "1627906"
},
{
"name": "R",
"bytes": "3887"
},
{
"name": "Shell",
"bytes": "1452"
}
],
"symlink_target": ""
} |
import os
import shutil
import tempfile
import pytest
from sprinter.next.environment.injections import Injections
# Sample content containing only an override section.
TEST_CONTENT = """
Testing abc.
#OVERRIDE
here is an override string. it should appear at the bottom.
#OVERRIDE
"""
# Expected result of injecting "injectme" into TEST_CONTENT: the payload is
# wrapped in #testinjection markers and the override section stays last.
TEST_OVERRIDE_CONTENT = """
Testing abc.
#testinjection
injectme
#testinjection
#OVERRIDE
here is an override string. it should appear at the bottom.
#OVERRIDE
"""
# Seed content that must survive every inject/clear cycle.
PERMANENT_STRING = "this should stay no matter what."
# Payload injected by most tests below.
TEST_INJECTION = "this should stay temporarily"
@pytest.fixture
def test_file(tmpdir):
    """A temporary file pre-seeded with content that must survive
    inject/clear cycles."""
    path = tmpdir.join("test")
    path.write(PERMANENT_STRING)
    return path
@pytest.fixture
def injections():
    """A fresh Injections helper using the test marker and OVERRIDE tag."""
    inj = Injections("testinjection", override="OVERRIDE")
    return inj
def test_backup_file_created(test_file, injections):
    """test a backup file is created."""
    # Committing an inject must leave a .sprinter.bak copy next to the file.
    injections.inject(test_file.strpath, TEST_INJECTION)
    injections.commit()
    assert os.path.exists(test_file.strpath + ".sprinter.bak")
    # Remove the backup; committing a clear must re-create it.
    os.unlink(test_file.strpath + ".sprinter.bak")
    injections.clear(test_file.strpath)
    injections.commit()
    assert os.path.exists(test_file.strpath + ".sprinter.bak")
def test_injection(test_file, injections):
    """test a complete injection workflow."""
    # After injecting: payload present exactly once, seed content untouched.
    injections.inject(test_file.strpath, TEST_INJECTION)
    injections.commit()
    assert (
        test_file.read().count(TEST_INJECTION) > 0
    ), "Injection was not injected properly!"
    assert (
        test_file.read().count(TEST_INJECTION) == 1
    ), "Multiple injections were found!"
    assert (
        test_file.read().find(PERMANENT_STRING) != -1
    ), "Permanent string was removed on inject!"
    # After clearing: payload gone, seed content still untouched.
    injections.clear(test_file.strpath)
    injections.commit()
    assert (
        test_file.read().find(TEST_INJECTION) == -1
    ), "Injection was not cleared properly!"
    assert (
        test_file.read().find(PERMANENT_STRING) != -1
    ), "Permanent string was removed on clear!"
def test_similar_injectionname(test_file, injections):
    """Namespaces that share a prefix ('testinjection' vs
    'testinjectionsagain') must not clobber each other."""
    injections.inject(test_file.strpath, TEST_INJECTION)
    injections.commit()
    SIMILAR_INJECTION = "This is a similar injection"
    i_similiar = Injections("testinjectionsagain")
    i_similiar.inject(test_file.strpath, SIMILAR_INJECTION)
    i_similiar.commit()
    assert (
        test_file.read().count(SIMILAR_INJECTION) > 0
    ), "Similar injection was removed!"
    assert (
        test_file.read().count(SIMILAR_INJECTION) == 1
    ), "Multiple injections were found!"
    # Clearing one namespace must leave the other namespace's content alone.
    injections.clear(test_file.strpath)
    injections.commit()
    assert (
        test_file.read().find(TEST_INJECTION) == -1
    ), "Injection was not cleared properly!"
    assert (
        test_file.read().find(SIMILAR_INJECTION) > 0
    ), "Similar Injection was incorrectly cleared!"
def test_override(injections):
    """Injecting into content with an override section keeps that section
    at the bottom of the result."""
    result = injections.inject_content(TEST_CONTENT, "injectme")
    assert result == TEST_OVERRIDE_CONTENT, "Override result is different from expected."
def test_unicode():
    """inject_content should accept a non-ascii injection namespace."""
    inj = Injections("\xf0\x9f\x86\x92", override="OVERRIDE")
    inj.inject_content(TEST_CONTENT, "injectme")
def test_injected(test_file, injections):
    """Test the injected method to determine if a file has already been injected..."""
    # Fresh file: injected() must be False; after inject+commit: True.
    assert not injections.injected(
        test_file.strpath
    ), "Injected check returned true when not injected yet."
    injections.inject(test_file.strpath, TEST_INJECTION)
    injections.commit()
    assert injections.injected(test_file.strpath), "Injected check returned false"
def test_in_noninjected_file(test_file, injections):
    """
    in_noninjected_file should return true if a string exists
    non-injected and false it only exists in injected
    """
    assert not injections.injected(
        test_file.strpath
    ), "Injected check returned true when not injected yet."
    injections.inject(test_file.strpath, TEST_INJECTION)
    injections.commit()
    # Seed content is outside the injected block; the payload is inside it.
    assert injections.in_noninjected_file(test_file.strpath, PERMANENT_STRING)
    assert not injections.in_noninjected_file(test_file.strpath, TEST_INJECTION)
def test_injected_injects_after_overrides(injections):
    """
    re-injecting into a file will come after all other content
    """
    # Content already containing an injected block, an override section,
    # and trailing non-override content.
    ORIGINAL_STRING = """
#testinjection
injectme
#testinjection
#OVERRIDE
overidden content
#OVERRIDE
non-override content
""".strip()
    c = injections.inject_content(ORIGINAL_STRING, "injectme")
    # The re-injected payload must land after the trailing content.
    assert c.find("injectme") > c.find("non-override content")
def test_created(tmpdir, injections):
    """Injecting into a missing path should create the file on commit."""
    target = os.path.join(tmpdir.strpath, "testcreated")
    injections.inject(target, TEST_INJECTION)
    injections.commit()
    assert os.path.exists(target), "File was not generated on injection!"
def test_clear_nonexistent_file(tmpdir):
    """clear() on a missing path must not create the file."""
    inj = Injections("testinjection")
    target = os.path.join(tmpdir.strpath, "dontcreateme")
    inj.clear(target)
    inj.commit()
    assert not os.path.exists(target)
| {
"content_hash": "baf5a7fcabc01c5dfd941ae023e6b00d",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 86,
"avg_line_length": 29.935294117647057,
"alnum_prop": 0.6989585380231873,
"repo_name": "toumorokoshi/sprinter",
"id": "1040b8e6e4ddd9e7e1f1acffad865e2269f3f4d2",
"size": "5089",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sprinter/next/environment/tests/test_injections.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "528"
},
{
"name": "Python",
"bytes": "267773"
},
{
"name": "Shell",
"bytes": "2058"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MerkleBlockTest(BitcoinTestFramework):
    """Exercise the gettxoutproof / verifytxoutproof merkle-proof RPCs.

    Fix: `print "Mining blocks..."` was a Python-2-only print statement,
    inconsistent with the print(...) call already used in setup_chain;
    converted to the single-argument function form (identical output on
    Python 2 and 3).
    """
    def setup_chain(self):
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)

    def setup_network(self):
        self.nodes = []
        # Nodes 0/1 are "wallet" nodes
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
        # Nodes 2/3 are used for testing
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-txindex"]))
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        print("Mining blocks...")
        self.nodes[0].generate(105)
        self.sync_all()
        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)
        # Create two transactions in the same (upcoming) block.
        node0utxos = self.nodes[0].listunspent(1)
        tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500})
        txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
        tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500})
        txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
        # Proof of an unconfirmed tx must fail.
        assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1])
        self.nodes[0].generate(1)
        blockhash = self.nodes[0].getblockhash(chain_height + 1)
        self.sync_all()
        txlist = []
        blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
        txlist.append(blocktxn[1])
        txlist.append(blocktxn[2])
        # Proofs for one or both txs verify, with or without an explicit block.
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
        # Spend one of the outputs so one tx becomes fully spent.
        txin_spent = self.nodes[1].listunspent(1).pop()
        tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 500})
        self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
        self.nodes[0].generate(1)
        self.sync_all()
        txid_spent = txin_spent["txid"]
        txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
        # We can't find the block from a fully-spent tx
        # Doesn't apply to Dash Core - we have txindex always on
        # assert_raises(JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent])
        # ...but we can if we specify the block
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
        # ...or if the first tx is not fully-spent
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
        try:
            assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
        except JSONRPCException:
            assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1])), txlist)
        # ...or if we have a -txindex
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
# Standard entry point: run the test when executed directly as a script.
if __name__ == '__main__':
    MerkleBlockTest().main()
| {
"content_hash": "aebbe252fffaa1e59e91cba995248c9c",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 120,
"avg_line_length": 48.063291139240505,
"alnum_prop": 0.6494601000790098,
"repo_name": "willwray/dash",
"id": "a11608777d64d81ce0426b443644063c58c65de9",
"size": "4053",
"binary": false,
"copies": "2",
"ref": "refs/heads/v0.12.2.x",
"path": "qa/rpc-tests/merkle_blocks.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1334512"
},
{
"name": "C++",
"bytes": "5531446"
},
{
"name": "CSS",
"bytes": "126480"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "164792"
},
{
"name": "Makefile",
"bytes": "97956"
},
{
"name": "Objective-C",
"bytes": "4937"
},
{
"name": "Objective-C++",
"bytes": "7224"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "743411"
},
{
"name": "QMake",
"bytes": "2055"
},
{
"name": "Roff",
"bytes": "3688"
},
{
"name": "Shell",
"bytes": "35618"
}
],
"symlink_target": ""
} |
import sys
import os
import subprocess
import time
from datetime import datetime
import shutil
import tempfile
import hashlib
import re
import logging
import argparse
################
#### InfluxDB Variables
################
# Packaging variables
PACKAGE_NAME = "influxdb"
INSTALL_ROOT_DIR = "/usr/bin"
LOG_DIR = "/var/log/influxdb"
DATA_DIR = "/var/lib/influxdb"
SCRIPT_DIR = "/usr/lib/influxdb/scripts"
CONFIG_DIR = "/etc/influxdb"
LOGROTATE_DIR = "/etc/logrotate.d"
MAN_DIR = "/usr/share/man"
INIT_SCRIPT = "scripts/init.sh"
SYSTEMD_SCRIPT = "scripts/influxdb.service"
PREINST_SCRIPT = "scripts/pre-install.sh"
POSTINST_SCRIPT = "scripts/post-install.sh"
POSTUNINST_SCRIPT = "scripts/post-uninstall.sh"
LOGROTATE_SCRIPT = "scripts/logrotate"
DEFAULT_CONFIG = "etc/config.sample.toml"
# Default AWS S3 bucket for uploads
DEFAULT_BUCKET = "dl.influxdata.com/influxdb/artifacts"
CONFIGURATION_FILES = [
CONFIG_DIR + '/influxdb.conf',
LOGROTATE_DIR + '/influxdb',
]
PACKAGE_LICENSE = "MIT"
PACKAGE_URL = "https://github.com/influxdata/influxdb"
MAINTAINER = "support@influxdb.com"
VENDOR = "InfluxData"
DESCRIPTION = "Distributed time-series database."
prereqs = [ 'git', 'go' ]
go_vet_command = "go vet ./..."
optional_prereqs = [ 'fpm', 'rpmbuild', 'gpg' ]
fpm_common_args = "-f -s dir --log error \
--vendor {} \
--url {} \
--after-install {} \
--before-install {} \
--after-remove {} \
--license {} \
--maintainer {} \
--directories {} \
--directories {} \
--directories {} \
--description \"{}\"".format(
VENDOR,
PACKAGE_URL,
POSTINST_SCRIPT,
PREINST_SCRIPT,
POSTUNINST_SCRIPT,
PACKAGE_LICENSE,
MAINTAINER,
LOG_DIR,
DATA_DIR,
MAN_DIR,
DESCRIPTION)
for f in CONFIGURATION_FILES:
fpm_common_args += " --config-files {}".format(f)
targets = {
'influx' : './cmd/influx',
'influxd' : './cmd/influxd',
'influx_stress' : './cmd/influx_stress',
'influx_inspect' : './cmd/influx_inspect',
'influx_tsm' : './cmd/influx_tsm',
}
supported_builds = {
'darwin': [ "amd64" ],
'windows': [ "amd64" ],
'linux': [ "amd64", "i386", "armhf", "arm64", "armel", "static_i386", "static_amd64" ]
}
supported_packages = {
"darwin": [ "tar" ],
"linux": [ "deb", "rpm", "tar" ],
"windows": [ "zip" ],
}
################
#### InfluxDB Functions
################
def print_banner():
    """Log the InfluxDB build-script ASCII-art banner at INFO level."""
    logging.info("""
 ___ __ _ ___ ___
|_ _|_ _ / _| |_ ___ _| \\| _ )
| || ' \\| _| | || \\ \\ / |) | _ \\
|___|_||_|_| |_|\\_,_/_\\_\\___/|___/
  Build Script
""")
def create_package_fs(build_root):
    """Replicate the target package's directory layout under build_root.

    Each directory is created with mode 0755.
    """
    logging.debug("Creating package filesystem at location: {}".format(build_root))
    # The configured paths are absolute; strip the leading '/' so that
    # os.path.join nests them under build_root instead of replacing it.
    package_dirs = (INSTALL_ROOT_DIR, LOG_DIR, DATA_DIR, SCRIPT_DIR,
                    CONFIG_DIR, LOGROTATE_DIR, MAN_DIR)
    for abs_dir in package_dirs:
        target = os.path.join(build_root, abs_dir[1:])
        os.makedirs(target)
        os.chmod(target, 0o755)
def package_scripts(build_root, config_only=False, windows=False):
    """Copy the necessary scripts and configuration files to the package
    filesystem.

    config_only -- copy just the sample config to the build root.
    """
    # NOTE(review): `windows` is accepted but never used in this body —
    # confirm whether a Windows-specific branch was intended.
    if config_only:
        logging.debug("Copying configuration to build directory.")
        shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "influxdb.conf"))
        os.chmod(os.path.join(build_root, "influxdb.conf"), 0o644)
    else:
        logging.debug("Copying scripts and sample configuration to build directory.")
        # Init script, systemd unit, logrotate config, and sample config —
        # each installed with mode 0644.
        shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
        os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644)
        shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
        os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644)
        shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb"))
        os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb"), 0o644)
        shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf"))
        os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf"), 0o644)
def package_man_files(build_root):
    """Copy and gzip man pages to the package filesystem.

    Builds the man pages via make, then gzips every installed page in
    place. Fix: the original walk loop named a variable `dir`, shadowing
    the builtin; renamed.
    """
    logging.debug("Installing man pages.")
    run("make -C man/ clean install DESTDIR={}/usr".format(build_root))
    for dirpath, _dirnames, filenames in os.walk(os.path.join(build_root, MAN_DIR[1:])):
        for name in filenames:
            run("gzip -9n {}".format(os.path.join(dirpath, name)))
def go_get(branch, update=False, no_uncommitted=False):
    """Retrieve build dependencies or restore pinned dependencies.

    Returns False when there are uncommitted changes and no_uncommitted is
    set; otherwise installs `dep` if missing, runs `dep ensure`, and
    returns True.
    """
    # NOTE(review): `branch` and `update` are accepted but never used in
    # this body — confirm whether they are vestigial.
    if local_changes() and no_uncommitted:
        logging.error("There are uncommitted changes in the current directory.")
        return False
    if not check_path_for("dep"):
        logging.info("Downloading `dep`...")
        get_command = "go get github.com/golang/dep/cmd/dep"
        run(get_command)
    logging.info("Retrieving dependencies with `dep`...")
    sys.stdout.flush()
    # dep is expected to live under $GOPATH/bin after the `go get` above.
    run("{}/bin/dep ensure -v -vendor-only".format(os.environ.get("GOPATH")))
    return True
def run_tests(race, parallel, timeout, no_vet, junit=False):
    """Run gofmt, go vet, and the Go test suite.

    race     -- pass -race to `go test`
    parallel -- value for -parallel, or None
    timeout  -- value for -timeout, or None
    no_vet   -- skip the `go vet` step
    junit    -- pipe test output through go-junit-report into
                test-results.xml
    Returns False on fmt/vet failure; exits the process on test failure.

    Fix: the final debug log referenced `out` (leftover go-fmt/go-vet
    output) instead of `output` (the actual test output).
    """
    logging.info("Starting tests...")
    if race:
        logging.info("Race is enabled.")
    if parallel is not None:
        logging.info("Using parallel: {}".format(parallel))
    if timeout is not None:
        logging.info("Using timeout: {}".format(timeout))
    # gofmt must produce no output (any output lists unformatted files).
    out = run("go fmt ./...")
    if len(out) > 0:
        logging.error("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.")
        logging.error("{}".format(out))
        return False
    if not no_vet:
        logging.info("Running 'go vet'...")
        out = run(go_vet_command)
        if len(out) > 0:
            logging.error("Go vet failed. Please run 'go vet ./...' and fix any errors.")
            logging.error("{}".format(out))
            return False
    else:
        logging.info("Skipping 'go vet' call...")
    test_command = "go test -v"
    if race:
        test_command += " -race"
    if parallel is not None:
        test_command += " -parallel {}".format(parallel)
    if timeout is not None:
        test_command += " -timeout {}".format(timeout)
    test_command += " ./..."
    if junit:
        logging.info("Retrieving go-junit-report...")
        run("go get github.com/jstemmer/go-junit-report")
        # Capture the raw `go test` output ourselves so it can be piped
        # through go-junit-report below.
        logging.info("Running tests...")
        logging.debug("{}".format(test_command))
        proc = subprocess.Popen(test_command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output, unused_err = proc.communicate()
        output = output.decode('utf-8').strip()
        # Process the output through go-junit-report.
        with open('test-results.xml', 'w') as f:
            logging.debug("{}".format("go-junit-report"))
            junit_proc = subprocess.Popen(["go-junit-report"], stdin=subprocess.PIPE, stdout=f, stderr=subprocess.PIPE)
            unused_output, err = junit_proc.communicate(output.encode('ascii', 'ignore'))
            if junit_proc.returncode != 0:
                logging.error("Command '{}' failed with error: {}".format("go-junit-report", err))
                sys.exit(1)
        if proc.returncode != 0:
            logging.error("Command '{}' failed with error: {}".format(test_command, output.encode('ascii', 'ignore')))
            sys.exit(1)
    else:
        logging.info("Running tests...")
        output = run(test_command)
        # FIX: was `out`, which still held the go-fmt/go-vet output.
        logging.debug("Test output:\n{}".format(output.encode('ascii', 'ignore')))
    return True
################
#### All InfluxDB-specific content above this line
################
def run(command, allow_failure=False, shell=False):
    """Run shell command (convenience wrapper around subprocess).

    Returns the command's combined stdout/stderr, stripped and decoded as
    UTF-8. On failure: logs and exits the process unless allow_failure is
    set, in which case a warning is logged and None is returned.
    """
    out = None
    logging.debug("{}".format(command))
    try:
        argv = command if shell else command.split()
        out = subprocess.check_output(argv, stderr=subprocess.STDOUT, shell=shell)
        out = out.decode('utf-8').strip()
    except subprocess.CalledProcessError as e:
        if not allow_failure:
            logging.error("Command '{}' failed with error: {}".format(command, e.output))
            sys.exit(1)
        logging.warn("Command '{}' failed with error: {}".format(command, e.output))
        return None
    except OSError as e:
        if not allow_failure:
            logging.error("Command '{}' failed with error: {}".format(command, e))
            sys.exit(1)
        logging.warn("Command '{}' failed with error: {}".format(command, e))
        return out
    return out
def create_temp_dir(prefix=None):
    """Create a temporary directory and return its path.

    Uses '<PACKAGE_NAME>-build.' as the name prefix unless one is given.
    """
    chosen = "{}-build.".format(PACKAGE_NAME) if prefix is None else prefix
    return tempfile.mkdtemp(prefix=chosen)
def increment_minor_version(version):
    """Return the version with the minor component bumped and the patch
    component reset to zero.

    Inputs that are not three dot-separated parts are returned unchanged
    (with a warning logged).
    """
    parts = version.split('.')
    if len(parts) != 3:
        logging.warn("Could not determine how to increment version '{}', will just use provided version.".format(version))
        return version
    major, minor, _patch = parts
    bumped = '.'.join([major, str(int(minor) + 1), '0'])
    logging.debug("Incremented version from '{}' to '{}'.".format(version, bumped))
    return bumped
def get_current_version_tag():
    """Return the raw git version tag for the current checkout."""
    return run("git describe --always --tags --abbrev=0")
def get_current_version():
    """Parse a package version from the current git tag.

    Strips a leading 'v' and maps '-' / '_' separators to '~'
    (deb/rpm pre-release ordering convention).
    """
    tag = get_current_version_tag()
    if tag[0] == 'v':
        tag = tag[1:]
    return tag.replace("-", "~").replace("_", "~")
def get_current_commit(short=False):
    """Return the current git commit hash (abbreviated when short=True)."""
    cmd = "git log --pretty=format:'%h' -n 1" if short else "git rev-parse HEAD"
    return run(cmd).strip('\'\n\r ')
def get_current_branch():
    """Return the name of the currently checked-out git branch."""
    return run("git rev-parse --abbrev-ref HEAD").strip()
def local_changes():
    """Return True if the working tree has un-committed tracked changes."""
    return len(run("git diff-files --ignore-submodules --").strip()) > 0
def get_system_arch():
    """Map the machine name from uname to a packaging architecture label."""
    machine = os.uname()[4]
    aliases = {"x86_64": "amd64", "386": "i386", "aarch64": "arm64"}
    if machine in aliases:
        return aliases[machine]
    if 'arm' in machine:
        # Collapse specific ARM flavours (e.g. 'armv7l') to plain 'arm'.
        return "arm"
    return machine
def get_system_platform():
    """Return 'linux' for any Linux flavour, otherwise sys.platform as-is."""
    platform = sys.platform
    return "linux" if platform.startswith("linux") else platform
def get_go_version():
    """Return the installed Go toolchain version, or None if it cannot be
    parsed from `go version` output."""
    out = run("go version")
    matched = re.search('go version go(\S+)', out)
    if matched is None:
        return None
    return matched.groups()[0].strip()
def check_path_for(b):
    """Check the user's PATH for the provided binary.

    Returns the full path of the first executable match, or None.

    Fix: the inner is_exe helper was defined but never used — the loop
    re-implemented its condition inline; it is now actually called. Also
    fixed the "the the" docstring typo.
    """
    def is_exe(fpath):
        # Must be a regular file and executable by the current user.
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    for path in os.environ["PATH"].split(os.pathsep):
        path = path.strip('"')
        full_path = os.path.join(path, b)
        if is_exe(full_path):
            return full_path
def check_environ(build_dir=None):
    """Log common Go environment variables; warn when no build directory is
    given and the current directory is outside GOPATH. Always returns True."""
    logging.info("Checking environment...")
    for var in ("GOPATH", "GOBIN", "GOROOT"):
        logging.debug("Using '{}' for {}".format(os.environ.get(var), var))
    cwd = os.getcwd()
    gopath = os.environ.get("GOPATH")
    if build_dir is None and gopath and gopath not in cwd:
        logging.warn("Your current directory is not under your GOPATH. This may lead to build failures.")
    return True
def check_prereqs():
    """Verify that every required build tool (module-level `prereqs`) is on
    the PATH. Logs and returns False at the first missing dependency."""
    logging.info("Checking for dependencies...")
    for req in prereqs:
        if check_path_for(req):
            continue
        logging.error("Could not find dependency: {}".format(req))
        return False
    return True
def upload_packages(packages, bucket_name=None, overwrite=False):
    """Upload provided package output to AWS S3.

    packages    -- iterable of local file paths to upload
    bucket_name -- 'bucket' or 'bucket/nested/path'; defaults to
                   DEFAULT_BUCKET when None
    overwrite   -- replace keys that already exist in the bucket
    Returns False when the boto library is unavailable, True otherwise.
    """
    logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages))
    try:
        # boto is an optional dependency; bail out gracefully without it.
        import boto
        from boto.s3.key import Key
        from boto.s3.connection import OrdinaryCallingFormat
        logging.getLogger("boto").setLevel(logging.WARNING)
    except ImportError:
        logging.warn("Cannot upload packages without 'boto' Python library!")
        return False
    logging.info("Connecting to AWS S3...")
    # Up the number of attempts to 10 from default of 1
    boto.config.add_section("Boto")
    boto.config.set("Boto", "metadata_service_num_attempts", "10")
    c = boto.connect_s3(calling_format=OrdinaryCallingFormat())
    if bucket_name is None:
        bucket_name = DEFAULT_BUCKET
    bucket = c.get_bucket(bucket_name.split('/')[0])
    for p in packages:
        if '/' in bucket_name:
            # Allow for nested paths within the bucket name (ex:
            # bucket/folder). Assuming forward-slashes as path
            # delimiter.
            name = os.path.join('/'.join(bucket_name.split('/')[1:]),
                                os.path.basename(p))
        else:
            name = os.path.basename(p)
        logging.debug("Using key: {}".format(name))
        # Only upload when the key is absent, unless overwrite was requested.
        if bucket.get_key(name) is None or overwrite:
            logging.info("Uploading file {}".format(name))
            k = Key(bucket)
            k.key = name
            if overwrite:
                n = k.set_contents_from_filename(p, replace=True)
            else:
                n = k.set_contents_from_filename(p, replace=False)
            # Published artifacts are made world-readable.
            k.make_public()
        else:
            logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name))
    return True
def go_list(vendor=False, relative=False):
    """
    Return a list of packages
    If vendor is False vendor package are not included
    If relative is True the package prefix defined by PACKAGE_URL is stripped

    Fixes: the loop variable `p` shadowed the Popen handle of the same
    name, and the raw subprocess output was never decoded (bytes.split('\\n')
    fails on Python 3).
    """
    proc = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _err = proc.communicate()
    packages = out.decode('utf-8').split('\n')
    # `go list` output ends with a newline, leaving one empty trailing entry.
    if packages[-1] == '':
        packages = packages[:-1]
    if not vendor:
        packages = [pkg for pkg in packages if '/vendor/' not in pkg]
    if relative:
        relative_pkgs = []
        for pkg in packages:
            rel = pkg.replace(PACKAGE_URL, '.')
            if rel != '.':
                relative_pkgs.append(rel)
        packages = relative_pkgs
    return packages
def build(version=None,
          platform=None,
          arch=None,
          nightly=False,
          race=False,
          clean=False,
          outdir=".",
          tags=[],
          static=False):
    """Build each target for the specified architecture and platform.

    version  -- version string embedded into the binaries via -ldflags
    platform -- GOOS value (e.g. 'linux', 'darwin', 'windows')
    arch     -- GOARCH value; a 'static_' prefix forces a static build
    race     -- enable the Go race detector
    clean    -- wipe outdir before building
    outdir   -- directory receiving the built binaries
    tags     -- Go build tags (mutable default, but never mutated here)
    static   -- statically link (CGO_ENABLED=0, -a -installsuffix cgo)
    Returns True on success, False for an invalid ARM architecture.
    """
    # NOTE(review): `nightly` is accepted but unused in this body — confirm.
    logging.info("Starting build for {}/{}...".format(platform, arch))
    logging.info("Using Go version: {}".format(get_go_version()))
    logging.info("Using git branch: {}".format(get_current_branch()))
    logging.info("Using git commit: {}".format(get_current_commit()))
    if static:
        logging.info("Using statically-compiled output.")
    if race:
        logging.info("Race is enabled.")
    if len(tags) > 0:
        logging.info("Using build tags: {}".format(','.join(tags)))
    logging.info("Sending build output to: {}".format(outdir))
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    elif clean and outdir != '/' and outdir != ".":
        logging.info("Cleaning build directory '{}' before building.".format(outdir))
        shutil.rmtree(outdir)
        os.makedirs(outdir)
    logging.info("Using version '{}' for build.".format(version))
    for target, path in targets.items():
        logging.info("Building target: {}".format(target))
        build_command = ""
        # Handle static binary output
        if static is True or "static_" in arch:
            if "static_" in arch:
                static = True
                arch = arch.replace("static_", "")
            build_command += "CGO_ENABLED=0 "
        # Handle variations in architecture output
        if arch == "i386" or arch == "i686":
            arch = "386"
        elif "arm" in arch:
            arch = "arm"
        build_command += "GOOS={} GOARCH={} ".format(platform, arch)
        # NOTE(review): arch was already collapsed to "arm" just above, so
        # the "armel"/"arm64" comparisons below can never match — every ARM
        # build takes the GOARM=6 branch, and arm64 gets GOARCH=arm.
        # Confirm whether GOARM selection should use the original arch.
        if "arm" in arch:
            if arch == "armel":
                build_command += "GOARM=5 "
            elif arch == "armhf" or arch == "arm":
                build_command += "GOARM=6 "
            elif arch == "arm64":
                # TODO(rossmcdonald) - Verify this is the correct setting for arm64
                build_command += "GOARM=7 "
            else:
                logging.error("Invalid ARM architecture specified: {}".format(arch))
                logging.error("Please specify either 'armel', 'armhf', or 'arm64'.")
                return False
        if platform == 'windows':
            target = target + '.exe'
        build_command += "go build -o {} ".format(os.path.join(outdir, target))
        if race:
            build_command += "-race "
        if len(tags) > 0:
            build_command += "-tags {} ".format(','.join(tags))
        if "1.4" in get_go_version():
            if static:
                build_command += "-ldflags=\"-s -X main.version {} -X main.branch {} -X main.commit {}\" ".format(version,
                                                                                                                  get_current_branch(),
                                                                                                                  get_current_commit())
            else:
                build_command += "-ldflags=\"-X main.version {} -X main.branch {} -X main.commit {}\" ".format(version,
                                                                                                               get_current_branch(),
                                                                                                               get_current_commit())
        else:
            # Starting with Go 1.5, the linker flag arguments changed to 'name=value' from 'name value'
            if static:
                build_command += "-ldflags=\"-s -X main.version={} -X main.branch={} -X main.commit={}\" ".format(version,
                                                                                                                  get_current_branch(),
                                                                                                                  get_current_commit())
            else:
                build_command += "-ldflags=\"-X main.version={} -X main.branch={} -X main.commit={}\" ".format(version,
                                                                                                               get_current_branch(),
                                                                                                               get_current_commit())
        if static:
            build_command += "-a -installsuffix cgo "
        build_command += path
        start_time = datetime.utcnow()
        run(build_command, shell=True)
        end_time = datetime.utcnow()
        logging.info("Time taken: {}s".format((end_time - start_time).total_seconds()))
    return True
def generate_md5_from_file(path):
    """Return the hex MD5 digest of the file at `path`.

    The file is read in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(path, 'rb') as fh:
        while True:
            chunk = fh.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def generate_sig_from_file(path):
    """Create a detached, ASCII-armored GPG signature for the file at `path`.

    Returns True when the gpg command was invoked, False when no gpg
    binary is available on the PATH (in which case signing is skipped).
    Honors the GNUPG_HOME environment variable when it is set.
    """
    logging.debug("Generating GPG signature for file: {}".format(path))
    if check_path_for('gpg') is None:
        logging.warn("gpg binary not found on path! Skipping signature creation.")
        return False
    gnupg_home = os.environ.get("GNUPG_HOME")
    if gnupg_home is not None:
        run('gpg --homedir {} --armor --yes --detach-sign {}'.format(gnupg_home, path))
    else:
        run('gpg --armor --detach-sign --yes {}'.format(path))
    return True
def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False):
    """Package the output of the build process.

    `build_output` maps platform -> arch -> directory holding the built
    binaries (as assembled by main()).  For every platform/arch pair a
    staging tree is built under a temporary directory and each package
    type listed in supported_packages[platform] is produced from it:
    tar/zip archives directly, everything else through fpm.

    Returns the list of absolute-or-staged paths of the package files
    created.  The temporary staging tree is always removed, even when
    packaging fails.
    """
    outfiles = []
    tmp_build_dir = create_temp_dir()
    logging.debug("Packaging for build output: {}".format(build_output))
    logging.info("Using temporary directory: {}".format(tmp_build_dir))
    try:
        for platform in build_output:
            # Create top-level folder displaying which platform (linux, etc)
            os.makedirs(os.path.join(tmp_build_dir, platform))
            for arch in build_output[platform]:
                logging.info("Creating packages for {}/{}".format(platform, arch))
                # Create second-level directory displaying the architecture (amd64, etc)
                current_location = build_output[platform][arch]
                # Create directory tree to mimic file system of package
                build_root = os.path.join(tmp_build_dir,
                                          platform,
                                          arch,
                                          '{}-{}-{}'.format(PACKAGE_NAME, version, iteration))
                os.makedirs(build_root)
                # Copy packaging scripts to build directory
                if platform == "windows":
                    # For windows and static builds, just copy
                    # binaries to root of package (no other scripts or
                    # directories)
                    package_scripts(build_root, config_only=True, windows=True)
                elif static or "static_" in arch:
                    package_scripts(build_root, config_only=True)
                else:
                    create_package_fs(build_root)
                    package_scripts(build_root)
                if platform != "windows":
                    package_man_files(build_root)
                for binary in targets:
                    # Copy newly-built binaries to packaging directory
                    if platform == 'windows':
                        binary = binary + '.exe'
                    if platform == 'windows' or static or "static_" in arch:
                        # Where the binary should go in the package filesystem
                        to = os.path.join(build_root, binary)
                        # Where the binary currently is located
                        fr = os.path.join(current_location, binary)
                    else:
                        # Where the binary currently is located
                        fr = os.path.join(current_location, binary)
                        # Where the binary should go in the package filesystem
                        to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary)
                    shutil.copy(fr, to)
                for package_type in supported_packages[platform]:
                    # Package the directory structure for each package type for the platform
                    logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type))
                    name = pkg_name
                    # Reset version, iteration, and current location on each run
                    # since they may be modified below.
                    package_version = version
                    package_iteration = iteration
                    if "static_" in arch:
                        # Remove the "static_" from the displayed arch on the package
                        package_arch = arch.replace("static_", "")
                    else:
                        package_arch = arch
                    if not release and not nightly:
                        # For non-release builds, just use the commit hash as the version
                        package_version = "{}~{}".format(version,
                                                         get_current_commit(short=True))
                        package_iteration = "0"
                    package_build_root = build_root
                    current_location = build_output[platform][arch]
                    if package_type in ['zip', 'tar']:
                        # For tars and zips, start the packaging one folder above
                        # the build root (to include the package name)
                        package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))
                        if nightly:
                            if static or "static_" in arch:
                                name = '{}-static-nightly_{}_{}'.format(name,
                                                                        platform,
                                                                        package_arch)
                            else:
                                name = '{}-nightly_{}_{}'.format(name,
                                                                 platform,
                                                                 package_arch)
                        else:
                            if static or "static_" in arch:
                                name = '{}-{}-static_{}_{}'.format(name,
                                                                   package_version,
                                                                   platform,
                                                                   package_arch)
                            else:
                                name = '{}-{}_{}_{}'.format(name,
                                                            package_version,
                                                            platform,
                                                            package_arch)
                        current_location = os.path.join(os.getcwd(), current_location)
                        if package_type == 'tar':
                            tar_command = "cd {} && tar -cvzf {}.tar.gz --owner=root ./*".format(package_build_root, name)
                            run(tar_command, shell=True)
                            run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True)
                            outfile = os.path.join(current_location, name + ".tar.gz")
                            outfiles.append(outfile)
                        elif package_type == 'zip':
                            zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name)
                            run(zip_command, shell=True)
                            run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True)
                            outfile = os.path.join(current_location, name + ".zip")
                            outfiles.append(outfile)
                    elif package_type not in ['zip', 'tar'] and static or "static_" in arch:
                        # NOTE: because of operator precedence this reads as
                        # "(not-zip/tar and static) or static_-arch"; the first
                        # term is always reached with a non-zip/tar type here,
                        # so it effectively means: skip system packages for
                        # static builds.
                        logging.info("Skipping package type '{}' for static builds.".format(package_type))
                    else:
                        # System packages (deb/rpm/...) are produced by fpm.
                        fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
                            fpm_common_args,
                            name,
                            package_arch,
                            package_type,
                            package_version,
                            package_iteration,
                            package_build_root,
                            current_location)
                        if package_type == "rpm":
                            fpm_command += "--depends coreutils --depends shadow-utils --rpm-posttrans {}".format(POSTINST_SCRIPT)
                        out = run(fpm_command, shell=True)
                        # fpm reports the produced file as ':path=>"..."' on
                        # stdout; parse it out to learn the artifact name.
                        matches = re.search(':path=>"(.*)"', out)
                        outfile = None
                        if matches is not None:
                            outfile = matches.groups()[0]
                        if outfile is None:
                            logging.warn("Could not determine output from packaging output!")
                        else:
                            if nightly:
                                # Strip nightly version from package name
                                new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly")
                                os.rename(outfile, new_outfile)
                                outfile = new_outfile
                            else:
                                if package_type == 'rpm':
                                    # rpm's convert any dashes to underscores
                                    package_version = package_version.replace("-", "_")
                                new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), package_version)
                                os.rename(outfile, new_outfile)
                                outfile = new_outfile
                            outfiles.append(os.path.join(os.getcwd(), outfile))
        logging.debug("Produced package files: {}".format(outfiles))
        return outfiles
    finally:
        # Cleanup: always drop the temporary staging tree.
        shutil.rmtree(tmp_build_dir)
def main(args):
    """Top-level driver: validate arguments, build, then optionally
    package, sign, and upload the results.

    `args` is the argparse namespace produced by the __main__ block.
    Returns a process exit code: 0 on success, 1 on any failure.
    """
    if args.release and args.nightly:
        logging.error("Cannot be both a nightly and a release.")
        return 1
    if args.nightly:
        # Nightly builds use the next minor version plus a timestamp
        # suffix, and pin the package iteration to 0.
        args.version = increment_minor_version(args.version)
        args.version = "{}~n{}".format(args.version,
                                       datetime.utcnow().strftime("%Y%m%d%H%M"))
        args.iteration = 0
    # Pre-build checks
    check_environ()
    if not check_prereqs():
        return 1
    if args.build_tags is None:
        args.build_tags = []
    else:
        args.build_tags = args.build_tags.split(',')
    orig_commit = get_current_commit(short=True)
    orig_branch = get_current_branch()
    if args.platform not in supported_builds and args.platform != 'all':
        logging.error("Invalid build platform: {}".format(args.platform))
        return 1
    build_output = {}
    if args.branch != orig_branch and args.commit != orig_commit:
        logging.error("Can only specify one branch or commit to build from.")
        return 1
    elif args.branch != orig_branch:
        logging.info("Moving to git branch: {}".format(args.branch))
        run("git checkout {}".format(args.branch))
    elif args.commit != orig_commit:
        logging.info("Moving to git commit: {}".format(args.commit))
        run("git checkout {}".format(args.commit))
    if not args.no_get:
        if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted):
            return 1
    if args.test:
        if not run_tests(args.race, args.parallel, args.timeout, args.no_vet, args.junit_report):
            return 1
    platforms = []
    single_build = True
    if args.platform == 'all':
        platforms = supported_builds.keys()
        single_build = False
    else:
        platforms = [args.platform]
    for platform in platforms:
        build_output[platform] = {}
        archs = []
        if args.arch == "all":
            single_build = False
            archs = supported_builds.get(platform)
        else:
            archs = [args.arch]
        for arch in archs:
            od = args.outdir
            if not single_build:
                # Multi-target runs get per-platform/arch output folders.
                od = os.path.join(args.outdir, platform, arch)
            if not build(version=args.version,
                         platform=platform,
                         arch=arch,
                         nightly=args.nightly,
                         race=args.race,
                         clean=args.clean,
                         outdir=od,
                         tags=args.build_tags,
                         static=args.static):
                return 1
            build_output[platform][arch] = od
    # Build packages
    if args.package:
        if not check_path_for("fpm"):
            logging.error("FPM ruby gem required for packaging. Stopping.")
            return 1
        packages = package(build_output,
                           args.name,
                           args.version,
                           nightly=args.nightly,
                           iteration=args.iteration,
                           static=args.static,
                           release=args.release)
        if args.sign:
            logging.debug("Generating GPG signatures for packages: {}".format(packages))
            sigs = [] # retain signatures so they can be uploaded with packages
            for p in packages:
                if generate_sig_from_file(p):
                    sigs.append(p + '.asc')
                else:
                    logging.error("Creation of signature for package [{}] failed!".format(p))
                    return 1
            packages += sigs
        if args.upload:
            logging.debug("Files staged for upload: {}".format(packages))
            if args.nightly:
                # Nightlies always replace the previous nightly artifacts.
                args.upload_overwrite = True
            if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite):
                return 1
        logging.info("Packages created:")
        for p in packages:
            logging.info("{} (MD5={})".format(p.split('/')[-1:][0],
                                              generate_md5_from_file(p)))
    if orig_branch != get_current_branch():
        logging.info("Moving back to original git branch: {}".format(orig_branch))
        run("git checkout {}".format(orig_branch))
    return 0
if __name__ == '__main__':
    # Logging must be configured before any helper that logs is called;
    # '--debug' is sniffed from argv directly because argparse has not
    # run yet at this point.
    LOG_LEVEL = logging.INFO
    if '--debug' in sys.argv[1:]:
        LOG_LEVEL = logging.DEBUG
    log_format = '[%(levelname)s] %(funcName)s: %(message)s'
    logging.basicConfig(level=LOG_LEVEL,
                        format=log_format)
    parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.')
    parser.add_argument('--verbose','-v','--debug',
                        action='store_true',
                        help='Use debug output')
    parser.add_argument('--outdir', '-o',
                        metavar='<output directory>',
                        default='./build/',
                        type=os.path.abspath,
                        help='Output directory')
    parser.add_argument('--name', '-n',
                        metavar='<name>',
                        default=PACKAGE_NAME,
                        type=str,
                        help='Name to use for package name (when package is specified)')
    parser.add_argument('--arch',
                        metavar='<amd64|i386|armhf|arm64|armel|all>',
                        type=str,
                        default=get_system_arch(),
                        help='Target architecture for build output')
    parser.add_argument('--platform',
                        metavar='<linux|darwin|windows|all>',
                        type=str,
                        default=get_system_platform(),
                        help='Target platform for build output')
    parser.add_argument('--branch',
                        metavar='<branch>',
                        type=str,
                        default=get_current_branch(),
                        help='Build from a specific branch')
    parser.add_argument('--commit',
                        metavar='<commit>',
                        type=str,
                        default=get_current_commit(short=True),
                        help='Build from a specific commit')
    parser.add_argument('--version',
                        metavar='<version>',
                        type=str,
                        default=get_current_version(),
                        help='Version information to apply to build output (ex: 0.12.0)')
    parser.add_argument('--iteration',
                        metavar='<package iteration>',
                        type=str,
                        default="1",
                        help='Package iteration to apply to build output (defaults to 1)')
    parser.add_argument('--stats',
                        action='store_true',
                        help='Emit build metrics (requires InfluxDB Python client)')
    parser.add_argument('--stats-server',
                        metavar='<hostname:port>',
                        type=str,
                        help='Send build stats to InfluxDB using provided hostname and port')
    parser.add_argument('--stats-db',
                        metavar='<database name>',
                        type=str,
                        help='Send build stats to InfluxDB using provided database name')
    parser.add_argument('--nightly',
                        action='store_true',
                        help='Mark build output as nightly build (will increment the minor version)')
    parser.add_argument('--update',
                        action='store_true',
                        help='Update build dependencies prior to building')
    parser.add_argument('--package',
                        action='store_true',
                        help='Package binary output')
    parser.add_argument('--release',
                        action='store_true',
                        help='Mark build output as release')
    parser.add_argument('--clean',
                        action='store_true',
                        help='Clean output directory before building')
    parser.add_argument('--no-get',
                        action='store_true',
                        help='Do not retrieve pinned dependencies when building')
    parser.add_argument('--no-uncommitted',
                        action='store_true',
                        help='Fail if uncommitted changes exist in the working directory')
    parser.add_argument('--upload',
                        action='store_true',
                        help='Upload output packages to AWS S3')
    parser.add_argument('--upload-overwrite','-w',
                        action='store_true',
                        help='Overwrite packages that already exist in the S3 bucket when uploading')
    parser.add_argument('--bucket',
                        metavar='<S3 bucket name>',
                        type=str,
                        default=DEFAULT_BUCKET,
                        help='Destination bucket for uploads')
    parser.add_argument('--build-tags',
                        metavar='<tags>',
                        help='Optional build tags to use for compilation')
    parser.add_argument('--static',
                        action='store_true',
                        help='Create statically-compiled binary output')
    parser.add_argument('--sign',
                        action='store_true',
                        help='Create GPG detached signatures for packages (when package is specified)')
    parser.add_argument('--test',
                        action='store_true',
                        help='Run tests (does not produce build output)')
    parser.add_argument('--junit-report',
                        action='store_true',
                        help='Output tests in the JUnit XML format')
    parser.add_argument('--no-vet',
                        action='store_true',
                        help='Do not run "go vet" when running tests')
    parser.add_argument('--race',
                        action='store_true',
                        help='Enable race flag for build output')
    parser.add_argument('--parallel',
                        metavar='<num threads>',
                        type=int,
                        help='Number of tests to run simultaneously')
    parser.add_argument('--timeout',
                        metavar='<timeout>',
                        type=str,
                        help='Timeout for tests before failing')
    args = parser.parse_args()
    print_banner()
    sys.exit(main(args))
| {
"content_hash": "aae535e077869b989fef9e4d81d587d6",
"timestamp": "",
"source": "github",
"line_count": 989,
"max_line_length": 135,
"avg_line_length": 42.05460060667341,
"alnum_prop": 0.520196191575303,
"repo_name": "rjtsdl/acs-engine",
"id": "3f41b0e120d295870c41338607fea0ff6ab5d75c",
"size": "41617",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "vendor/github.com/influxdata/influxdb/build.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "1589471"
},
{
"name": "Groovy",
"bytes": "24761"
},
{
"name": "Makefile",
"bytes": "6714"
},
{
"name": "Perl",
"bytes": "68823"
},
{
"name": "Perl 6",
"bytes": "360324"
},
{
"name": "PowerShell",
"bytes": "61189"
},
{
"name": "Python",
"bytes": "6764"
},
{
"name": "Shell",
"bytes": "163900"
}
],
"symlink_target": ""
} |
"""Cholesky decomposition functions."""
from numpy import asarray_chkfinite
# Local imports
from misc import LinAlgError, _datacopied
from lapack import get_lapack_funcs
__all__ = ['cholesky', 'cho_factor', 'cho_solve', 'cholesky_banded',
'cho_solve_banded']
def _cholesky(a, lower=False, overwrite_a=False, clean=True):
    """Shared implementation behind cholesky() and cho_factor().

    Returns the tuple ``(c, lower)`` where `c` holds the factor computed
    by LAPACK ``potrf``.  With ``clean=True`` the unused triangle of `c`
    is zeroed; otherwise it is left untouched.
    """
    arr = asarray_chkfinite(a)
    if arr.ndim != 2 or arr.shape[0] != arr.shape[1]:
        raise ValueError('expected square matrix')
    # Working in place is also safe when the conversion above already
    # produced a fresh copy of the caller's data.
    in_place = overwrite_a or _datacopied(arr, a)
    potrf, = get_lapack_funcs(('potrf',), (arr,))
    c, info = potrf(arr, lower=lower, overwrite_a=in_place, clean=clean)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal potrf'
                         % -info)
    if info > 0:
        raise LinAlgError("%d-th leading minor not positive definite" % info)
    return c, lower
def cholesky(a, lower=False, overwrite_a=False):
    """Compute the Cholesky decomposition of a matrix.

    Computes :lm:`A = L L^*` or :lm:`A = U^* U` for a Hermitian
    positive-definite matrix :lm:`A`, returning a proper triangular
    matrix (the unused triangle is zeroed).

    Parameters
    ----------
    a : array, shape (M, M)
        Matrix to be decomposed
    lower : boolean
        Compute the lower-triangular factorization if True, otherwise
        the upper-triangular one (the default).
    overwrite_a : boolean
        Whether to overwrite data in a (may improve performance)

    Returns
    -------
    c : array, shape (M, M)
        Upper- or lower-triangular Cholesky factor of A

    Raises LinAlgError if decomposition fails

    Examples
    --------
    >>> from scipy import array, linalg, dot
    >>> a = array([[1,-2j],[2j,5]])
    >>> L = linalg.cholesky(a, lower=True)
    >>> L
    array([[ 1.+0.j,  0.+0.j],
           [ 0.+2.j,  1.+0.j]])
    >>> dot(L, L.T.conj())
    array([[ 1.+0.j,  0.-2.j],
           [ 0.+2.j,  5.+0.j]])
    """
    # clean=True zeroes the entries outside the requested triangle.
    factor, _ = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=True)
    return factor
def cho_factor(a, lower=False, overwrite_a=False):
    """Compute the Cholesky decomposition of a matrix, for use with cho_solve.

    Returns a matrix whose upper or lower triangle holds the Cholesky
    factor ``A = L L*`` or ``A = U* U`` of a Hermitian positive-definite
    matrix `a`.  The return value can be passed directly as the first
    argument of cho_solve.

    .. warning::
        The entries of the result outside the relevant triangle contain
        leftover data from the factorization.  Use the `cholesky`
        function instead if you need those entries zeroed.

    Parameters
    ----------
    a : array, shape (M, M)
        Matrix to be decomposed
    lower : boolean
        Compute the lower-triangular factorization if True, otherwise
        the upper-triangular one (the default).
    overwrite_a : boolean
        Whether to overwrite data in a (may improve performance)

    Returns
    -------
    c : array, shape (M, M)
        Matrix whose upper or lower triangle contains the Cholesky
        factor of `a`; the other triangle holds unspecified data.
    lower : boolean
        Flag indicating whether the factor is in the lower or upper triangle

    Raises
    ------
    LinAlgError
        Raised if decomposition fails.

    See also
    --------
    cho_solve : Solve a linear set equations using the Cholesky factorization
                of a matrix.
    """
    # clean=False skips zeroing the unused triangle (faster; cho_solve
    # never reads it).
    return _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=False)
def cho_solve(c_and_lower, b, overwrite_b=False):
    """Solve the linear equations A x = b, given the Cholesky factorization of A.

    Parameters
    ----------
    c_and_lower : tuple, (array, bool)
        Cholesky factorization of a, as given by cho_factor
    b : array
        Right-hand side
    overwrite_b : bool
        If True, the function may overwrite the values in `b`.

    Returns
    -------
    x : array
        The solution to the system A x = b

    See also
    --------
    cho_factor : Cholesky factorization of a matrix
    """
    # Tuple parameters in the function signature were removed by
    # PEP 3113 (a SyntaxError on Python 3); unpack explicitly instead.
    # Callers are unaffected: tuple parameters could never be passed by
    # keyword, so positional calls behave identically.
    (c, lower) = c_and_lower
    b1 = asarray_chkfinite(b)
    c = asarray_chkfinite(c)
    if c.ndim != 2 or c.shape[0] != c.shape[1]:
        raise ValueError("The factored matrix c is not square.")
    if c.shape[1] != b1.shape[0]:
        raise ValueError("incompatible dimensions.")
    overwrite_b = overwrite_b or _datacopied(b1, b)
    potrs, = get_lapack_funcs(('potrs',), (c, b1))
    x, info = potrs(c, b1, lower=lower, overwrite_b=overwrite_b)
    if info != 0:
        raise ValueError('illegal value in %d-th argument of internal potrs'
                         % -info)
    return x
def cholesky_banded(ab, overwrite_ab=False, lower=False):
    """Cholesky decompose a banded Hermitian positive-definite matrix.

    The matrix ``a`` is stored in `ab` in LAPACK banded form, either
    upper- or lower-diagonal ordered:

        ab[u + i - j, j] == a[i,j]        (if upper form; i <= j)
        ab[ i - j, j] == a[i,j]           (if lower form; i >= j)

    Example of `ab` (shape of a is (6,6), u=2)::

        upper form:
        *   *   a02 a13 a24 a35
        *   a01 a12 a23 a34 a45
        a00 a11 a22 a33 a44 a55

        lower form:
        a00 a11 a22 a33 a44 a55
        a10 a21 a32 a43 a54 *
        a20 a31 a42 a53 *   *

    Parameters
    ----------
    ab : array, shape (u + 1, M)
        Banded matrix
    overwrite_ab : boolean
        Discard data in ab (may enhance performance)
    lower : boolean
        Is the matrix in the lower form. (Default is upper form)

    Returns
    -------
    c : array, shape (u+1, M)
        Cholesky factorization of a, in the same banded format as ab
    """
    band = asarray_chkfinite(ab)
    pbtrf, = get_lapack_funcs(('pbtrf',), (band,))
    factor, info = pbtrf(band, lower=lower, overwrite_ab=overwrite_ab)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal pbtrf'
                         % -info)
    if info > 0:
        raise LinAlgError("%d-th leading minor not positive definite" % info)
    return factor
def cho_solve_banded(cb_and_lower, b, overwrite_b=False):
    """Solve the linear equations A x = b, given the Cholesky factorization of A.

    Parameters
    ----------
    cb_and_lower : tuple, (array, bool)
        `cb` is the Cholesky factorization of A, as given by cholesky_banded.
        `lower` must be the same value that was given to cholesky_banded.
    b : array
        Right-hand side
    overwrite_b : bool
        If True, the function will overwrite the values in `b`.

    Returns
    -------
    x : array
        The solution to the system A x = b

    See also
    --------
    cholesky_banded : Cholesky factorization of a banded matrix

    Notes
    -----
    .. versionadded:: 0.8.0
    """
    # Tuple parameters in the signature were removed by PEP 3113
    # (a SyntaxError on Python 3); unpack explicitly instead.  Positional
    # callers are unaffected.
    (cb, lower) = cb_and_lower
    cb = asarray_chkfinite(cb)
    b = asarray_chkfinite(b)
    # Validate shapes.
    if cb.shape[-1] != b.shape[0]:
        raise ValueError("shapes of cb and b are not compatible.")
    pbtrs, = get_lapack_funcs(('pbtrs',), (cb, b))
    x, info = pbtrs(cb, b, lower=lower, overwrite_b=overwrite_b)
    if info > 0:
        raise LinAlgError("%d-th leading minor not positive definite" % info)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal pbtrs'
                         % -info)
    return x
| {
"content_hash": "fa9da25b51b2c44dc8a8bd37125b4b57",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 81,
"avg_line_length": 30.59259259259259,
"alnum_prop": 0.5839386602098466,
"repo_name": "lesserwhirls/scipy-cwt",
"id": "cb14cb466f267e0328026bec570e51c023fd21d0",
"size": "7434",
"binary": false,
"copies": "59",
"ref": "refs/heads/cwt",
"path": "scipy/linalg/decomp_cholesky.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8532454"
},
{
"name": "C++",
"bytes": "6602032"
},
{
"name": "FORTRAN",
"bytes": "5895476"
},
{
"name": "Objective-C",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "4776663"
},
{
"name": "Shell",
"bytes": "1742"
}
],
"symlink_target": ""
} |
import datetime
from django import forms
from django.test import TestCase
from .models import Article
class FormsTests(TestCase):
    # ForeignObjects produce no form fields; the user is expected to
    # manage the foreign-object relation by hand.
    class ArticleForm(forms.ModelForm):
        class Meta:
            model = Article
            fields = '__all__'

    def test_foreign_object_form(self):
        """Non-concrete fields must not show up as form fields."""
        unbound = FormsTests.ArticleForm()
        rendered = unbound.as_table()
        self.assertIn('id_pub_date', rendered)
        self.assertNotIn('active_translation', rendered)

        # A valid bound form saves and round-trips the concrete field.
        create_form = FormsTests.ArticleForm(data={'pub_date': str(datetime.date.today())})
        self.assertTrue(create_form.is_valid())
        article = create_form.save()
        self.assertEqual(article.pub_date, datetime.date.today())

        # Editing via a form bound to an instance updates, not duplicates.
        edit_form = FormsTests.ArticleForm(instance=article, data={'pub_date': '2013-01-01'})
        updated = edit_form.save()
        self.assertEqual(article.pk, updated.pk)
        self.assertEqual(updated.pub_date, datetime.date(2013, 1, 1))
| {
"content_hash": "6e67d968ddcac8a875a578181a0b716d",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 89,
"avg_line_length": 38.86206896551724,
"alnum_prop": 0.639751552795031,
"repo_name": "yephper/django",
"id": "edcf633ee597c239ccbe262c4e10d6a880f55910",
"size": "1127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/foreign_object/test_forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
"""Tests for corrupted MNIST."""
from tensorflow_datasets import testing
from tensorflow_datasets.image_classification import mnist_corrupted
class MNISTCorruptedTest(testing.DatasetBuilderTestCase):
  """DatasetBuilderTestCase exercising the corrupted-MNIST builder."""
  # Restrict the run to a single corruption config.
  BUILDER_CONFIG_NAMES_TO_TEST = ["dotted_line"]
  DATASET_CLASS = mnist_corrupted.MNISTCorrupted
  # Expected example counts per split — presumably matched against the
  # checked-in dummy data by DatasetBuilderTestCase; verify against the
  # fake-example files if counts change.
  SPLITS = {
      "train": 2,
      "test": 2,
  }
# Standard TFDS test entry point.
if __name__ == "__main__":
  testing.test_main()
| {
"content_hash": "f53a7ad0d087279954591365848e92a1",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 68,
"avg_line_length": 21.36842105263158,
"alnum_prop": 0.7044334975369458,
"repo_name": "tensorflow/datasets",
"id": "512dacc31f4eb6a6d14e520e9a96e2c09a6c92e9",
"size": "1018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_datasets/image_classification/mnist_corrupted_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "728"
},
{
"name": "JavaScript",
"bytes": "13369"
},
{
"name": "NewLisp",
"bytes": "13940"
},
{
"name": "Perl",
"bytes": "520"
},
{
"name": "Python",
"bytes": "5398856"
},
{
"name": "Roff",
"bytes": "22095"
},
{
"name": "Ruby",
"bytes": "25669"
},
{
"name": "Shell",
"bytes": "3895"
},
{
"name": "Smalltalk",
"bytes": "20604"
},
{
"name": "TeX",
"bytes": "759"
}
],
"symlink_target": ""
} |
import inspect
import os
import sys
import traceback
from time import sleep
# Make the extension's main directory and its bundled 'transitions'
# package importable, relative to this script's own location.
scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
maindir = os.path.abspath(os.path.join(scriptdir, '../../'))
sys.path.append(maindir)
transitionsdir = os.path.abspath(os.path.join(scriptdir, '../../transitions'))
sys.path.append(transitionsdir)
from oscrypto import *
from encryptstates import *
from Common import *
from CommandExecutor import *
from DiskUtil import *
from transitions import *
class RHEL72LVMEncryptionStateMachine(OSEncryptionStateMachine):
    """State machine driving OS-volume encryption on RHEL 7.2 with LVM.

    The happy path runs: prereq -> selinux -> stripdown -> unmount_oldroot
    -> encrypt_block_device -> patch_boot_system -> completed, with a
    retrying self-transition on unmount_oldroot.  Triggers (enter_*,
    retry_unmount_oldroot, skip_encryption, stop_machine) are generated
    by the `transitions` Machine built in __init__.
    """
    # Every intermediate state invokes on_enter_state when entered.
    states = [
        State(name='uninitialized'),
        State(name='prereq', on_enter='on_enter_state'),
        State(name='selinux', on_enter='on_enter_state'),
        State(name='stripdown', on_enter='on_enter_state'),
        State(name='unmount_oldroot', on_enter='on_enter_state'),
        State(name='encrypt_block_device', on_enter='on_enter_state'),
        State(name='patch_boot_system', on_enter='on_enter_state'),
        State(name='completed'),
    ]
    # Each forward transition runs on_enter_state ('before') and is
    # guarded by should_exit_previous_state; retry_unmount_oldroot is an
    # unguarded self-transition used by the retry loop below.
    transitions = [
        {
            'trigger': 'skip_encryption',
            'source': 'uninitialized',
            'dest': 'completed'
        },
        {
            'trigger': 'enter_prereq',
            'source': 'uninitialized',
            'dest': 'prereq'
        },
        {
            'trigger': 'enter_selinux',
            'source': 'prereq',
            'dest': 'selinux',
            'before': 'on_enter_state',
            'conditions': 'should_exit_previous_state'
        },
        {
            'trigger': 'enter_stripdown',
            'source': 'selinux',
            'dest': 'stripdown',
            'before': 'on_enter_state',
            'conditions': 'should_exit_previous_state'
        },
        {
            'trigger': 'enter_unmount_oldroot',
            'source': 'stripdown',
            'dest': 'unmount_oldroot',
            'before': 'on_enter_state',
            'conditions': 'should_exit_previous_state'
        },
        {
            'trigger': 'retry_unmount_oldroot',
            'source': 'unmount_oldroot',
            'dest': 'unmount_oldroot',
            'before': 'on_enter_state'
        },
        {
            'trigger': 'enter_encrypt_block_device',
            'source': 'unmount_oldroot',
            'dest': 'encrypt_block_device',
            'before': 'on_enter_state',
            'conditions': 'should_exit_previous_state'
        },
        {
            'trigger': 'enter_patch_boot_system',
            'source': 'encrypt_block_device',
            'dest': 'patch_boot_system',
            'before': 'on_enter_state',
            'conditions': 'should_exit_previous_state'
        },
        {
            'trigger': 'stop_machine',
            'source': 'patch_boot_system',
            'dest': 'completed',
            'conditions': 'should_exit_previous_state'
        },
    ]

    def on_enter_state(self):
        """State-entry hook; all behavior lives in the base class."""
        super(RHEL72LVMEncryptionStateMachine, self).on_enter_state()

    def should_exit_previous_state(self):
        """Transition guard; delegates to the base class."""
        # when this is called, self.state is still the "source" state in the transition
        return super(RHEL72LVMEncryptionStateMachine, self).should_exit_previous_state()

    def __init__(self, hutil, distro_patcher, logger, encryption_environment):
        """Build the per-state handler objects and the transitions Machine."""
        super(RHEL72LVMEncryptionStateMachine, self).__init__(hutil, distro_patcher, logger, encryption_environment)
        self.state_objs = {
            'prereq': PrereqState(self.context),
            'selinux': SelinuxState(self.context),
            'stripdown': StripdownState(self.context),
            'unmount_oldroot': UnmountOldrootState(self.context),
            'encrypt_block_device': EncryptBlockDeviceState(self.context),
            'patch_boot_system': PatchBootSystemState(self.context),
        }
        self.state_machine = Machine(model=self,
                                     states=RHEL72LVMEncryptionStateMachine.states,
                                     transitions=RHEL72LVMEncryptionStateMachine.transitions,
                                     initial='uninitialized')

    def start_encryption(self):
        """Drive the machine from 'uninitialized' through 'completed'.

        Skips all work when the OS volume already looks encrypted (an
        'osencrypt' device appears in pvdisplay output AND the
        PatchBootSystemState marker file exists).  The /oldroot unmount
        step is retried, failing permanently after 10 attempts.
        """
        proc_comm = ProcessCommunicator()
        self.command_executor.Execute(command_to_execute="pvdisplay",
                                      raise_exception_on_failure=True,
                                      communicator=proc_comm)
        patch_boot_system_state_marker = os.path.join(self.encryption_environment.os_encryption_markers_path, 'PatchBootSystemState')
        if '/dev/mapper/osencrypt' in proc_comm.stdout and os.path.exists(patch_boot_system_state_marker):
            self.logger.log("OS volume is already encrypted")
            self.skip_encryption()
            self.log_machine_state()
            return
        self.log_machine_state()
        self.enter_prereq()
        self.log_machine_state()
        self.enter_selinux()
        self.log_machine_state()
        self.enter_stripdown()
        self.log_machine_state()
        oldroot_unmounted_successfully = False
        attempt = 1
        # `attempt` is bumped in the finally block, so it advances on both
        # success and failure of an iteration.
        while not oldroot_unmounted_successfully:
            self.logger.log("Attempt #{0} to unmount /oldroot".format(attempt))
            try:
                if attempt == 1:
                    self.enter_unmount_oldroot()
                elif attempt > 10:
                    # Exhausted retries; this exception is caught below,
                    # reported via do_status_report, then re-raised.
                    raise Exception("Could not unmount /oldroot in 10 attempts")
                else:
                    self.retry_unmount_oldroot()
                self.log_machine_state()
            except Exception as e:
                message = "Attempt #{0} to unmount /oldroot failed with error: {1}, stack trace: {2}".format(attempt,
                                                                                                             e,
                                                                                                             traceback.format_exc())
                self.logger.log(msg=message)
                self.hutil.do_status_report(operation='EnableEncryptionOSVolume',
                                            status=CommonVariables.extension_error_status,
                                            status_code=str(CommonVariables.unmount_oldroot_error),
                                            message=message)
                sleep(10)
                if attempt > 10:
                    raise Exception(message)
            else:
                oldroot_unmounted_successfully = True
            finally:
                attempt += 1
        self.enter_encrypt_block_device()
        self.log_machine_state()
        self.enter_patch_boot_system()
        self.log_machine_state()
        self.stop_machine()
        self.log_machine_state()
| {
"content_hash": "8e89097f5a8340cf63173695b1f61dd9",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 133,
"avg_line_length": 37.26923076923077,
"alnum_prop": 0.5366357069143447,
"repo_name": "soumyanishan/azure-linux-extensions",
"id": "3fb8a40c809d984d1ac1ccaf4d6c789ebcf13ea5",
"size": "7444",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "VMEncryption/main/oscrypto/rhel_72_lvm/RHEL72LVMEncryptionStateMachine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "75094"
},
{
"name": "C++",
"bytes": "1038084"
},
{
"name": "CMake",
"bytes": "11642"
},
{
"name": "JavaScript",
"bytes": "22883"
},
{
"name": "Makefile",
"bytes": "7385"
},
{
"name": "PowerShell",
"bytes": "24124"
},
{
"name": "Python",
"bytes": "4380432"
},
{
"name": "Roff",
"bytes": "3827"
},
{
"name": "Shell",
"bytes": "30126"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from rest_framework import routers
# DRF router; NOTE(review): no viewsets are registered on it in this module,
# so `router.urls` only exposes the DRF API root view.
router = routers.DefaultRouter()
# NOTE(review): imported but never referenced below — presumably imported for
# side effects (registering routes); confirm they are actually wired up.
import methods.routers
import proteomics.routers
import phosphoproteomics.routers
# The same router is mounted twice: once unversioned and once under /v1/
# with the "api_v1" namespace.
urlpatterns = [
    url(r"^", include(router.urls)),
    url(r"^v1/", include(router.urls, namespace="api_v1")),
    url(r"^admin/", include(admin.site.urls)),
    url(r"^api-auth/", include("rest_framework.urls", namespace="rest_framework")),
]
| {
"content_hash": "3b0c816de27d81e24c0cca89096f37eb",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 83,
"avg_line_length": 28.529411764705884,
"alnum_prop": 0.7298969072164948,
"repo_name": "naderm/django_rest_omics",
"id": "0b7e7952581b91b856fe10d1ca1a8525f78d81a2",
"size": "486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_rest_omics/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "10911"
}
],
"symlink_target": ""
} |
"""Numpy-related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.utils.to_categorical')
def to_categorical(y, num_classes=None):
  """Converts a class vector (integers) to a binary one-hot matrix.

  E.g. for use with categorical_crossentropy.

  Arguments:
      y: class vector to be converted into a matrix
          (integers from 0 to num_classes).
      num_classes: total number of classes; inferred as max(y) + 1 when
          omitted (or falsy).

  Returns:
      A binary matrix representation of the input, with shape
      `y.shape + (num_classes,)` (a trailing length-1 axis of the input
      is first squeezed away).
  """
  y = np.array(y, dtype='int')
  input_shape = y.shape
  # A trailing axis of size 1 (e.g. column vectors) is collapsed so the
  # one-hot axis replaces it rather than nesting under it.
  if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
    input_shape = tuple(input_shape[:-1])
  flat = y.ravel()
  if not num_classes:
    num_classes = np.max(flat) + 1
  n = flat.shape[0]
  one_hot = np.zeros((n, num_classes))
  one_hot[np.arange(n), flat] = 1
  return np.reshape(one_hot, input_shape + (num_classes,))
@tf_export('keras.utils.normalize')
def normalize(x, axis=-1, order=2):
  """Normalizes a Numpy array along an axis.

  Arguments:
      x: Numpy array to normalize.
      axis: axis along which to normalize.
      order: Normalization order (e.g. 2 for L2 norm).

  Returns:
      A normalized copy of the array; all-zero slices are left as zeros
      (their norm is replaced by 1 to avoid division by zero).
  """
  norms = np.atleast_1d(np.linalg.norm(x, ord=order, axis=axis))
  norms[norms == 0] = 1
  return x / np.expand_dims(norms, axis)
| {
"content_hash": "26281a943896a16c46a82a680940b6ad",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 67,
"avg_line_length": 28.32075471698113,
"alnum_prop": 0.6722185209860093,
"repo_name": "Xeralux/tensorflow",
"id": "a611be08aaed824ebb278b4b28ef52ea1872563b",
"size": "2190",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/_impl/keras/utils/np_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "340972"
},
{
"name": "C++",
"bytes": "39479562"
},
{
"name": "CMake",
"bytes": "194702"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33675501"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425916"
}
],
"symlink_target": ""
} |
import threading
import Queue
# A simple in memory message bus
class Subscriber(object):
    """A named consumer of bus messages; `notify` is invoked per message."""

    def __init__(self, name):
        self._subscriber_name = name

    @property
    def name(self):
        """Read-only subscriber name."""
        return self._subscriber_name

    def notify(self, message):
        """Print a note that this subscriber received *message*."""
        print("{} got notified for {}".format(self._subscriber_name, message))
class Bus(threading.Thread):
    """A minimal in-memory publish/subscribe message bus.

    Runs in its own thread: `publish()` enqueues a message, and the bus
    thread delivers it to every registered subscriber's `notify()`.
    Call `stop()` to make the thread exit after its current queue wait
    (up to the 3-second poll timeout).
    """

    def __init__(self, name):
        threading.Thread.__init__(self)
        self._name = name
        self._subscribers = list()
        self._queue = Queue.Queue()
        # BUGFIX: renamed from `_stop`, which shadows an internal method of
        # threading.Thread on some Python versions and can break join().
        self._stop_requested = False

    @property
    def name(self):
        """Read-only bus name."""
        return self._name

    def publish(self, message):
        """Enqueue *message* for asynchronous delivery to subscribers."""
        self._queue.put(message)

    def subscribe(self, subscriber):
        """Register *subscriber* (an object exposing notify()) at most once."""
        if subscriber not in self._subscribers:
            self._subscribers.append(subscriber)

    # Backward-compatible alias: the original API name had a typo.
    subcribe = subscribe

    def unsubscribe(self, subscriber):
        """Remove *subscriber* if currently registered."""
        if subscriber in self._subscribers:
            self._subscribers.remove(subscriber)

    def run(self):
        """Deliver queued messages until stop() is requested.

        BUGFIX: the original wrapped the queue read AND delivery in a bare
        `except: pass`, which silently swallowed subscriber errors (and
        KeyboardInterrupt).  Only the queue timeout is expected here;
        subscriber exceptions now propagate.
        """
        print("{} Start".format(self._name))
        while not self._stop_requested:
            try:
                message = self._queue.get(True, 3)
            except Queue.Empty:
                # Timed out waiting; loop again so stop() is honored.
                continue
            for subscriber in self._subscribers:
                subscriber.notify(message)
        print("{} Stop".format(self._name))

    def stop(self):
        """Ask the bus thread to exit after its current wait."""
        print("{} set to complete".format(self._name))
        self._stop_requested = True
| {
"content_hash": "bf92f896389ade3db9b2203401ea4703",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 67,
"avg_line_length": 25.446428571428573,
"alnum_prop": 0.5684210526315789,
"repo_name": "gangtao/pyflow",
"id": "430609c89c73553226317344d3786f3b601f7fae",
"size": "1425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/message/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2997"
},
{
"name": "HTML",
"bytes": "4137"
},
{
"name": "JavaScript",
"bytes": "63878"
},
{
"name": "Makefile",
"bytes": "406"
},
{
"name": "Python",
"bytes": "53385"
},
{
"name": "Shell",
"bytes": "98"
}
],
"symlink_target": ""
} |
"""Device tracker platform that adds support for OwnTracks over MQTT."""
from homeassistant.components.device_tracker import (
ATTR_BATTERY,
ATTR_GPS,
ATTR_GPS_ACCURACY,
ATTR_LOCATION_NAME,
)
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from homeassistant.components.device_tracker.const import SOURCE_TYPE_GPS
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_DEVICE_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
)
from homeassistant.core import callback
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
ATTR_ALTITUDE,
ATTR_COURSE,
ATTR_DEVICE_NAME,
ATTR_SPEED,
ATTR_VERTICAL_ACCURACY,
SIGNAL_LOCATION_UPDATE,
)
from .helpers import device_info
ATTR_KEYS = (ATTR_ALTITUDE, ATTR_COURSE, ATTR_SPEED, ATTR_VERTICAL_ACCURACY)
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up the mobile_app device tracker from a config entry."""
    async_add_entities([MobileAppEntity(entry)])
    return True
class MobileAppEntity(TrackerEntity, RestoreEntity):
    """A device tracker fed by location updates dispatched from the mobile app."""

    def __init__(self, entry, data=None):
        """Initialize from a config entry and optional initial location data."""
        self._entry = entry
        self._data = data
        self._dispatch_unsub = None

    @property
    def unique_id(self):
        """Return the unique ID."""
        return self._entry.data[ATTR_DEVICE_ID]

    @property
    def battery_level(self):
        """Return the battery level of the device."""
        return self._data.get(ATTR_BATTERY)

    @property
    def extra_state_attributes(self):
        """Return device specific attributes (only the non-None optional keys)."""
        return {
            key: value
            for key in ATTR_KEYS
            if (value := self._data.get(key)) is not None
        }

    @property
    def location_accuracy(self):
        """Return the gps accuracy of the device."""
        return self._data.get(ATTR_GPS_ACCURACY)

    @property
    def latitude(self):
        """Return latitude value of the device."""
        gps = self._data.get(ATTR_GPS)
        return None if gps is None else gps[0]

    @property
    def longitude(self):
        """Return longitude value of the device."""
        gps = self._data.get(ATTR_GPS)
        return None if gps is None else gps[1]

    @property
    def location_name(self):
        """Return a location name for the current location of the device."""
        return self._data.get(ATTR_LOCATION_NAME) or None

    @property
    def name(self):
        """Return the name of the device."""
        return self._entry.data[ATTR_DEVICE_NAME]

    @property
    def source_type(self):
        """Return the source type, eg gps or router, of the device."""
        return SOURCE_TYPE_GPS

    @property
    def device_info(self):
        """Return the device info."""
        return device_info(self._entry.data)

    async def async_added_to_hass(self):
        """Subscribe to location updates and restore the last known state."""
        await super().async_added_to_hass()
        self._dispatch_unsub = self.hass.helpers.dispatcher.async_dispatcher_connect(
            SIGNAL_LOCATION_UPDATE.format(self._entry.entry_id), self.update_data
        )

        # When created with fresh data there is nothing to restore.
        if self._data is not None:
            return

        state = await self.async_get_last_state()

        if state is None:
            self._data = {}
            return

        attr = state.attributes
        restored = {
            ATTR_GPS: (attr.get(ATTR_LATITUDE), attr.get(ATTR_LONGITUDE)),
            ATTR_GPS_ACCURACY: attr.get(ATTR_GPS_ACCURACY),
            ATTR_BATTERY: attr.get(ATTR_BATTERY_LEVEL),
        }
        restored.update({key: attr[key] for key in attr if key in ATTR_KEYS})
        self._data = restored

    async def async_will_remove_from_hass(self):
        """Disconnect the dispatcher before the entity is removed."""
        await super().async_will_remove_from_hass()

        if self._dispatch_unsub:
            self._dispatch_unsub()
            self._dispatch_unsub = None

    @callback
    def update_data(self, data):
        """Mark the device as seen."""
        self._data = data
        self.async_write_ha_state()
| {
"content_hash": "bc2ca9b258d37ea4a13822ff6ac7ff2a",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 85,
"avg_line_length": 28.448051948051948,
"alnum_prop": 0.6165259073270942,
"repo_name": "Danielhiversen/home-assistant",
"id": "1deebf6b5316566bdb248245208d4a22fe41096c",
"size": "4381",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/mobile_app/device_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "36870185"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
from __future__ import print_function,division
import numpy as np
import time
def calc_pi(n_samples=100000000):
    """Estimate pi by Monte Carlo sampling of the unit square.

    Draws `n_samples` uniform points in [0, 1)^2 and returns 4 times the
    fraction that lands inside the unit quarter-circle.

    Args:
        n_samples: number of random points to draw.  The default preserves
            the previously hard-coded sample count.

    Returns:
        A float estimate of pi.
    """
    x = np.random.rand(n_samples)
    y = np.random.rand(n_samples)
    # x**2 + y**2 < 1 is equivalent to sqrt(x**2 + y**2) < 1 for
    # non-negative radii; skipping the sqrt saves a full-array pass.
    count = np.sum(x * x + y * y < 1)
    return 4 * count / n_samples
if __name__ == '__main__':
    # BUGFIX: time.clock() was deprecated in Python 3.3 and removed in 3.8;
    # perf_counter() is the portable high-resolution replacement.
    start_t = time.perf_counter()
    pi = calc_pi()
    end_t = time.perf_counter()
    print('pi =', pi)
    print('Time elapsed =', end_t - start_t)
| {
"content_hash": "e92d103bb3e0f924108b2ecc067f4147",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 57,
"avg_line_length": 23.85,
"alnum_prop": 0.6037735849056604,
"repo_name": "samjcus/pragma_pragma_pragma",
"id": "65477c66d2b0fd00b882021a86437dba817df91c",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monte_pi/bin/monte_pi_serial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1359"
},
{
"name": "C",
"bytes": "13758"
},
{
"name": "C++",
"bytes": "553"
},
{
"name": "Fortran",
"bytes": "8112"
},
{
"name": "Makefile",
"bytes": "956"
},
{
"name": "Python",
"bytes": "2056"
}
],
"symlink_target": ""
} |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def MigrationWarningEvent(vim, *args, **kwargs):
    '''A migration warning.

    Builds an `ns0:MigrationWarningEvent` managed object via the client
    factory and populates it from positional and keyword arguments.
    Positional arguments fill the required properties first, then the
    optional ones, in declaration order.

    Raises:
        IndexError: if fewer than the 6 required properties are supplied
            (positionally or by keyword).
        InvalidArgumentError: if a keyword does not name a known property.
    '''
    obj = vim.client.factory.create('ns0:MigrationWarningEvent')

    # do some validation checking...
    if (len(args) + len(kwargs)) < 6:
        # BUGFIX: the message used to claim 7 required arguments and only
        # counted positional ones; it now matches the check above.
        raise IndexError('Expected at least 6 arguments got: %d' % (len(args) + len(kwargs)))

    required = [ 'fault', 'template', 'chainId', 'createdTime', 'key', 'userName' ]
    optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
        'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]

    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s.  Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| {
"content_hash": "f80b7b50a039038713f809421b663cc4",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 124,
"avg_line_length": 34.54545454545455,
"alnum_prop": 0.5921052631578947,
"repo_name": "xuru/pyvisdk",
"id": "502b6e0a14c94dd21a9a0087bead3d02486e9ba7",
"size": "1141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/do/migration_warning_event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
} |
default_app_config = 'banners.apps.BannersAppConfig'
| {
"content_hash": "295d995955c38139c3e2a6606cfcb8a5",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 52,
"avg_line_length": 53,
"alnum_prop": 0.8113207547169812,
"repo_name": "python/pythondotorg",
"id": "010b54570df8c48f47439ca17c77ede3db4a785c",
"size": "53",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "banners/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7686"
},
{
"name": "Dockerfile",
"bytes": "229"
},
{
"name": "HTML",
"bytes": "498813"
},
{
"name": "JavaScript",
"bytes": "24050"
},
{
"name": "Makefile",
"bytes": "1615"
},
{
"name": "PostScript",
"bytes": "19072"
},
{
"name": "Procfile",
"bytes": "105"
},
{
"name": "Python",
"bytes": "1145343"
},
{
"name": "Ruby",
"bytes": "1464"
},
{
"name": "SCSS",
"bytes": "198033"
}
],
"symlink_target": ""
} |
"""xml2json.py Convert XML to JSON
Relies on ElementTree for the XML parsing. This is based on
pesterfish.py but uses a different XML->JSON mapping.
The XML->JSON mapping is described at
http://www.xml.com/pub/a/2006/05/31/converting-between-xml-and-json.html
Rewritten to a command line utility by Hay Kranen < github.com/hay > with
contributions from George Hamilton (gmh04) and Dan Brown (jdanbrown)
XML JSON
<e/> "e": null
<e>text</e> "e": "text"
<e name="value" /> "e": { "@name": "value" }
<e name="value">text</e> "e": { "@name": "value", "#text": "text" }
<e> <a>text</a><b>text</b> </e>  "e": { "a": "text", "b": "text" }
<e> <a>text</a> <a>text</a> </e> "e": { "a": ["text", "text"] }
<e> text <a>text</a> </e> "e": { "#text": "text", "a": "text" }
This is very similar to the mapping used for Yahoo Web Services
(http://developer.yahoo.com/common/json.html#xml).
This is a mess in that it is so unpredictable -- it requires lots of testing
(e.g. to see if values are lists or strings or dictionaries). For use
in Python this could be vastly cleaner. Think about whether the internal
form can be more self-consistent while maintaining good external
characteristics for the JSON.
Look at the Yahoo version closely to see how it works. Maybe can adopt
that completely if it makes more sense...
R. White, 2006 November 6
"""
import json
import optparse
import sys
import os
import xml.etree.cElementTree as ET
def strip_tag(tag):
    """Drop a leading '{namespace}' qualifier from an ElementTree tag.

    Mirrors the historical behavior exactly: whenever the tag contains a
    '}', the portion between the first and second '}' is returned.
    """
    pieces = tag.split('}')
    if len(pieces) > 1:
        return pieces[1]
    return tag
def elem_to_internal(elem, strip_ns=1, strip=1):
    """Convert an Element into an internal dictionary (not JSON!).

    Args:
        elem: an ElementTree Element.
        strip_ns: when truthy, strip '{namespace}' prefixes from tags.
        strip: when truthy, strip leading/trailing whitespace from text.

    Returns:
        {tag: value} where value follows the XML->JSON mapping documented
        at the top of this module.
    """
    d = {}
    elem_tag = elem.tag
    if strip_ns:
        elem_tag = strip_tag(elem.tag)
    # BUGFIX: attributes were previously only emitted when strip_ns was
    # falsy, which contradicts the module's documented mapping
    # (<e name="value"/>  ->  "e": {"@name": "value"}).  Attributes are
    # now always mapped to '@key' entries, independent of strip_ns.
    for key, value in list(elem.attrib.items()):
        d['@' + key] = value

    # loop over subelements to merge them
    for subelem in elem:
        v = elem_to_internal(subelem, strip_ns=strip_ns, strip=strip)
        tag = subelem.tag
        if strip_ns:
            tag = strip_tag(subelem.tag)
        value = v[tag]
        try:
            # add to existing list for this tag
            d[tag].append(value)
        except AttributeError:
            # turn existing entry into a list
            d[tag] = [d[tag], value]
        except KeyError:
            # add a new non-list entry
            d[tag] = value

    text = elem.text
    tail = elem.tail
    if strip:
        # ignore leading and trailing whitespace
        if text:
            text = text.strip()
        if tail:
            tail = tail.strip()

    if tail:
        d['#tail'] = tail

    if d:
        # use #text element if other attributes exist
        if text:
            d["#text"] = text
    else:
        # text is the value if no attributes
        d = text or None
    return {elem_tag: d}
def internal_to_elem(pfsh, factory=ET.Element):
    """Convert an internal dictionary (not JSON!) into an Element.

    Whatever Element implementation we could import will be
    used by default; if you want to use something else, pass the
    Element class as the factory parameter.
    """
    keys = list(pfsh.keys())
    if len(keys) != 1:
        raise ValueError("Illegal structure with multiple tags: %s" % keys)
    tag = keys[0]
    value = pfsh[tag]

    attribs = {}
    text = None
    tail = None
    children = []
    if isinstance(value, dict):
        for k, v in list(value.items()):
            if k.startswith("@"):
                attribs[k[1:]] = v
            elif k == "#text":
                text = v
            elif k == "#tail":
                tail = v
            elif isinstance(v, list):
                # A list of values becomes repeated sibling elements.
                for item in v:
                    children.append(internal_to_elem({k: item}, factory=factory))
            else:
                children.append(internal_to_elem({k: v}, factory=factory))
    else:
        text = value

    elem = factory(tag, attribs)
    for child in children:
        elem.append(child)
    elem.text = text
    elem.tail = tail
    return elem
def elem2json(elem, options, strip_ns=1, strip=1):
    """Convert an ElementTree or Element into a JSON string.

    `options.pretty` selects indented, key-sorted output.
    """
    if hasattr(elem, 'getroot'):
        elem = elem.getroot()

    internal = elem_to_internal(elem, strip_ns=strip_ns, strip=strip)
    if options.pretty:
        return json.dumps(internal, sort_keys=True, indent=4, separators=(',', ': '))
    return json.dumps(internal)
def json2elem(json_data, factory=ET.Element):
    """Convert a JSON string into an Element.

    Whatever Element implementation we could import will be used by
    default; if you want to use something else, pass the Element class
    as the factory parameter.
    """
    internal = json.loads(json_data)
    return internal_to_elem(internal, factory)
def xml2json(xmlstring, options, strip_ns=1, strip=1):
    """Convert an XML string into a JSON string."""
    root = ET.fromstring(xmlstring)
    return elem2json(root, options, strip_ns=strip_ns, strip=strip)
def json2xml(json_data, factory=ET.Element):
    """Convert a JSON string (or already-parsed dict) into an XML string.

    Whatever Element implementation we could import will be used by
    default; if you want to use something else, pass the Element class
    as the factory parameter.
    """
    internal = json_data if isinstance(json_data, dict) else json.loads(json_data)
    return ET.tostring(internal_to_elem(internal, factory))
def main():
    """Command-line entry point: read XML or JSON, write the converted form."""
    p = optparse.OptionParser(
        description='Converts XML to JSON or the other way around.  Reads from standard input by default, or from file if given.',
        prog='xml2json',
        usage='%prog -t xml2json -o file.json [file]'
    )
    p.add_option('--type', '-t', help="'xml2json' or 'json2xml'", default="xml2json")
    p.add_option('--out', '-o', help="Write to OUT instead of stdout")
    p.add_option(
        '--strip_text', action="store_true",
        dest="strip_text", help="Strip text for xml2json")
    p.add_option(
        '--pretty', action="store_true",
        dest="pretty", help="Format JSON output so it is easier to read")
    p.add_option(
        '--strip_namespace', action="store_true",
        dest="strip_ns", help="Strip namespace for xml2json")
    p.add_option(
        '--strip_newlines', action="store_true",
        dest="strip_nl", help="Strip newlines for xml2json")
    options, arguments = p.parse_args()

    # Read from the named file when given, otherwise from stdin.
    inputstream = sys.stdin
    if len(arguments) == 1:
        try:
            inputstream = open(arguments[0])
        except (IOError, OSError):
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only open() failures belong here.
            sys.stderr.write("Problem reading '{0}'\n".format(arguments[0]))
            p.print_help()
            sys.exit(-1)

    # Renamed from `input`, which shadowed the builtin of the same name.
    raw = inputstream.read()
    if inputstream is not sys.stdin:
        inputstream.close()

    strip = 0
    strip_ns = 0
    if options.strip_text:
        strip = 1
    if options.strip_ns:
        strip_ns = 1
    if options.strip_nl:
        raw = raw.replace('\n', '').replace('\r', '')
    if options.type == "xml2json":
        out = xml2json(raw, options, strip_ns, strip)
    else:
        out = json2xml(raw)

    if options.out:
        # `with` guarantees the output file is closed even if write() fails.
        with open(options.out, 'w') as file:
            file.write(out)
    else:
        print(out)

if __name__ == "__main__":
    main()
| {
"content_hash": "7031b5ec2689fc5f29709ff6589bb365",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 131,
"avg_line_length": 29.858870967741936,
"alnum_prop": 0.5904118838622552,
"repo_name": "pokowaka/dhammapada-ionic",
"id": "7657b9d377328d782c7b249ae414b0df4c79356b",
"size": "7428",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "xml2json.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "511522"
},
{
"name": "HTML",
"bytes": "5159"
},
{
"name": "JavaScript",
"bytes": "2640042"
},
{
"name": "Python",
"bytes": "7428"
},
{
"name": "Shell",
"bytes": "590"
},
{
"name": "XSLT",
"bytes": "1093"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from paypal.standard.ipn.models import PayPalIPN
class PayPalIPNAdmin(admin.ModelAdmin):
    """Admin for PayPal IPN records: core transaction fields up front, with
    secondary groups (address, buyer, seller, recurring, admin) collapsed."""

    # Enables date drill-down navigation on the changelist.
    date_hierarchy = 'payment_date'
    fieldsets = (
        (None, {
            "fields": [
                "flag", "txn_id", "txn_type", "payment_status", "payment_date",
                "transaction_entity", "reason_code", "pending_reason",
                "mc_gross", "mc_fee", "auth_status", "auth_amount", "auth_exp",
                "auth_id"
            ]
        }),
        ("Address", {
            "description": "The address of the Buyer.",
            'classes': ('collapse',),
            "fields": [
                "address_city", "address_country", "address_country_code",
                "address_name", "address_state", "address_status",
                "address_street", "address_zip"
            ]
        }),
        ("Buyer", {
            "description": "The information about the Buyer.",
            'classes': ('collapse',),
            "fields": [
                "first_name", "last_name", "payer_business_name", "payer_email",
                "payer_id", "payer_status", "contact_phone", "residence_country"
            ]
        }),
        ("Seller", {
            "description": "The information about the Seller.",
            'classes': ('collapse',),
            "fields": [
                "business", "item_name", "item_number", "quantity",
                "receiver_email", "receiver_id", "custom", "invoice", "memo"
            ]
        }),
        ("Recurring", {
            "description": "Information about recurring Payments.",
            "classes": ("collapse",),
            "fields": [
                "profile_status", "initial_payment_amount", "amount_per_cycle",
                "outstanding_balance", "period_type", "product_name",
                "product_type", "recurring_payment_id", "receipt_id",
                "next_payment_date"
            ]
        }),
        ("Admin", {
            "description": "Additional Info.",
            "classes": ('collapse',),
            "fields": [
                "test_ipn", "ipaddress", "query", "response", "flag_code",
                "flag_info"
            ]
        }),
    )
    list_display = [
        "__unicode__", "flag", "flag_info", "invoice", "custom",
        "payment_status", "created_at"
    ]
    search_fields = ["txn_id", "recurring_payment_id"]
admin.site.register(PayPalIPN, PayPalIPNAdmin) | {
"content_hash": "34bb82295a2446672e1da6904c8b7834",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 80,
"avg_line_length": 37.582089552238806,
"alnum_prop": 0.4670373312152502,
"repo_name": "bluestemscott/librarygadget",
"id": "3f0423091440b1cb6a2e35ed05651007f037f882",
"size": "2566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "librarygadget/paypal/standard/ipn/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8299"
},
{
"name": "HTML",
"bytes": "2721198"
},
{
"name": "JavaScript",
"bytes": "162"
},
{
"name": "Python",
"bytes": "323926"
},
{
"name": "Shell",
"bytes": "64"
}
],
"symlink_target": ""
} |
from keys import key_patterns
class Bindings(object):
    """Maps raw keycodes to semantic tags via configurable key patterns.

    `key_patterns` is a dict of {mask: {masked_keycode: key_label}}; masks
    are tried from largest to smallest, first match wins.
    """

    def __init__(self, key_patterns):
        # Forward and reverse lookup tables populated by add()/add_multikey().
        self._tag_to_key_labels = {}
        self._tag_to_help = {}
        self._key_label_to_tag = {}
        self._key_patterns = key_patterns
        # Memoizes keycode -> tag resolution (including None results).
        self._cache_keycode_to_tag = {}

    def get_tag(self, keycode):
        '''Gets tag for keycode, returns None if no tag found.'''
        if keycode is None:
            return None
        if keycode not in self._cache_keycode_to_tag:
            label = self.get_key_label_from_keycode(keycode)
            self._cache_keycode_to_tag[keycode] = self.get_tag_from_key_label(label)
        return self._cache_keycode_to_tag[keycode]

    def get_tag_from_key_label(self, label):
        '''Get tag using key label, if no match, returns None.'''
        return self._key_label_to_tag.get(label)

    def get_key_label_from_keycode(self, keycode, extra_info=False):
        '''Get key label using keycode; if no match, returns None.

        With extra_info=True, also returns the masked keycode per pattern
        mask (largest mask first) to aid debugging.
        '''
        masks = sorted(self._key_patterns.keys(), reverse=True)
        label = None
        for mask in masks:
            candidate = keycode & mask
            if candidate in self._key_patterns[mask]:
                label = self._key_patterns[mask][candidate]
                break
        if extra_info:
            return label, [keycode & mask for mask in masks]
        return label

    def add(self, tag, key, help_text):
        '''Bind a single key label to a tag.'''
        self.add_multikey(tag, (key,), help_text)

    def add_multikey(self, tag, key_labels, help_text):
        '''Bind several key labels to one tag; duplicate labels are an error.'''
        for key_label in key_labels:
            assert key_label not in self._key_label_to_tag, (
                'Key "%s" cannot be bound to "%s" because it is already bound to "%s"' %
                (key_label, tag, self._key_label_to_tag[key_label])
            )
            self._key_label_to_tag[key_label] = tag
        self._tag_to_key_labels[tag] = key_labels
        self._tag_to_help[tag] = help_text

    def get_key_help(self, tag):
        '''Return (key labels, help text) registered for *tag*.'''
        return (self._tag_to_key_labels[tag], self._tag_to_help[tag])
# Module-level singleton: register every key binding here, then expose the
# populated object as `bindings` at the bottom of the file.
_ = Bindings(key_patterns)

# Core
_.add('freeze_cam', 'f',
      'Freeze or unfreeze camera capture')
_.add('toggle_input_mode', 'c',
      'Toggle between camera and static files')
_.add_multikey('static_file_increment', ['e', 'pgdn'],
      'Load next static file')
_.add_multikey('static_file_decrement', ['w', 'pgup'],
      'Load previous static file')
_.add('help_mode', 'h',
      'Toggle this help screen')
_.add('stretch_mode', '0',
      'Toggle between cropping and stretching static files to be square')
_.add('debug_level', '5',
      'Cycle debug level between 0 (quiet), 1 (some timing info) and 2 (all timing info)')
_.add('quit', 'q',
      'Quit')

# Caffevis
_.add_multikey('reset_state', ['esc'],
      'Reset: turn off backprop, reset to layer 0, unit 0, default boost.')
_.add_multikey('sel_left', ['left', 'j'],
      '')
_.add_multikey('sel_right', ['right', 'l'],
      '')
_.add_multikey('sel_down', ['down', 'k'],
      '')
_.add_multikey('sel_up', ['up', 'i'],
      '')
_.add('sel_left_fast', 'J',
      '')
_.add('sel_right_fast', 'L',
      '')
_.add('sel_down_fast', 'K',
      '')
_.add('sel_up_fast', 'I',
      '')
_.add_multikey('sel_layer_left', ['u', 'U'],
      'Select previous layer without moving cursor')
_.add_multikey('sel_layer_right', ['o', 'O'],
      'Select next layer without moving cursor')
_.add('zoom_mode', 'z',
      'Cycle zooming through {currently selected unit, backprop results, none}')
_.add('pattern_mode', 's',
      'Toggle overlay of preferred input pattern (regularized optimized images)')
_.add('ez_back_mode_loop', 'b',
      'Cycle through a few common backprop/deconv modes')
_.add('freeze_back_unit', 'd',
      'Freeze the bprop/deconv origin to be the currently selected unit')
_.add('show_back', 'a',
      'Toggle between showing forward activations and back/deconv diffs')
_.add('back_mode', 'n',
      '(expert) Change back mode directly.')
_.add('back_filt_mode', 'm',
      '(expert) Change back output filter directly.')
_.add('boost_gamma', 't',
      'Boost contrast using gamma correction')
_.add('boost_individual', 'T',
      'Boost contrast by scaling each channel to use more of its individual range')
_.add('toggle_label_predictions', '8',
      'Turn on or off display of prob label values')
_.add('toggle_unit_jpgs', '9',
      'Turn on or off display of loaded jpg visualization')

# Public handle for the rest of the application.
bindings = _
| {
"content_hash": "ece9e25cef3f5c7bf13aad5d61dae093",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 98,
"avg_line_length": 36.488,
"alnum_prop": 0.588467441350581,
"repo_name": "yosinski/deep-visualization-toolbox",
"id": "7de7f9dfd9b5a216da9d52c5eb90c2ebf94f6462",
"size": "4584",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bindings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "206962"
},
{
"name": "Shell",
"bytes": "3817"
}
],
"symlink_target": ""
} |
import hashlib
import sys
import time
import traceback
import json
from typing import Optional
import certifi
import urllib.parse
import aiohttp
try:
from . import paymentrequest_pb2 as pb2
except ImportError:
sys.exit("Error: could not find paymentrequest_pb2.py. Create it with 'protoc --proto_path=electrum/ --python_out=electrum/ electrum/paymentrequest.proto'")
from . import bitcoin, ecc, util, transaction, x509, rsakey
from .util import bh2u, bfh, export_meta, import_meta, make_aiohttp_session
from .crypto import sha256
from .bitcoin import TYPE_ADDRESS
from .transaction import TxOutput
from .network import Network
from .logging import get_logger, Logger
_logger = get_logger(__name__)
# HTTP headers used when fetching BIP70 payment requests and posting payments.
REQUEST_HEADERS = {'Accept': 'application/bitcoin-paymentrequest', 'User-Agent': 'Electrum'}
ACK_HEADERS = {'Content-Type':'application/bitcoin-payment','Accept':'application/bitcoin-paymentack','User-Agent':'Electrum'}

# Certifi CA bundle path; the parsed list is populated lazily by load_ca_list().
ca_path = certifi.where()
ca_list = None
ca_keyID = None
def load_ca_list():
    """Lazily populate the module-level CA certificate list (idempotent)."""
    global ca_list, ca_keyID
    if ca_list is not None:
        return
    ca_list, ca_keyID = x509.load_certificates(ca_path)
# status of payment requests
PR_UNPAID = 0
PR_EXPIRED = 1
PR_UNKNOWN = 2 # sent but not propagated
PR_PAID = 3 # sent and propagated
async def get_payment_request(url: str) -> 'PaymentRequest':
    """Fetch a BIP70 payment request from an http(s) or file URL.

    Never raises on failure: any fetch problem is recorded in the returned
    PaymentRequest's `error` attribute (with `data` left as None).
    """
    u = urllib.parse.urlparse(url)
    error = None
    if u.scheme in ('http', 'https'):
        resp_content = None
        try:
            # Route through the wallet's configured proxy, if any.
            proxy = Network.get_instance().proxy
            async with make_aiohttp_session(proxy, headers=REQUEST_HEADERS) as session:
                async with session.get(url) as response:
                    resp_content = await response.read()
                    response.raise_for_status()
                    # Guard against `bitcoin:`-URIs with invalid payment request URLs
                    if "Content-Type" not in response.headers \
                            or response.headers["Content-Type"] != "application/bitcoin-paymentrequest":
                        data = None
                        error = "payment URL not pointing to a payment request handling server"
                    else:
                        data = resp_content
                    data_len = len(data) if data is not None else None
                    _logger.info(f'fetched payment request {url} {data_len}')
        except aiohttp.ClientError as e:
            error = f"Error while contacting payment URL: {url}.\nerror type: {type(e)}"
            if isinstance(e, aiohttp.ClientResponseError):
                error += f"\nGot HTTP status code {e.status}."
                if resp_content:
                    # Log a truncated, clearly-untrusted copy of the server's
                    # response body to help diagnose broken payment servers.
                    try:
                        error_text_received = resp_content.decode("utf8")
                    except UnicodeDecodeError:
                        error_text_received = "(failed to decode error)"
                    else:
                        error_text_received = error_text_received[:400]
                    error_oneline = ' -- '.join(error.split('\n'))
                    _logger.info(f"{error_oneline} -- [DO NOT TRUST THIS MESSAGE] "
                                 f"{repr(e)} text: {error_text_received}")
            data = None
    elif u.scheme == 'file':
        try:
            with open(u.path, 'r', encoding='utf-8') as f:
                data = f.read()
        except IOError:
            data = None
            error = "payment URL not pointing to a valid file"
    else:
        data = None
        error = f"Unknown scheme for payment request. URL: {url}"
    pr = PaymentRequest(data, error=error)
    return pr
class PaymentRequest:
    def __init__(self, data, *, error=None):
        """Wrap raw BIP70 bytes; parsing happens immediately via parse().

        Args:
            data: serialized pb2.PaymentRequest bytes (may be None when the
                fetch failed).
            error: optional fetch-time error message; when set, parse()
                does nothing.
        """
        self.raw = data
        self.error = error  # FIXME overloaded and also used when 'verify' succeeds
        self.parse(data)
        self.requestor = None  # known after verify
        self.tx = None
    def __str__(self):
        # Debug-oriented: a str() dump of the raw serialized request (or None).
        return str(self.raw)
    def parse(self, r):
        """Deserialize raw BIP70 bytes into outputs, memo and payment_url.

        On failure, records a message in `self.error` and returns early;
        `self.outputs` may then be empty or partial.
        """
        self.outputs = []
        if self.error:
            return
        # Short identifier derived from the request bytes.
        self.id = bh2u(sha256(r)[0:16])
        # NOTE(review): broad bare `except` — protobuf raises various error
        # types on malformed input; consider narrowing.
        try:
            self.data = pb2.PaymentRequest()
            self.data.ParseFromString(r)
        except:
            self.error = "cannot parse payment request"
            return
        self.details = pb2.PaymentDetails()
        self.details.ParseFromString(self.data.serialized_payment_details)
        for o in self.details.outputs:
            type_, addr = transaction.get_address_from_output_script(o.script)
            if type_ != TYPE_ADDRESS:
                # TODO maybe rm restriction but then get_requestor and get_id need changes
                self.error = "only addresses are allowed as outputs"
                return
            self.outputs.append(TxOutput(type_, addr, o.amount))
        self.memo = self.details.memo
        self.payment_url = self.details.payment_url
    def is_pr(self):
        # Heuristic: any request carrying a non-zero amount is treated as a
        # "real" payment request.
        return self.get_amount() != 0
        #return self.get_outputs() != [(TYPE_ADDRESS, self.get_requestor(), self.get_amount())]
    def verify(self, contacts):
        """Check the request's signature; returns True on success.

        Dispatches on `pki_type` to x509 or DNSSEC verification.  Unsigned
        requests verify trivially (requestor stays None).  A status or
        error message is always left in `self.error` (even on success —
        see the FIXME in __init__).
        """
        if self.error:
            return False
        if not self.raw:
            self.error = "Empty request"
            return False
        pr = pb2.PaymentRequest()
        # NOTE(review): broad bare `except`, same caveat as in parse().
        try:
            pr.ParseFromString(self.raw)
        except:
            self.error = "Error: Cannot parse payment request"
            return False
        if not pr.signature:
            # the address will be displayed as requestor
            self.requestor = None
            return True
        if pr.pki_type in ["x509+sha256", "x509+sha1"]:
            return self.verify_x509(pr)
        elif pr.pki_type in ["dnssec+btc", "dnssec+ecdsa"]:
            return self.verify_dnssec(pr, contacts)
        else:
            self.error = "ERROR: Unsupported PKI Type for Message Signature"
            return False
def verify_x509(self, paymntreq):
load_ca_list()
if not ca_list:
self.error = "Trusted certificate authorities list not found"
return False
cert = pb2.X509Certificates()
cert.ParseFromString(paymntreq.pki_data)
# verify the chain of certificates
try:
x, ca = verify_cert_chain(cert.certificate)
except BaseException as e:
_logger.exception('')
self.error = str(e)
return False
# get requestor name
self.requestor = x.get_common_name()
if self.requestor.startswith('*.'):
self.requestor = self.requestor[2:]
# verify the BIP70 signature
pubkey0 = rsakey.RSAKey(x.modulus, x.exponent)
sig = paymntreq.signature
paymntreq.signature = b''
s = paymntreq.SerializeToString()
sigBytes = bytearray(sig)
msgBytes = bytearray(s)
if paymntreq.pki_type == "x509+sha256":
hashBytes = bytearray(hashlib.sha256(msgBytes).digest())
verify = pubkey0.verify(sigBytes, x509.PREFIX_RSA_SHA256 + hashBytes)
elif paymntreq.pki_type == "x509+sha1":
verify = pubkey0.hashAndVerify(sigBytes, msgBytes)
else:
self.error = f"ERROR: unknown pki_type {paymntreq.pki_type} in Payment Request"
return False
if not verify:
self.error = "ERROR: Invalid Signature for Payment Request Data"
return False
### SIG Verified
self.error = 'Signed by Trusted CA: ' + ca.get_common_name()
return True
def verify_dnssec(self, pr, contacts):
sig = pr.signature
alias = pr.pki_data
info = contacts.resolve(alias)
if info.get('validated') is not True:
self.error = "Alias verification failed (DNSSEC)"
return False
if pr.pki_type == "dnssec+btc":
self.requestor = alias
address = info.get('address')
pr.signature = b''
message = pr.SerializeToString()
if ecc.verify_message_with_address(address, sig, message):
self.error = 'Verified with DNSSEC'
return True
else:
self.error = "verify failed"
return False
else:
self.error = "unknown algo"
return False
def has_expired(self) -> Optional[bool]:
if not hasattr(self, 'details'):
return None
return self.details.expires and self.details.expires < int(time.time())
def get_expiration_date(self):
return self.details.expires
def get_amount(self):
return sum(map(lambda x:x[2], self.outputs))
def get_address(self):
o = self.outputs[0]
assert o.type == TYPE_ADDRESS
return o.address
def get_requestor(self):
return self.requestor if self.requestor else self.get_address()
def get_verify_status(self):
return self.error if self.requestor else "No Signature"
def get_memo(self):
return self.memo
def get_dict(self):
return {
'requestor': self.get_requestor(),
'memo':self.get_memo(),
'exp': self.get_expiration_date(),
'amount': self.get_amount(),
'signature': self.get_verify_status(),
'txid': self.tx,
'outputs': self.get_outputs()
}
def get_id(self):
return self.id if self.requestor else self.get_address()
def get_outputs(self):
return self.outputs[:]
async def send_payment_and_receive_paymentack(self, raw_tx, refund_addr):
pay_det = self.details
if not self.details.payment_url:
return False, "no url"
paymnt = pb2.Payment()
paymnt.merchant_data = pay_det.merchant_data
paymnt.transactions.append(bfh(raw_tx))
ref_out = paymnt.refund_to.add()
ref_out.script = util.bfh(transaction.Transaction.pay_script(TYPE_ADDRESS, refund_addr))
paymnt.memo = "Paid using Electrum"
pm = paymnt.SerializeToString()
payurl = urllib.parse.urlparse(pay_det.payment_url)
resp_content = None
try:
proxy = Network.get_instance().proxy
async with make_aiohttp_session(proxy, headers=ACK_HEADERS) as session:
async with session.post(payurl.geturl(), data=pm) as response:
resp_content = await response.read()
response.raise_for_status()
try:
paymntack = pb2.PaymentACK()
paymntack.ParseFromString(resp_content)
except Exception:
return False, "PaymentACK could not be processed. Payment was sent; please manually verify that payment was received."
print(f"PaymentACK message received: {paymntack.memo}")
return True, paymntack.memo
except aiohttp.ClientError as e:
error = f"Payment Message/PaymentACK Failed:\nerror type: {type(e)}"
if isinstance(e, aiohttp.ClientResponseError):
error += f"\nGot HTTP status code {e.status}."
if resp_content:
try:
error_text_received = resp_content.decode("utf8")
except UnicodeDecodeError:
error_text_received = "(failed to decode error)"
else:
error_text_received = error_text_received[:400]
error_oneline = ' -- '.join(error.split('\n'))
_logger.info(f"{error_oneline} -- [DO NOT TRUST THIS MESSAGE] "
f"{repr(e)} text: {error_text_received}")
return False, error
def make_unsigned_request(req):
    """Build an unsigned pb2.PaymentRequest from a request dict.

    *req* must carry 'address', 'amount' and 'memo'; 'time' (creation
    timestamp) and 'exp' (lifetime in seconds) are optional and ignored
    unless they are ints.
    """
    from .transaction import Transaction
    addr = req['address']
    # renamed from 'time'/'exp': the old local `time` shadowed the stdlib
    # time module that this file uses elsewhere
    timestamp = req.get('time', 0)
    expiration = req.get('exp', 0)
    if timestamp and not isinstance(timestamp, int):
        timestamp = 0
    if expiration and not isinstance(expiration, int):
        expiration = 0
    amount = req['amount']
    if amount is None:
        amount = 0
    memo = req['memo']
    script = bfh(Transaction.pay_script(TYPE_ADDRESS, addr))
    outputs = [(script, amount)]
    pd = pb2.PaymentDetails()
    for script, amount in outputs:
        pd.outputs.add(amount=amount, script=script)
    pd.time = timestamp
    pd.expires = timestamp + expiration if expiration else 0
    pd.memo = memo
    pr = pb2.PaymentRequest()
    pr.serialized_payment_details = pd.SerializeToString()
    pr.signature = util.to_bytes('')
    return pr
def sign_request_with_alias(pr, alias, alias_privkey):
    """Sign *pr* in place with a DNSSEC alias key (pki_type 'dnssec+btc')."""
    pr.pki_type = 'dnssec+btc'
    pr.pki_data = str(alias)
    serialized = pr.SerializeToString()
    signing_key = ecc.ECPrivkey(alias_privkey)
    is_compressed = bitcoin.is_compressed_privkey(alias_privkey)
    pr.signature = signing_key.sign_message(serialized, is_compressed)
def verify_cert_chain(chain):
    """ Verify a chain of certificates. The last certificate is the CA"""
    load_ca_list()
    # Parse every DER blob in the chain: the leaf (index 0) must be within
    # its validity window; every other certificate must be a CA.
    x509_chain = []
    for i, der in enumerate(chain):
        parsed = x509.X509(bytearray(der))
        x509_chain.append(parsed)
        if i == 0:
            parsed.check_date()
        elif not parsed.check_ca():
            raise Exception("ERROR: Supplied CA Certificate Error")
    cert_num = len(chain)
    if cert_num <= 1:
        raise Exception("ERROR: CA Certificate Chain Not Provided by Payment Processor")
    # if the root CA is not supplied, add it to the chain
    ca = x509_chain[cert_num - 1]
    if ca.getFingerprint() not in ca_list:
        keyID = ca.get_issuer_keyID()
        f = ca_keyID.get(keyID)
        if not f:
            raise Exception("Supplied CA Not Found in Trusted CA Store.")
        x509_chain.append(ca_list[f])
    # Walk the chain: each certificate must carry a signature made by the
    # key of the following (issuer) certificate.
    for i in range(1, len(x509_chain)):
        issuer = x509_chain[i]
        subject = x509_chain[i - 1]
        algo, sig, data = subject.get_signature()
        sig = bytearray(sig)
        pubkey = rsakey.RSAKey(issuer.modulus, issuer.exponent)
        if algo == x509.ALGO_RSA_SHA1:
            verified = pubkey.hashAndVerify(sig, data)
        elif algo == x509.ALGO_RSA_SHA256:
            digest = bytearray(hashlib.sha256(data).digest())
            verified = pubkey.verify(sig, x509.PREFIX_RSA_SHA256 + digest)
        elif algo == x509.ALGO_RSA_SHA384:
            digest = bytearray(hashlib.sha384(data).digest())
            verified = pubkey.verify(sig, x509.PREFIX_RSA_SHA384 + digest)
        elif algo == x509.ALGO_RSA_SHA512:
            digest = bytearray(hashlib.sha512(data).digest())
            verified = pubkey.verify(sig, x509.PREFIX_RSA_SHA512 + digest)
        else:
            raise Exception("Algorithm not supported: {}".format(algo))
        if not verified:
            raise Exception("Certificate not Signed by Provided CA Certificate Chain")
    return x509_chain[0], ca
def check_ssl_config(config):
    """Validate the configured SSL key/cert pair; return the requestor name."""
    from . import pem
    key_path = config.get('ssl_privkey')
    cert_path = config.get('ssl_chain')
    with open(key_path, 'r', encoding='utf-8') as f:
        params = pem.parse_private_key(f.read())
    with open(cert_path, 'r', encoding='utf-8') as f:
        chain_pem = f.read()
    cert_chain = pem.dePemList(chain_pem, "CERTIFICATE")
    # verify chain
    x, ca = verify_cert_chain(cert_chain)
    # Construct both key objects (the constructors sanity-check their inputs
    # — presumably why these are kept despite being otherwise unused), then
    # verify that privkey and pubkey match.
    privkey = rsakey.RSAKey(*params)
    pubkey = rsakey.RSAKey(x.modulus, x.exponent)
    assert x.modulus == params[0]
    assert x.exponent == params[1]
    # return requestor, stripping a wildcard prefix if present
    requestor = x.get_common_name()
    if requestor.startswith('*.'):
        requestor = requestor[2:]
    return requestor
def sign_request_with_x509(pr, key_path, cert_path):
    """Sign *pr* in place with an RSA key + x509 chain (pki_type 'x509+sha256')."""
    from . import pem
    with open(key_path, 'r', encoding='utf-8') as f:
        privkey = rsakey.RSAKey(*pem.parse_private_key(f.read()))
    with open(cert_path, 'r', encoding='utf-8') as f:
        cert_chain = pem.dePemList(f.read(), "CERTIFICATE")
    certificates = pb2.X509Certificates()
    certificates.certificate.extend(map(bytes, cert_chain))
    pr.pki_type = 'x509+sha256'
    pr.pki_data = certificates.SerializeToString()
    # The signature covers the serialized request with pki fields filled in.
    digest = bytearray(hashlib.sha256(bytearray(pr.SerializeToString())).digest())
    pr.signature = bytes(privkey.sign(x509.PREFIX_RSA_SHA256 + digest))
def serialize_request(req):
    """Turn a request dict into a pb2.PaymentRequest, attaching a
    pre-computed dnssec signature when 'name' and 'sig' are both present."""
    pr = make_unsigned_request(req)
    sig = req.get('sig')
    name = req.get('name')
    if name and sig:
        pr.signature = bfh(sig)
        pr.pki_type = 'dnssec+btc'
        pr.pki_data = str(name)
    return pr
def make_request(config, req):
    """Build a payment request; x509-sign it when SSL key/cert are configured."""
    pr = make_unsigned_request(req)
    ssl_key = config.get('ssl_privkey')
    ssl_cert = config.get('ssl_chain')
    if ssl_key and ssl_cert:
        sign_request_with_x509(pr, ssl_key, ssl_cert)
    return pr
class InvoiceStore(Logger):
    """Wallet-side store of BIP70 invoices, persisted under 'invoices'.

    Keeps two indexes: ``self.invoices`` maps request id -> PaymentRequest,
    and ``self.paid`` maps txid -> request id.
    """

    def __init__(self, storage):
        Logger.__init__(self)
        self.storage = storage
        self.invoices = {}
        self.paid = {}
        d = self.storage.get('invoices', {})
        self.load(d)

    def set_paid(self, pr, txid):
        """Record that invoice *pr* was settled by transaction *txid*."""
        pr.tx = txid
        pr_id = pr.get_id()
        self.paid[txid] = pr_id
        if pr_id not in self.invoices:
            # in case the user had deleted it previously
            self.add(pr)

    def load(self, d):
        """Rebuild in-memory indexes from serialized dict *d*.

        Entries that fail to parse are skipped silently (best effort).
        """
        for k, v in d.items():
            try:
                pr = PaymentRequest(bfh(v.get('hex')))
                pr.tx = v.get('txid')
                pr.requestor = v.get('requestor')
                self.invoices[k] = pr
                if pr.tx:
                    self.paid[pr.tx] = k
            except Exception:  # narrowed from bare 'except:'
                continue

    def import_file(self, path):
        def validate(data):
            return data  # TODO
        import_meta(path, validate, self.on_import)

    def on_import(self, data):
        self.load(data)
        self.save()

    def export_file(self, filename):
        export_meta(self.dump(), filename)

    def dump(self):
        """Return a JSON-serializable dict of all stored invoices."""
        d = {}
        for k, pr in self.invoices.items():
            d[k] = {
                'hex': bh2u(pr.raw),
                'requestor': pr.requestor,
                'txid': pr.tx
            }
        return d

    def save(self):
        self.storage.put('invoices', self.dump())

    def get_status(self, key):
        """Return PR_PAID / PR_EXPIRED / PR_UNPAID, or None if unknown."""
        pr = self.get(key)
        if pr is None:
            self.logger.info(f"get_status() can't find pr for {key}")
            return
        if pr.tx is not None:
            return PR_PAID
        if pr.has_expired():
            return PR_EXPIRED
        return PR_UNPAID

    def add(self, pr):
        key = pr.get_id()
        self.invoices[key] = pr
        self.save()
        return key

    def remove(self, key):
        self.invoices.pop(key)
        self.save()

    def get(self, k):
        return self.invoices.get(k)

    def sorted_list(self):
        # sort
        return self.invoices.values()

    def unpaid_invoices(self):
        return [self.invoices[k] for k in
                filter(lambda x: self.get_status(x) not in (PR_PAID, None),
                       self.invoices.keys())
                ]
| {
"content_hash": "ec2d69093440b071d0c7447121de58c6",
"timestamp": "",
"source": "github",
"line_count": 547,
"max_line_length": 160,
"avg_line_length": 35.26508226691042,
"alnum_prop": 0.5759460860549508,
"repo_name": "fujicoin/electrum-fjc",
"id": "fedcd1b3a14e6ea73aa71c2f7a2d847bd132e710",
"size": "20452",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "electrum/paymentrequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "7756"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "2929"
},
{
"name": "Makefile",
"bytes": "877"
},
{
"name": "NSIS",
"bytes": "7450"
},
{
"name": "Python",
"bytes": "2346736"
},
{
"name": "Shell",
"bytes": "30493"
}
],
"symlink_target": ""
} |
import json
import tempfile
from preggy import expect
from tests.base import TestCase
from thumbor import __version__
from thumbor.config import Config
from thumbor.context import ServerParameters
from thumbor.error_handlers.file import ErrorHandler
class FakeRequest:
    """Stand-in for a tornado HTTP request carrying fixed test data."""

    def __init__(self):
        self.headers = {
            "header1": "value1",
            "Cookie": "cookie1=value; cookie2=value2;",
        }
        self.url = "test/"
        self.method = "GET"
        self.arguments = []
        self.body = "body"
        self.query = "a=1&b=2"
        self.remote_ip = "127.0.0.1"

    def full_url(self):
        """Absolute URL this fake request pretends it was made to."""
        return "http://test/" + self.url
class FakeHandler:
    """Minimal stand-in for a tornado RequestHandler; exposes only .request."""

    def __init__(self):
        self.request = FakeRequest()
class InvalidFileErrorHandlerTestCase(TestCase):
    """ErrorHandler construction must fail on missing/invalid configuration."""

    def test_when_invalid_empty_configuration(self):
        # No ERROR_FILE_LOGGER configured at all.
        with expect.error_to_happen(RuntimeError):
            ErrorHandler(self.config)

    def test_when_invalid_configuration_of_filename_with_context_should_be_error(
        self,
    ):
        # 'server..port' is not a valid context attribute path.
        bad_config = Config(
            ERROR_FILE_NAME_USE_CONTEXT="server..port",
            ERROR_FILE_LOGGER="toto",
        )
        with expect.error_to_happen(RuntimeError):
            ErrorHandler(bad_config)
class BasicFileErrorHandlerTestCase(TestCase):
    """handle_error must append a JSON log entry to the configured file."""

    def get_config(self):
        # pylint: disable=consider-using-with
        self.tmp = tempfile.NamedTemporaryFile(prefix="thumborTest.")
        return Config(SECURITY_KEY="ACME-SEC", ERROR_FILE_LOGGER=self.tmp.name)

    def get_server(self):
        server = ServerParameters(
            8889, "localhost", "thumbor.conf", None, "info", None
        )
        server.security_key = "ACME-SEC"
        return server

    def test_when_error_occurs_should_have_called_client(self):
        handler = ErrorHandler(self.config)
        fake_handler = FakeHandler()
        handler.handle_error(self.context, fake_handler, RuntimeError("Test"))
        log = json.loads(self.tmp.read().decode("utf-8"))
        # the timestamp varies per run; drop it before comparing
        del log["extra"]["timestamp"]
        expected = {
            "Http": {
                "url": "http://test/test/",
                "method": "GET",
                "data": [],
                "body": "body",
                "query_string": "a=1&b=2",
            },
            "interfaces.User": {"ip": "127.0.0.1"},
            "exception": "Test",
            "extra": {
                "thumbor-version": __version__,
                "Headers": {
                    "header1": "value1",
                    "Cookie": {"cookie1": "value", "cookie2": "value2"},
                },
            },
        }
        expect(log).to_be_like(expected)
class FileErrorHandlerTestCase(TestCase):
    """Like the basic case, but the log file name is derived from context."""

    PORT = 8890

    def get_config(self):
        prefix = f"thumborTest.{self.PORT}."
        # pylint: disable=consider-using-with
        self.tmp = tempfile.NamedTemporaryFile(prefix=prefix)
        return Config(
            SECURITY_KEY="ACME-SEC",
            # Re-insert the '%i' placeholder so thumbor substitutes server.port.
            ERROR_FILE_LOGGER=self.tmp.name.replace(prefix, "thumborTest.%i."),
            ERROR_FILE_NAME_USE_CONTEXT="server.port",
        )

    def get_server(self):
        server = ServerParameters(
            self.PORT, "localhost", "thumbor.conf", None, "info", None
        )
        server.security_key = "ACME-SEC"
        return server

    def test_when_error_occurs_i_use_context_should_have_called_client(self):
        handler = ErrorHandler(self.config)
        fake_handler = FakeHandler()
        handler.handle_error(self.context, fake_handler, RuntimeError("Test"))
        # check against json version
        log = json.loads(self.tmp.read().decode("utf-8"))
        del log["extra"]["timestamp"]
        expected = {
            "Http": {
                "url": "http://test/test/",
                "method": "GET",
                "data": [],
                "body": "body",
                "query_string": "a=1&b=2",
            },
            "interfaces.User": {"ip": "127.0.0.1"},
            "exception": "Test",
            "extra": {
                "thumbor-version": __version__,
                "Headers": {
                    "header1": "value1",
                    "Cookie": {"cookie1": "value", "cookie2": "value2"},
                },
            },
        }
        expect(log).to_be_like(expected)
| {
"content_hash": "35b861dfaab6948937ba9b7e2d0e07a5",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 81,
"avg_line_length": 30.93959731543624,
"alnum_prop": 0.5036876355748373,
"repo_name": "thumbor/thumbor",
"id": "88647bc5fc66ee0a72b9692e3150513f008c40f5",
"size": "4862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/error_handlers/test_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "59023"
},
{
"name": "Dockerfile",
"bytes": "1631"
},
{
"name": "JavaScript",
"bytes": "2514"
},
{
"name": "Makefile",
"bytes": "11947"
},
{
"name": "Python",
"bytes": "716804"
},
{
"name": "Shell",
"bytes": "331"
}
],
"symlink_target": ""
} |
scope = "System"
description = """
This role grants a user basic object creation and editing permission.
"""
permissions = {
"read": [
"AccessControlList",
"Audit",
"Snapshot",
"Categorization",
"Category",
"Comment",
"ControlCategory",
"ControlAssertion",
"Control",
"Assessment",
"AssessmentTemplate",
"CustomAttributeDefinition",
"CustomAttributeValue",
"Issue",
"DataAsset",
"AccessGroup",
"Directive",
"Contract",
"Policy",
"Regulation",
"Standard",
"Document",
"Facility",
"Help",
"Market",
"Objective",
"ObjectDocument",
"ObjectOwner",
"ObjectPerson",
"Option",
"OrgGroup",
"Vendor",
"PopulationSample",
"Product",
"Project",
"Relationship",
"Section",
"Clause",
"SystemOrProcess",
"System",
"Process",
"Person",
"Program",
"Revision",
"Role",
"Context",
"UserRole",
{
"type": "BackgroundTask",
"terms": {
"property_name": "modified_by",
"value": "$current_user"
},
"condition": "is"
},
],
"create": [
"Audit",
"Snapshot",
"Workflow",
"Categorization",
"Category",
"Comment",
"ControlCategory",
"ControlAssertion",
"Control",
"CustomAttributeDefinition",
"CustomAttributeValue",
"Assessment",
"AssessmentTemplate",
"Issue",
"DataAsset",
"AccessGroup",
"Directive",
"Contract",
"Policy",
"Regulation",
"Standard",
"Document",
"Facility",
"Help",
"Market",
"Objective",
"ObjectDocument",
"ObjectPerson",
"Option",
"OrgGroup",
"Vendor",
"PopulationSample",
"Product",
"Project",
"Relationship",
"Section",
"Clause",
"SystemOrProcess",
"System",
"Process",
"ObjectOwner",
"Program",
"Role",
"UserRole",
"Context",
{
"type": "BackgroundTask",
"terms": {
"property_name": "modified_by",
"value": "$current_user"
},
"condition": "is"
},
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Audit",
"Snapshot",
"Workflow",
"Categorization",
"Category",
"ControlCategory",
"ControlAssertion",
"Control",
"CustomAttributeDefinition",
"CustomAttributeValue",
"Assessment",
"AssessmentTemplate",
"Issue",
"DataAsset",
"AccessGroup",
"Directive",
"Contract",
"Policy",
"Regulation",
"Standard",
"Document",
"Facility",
"Help",
"Market",
"Objective",
"ObjectDocument",
"ObjectPerson",
"Person",
"Option",
"OrgGroup",
"Vendor",
"PopulationSample",
"Product",
"Project",
"Relationship",
"Section",
"Clause",
"SystemOrProcess",
"System",
"Process",
"ObjectOwner",
"Program",
"Role",
"UserRole",
"Context",
{
"type": "BackgroundTask",
"terms": {
"property_name": "modified_by",
"value": "$current_user"
},
"condition": "is"
},
],
"delete": [
"Audit",
"Workflow",
"Categorization",
"Category",
"ControlCategory",
"ControlAssertion",
"Control",
"CustomAttributeDefinition",
"CustomAttributeValue",
"Assessment",
"AssessmentTemplate",
"Issue",
"DataAsset",
"AccessGroup",
"Directive",
"Contract",
"Policy",
"Regulation",
"Standard",
"Document",
"Facility",
"Help",
"Market",
"Objective",
"ObjectDocument",
"ObjectPerson",
"Option",
"OrgGroup",
"Vendor",
"PopulationSample",
"Product",
"Project",
"Relationship",
"Section",
"Clause",
"SystemOrProcess",
"System",
"Process",
"ObjectOwner",
"Program",
"Role",
"UserRole",
"Context",
{
"type": "BackgroundTask",
"terms": {
"property_name": "modified_by",
"value": "$current_user"
},
"condition": "is"
},
]
}
| {
"content_hash": "f84afdadda7bfe436810e20f1454b598",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 71,
"avg_line_length": 21.91703056768559,
"alnum_prop": 0.42657899980075714,
"repo_name": "AleksNeStu/ggrc-core",
"id": "495a6dd8a57b24a717ea34b69b9c4658eefcf9e3",
"size": "5132",
"binary": false,
"copies": "1",
"ref": "refs/heads/release/0.10-Raspberry",
"path": "src/ggrc_basic_permissions/roles/Editor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "221201"
},
{
"name": "HTML",
"bytes": "1055542"
},
{
"name": "JavaScript",
"bytes": "1872353"
},
{
"name": "Makefile",
"bytes": "7044"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2700938"
},
{
"name": "Shell",
"bytes": "31273"
}
],
"symlink_target": ""
} |
from glob import glob
from os import path
import argparse
import logging
import sys
log = logging.getLogger("bsdploy")
# register our own library and roles paths into ansible
bsdploy_path = path.abspath(path.dirname(__file__))
ansible_paths = dict(
    roles=[path.join(bsdploy_path, 'roles')],
    library=[path.join(bsdploy_path, 'library')])
# Convenience defaults applied to VirtualBox instances in augment_instance().
virtualbox_instance_defaults = {
    'vm-ostype': 'FreeBSD_64',
    'vm-memory': '2048',
    'vm-accelerate3d': 'off',
    'vm-acpi': 'on',
    'vm-rtcuseutc': 'on',
    'vm-boot1': 'disk',
    'vm-boot2': 'dvd',
    'vm-nic1': 'hostonly',
    'vm-hostonlyadapter1': 'vboxnet0',
}
# Default host-only interface address for the vboxnet0 network.
virtualbox_hostonlyif_defaults = {
    'ip': '192.168.56.1',
}
# Default DHCP server settings for the vboxnet0 network.
virtualbox_dhcpserver_defaults = {
    'ip': '192.168.56.2',
    'netmask': '255.255.255.0',
    'lowerip': '192.168.56.100',
    'upperip': '192.168.56.254',
}
# Default boot disk size (MB) for the implicit 'defaultdisk'.
virtualbox_bootdisk_defaults = {
    'size': '102400',
}
# Defaults for all ez-* (ezjail) instances.
ez_instance_defaults = {
    'ansible_python_interpreter': '/usr/local/bin/python2.7',
    'fabric-shell': '/bin/sh -c',
}
class PloyBootstrapCmd(object):
    """`ploy bootstrap` subcommand: installs a base system on a jailhost
    that has been booted into an installer/rescue environment."""

    def __init__(self, ctrl):
        self.ctrl = ctrl

    def __call__(self, argv, help):
        """Bootstrap a jailhost that's been booted into MFSBsd."""
        parser = argparse.ArgumentParser(
            prog="%s bootstrap" % self.ctrl.progname,
            description=help)
        masters = dict((master.id, master) for master in self.ctrl.get_masters('ezjail_admin'))
        parser.add_argument(
            "master",
            # With exactly one master the positional is optional and defaults
            # to that master; otherwise it must be given explicitly.
            nargs='?' if len(masters) == 1 else 1,
            metavar="master",
            help="Name of the jailhost from the config.",
            choices=masters,
            default=list(masters.keys())[0] if len(masters) == 1 else None)
        parser.add_argument(
            "-y", "--yes", action="store_true",
            help="Answer yes to all questions.")
        parser.add_argument(
            "-p", "--http-proxy",
            help="Use http proxy for bootstrapping and pkg installation")
        args = parser.parse_args(argv)
        # argparse yields a string for nargs='?' but a 1-element list for nargs=1
        master = args.master if len(masters) == 1 else args.master[0]
        instance = self.ctrl.instances[master]
        instance.config.setdefault('ssh-timeout', 90)
        instance.hooks.before_bsdploy_bootstrap(instance)
        bootstrap_args = {'bootstrap-yes': args.yes}
        if args.http_proxy:
            bootstrap_args['http_proxy'] = args.http_proxy
        instance.do('bootstrap', **bootstrap_args)
        instance.hooks.after_bsdploy_bootstrap(instance)
def get_bootstrap_path(instance):
    """Return the directory holding bootstrap files for *instance*.

    Uses the instance's 'bootstrap-files' setting (relative to the ploy
    config directory) when set; otherwise falls back to
    <playbooks>/<uid>/bootstrap-files.
    """
    from ploy_ansible import get_playbooks_directory
    custom_path = instance.config.get('bootstrap-files')
    main_config = instance.master.main_config
    if custom_path is not None:
        return path.join(main_config.path, custom_path)
    playbooks_directory = get_playbooks_directory(main_config)
    return path.join(playbooks_directory, instance.uid, 'bootstrap-files')
def get_ssh_key_paths(instance):
    """Return absolute paths of all public SSH host keys found in the
    instance's bootstrap-files directory."""
    bootstrap_path = get_bootstrap_path(instance)
    glob_path = path.join(bootstrap_path, 'ssh_host*_key.pub')
    # comprehension replaces the manual append loop; same order as glob()
    return [path.abspath(ssh_key) for ssh_key in glob(glob_path)]
def augment_instance(instance):
    """Ploy plugin hook: inject bsdploy's convenience defaults into an
    instance's configuration (VirtualBox settings, fabfile discovery,
    default ansible roles, jail startup script/flavour)."""
    from ploy_ansible import get_playbooks_directory
    from ploy_ansible import has_playbook
    from ploy.config import ConfigSection
    main_config = instance.master.main_config
    # provide virtualbox specific convenience defaults:
    if instance.master.sectiongroupname == ('vb-instance'):
        # default values for virtualbox instance
        for key, value in virtualbox_instance_defaults.items():
            instance.config.setdefault(key, value)
        # default hostonly interface
        hostonlyif = main_config.setdefault('vb-hostonlyif', ConfigSection())
        vboxnet0 = hostonlyif.setdefault('vboxnet0', ConfigSection())
        for key, value in virtualbox_hostonlyif_defaults.items():
            vboxnet0.setdefault(key, value)
        # default dhcp server
        dhcpserver = main_config.setdefault('vb-dhcpserver', ConfigSection())
        vboxnet0 = dhcpserver.setdefault('vboxnet0', ConfigSection())
        for key, value in virtualbox_dhcpserver_defaults.items():
            vboxnet0.setdefault(key, value)
        # default virtual disk
        if 'vb-disk:defaultdisk' in instance.config.get('storage', {}):
            disks = main_config.setdefault('vb-disk', ConfigSection())
            defaultdisk = disks.setdefault('defaultdisk', ConfigSection())
            for key, value in virtualbox_bootdisk_defaults.items():
                defaultdisk.setdefault(key, value)
    # everything below only applies to ezjail-managed instances
    if not instance.master.sectiongroupname.startswith('ez-'):
        return
    for key, value in ez_instance_defaults.items():
        instance.config.setdefault(key, value)
    # look for a per-instance fabfile (by uid first, then by plain id)
    if 'fabfile' not in instance.config:
        playbooks_directory = get_playbooks_directory(main_config)
        fabfile = path.join(playbooks_directory, instance.uid, 'fabfile.py')
        if path.exists(fabfile):
            instance.config['fabfile'] = fabfile
        else:
            fabfile = path.join(playbooks_directory, instance.id, 'fabfile.py')
            if path.exists(fabfile):
                instance.config['fabfile'] = fabfile
    if instance.master.instance is instance:
        # for hosts
        if 'fabfile' not in instance.config:
            bootstrap_type = instance.config.get('bootstrap', 'mfsbsd')
            fabfile = path.join(bsdploy_path, 'fabfile_%s.py' % bootstrap_type)
            instance.config['fabfile'] = fabfile
        if not path.exists(instance.config['fabfile']):
            log.error("The fabfile '%s' for instance '%s' doesn't exist." % (
                instance.config['fabfile'], instance.uid))
            sys.exit(1)
        if not has_playbook(instance):
            instance.config['roles'] = 'jails_host'
        if 'ssh-host-keys' not in instance.config:
            key_paths = get_ssh_key_paths(instance)
            instance.config['ssh-host-keys'] = "\n".join(key_paths)
    else:
        # for jails
        instance.config.setdefault('startup_script', path.join(
            bsdploy_path, 'startup-ansible-jail.sh'))
        instance.config.setdefault('flavour', 'bsdploy_base')
def get_commands(ctrl):
    """Ploy plugin hook: expose the `bootstrap` subcommand."""
    return [('bootstrap', PloyBootstrapCmd(ctrl))]
# Entry point consumed by ploy's plugin machinery.
plugin = dict(
    augment_instance=augment_instance,
    get_commands=get_commands)
| {
"content_hash": "d95a8b1e7378970b134a337907d16c3e",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 95,
"avg_line_length": 35.3475935828877,
"alnum_prop": 0.6355521936459909,
"repo_name": "ployground/bsdploy",
"id": "9176befb89cc9c06ead6ff48e2fac5ecdc1c5d41",
"size": "6610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bsdploy/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "436"
},
{
"name": "Nix",
"bytes": "212"
},
{
"name": "Python",
"bytes": "113675"
},
{
"name": "Shell",
"bytes": "489"
}
],
"symlink_target": ""
} |
"""Tests for ljspeech dataset module."""
from tensorflow_datasets import testing
from tensorflow_datasets.audio import ljspeech
class LJSpeechTest(testing.DatasetBuilderTestCase):
  # Smoke test for the LJSpeech builder against checked-in dummy data.
  DATASET_CLASS = ljspeech.Ljspeech
  # Expected number of examples per split in the dummy data.
  SPLITS = {
      "train": 2,
  }
# Allow running this test module directly via TFDS' test main.
if __name__ == "__main__":
  testing.test_main()
| {
"content_hash": "fa2fcbe4bec6df7b6a9e751917392f5d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 51,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.7081967213114754,
"repo_name": "tensorflow/datasets",
"id": "8b49fb2c502c5e44b293bf9a31f17a8350b7a2e1",
"size": "917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_datasets/audio/ljspeech_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "728"
},
{
"name": "JavaScript",
"bytes": "13369"
},
{
"name": "NewLisp",
"bytes": "13940"
},
{
"name": "Perl",
"bytes": "520"
},
{
"name": "Python",
"bytes": "5398856"
},
{
"name": "Roff",
"bytes": "22095"
},
{
"name": "Ruby",
"bytes": "25669"
},
{
"name": "Shell",
"bytes": "3895"
},
{
"name": "Smalltalk",
"bytes": "20604"
},
{
"name": "TeX",
"bytes": "759"
}
],
"symlink_target": ""
} |
# gorun watch configuration: (directory to watch, command to run on change).
DIRECTORIES = (
    ('assets', 'make public'),
    ('content', 'make html'),
)
| {
"content_hash": "7d4f1d5fe096f44c00cb205287e0c2ea",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 30,
"avg_line_length": 19.75,
"alnum_prop": 0.5316455696202531,
"repo_name": "reusine/bababille.com",
"id": "38213b95d4966ab21839bcc068edacfddfd18589",
"size": "79",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gorun_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13611"
},
{
"name": "HTML",
"bytes": "12406"
},
{
"name": "JavaScript",
"bytes": "90"
},
{
"name": "Makefile",
"bytes": "3089"
},
{
"name": "Python",
"bytes": "1463"
},
{
"name": "Ruby",
"bytes": "41"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.contrib.admin.sites import AdminSite
from bejmy.users.models import User
from bejmy.users.admin import UserAdmin
class MockRequest:
    """Bare request stand-in passed to ModelAdmin methods in tests."""
# Commented-out superuser mock retained for future permission tests:
# class MockSuperUser:
#     def has_perm(self, perm):
#         return True
# Shared fake request passed to ModelAdmin.get_fieldsets below.
request = MockRequest()
# request.user = MockSuperUser()
class UserAdminTests(TestCase):
    """Tests for the custom UserAdmin."""

    def setUp(self):
        self.user = User(username="testuser", email="test@user.com")
        self.site = AdminSite()

    def test_get_fieldsets_has_extra_fieldset(self):
        admin_instance = UserAdmin(User, self.site)
        fieldsets = admin_instance.get_fieldsets(request)
        self.assertTrue(admin_instance.fieldset_extra in fieldsets)
| {
"content_hash": "8b7cad4e40358dc7d5e56ee560e919f6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 69,
"avg_line_length": 21.3125,
"alnum_prop": 0.6642228739002932,
"repo_name": "bejmy/backend",
"id": "6f2c768ec57e912474c717e9bcc333af78603852",
"size": "682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bejmy/users/tests/test_admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8830"
},
{
"name": "Python",
"bytes": "80982"
}
],
"symlink_target": ""
} |
import re
from robot.utils import NormalizedDict
from .criticality import Criticality
from .stats import TagStat, CombinedTagStat
from .tags import TagPatterns
class TagStatistics(object):
    """Container for tag statistics."""

    def __init__(self, combined_stats):
        #: Dictionary, where key is the name of the tag as a string and value
        #: is an instance of :class:`~robot.model.stats.TagStat`.
        self.tags = NormalizedDict(ignore=['_'])
        #: List of :class:`~robot.model.stats.CombinedTagStat` objects.
        self.combined = combined_stats

    def visit(self, visitor):
        visitor.visit_tag_statistics(self)

    def __iter__(self):
        # combined stats are sorted together with the per-tag stats
        all_stats = list(self.tags.values()) + self.combined
        return iter(sorted(all_stats))
class TagStatisticsBuilder(object):
    """Builds TagStatistics from tests, honoring include/exclude patterns."""

    def __init__(self, criticality=None, included=None, excluded=None,
                 combined=None, docs=None, links=None):
        self._included = TagPatterns(included)
        self._excluded = TagPatterns(excluded)
        self._info = TagStatInfo(criticality, docs, links)
        self.stats = TagStatistics(self._info.get_combined_stats(combined))

    def add_test(self, test):
        """Add *test* to both per-tag and combined statistics."""
        self._add_tags_to_statistics(test)
        self._add_to_combined_statistics(test)

    def _add_tags_to_statistics(self, test):
        for tag in test.tags:
            if not self._is_included(tag):
                continue
            if tag not in self.stats.tags:
                self.stats.tags[tag] = self._info.get_stat(tag)
            self.stats.tags[tag].add_test(test)

    def _is_included(self, tag):
        # an empty include list means "include everything"; excludes always win
        if self._included and not self._included.match(tag):
            return False
        return not self._excluded.match(tag)

    def _add_to_combined_statistics(self, test):
        for combined_stat in self.stats.combined:
            if combined_stat.match(test.tags):
                combined_stat.add_test(test)
class TagStatInfo(object):
    """Factory for TagStat/CombinedTagStat carrying docs, links, criticality."""

    def __init__(self, criticality=None, docs=None, links=None):
        self._criticality = criticality or Criticality()
        self._docs = [TagStatDoc(*doc) for doc in docs or []]
        self._links = [TagStatLink(*link) for link in links or []]

    def get_stat(self, tag):
        return TagStat(tag, self.get_doc(tag), self.get_links(tag),
                       self._criticality.tag_is_critical(tag),
                       self._criticality.tag_is_non_critical(tag))

    def get_combined_stats(self, combined=None):
        return [self.get_combined_stat(*comb) for comb in combined or []]

    def get_combined_stat(self, pattern, name=None):
        if not name:
            name = pattern
        return CombinedTagStat(pattern, name, self.get_doc(name),
                               self.get_links(name))

    def get_doc(self, tag):
        matching_docs = (doc.text for doc in self._docs if doc.match(tag))
        return ' & '.join(matching_docs)

    def get_links(self, tag):
        return [link.get_link(tag) for link in self._links if link.match(tag)]
class TagStatDoc(object):
    """Documentation text attached to tags matching a pattern."""

    def __init__(self, pattern, doc):
        self._matcher = TagPatterns(pattern)
        self.text = doc

    def match(self, tag):
        """True if *tag* matches the configured pattern."""
        return self._matcher.match(tag)
class TagStatLink(object):
    """External link attached to tags matching a simple glob pattern.

    The pattern may contain ``*`` (matches anything) and runs of ``?``
    (match exactly that many characters); each wildcard becomes a capture
    group usable in the link and title as ``%1``, ``%2``, ...
    """
    # Raw string: '(\*|\?+)' previously relied on invalid escape sequences
    # passing through unchanged, a DeprecationWarning since Python 3.6.
    _match_pattern_tokenizer = re.compile(r'(\*|\?+)')

    def __init__(self, pattern, link, title):
        self._regexp = self._get_match_regexp(pattern)
        self._link = link
        self._title = title.replace('_', ' ')

    def match(self, tag):
        return self._regexp.match(tag) is not None

    def get_link(self, tag):
        """Return ``(link, title)`` for *tag*, or None if it does not match."""
        match = self._regexp.match(tag)
        if not match:
            return None
        return self._replace_groups(self._link, self._title, match)

    def _replace_groups(self, link, title, match):
        # %1, %2, ... refer to the wildcard capture groups in order
        for index, group in enumerate(match.groups(), start=1):
            placeholder = '%%%d' % index
            link = link.replace(placeholder, group)
            title = title.replace(placeholder, group)
        return link, title

    def _get_match_regexp(self, pattern):
        regexp = '^%s$' % ''.join(self._yield_match_pattern(pattern))
        return re.compile(regexp, re.IGNORECASE)

    def _yield_match_pattern(self, pattern):
        for token in self._match_pattern_tokenizer.split(pattern):
            if token.startswith('?'):
                yield '(%s)' % ('.' * len(token))
            elif token == '*':
                yield '(.*)'
            else:
                yield re.escape(token)
| {
"content_hash": "f918ebf4452904a5f2c5e93ff770ce2c",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 78,
"avg_line_length": 33.86363636363637,
"alnum_prop": 0.602013422818792,
"repo_name": "synsun/robotframework",
"id": "e50439c0ea053fc406127bb04eba57d8804f6ebe",
"size": "5078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robot/model/tagstatistics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "245"
},
{
"name": "CSS",
"bytes": "23490"
},
{
"name": "HTML",
"bytes": "140926"
},
{
"name": "Java",
"bytes": "57462"
},
{
"name": "JavaScript",
"bytes": "160787"
},
{
"name": "Python",
"bytes": "2184737"
},
{
"name": "RobotFramework",
"bytes": "2009226"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
} |
import re
import pytest
from mimesis.data import EXTENSIONS, MIME_TYPES
from mimesis.enums import FileType, MimeType
from mimesis.exceptions import NonEnumerableError
from mimesis.providers.file import File
from . import patterns
class TestFile:
    """Behavioural tests for the File provider."""

    @pytest.fixture
    def file(self):
        return File()

    def test_str(self, file):
        # The provider's repr must follow the shared provider pattern.
        assert re.match(patterns.PROVIDER_STR_REGEX, str(file))

    @pytest.mark.parametrize(
        "extension",
        [
            FileType.AUDIO,
            FileType.COMPRESSED,
            FileType.DATA,
            FileType.EXECUTABLE,
            FileType.IMAGE,
            FileType.SOURCE,
            FileType.TEXT,
            FileType.VIDEO,
        ],
    )
    def test_extension(self, file, extension):
        generated = file.extension(file_type=extension)
        assert generated in EXTENSIONS[extension.value]

    @pytest.mark.parametrize(
        "type_",
        [
            MimeType.APPLICATION,
            MimeType.AUDIO,
            MimeType.IMAGE,
            MimeType.MESSAGE,
            MimeType.TEXT,
            MimeType.VIDEO,
        ],
    )
    def test_mime_type(self, file, type_):
        mime = file.mime_type(type_=type_)
        assert mime in MIME_TYPES[type_.value]
        # A value that is not a MimeType member must be rejected.
        with pytest.raises(NonEnumerableError):
            file.mime_type(type_="nil")

    @pytest.mark.parametrize(
        "file_type",
        [
            FileType.AUDIO,
            FileType.COMPRESSED,
            FileType.DATA,
            FileType.EXECUTABLE,
            FileType.IMAGE,
            FileType.SOURCE,
            FileType.TEXT,
            FileType.VIDEO,
        ],
    )
    def test_file_name(self, file, file_type):
        generated = file.file_name(file_type=file_type)
        assert isinstance(generated, str)
        assert generated

    def test_size(self, file):
        rendered = file.size(10, 10)
        # The numeric part precedes the unit, separated by a space.
        number = rendered.split(" ")[0].strip()
        assert int(number) == 10
class TestSeededFile:
    """Two File providers built from the same seed must agree on every call."""

    @pytest.fixture
    def f1(self, seed):
        return File(seed=seed)

    @pytest.fixture
    def f2(self, seed):
        return File(seed=seed)

    def test_extension(self, f1, f2):
        assert f1.extension() == f2.extension()
        audio = FileType.AUDIO
        assert f1.extension(file_type=audio) == f2.extension(file_type=audio)

    def test_mime_type(self, f1, f2):
        assert f1.mime_type() == f2.mime_type()
        image = MimeType.IMAGE
        assert f1.mime_type(type_=image) == f2.mime_type(type_=image)

    def test_file_name(self, f1, f2):
        assert f1.file_name() == f2.file_name()
        source = FileType.SOURCE
        assert f1.file_name(file_type=source) == f2.file_name(file_type=source)

    def test_size(self, f1, f2):
        assert f1.size() == f2.size()
        assert f1.size(minimum=8, maximum=1024) == f2.size(minimum=8, maximum=1024)
| {
"content_hash": "138f54007c1268fe18cb9ebaa5be00b8",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 87,
"avg_line_length": 26.878504672897197,
"alnum_prop": 0.5782336578581363,
"repo_name": "lk-geimfari/elizabeth",
"id": "5171b4cf3643362cefea023cf2fc5603fab307a7",
"size": "2876",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_providers/test_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1944"
},
{
"name": "Python",
"bytes": "293974"
}
],
"symlink_target": ""
} |
'''This module will help in automating process of
execution of spider and will set appropriate setting.
You can set User Agent to random value for every
time spider will crawl, this will reduce chance of
getting banned.
'''
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
import scrapy, scraper, logging
from scraper.spiders import OptionValueSpider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
class kAutoOptionValue:
    """Callable helper that runs OptionValueSpider once for a single symbol."""

    # Logger shared by all instances of this helper.
    log = logging.getLogger(__name__)

    def __init__(self, stockName, expiries, instrumentType):
        self.stockName = stockName
        self.expiries = expiries
        self.instrumentType = instrumentType

    def __call__(self):
        # CrawlerProcess picks up the project's default Scrapy settings.
        crawler = CrawlerProcess(get_project_settings())
        # Schedule the spider with its keyword arguments.
        crawler.crawl(
            OptionValueSpider.kOptionValueSpider,
            symbol=self.stockName,
            expiries=self.expiries,
            instrumentType=self.instrumentType,
        )
        # Blocks until crawling finishes.
        crawler.start()
# Ad-hoc smoke run: crawl SBIN stock options for two expiries.
if __name__ == "__main__":
    autoOptionValue = kAutoOptionValue("SBIN", ["Jan2018", "Feb2018"], "OPTSTK")
    autoOptionValue()
| {
"content_hash": "e3a20d8193e0264c335d885493116c94",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 142,
"avg_line_length": 39.57142857142857,
"alnum_prop": 0.7025270758122744,
"repo_name": "puchchi/stock_scraper_latest",
"id": "5e6af6111651027d705d351c1bf61933e7686478",
"size": "1385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scraper/autoOptionValue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1907"
},
{
"name": "Python",
"bytes": "89046"
}
],
"symlink_target": ""
} |
"""Unittests for pushimage.py"""
from __future__ import print_function
import mock
import os
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import gs
from chromite.lib import gs_unittest
from chromite.lib import osutils
from chromite.lib import partial_mock
from chromite.lib import signing
from chromite.scripts import pushimage
class InputInsnsTest(cros_test_lib.MockTestCase):
  """Tests for InputInsns"""

  def setUp(self):
    # All Google Storage access in these tests goes through the mocked
    # context so nothing touches the network.
    self.StartPatcher(gs_unittest.GSContextMock())

  def testBasic(self):
    """Simple smoke test"""
    insns = pushimage.InputInsns('test.board')
    insns.GetInsnFile('recovery')
    # Expected values come from the checked-in test.board instruction files.
    self.assertEqual(insns.GetChannels(), ['dev', 'canary'])
    self.assertEqual(insns.GetKeysets(), ['stumpy-mp-v3'])

  def testGetInsnFile(self):
    """Verify various inputs result in right insns path"""
    # (image_type, expected basename without the .instructions suffix).
    # An all-caps type is passed through verbatim.
    testdata = (
        ('UPPER_CAPS', 'UPPER_CAPS'),
        ('recovery', 'test.board'),
        ('firmware', 'test.board.firmware'),
        ('factory', 'test.board.factory'),
    )
    insns = pushimage.InputInsns('test.board')
    for image_type, filename in testdata:
      ret = insns.GetInsnFile(image_type)
      self.assertEqual(os.path.basename(ret), '%s.instructions' % (filename))

  def testSplitCfgField(self):
    """Verify splitting behavior behaves"""
    # Fields may be separated by spaces, commas, tabs, or a mix thereof.
    testdata = (
        ('', []),
        ('a b c', ['a', 'b', 'c']),
        ('a, b', ['a', 'b']),
        ('a,b', ['a', 'b']),
        ('a,\tb', ['a', 'b']),
        ('a\tb', ['a', 'b']),
    )
    for val, exp in testdata:
      ret = pushimage.InputInsns.SplitCfgField(val)
      self.assertEqual(ret, exp)

  def testOutputInsnsBasic(self):
    """Verify output instructions are sane"""
    exp_content = """[insns]
keyset = stumpy-mp-v3
channel = dev canary
chromeos_shell = false
ensure_no_password = true
firmware_update = true
security_checks = true
create_nplusone = true

[general]
"""
    insns = pushimage.InputInsns('test.board')
    # Capture the written file content instead of touching the filesystem.
    m = self.PatchObject(osutils, 'WriteFile')
    insns.OutputInsns('recovery', '/bogus', {}, {})
    self.assertTrue(m.called)
    # Second positional arg of the first WriteFile call is the content.
    content = m.call_args_list[0][0][1]
    self.assertEqual(content.rstrip(), exp_content.rstrip())

  def testOutputInsnsReplacements(self):
    """Verify output instructions can be updated"""
    exp_content = """[insns]
keyset = batman
channel = dev
chromeos_shell = false
ensure_no_password = true
firmware_update = true
security_checks = true
create_nplusone = true

[general]
board = board
config_board = test.board
"""
    # Caller-supplied sections override/extend the defaults.
    sect_insns = {
        'channel': 'dev',
        'keyset': 'batman',
    }
    sect_general = {
        'config_board': 'test.board',
        'board': 'board',
    }
    insns = pushimage.InputInsns('test.board')
    m = self.PatchObject(osutils, 'WriteFile')
    insns.OutputInsns('recovery', '/a/file', sect_insns, sect_general)
    self.assertTrue(m.called)
    content = m.call_args_list[0][0][1]
    self.assertEqual(content.rstrip(), exp_content.rstrip())
class MarkImageToBeSignedTest(gs_unittest.AbstractGSContextTest):
  """Tests for MarkImageToBeSigned()"""

  def setUp(self):
    # Minor optimization -- we call this for logging purposes in the main
    # code, but don't really care about it for testing. It just slows us.
    self.PatchObject(cros_build_lib, 'MachineDetails', return_value='1234\n')

  def testBasic(self):
    """Simple smoke test"""
    tbs_base = 'gs://some-bucket'
    insns_path = 'chan/board/ver/file.instructions'
    # Priority 90 is encoded into the tobesigned filename.
    tbs_file = '%s/tobesigned/90,chan,board,ver,file.instructions' % tbs_base
    ret = pushimage.MarkImageToBeSigned(self.ctx, tbs_base, insns_path, 90)
    self.assertEqual(ret, tbs_file)

  def testPriority(self):
    """Verify diff priority values get used correctly"""
    # Priorities are always zero-padded to two digits.
    for prio, sprio in ((0, '00'), (9, '09'), (35, '35'), (99, '99')):
      ret = pushimage.MarkImageToBeSigned(self.ctx, '', '', prio)
      # assertEquals is a deprecated alias (removed in Python 3.12); use
      # assertEqual for consistency with the rest of this file.
      self.assertEqual(ret, '/tobesigned/%s,' % sprio)

  def testBadPriority(self):
    """Verify we reject bad priority values"""
    # Only 0-99 inclusive is accepted.
    for prio in (-10, -1, 100, 91239):
      self.assertRaises(ValueError, pushimage.MarkImageToBeSigned, self.ctx,
                        '', '', prio)

  def testTbsUpload(self):
    """Make sure we actually try to upload the file"""
    pushimage.MarkImageToBeSigned(self.ctx, '', '', 50)
    self.gs_mock.assertCommandContains(['cp', '--'])
class PushImageTests(gs_unittest.AbstractGSContextTest):
  """Tests for PushImage()"""

  def setUp(self):
    # Stub out the tobesigned marker so no GS uploads are attempted.
    self.mark_mock = self.PatchObject(pushimage, 'MarkImageToBeSigned')

  def testBasic(self):
    """Simple smoke test"""
    # Channel -> list of instruction-file URLs that PushImage should return.
    EXPECTED = {
        'canary': [
            ('gs://chromeos-releases/canary-channel/test.board-hi/5126.0.0/'
             'ChromeOS-recovery-R34-5126.0.0-test.board-hi.instructions')],
        'dev': [
            ('gs://chromeos-releases/dev-channel/test.board-hi/5126.0.0/'
             'ChromeOS-recovery-R34-5126.0.0-test.board-hi.instructions')],
    }
    with mock.patch.object(gs.GSContext, 'Exists', return_value=True):
      urls = pushimage.PushImage('/src', 'test.board', 'R34-5126.0.0',
                                 profile='hi')
    self.assertEqual(urls, EXPECTED)

  def testBasicMock(self):
    """Simple smoke test in mock mode"""
    with mock.patch.object(gs.GSContext, 'Exists', return_value=True):
      pushimage.PushImage('/src', 'test.board', 'R34-5126.0.0',
                          dry_run=True, mock=True)

  def testBadVersion(self):
    """Make sure we barf on bad version strings"""
    self.assertRaises(ValueError, pushimage.PushImage, '', '', 'asdf')

  def testNoInsns(self):
    """Boards w/out insn files should get skipped"""
    urls = pushimage.PushImage('/src', 'a bad bad board', 'R34-5126.0.0')
    self.assertEqual(self.gs_mock.call_count, 0)
    self.assertEqual(urls, None)

  def testSignTypesRecovery(self):
    """Only sign the requested recovery type"""
    EXPECTED = {
        'canary': [
            ('gs://chromeos-releases/canary-channel/test.board/5126.0.0/'
             'ChromeOS-recovery-R34-5126.0.0-test.board.instructions')],
        'dev': [
            ('gs://chromeos-releases/dev-channel/test.board/5126.0.0/'
             'ChromeOS-recovery-R34-5126.0.0-test.board.instructions')],
    }
    urls = pushimage.PushImage('/src', 'test.board', 'R34-5126.0.0',
                               sign_types=['recovery'])
    # NOTE(review): the exact call count is tied to how many GS probes the
    # implementation makes for the test data -- update if test data changes.
    self.assertEqual(self.gs_mock.call_count, 18)
    self.assertTrue(self.mark_mock.called)
    self.assertEqual(urls, EXPECTED)

  def testSignTypesNone(self):
    """Verify nothing is signed when we request an unavailable type"""
    urls = pushimage.PushImage('/src', 'test.board', 'R34-5126.0.0',
                               sign_types=['nononononono'])
    self.assertEqual(self.gs_mock.call_count, 16)
    self.assertFalse(self.mark_mock.called)
    self.assertEqual(urls, {})

  def testGsError(self):
    """Verify random GS errors don't make us blow up entirely"""
    # Any 'stat' invocation now fails; PushImage must wrap it in PushError.
    self.gs_mock.AddCmdResult(partial_mock.In('stat'), returncode=1,
                              output='gobblety gook\n')
    with cros_test_lib.LoggingCapturer('chromite'):
      self.assertRaises(pushimage.PushError, pushimage.PushImage, '/src',
                        'test.board', 'R34-5126.0.0')
class MainTests(cros_test_lib.MockTestCase):
  """Tests for main()"""

  def setUp(self):
    # Stub PushImage itself; these tests only exercise the CLI wiring.
    self.PatchObject(pushimage, 'PushImage')

  def testBasic(self):
    """Simple smoke test"""
    pushimage.main(['--board', 'test.board', '/src', '--yes'])
def main(_argv):
  """Test-runner entry point; argv is ignored."""
  # Use our local copy of insns for testing as the main one is not
  # available in the public manifest.
  signing.INPUT_INSN_DIR = signing.TEST_INPUT_INSN_DIR

  # Run the tests.
  cros_test_lib.main(level='info', module=__name__)
| {
"content_hash": "5d99b25dccdec5c5f8d555066423ae5e",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 77,
"avg_line_length": 33.376068376068375,
"alnum_prop": 0.6385403329065301,
"repo_name": "guorendong/iridium-browser-ubuntu",
"id": "76618e4613f47296713a44b5efa2e4cc8e963162",
"size": "7980",
"binary": false,
"copies": "1",
"ref": "refs/heads/ubuntu/precise",
"path": "third_party/chromite/scripts/pushimage_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "256197"
},
{
"name": "Batchfile",
"bytes": "34966"
},
{
"name": "C",
"bytes": "15445429"
},
{
"name": "C++",
"bytes": "276628399"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "867238"
},
{
"name": "Emacs Lisp",
"bytes": "3348"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "7777"
},
{
"name": "HTML",
"bytes": "20250399"
},
{
"name": "Java",
"bytes": "9950308"
},
{
"name": "JavaScript",
"bytes": "13873772"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "179129"
},
{
"name": "Objective-C",
"bytes": "1871766"
},
{
"name": "Objective-C++",
"bytes": "9674498"
},
{
"name": "PHP",
"bytes": "42038"
},
{
"name": "PLpgSQL",
"bytes": "163248"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "474121"
},
{
"name": "Python",
"bytes": "11646662"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104923"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1151673"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
import sys,os
# Map of table name -> ordered list of its column names, built from
# tab-separated "TABLE_NAME<TAB>COLUMN_NAME" rows read from stdin.
hashMap = dict()

for line in sys.stdin:
    data = line.split("\t")
    tableName = data[0]
    columnName = data[1].rstrip('\n')
    # Skip the header row emitted by the SQL client.
    if tableName == "TABLE_NAME":
        continue
    # setdefault+append is O(1) per row; the original rebuilt the whole
    # list on every column (list + [x]), which is quadratic per table.
    hashMap.setdefault(tableName, []).append(columnName)

# Print each table as "name(col1, col2, ...)"; str.join replaces the
# original manual index-tracking concatenation loop.
for key in hashMap.keys():
    print(key + "(" + ", ".join(hashMap[key]) + ")")

# NOTE(review): leading indentation was lost in extraction; these two
# trailing blank-line prints may originally have been inside the loop
# above (one blank pair per table) -- confirm against upstream before
# relying on exact output spacing.
print("")
print("")
| {
"content_hash": "a66f52769664131934e6387a4eabab5a",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 62,
"avg_line_length": 21.233333333333334,
"alnum_prop": 0.5777080062794349,
"repo_name": "hansfilipelo/tddd37",
"id": "0df7a98127efb93766d69bab5a753a2f52b14792",
"size": "661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lab1/convert_rel_model.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "661"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from Course.models import Enrollment, Assignment
# Expose the course models in the Django admin with the default ModelAdmin.
admin.site.register(Enrollment)
admin.site.register(Assignment)
"content_hash": "9f0e05e7e00ff0acefb29f7b11fdbbcd",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 48,
"avg_line_length": 24.5,
"alnum_prop": 0.8435374149659864,
"repo_name": "RedBulli/CourseDeadlines",
"id": "1886301e0a5189ee9265dbaa6313298549f210e3",
"size": "147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Course/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "158680"
},
{
"name": "JavaScript",
"bytes": "196393"
},
{
"name": "Python",
"bytes": "14339"
},
{
"name": "Scala",
"bytes": "232"
}
],
"symlink_target": ""
} |
import unittest
from flask.ext.toybox.sqlalchemy import SAModelMixin, SAModelView, SACollectionView, PaginableByNumber, QueryFiltering
from flask.ext.toybox.permissions import make_I
from flask.ext.toybox import ToyBox
from flask import Flask, g, request
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session, Session, relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey
import json
# Declarative base shared by the test models below.
Base = declarative_base()
# Permission-spec helper used in the Column/relationship ``info`` dicts.
I = make_I()
class Company(Base, SAModelMixin):
    """Minimal company model used to exercise embedded relationships."""

    __tablename__ = "test_companies"

    id = Column(Integer, primary_key=True)
    name = Column(String, info=I("r:all,w:admin+"))
    is_expected = Column(Boolean, info=I("r:all,w:admin+"))

    def __init__(self, name):
        self.name = name

    def check_instance_permissions(self, user=None):
        # Silly auth scheme: any non-empty "auth" query arg counts as
        # authenticated; otherwise the caller is anonymous.
        if request.args.get("auth", "") != "":
            return set(["authenticated"])
        return set(["anonymous"])

    def __repr__(self):
        return "<{0}: {1}>".format(self.__class__.__name__, self.name)
class User(Base, SAModelMixin):
    """User model with per-field read/write permission specs."""

    __tablename__ = "test_users"

    id = Column(Integer, primary_key=True)
    username = Column(String, info=I("r:all,w:none"))
    fullname = Column(String, info=I("rw:all"))
    email = Column(String, info=I("rw:owner+"))
    badges = Column(Integer, default=0, info=I("r:all,w:staff+"))
    is_active = Column(Boolean, default=True, info=I("r:all,w:staff+"))
    is_staff = Column(Boolean, default=False, info=I("r:staff+,w:admin+"))
    company_id = Column(Integer, ForeignKey(Company.id), info=I("rw:none"))
    company = relationship("Company", info=I("r:all,w:none", embed_only=["name"], embed_href="/companies/{0.id}"))

    def __init__(self, username, fullname, email, **kwargs):
        """Create a user; extra keyword args set arbitrary model attributes."""
        self.username = username
        self.fullname = fullname
        self.email = email
        for attr, value in kwargs.items():
            setattr(self, attr, value)

    def check_instance_permissions(self, user=None):
        # Very silly a12n.
        auth = request.args.get("auth", "")
        if not auth:
            return set(["anonymous"])
        session = Session.object_session(self)
        user = session.query(User).filter_by(username=auth).one()
        if user.id == self.id:
            return set(["owner"])
        return set(["authenticated"])

    def __repr__(self):
        return "<{0}: {1}, {2}>".format(self.__class__.__name__,
                                        self.username, self.fullname)
class SQLAlchemyModelTestCase(unittest.TestCase):
    """End-to-end tests of SAModelView/SACollectionView over an in-memory DB."""

    def setUp(self):
        # Set up SQLAlchemy models
        engine = create_engine('sqlite:///:memory:', echo=False)
        Base.metadata.create_all(engine)
        ScopedSession = scoped_session(sessionmaker(bind=engine))

        # Create some models
        db_session = ScopedSession()
        companies = [Company("The Spanish Inquisition"), Company("The Vikings")]
        for company in companies:
            db_session.add(company)
        db_session.add(User("spam", "Spam", "spam@users.example.org", badges=1, is_staff=True, company=companies[0]))
        db_session.add(User("ham", "Ham", "ham@users.example.org", is_active=False, company=companies[1]))
        db_session.add(User("eggs", "Eggs", "eggs@users.example.org", badges=2, is_staff=True))
        db_session.commit()
        self.db_session = db_session

        # Set up Flask
        app = Flask(__name__)
        app.debug = True
        self.real_app = app

        # Set up ToyBox
        toybox = ToyBox(app)

        # Views are defined inside setUp so they can close over db_session.
        class UserView(SAModelView):
            model = User
            query_class = db_session.query

            def save_object(self, obj):
                # In a real code, this should be done by a middleware/wrapper.
                # However, this is a test, so we simplify things a bit.
                # Don't commit from here in production!
                db_session.commit()
        app.add_url_rule("/users/<username>", view_func=UserView.as_view("user"))

        class UsersView(PaginableByNumber, QueryFiltering, SACollectionView):
            model = User
            query_class = db_session.query
            order_by = "username"
        app.add_url_rule("/users/", view_func=UsersView.as_view("users"))

        self.app = app.test_client()

    def test_get(self):
        # Fetch a single user and verify fields plus the embedded company.
        response = self.app.get("/users/spam", headers={"Accept": "application/json"})
        self.assertEqual(response.status_code, 200, response.status)
        reference = {"username": "spam", "fullname": "Spam"}
        data = json.loads(response.data)
        for k, v in reference.items():
            self.assertEqual(data[k], v)
        # Company is embedded with an href plus only its "name" field.
        self.assertTrue("company" in data)
        self.assertTrue("href" in data["company"])
        self.assertTrue(data["company"]["href"].startswith("/companies/"))
        self.assertTrue("name" in data["company"])
        self.assertEqual(data["company"]["name"], "The Spanish Inquisition")
        # A conditional GET with the returned ETag must yield 304.
        etag = response.headers.get("ETag", None)
        self.assertTrue(etag is not None)
        response = self.app.get("/users/spam", headers={
            "Accept": "application/json",
            "If-None-Match": etag
        })
        self.assertEqual(response.status_code, 304, response.status)

    def test_get_collection(self):
        response = self.app.get("/users/", headers={"Accept": "application/json"})
        self.assertEqual(response.status_code, 200, response.status)
        reference = [{u"username": u"spam", u"fullname": u"Spam"},
                     {u"username": u"ham", u"fullname": u"Ham"},
                     {u"username": u"eggs", u"fullname": u"Eggs"}]
        data = json.loads(response.data)
        # Every reference user must appear somewhere in the collection.
        for ref_item in reference:
            self.assertTrue(any(all(data_item[k] == v for k, v in ref_item.items()) for data_item in data), "Not found: {0!r}".format(ref_item))
        # Anonymous requests must not see the owner-only email field.
        for data_item in data:
            self.assertTrue(data_item.get("email", None) is None)
        etag = response.headers.get("ETag", None)
        self.assertTrue(etag is not None)
        response = self.app.get("/users/", headers={
            "Accept": "application/json",
            "If-None-Match": etag
        })
        self.assertEqual(response.status_code, 304, response.status)

    def test_collection_pagination(self):
        # Range header requests a slice; server answers 206 + Content-Range.
        response = self.app.get("/users/", headers={"Accept": "application/json", "Range": "items=1-10"})
        self.assertEqual(response.status_code, 206, response.status)
        content_range = response.headers.get("Content-Range", "")
        self.assertTrue(content_range.startswith("items 1-2/"), content_range)
        data = json.loads(response.data)
        # order_by = "username": eggs is item 0, so the slice starts at ham.
        usernames = [data_item.get("username", None) for data_item in data]
        self.assertEqual(usernames, ["ham", "spam"])

    def test_collection_filtering(self):
        # (query string, expected usernames) pairs covering operators,
        # repeated params, nulls and invalid values.
        cases = [
            # This also tests whenever is_admin will be ignored, as it is not readable.
            ("is_staff=true&is_admin=true&spam=spam", set(["spam", "eggs"])),
            ("badges=lt:2", set(["ham", "spam"])),
            ("badges=eq:0&is_active=false", set(["ham"])),
            ("badges=ne:0&is_active=false", set()),
            ("is_staff=true&is_staff=false", set()),
            ("badges=ne:null", set(["spam", "ham", "eggs"])),
            ("is_staff=\"true\"", set()), # XXX: Should it return empty set or error?
            ("is_staff=invalid", set())
        ]
        for query, expected in cases:
            response = self.app.get("/users/?" + query, headers={"Accept": "application/json"})
            self.assertEqual(response.status_code, 200, response.status)
            data = json.loads(response.data)
            usernames = set([data_item.get("username", None) for data_item in data])
            self.assertEqual(usernames, expected)

    def test_collection_is_readonly(self):
        # Mutating verbs on the collection endpoint must be rejected.
        for method in ("put", "patch", "delete"):
            response = getattr(self.app, method)("/users/", headers={"Accept": "application/json"})
            self.assertEquals(response.status_code, 405, "Method {0} yielded {1}".format(method.upper(), response.status))

    def test_get_collection_permissions(self):
        # Only the authenticated owner may see their own email.
        for username in ("", "spam", "ham", "eggs"):
            response = self.app.get("/users/?auth=" + username, headers={"Accept": "application/json"})
            self.assertEqual(response.status_code, 200, response.status)
            data = json.loads(response.data)
            for data_item in data:
                if data_item["username"] == username:
                    self.assertEqual(data_item.get("email", None), username + "@users.example.org")
                else:
                    self.assertTrue(data_item.get("email", None) is None)

    def test_patch(self):
        response = self.app.get("/users/eggs",
                                headers={"Accept": "application/json"})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.data)
        self.assertTrue("username" in data)
        self.assertTrue("fullname" in data)
        self.assertEqual(data["username"], "eggs")
        etag = response.headers.get("ETag", None)
        self.assertTrue(etag is not None)
        # PATCH without If-Match must fail with 428 Precondition Required.
        response = self.app.patch(
            "/users/eggs",
            headers={"Accept": "application/json",},
            data=json.dumps({"fullname": "Python Eggs"}),
            content_type="application/json"
        )
        self.assertEqual(response.status_code, 428, response.status)
        # With the correct ETag the update succeeds (204 No Content).
        response = self.app.patch(
            "/users/eggs",
            headers={
                "Accept": "application/json",
                "If-Match": etag
            },
            data = json.dumps({"fullname": "Python Eggs"}),
            content_type="application/json"
        )
        self.assertEqual(response.status_code, 204, response.data)
        # Re-fetch and confirm the change was persisted.
        response = self.app.get("/users/eggs",
                                headers={"Accept": "application/json"})
        self.assertEqual(response.status_code, 200, response.status)
        data = json.loads(response.data)
        self.assertTrue("username" in data)
        self.assertTrue("fullname" in data)
        self.assertEqual(data["fullname"], "Python Eggs")

    def test_patch_non_writeable(self):
        response = self.app.get("/users/eggs",
                                headers={"Accept": "application/json"})
        self.assertEqual(response.status_code, 200)
        etag = response.headers.get("ETag", None)
        self.assertTrue(etag is not None)
        # username is declared w:none, so patching it must yield 422.
        response = self.app.patch(
            "/users/eggs",
            headers={
                "Accept": "application/json",
                "If-Match": etag
            },
            data = json.dumps({"username": "eggs2"}),
            content_type="application/json"
        )
        self.assertEqual(response.status_code, 422, response.data)
        response = self.app.get("/users/eggs",
                                headers={"Accept": "application/json"})
        self.assertEqual(response.status_code, 200, response.status)
"content_hash": "54a31b5d7ed001092c50023db49825e9",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 144,
"avg_line_length": 43.17829457364341,
"alnum_prop": 0.5914721723518851,
"repo_name": "drdaeman/flask-toybox",
"id": "1cd16bcf4620a8beaf72c42185b931a975bd3e12",
"size": "11140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sqlalchemy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57890"
}
],
"symlink_target": ""
} |
class Options:
    """Bag of editor preferences: window/font/session restore and cursor blink."""

    def __init__(self, restoreWindow, restoreFont, restoreSession, blink):
        self.restoreWindow = restoreWindow
        self.restoreFont = restoreFont
        self.restoreSession = restoreSession
        self.blink = blink

    def __str__(self):
        template = ("restoreWindow={} restoreFont={} restoreSession={} "
                    "blink={}")
        return template.format(self.restoreWindow, self.restoreFont,
                               self.restoreSession, self.blink)
| {
"content_hash": "6fbf8872384582354b58f0a3494e8e7a",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 74,
"avg_line_length": 31.866666666666667,
"alnum_prop": 0.6108786610878661,
"repo_name": "nwiizo/workspace_2017",
"id": "dff9d9d4dcbd88150c99698d223c232b7b890a7f",
"size": "1096",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pipng/texteditor2/Options.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "173"
},
{
"name": "C++",
"bytes": "7105"
},
{
"name": "CSS",
"bytes": "50021"
},
{
"name": "Go",
"bytes": "112005"
},
{
"name": "HTML",
"bytes": "66435"
},
{
"name": "JavaScript",
"bytes": "73266"
},
{
"name": "Makefile",
"bytes": "1227"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "PowerShell",
"bytes": "277598"
},
{
"name": "Python",
"bytes": "11925958"
},
{
"name": "Ruby",
"bytes": "3779"
},
{
"name": "Rust",
"bytes": "1484076"
},
{
"name": "Shell",
"bytes": "86558"
}
],
"symlink_target": ""
} |
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Goodbye(object):
    """Auto-generated flatbuffers accessor for the Goodbye message table."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        """Wrap the root table of *buf* (starting at *offset*) in a Goodbye."""
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Goodbye()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsGoodbye(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    # Goodbye
    def Init(self, buf, pos):
        # Bind this accessor to the table located at *pos* in *buf*.
        self._tab = flatbuffers.table.Table(buf, pos)

    # Goodbye
    def Reason(self):
        """Return the `reason` string field, or None if absent (vtable slot 4)."""
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

    # Goodbye
    def Message(self):
        """Return the `message` string field, or None if absent (vtable slot 6)."""
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

    # Goodbye
    def Resumable(self):
        """Return the `resumable` bool field; defaults to False (vtable slot 8)."""
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False
# Generated builder helpers: the Goodbye-prefixed forms are the legacy API;
# the short names are newer aliases that simply delegate to them.
def GoodbyeStart(builder): builder.StartObject(3)  # table has 3 fields
def Start(builder):
    return GoodbyeStart(builder)
def GoodbyeAddReason(builder, reason): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(reason), 0)
def AddReason(builder, reason):
    return GoodbyeAddReason(builder, reason)
def GoodbyeAddMessage(builder, message): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(message), 0)
def AddMessage(builder, message):
    return GoodbyeAddMessage(builder, message)
def GoodbyeAddResumable(builder, resumable): builder.PrependBoolSlot(2, resumable, 0)
def AddResumable(builder, resumable):
    return GoodbyeAddResumable(builder, resumable)
def GoodbyeEnd(builder): return builder.EndObject()
def End(builder):
    return GoodbyeEnd(builder)
"content_hash": "2123dcde9953aab222b72ef6a3eb3cc7",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 139,
"avg_line_length": 36.206896551724135,
"alnum_prop": 0.6871428571428572,
"repo_name": "oberstet/autobahn-python",
"id": "e9de961913eef94aa1439545e1a69f2becffbc5b",
"size": "2190",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "autobahn/wamp/gen/wamp/proto/Goodbye.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "17063"
},
{
"name": "Jinja",
"bytes": "54604"
},
{
"name": "Makefile",
"bytes": "31543"
},
{
"name": "Python",
"bytes": "2408842"
},
{
"name": "Shell",
"bytes": "3825"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.