# --- file: django/test/client.py (repo: uranusjr/django) ---
import json
import mimetypes
import os
import re
import sys
from copy import copy
from functools import partial
from importlib import import_module
from io import BytesIO
from urllib.parse import unquote_to_bytes, urljoin, urlparse, urlsplit
from django.conf import settings
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import (
got_request_exception, request_finished, request_started,
)
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.template import TemplateDoesNotExist
from django.test import signals
from django.test.utils import ContextList
from django.urls import resolve
from django.utils.encoding import force_bytes
from django.utils.functional import SimpleLazyObject
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
__all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile(r'.*; charset=([\w\d-]+);?')
# JSON Vendor Tree spec: https://tools.ietf.org/html/rfc6838#section-3.2
JSON_CONTENT_TYPE_RE = re.compile(r'^application\/(vnd\..+\+)?json')
class RedirectCycleError(Exception):
"""The test client has been asked to follow a redirect loop."""
def __init__(self, message, last_response):
super().__init__(message)
self.last_response = last_response
self.redirect_chain = last_response.redirect_chain
class FakePayload:
"""
A wrapper around BytesIO that restricts what can be read since data from
the network can't be sought and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in real life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
raise ValueError("Unable to write a payload after it's been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
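# A minimal sketch of FakePayload's read-limit semantics (illustrative only,
# not part of this module):
#
#     payload = FakePayload(b'hello')
#     assert len(payload) == 5
#     payload.read(3)      # b'hel'; 2 readable bytes remain
#     payload.read(2)      # b'lo'; 0 readable bytes remain
#     payload.read(1)      # AssertionError: reads past the content length
#     payload.write(b'!')  # ValueError: writing after reading has started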
def closing_iterator_wrapper(iterable, close):
try:
yield from iterable
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
def conditional_content_removal(request, response):
"""
Simulate the behavior of most Web servers by removing the content of
responses for HEAD requests, 1xx, 204, and 304 responses. Ensure
compliance with RFC 7230, section 3.3.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b''
if request.method == 'HEAD':
if response.streaming:
response.streaming_content = []
else:
response.content = b''
return response
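# For example (illustrative, assuming a URL that resolves to a view returning
# a body), a HEAD request through the test client ends up with an empty body:
#
#     response = Client().head('/some-page/')
#     assert response.content == b''  # body stripped for HEAD, per RFC 7230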
class ClientHandler(BaseHandler):
"""
An HTTP handler that can be used for testing purposes. Use the WSGI
interface to compose requests, but return the raw HttpResponse object with
the originating WSGIRequest attached to its ``wsgi_request`` attribute.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super().__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._middleware_chain is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__, environ=environ)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = self.get_response(request)
# Simulate behaviors of most Web servers.
conditional_content_removal(request, response)
# Attach the originating request to the response so that it could be
# later retrieved.
response.wsgi_request = request
# Emulate a WSGI server by calling the close method on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Store templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
if 'context' not in store:
store['context'] = ContextList()
store['context'].append(copy(context))
def encode_multipart(boundary, data):
"""
Encode multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
def is_file(thing):
return hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, str) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
])
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
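# For reference, encode_multipart(BOUNDARY, {'name': 'value'}) yields these
# CRLF-joined segments (shown one per line, trailing CRLF included):
#
#     --BoUnDaRyStRiNg
#     Content-Disposition: form-data; name="name"
#
#     value
#     --BoUnDaRyStRiNg--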
def encode_file(boundary, key, file):
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# file.name might not be a string. For example, it's an int for
# tempfile.TemporaryFile().
file_has_string_name = hasattr(file, 'name') and isinstance(file.name, str)
filename = os.path.basename(file.name) if file_has_string_name else ''
if hasattr(file, 'content_type'):
content_type = file.content_type
elif filename:
content_type = mimetypes.guess_type(filename)[0]
else:
content_type = None
if content_type is None:
content_type = 'application/octet-stream'
if not filename:
filename = key
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
% (key, filename)),
to_bytes('Content-Type: %s' % content_type),
b'',
to_bytes(file.read())
]
class RequestFactory:
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, **defaults):
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': '/',
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _get_path(self, parsed):
path = parsed.path
# If there are parameters, add them
if parsed.params:
path += ";" + parsed.params
path = unquote_to_bytes(path)
# Replace the behavior where non-ASCII values in the WSGI environ are
# arbitrarily decoded with ISO-8859-1.
# Refs comment in `get_bytes_from_wsgi()`.
return path.decode('iso-8859-1')
def get(self, path, data=None, secure=False, **extra):
"""Construct a GET request."""
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('GET', path, secure=secure, **r)
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
secure=False, **extra):
"""Construct a POST request."""
data = {} if data is None else data
post_data = self._encode_data(data, content_type)
return self.generic('POST', path, post_data, content_type,
secure=secure, **extra)
def head(self, path, data=None, secure=False, **extra):
"""Construct a HEAD request."""
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('HEAD', path, secure=secure, **r)
def trace(self, path, secure=False, **extra):
"""Construct a TRACE request."""
return self.generic('TRACE', path, secure=secure, **extra)
def options(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type,
secure=secure, **extra)
def put(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a PUT request."""
return self.generic('PUT', path, data, content_type,
secure=secure, **extra)
def patch(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a PATCH request."""
return self.generic('PATCH', path, data, content_type,
secure=secure, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a DELETE request."""
return self.generic('DELETE', path, data, content_type,
secure=secure, **extra)
def generic(self, method, path, data='',
content_type='application/octet-stream', secure=False,
**extra):
"""Construct an arbitrary HTTP request."""
parsed = urlparse(str(path)) # path can be lazy
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'REQUEST_METHOD': method,
'SERVER_PORT': '443' if secure else '80',
'wsgi.url_scheme': 'https' if secure else 'http',
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': content_type,
'wsgi.input': FakePayload(data),
})
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get('QUERY_STRING'):
# WSGI requires latin-1 encoded strings. See get_path_info().
query_string = force_bytes(parsed[4]).decode('iso-8859-1')
r['QUERY_STRING'] = query_string
return self.request(**r)
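# e.g. RequestFactory().generic('GET', '/path/?q=1') leaves PATH_INFO as
# '/path/' and, because no explicit QUERY_STRING was supplied, copies 'q=1'
# out of the parsed URL into the WSGI environ.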
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super().__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""Store exceptions when they are generated by a view."""
self.exc_info = sys.exc_info()
@property
def session(self):
"""Return the current session variables."""
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if cookie:
return engine.SessionStore(cookie.value)
session = engine.SessionStore()
session.save()
self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
return session
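# Illustrative use (assuming a configured session backend): the first access
# creates and saves a session and sets the session cookie on the client.
#
#     client = Client()
#     session = client.session
#     session['flag'] = True
#     session.save()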
def request(self, **request):
"""
The master request method. Compose the environment dictionary and pass
to the handler, return the result of the handler. Assume defaults for
the query environment, which can be overridden using the arguments to
the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = partial(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
_, exc_value, _ = self.exc_info
self.exc_info = None
raise exc_value
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = partial(self._parse_json, response)
# Attach the ResolverMatch instance to the response
response.resolver_match = SimpleLazyObject(lambda: resolve(request['PATH_INFO']))
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
def get(self, path, data=None, follow=False, secure=False, **extra):
"""Request a response from the server using GET."""
response = super().get(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
follow=False, secure=False, **extra):
"""Request a response from the server using POST."""
response = super().post(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data=None, follow=False, secure=False, **extra):
"""Request a response from the server using HEAD."""
response = super().head(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Request a response from the server using OPTIONS."""
response = super().options(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a resource to the server using PUT."""
response = super().put(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a resource to the server using PATCH."""
response = super().patch(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a DELETE request to the server."""
response = super().delete(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def trace(self, path, data='', follow=False, secure=False, **extra):
"""Send a TRACE request to the server."""
response = super().trace(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
Set the Factory to appear as if it has successfully logged into a site.
Return True if login is possible; False if the provided credentials
are incorrect.
"""
from django.contrib.auth import authenticate
user = authenticate(**credentials)
if user:
self._login(user)
return True
else:
return False
def force_login(self, user, backend=None):
def get_backend():
from django.contrib.auth import load_backend
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
if hasattr(backend, 'get_user'):
return backend_path
if backend is None:
backend = get_backend()
user.backend = backend
self._login(user, backend)
def _login(self, user, backend=None):
from django.contrib.auth import login
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user, backend)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
def logout(self):
"""Log out the user by removing the cookies and session object."""
from django.contrib.auth import get_user, logout
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
if self.session:
request.session = self.session
request.user = get_user(request)
else:
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
def _parse_json(self, response, **extra):
if not hasattr(response, '_json'):
if not JSON_CONTENT_TYPE_RE.match(response.get('Content-Type')):
raise ValueError(
'Content-Type header is "{0}", not "application/json"'
.format(response.get('Content-Type'))
)
response._json = json.loads(response.content.decode(), **extra)
return response._json
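# Illustrative use (endpoint name hypothetical):
#
#     response = client.get('/api/items/')
#     data = response.json()  # parses the body when Content-Type is
#                             # application/json or a vendor type such as
#                             # application/vnd.api+json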
def _handle_redirects(self, response, **extra):
"""
Follow any redirects by requesting responses from the server using GET.
"""
response.redirect_chain = []
while response.status_code in (301, 302, 303, 307):
response_url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((response_url, response.status_code))
url = urlsplit(response_url)
if url.scheme:
extra['wsgi.url_scheme'] = url.scheme
if url.hostname:
extra['SERVER_NAME'] = url.hostname
if url.port:
extra['SERVER_PORT'] = str(url.port)
# Prepend the request path to handle relative path redirects
path = url.path
if not path.startswith('/'):
path = urljoin(response.request['PATH_INFO'], path)
response = self.get(path, QueryDict(url.query), follow=False, **extra)
response.redirect_chain = redirect_chain
if redirect_chain[-1] in redirect_chain[:-1]:
# Check that we're not redirecting to somewhere we've already
# been to, to prevent loops.
raise RedirectCycleError("Redirect loop detected.", last_response=response)
if len(redirect_chain) > 20:
# Such a lengthy chain likely also means a loop, but one with
# a growing path, changing view, or changing query argument;
# 20 is the value of "network.http.redirection-limit" from Firefox.
raise RedirectCycleError("Too many redirects.", last_response=response)
return response
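# A hedged usage sketch of follow=True (URLs hypothetical):
#
#     response = Client().get('/old/', follow=True)
#     response.redirect_chain  # e.g. [('/new/', 302)] - each hop as (url, status)
#     response.status_code     # status of the final, non-redirect response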
# --- end of django/test/client.py (uranusjr/django) ---
# --- next file: tests/pytests/integration/master/test_clear_funcs.py (saltstack/salt) ---
import logging
import os
import pathlib
import time
import attr
import pytest
import salt.channel.client
import salt.config
import salt.master
import salt.utils.files
import salt.utils.platform
import salt.utils.user
log = logging.getLogger(__name__)
@attr.s(slots=True, hash=True, frozen=True)
class UserInfo:
master_config = attr.ib(repr=False, hash=False)
username = attr.ib(init=False)
key_file = attr.ib(init=False)
key_path = attr.ib(init=False)
key = attr.ib(init=False, repr=False)
@username.default
def _default_username(self):
if not salt.utils.platform.is_windows():
return self.master_config["user"]
user = salt.utils.user.get_specific_user().replace("\\", "_")
if user.startswith("sudo_"):
user = user.split("sudo_")[-1]
return user
@key_file.default
def _default_key_file(self):
return ".{}_key".format(self.username)
@key_path.default
def _default_key_path(self):
return pathlib.Path(self.master_config["cachedir"]) / self.key_file
@key.default
def _default_key(self):
with salt.utils.files.fopen(str(self.key_path)) as keyfd:
return keyfd.read()
@pytest.fixture(scope="module")
def user_info(salt_master):
return UserInfo(salt_master.config)
@pytest.fixture(scope="module")
def client_config(salt_minion, salt_master):
opts = salt_minion.config.copy()
opts.update(
{
"id": "root",
"transport": salt_master.config["transport"],
"auth_tries": 1,
"auth_timeout": 5,
"master_ip": "127.0.0.1",
"master_port": salt_master.config["ret_port"],
"master_uri": "tcp://127.0.0.1:{}".format(salt_master.config["ret_port"]),
}
)
return opts
@pytest.fixture
def clear_channel(client_config):
with salt.channel.client.ReqChannel.factory(
client_config, crypt="clear"
) as channel:
yield channel
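# The channel this fixture yields speaks the master's "clear" (unauthenticated)
# wire protocol; the tests below drive it by sending plain dicts, e.g. (sketch):
#
#     ret = clear_channel.send({"cmd": "_prep_auth_info"}, timeout=15)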
def test_auth_info_not_allowed(clear_channel, user_info):
assert hasattr(salt.master.ClearFuncs, "_prep_auth_info")
msg = {"cmd": "_prep_auth_info"}
rets = clear_channel.send(msg, timeout=15)
ret_key = None
for ret in rets:
try:
ret_key = ret[user_info.username]
log.warning("User Key retrieved!!!:\n%s", ret)
break
except (TypeError, KeyError):
pass
assert ret_key != user_info.key, "Able to retrieve user key"
def test_pub_not_allowed(
salt_master,
clear_channel,
tmp_path,
event_listener,
salt_minion,
user_info,
caplog,
):
assert hasattr(salt.master.ClearFuncs, "_send_pub")
tempfile = tmp_path / "evil_file"
assert not tempfile.exists()
jid = "202003100000000001"
msg = {
"cmd": "_send_pub",
"fun": "file.write",
"jid": jid,
"arg": [str(tempfile), "evil contents"],
"kwargs": {"show_jid": False, "show_timeout": False},
"ret": "",
"tgt": salt_minion.id,
"tgt_type": "glob",
"user": user_info.username,
}
timeout = 60
start_time = time.time()
expected_log_message = "Requested method not exposed: _send_pub"
with caplog.at_level(logging.ERROR):
clear_channel.send(msg, timeout=15)
stop_time = start_time + timeout
seen_records = []
match_record = None
while True:
if match_record is not None:
break
if time.time() > stop_time:
pytest.fail(
"Took more than {} seconds to confirm the presence of {!r} in the"
" logs".format(timeout, expected_log_message)
)
for record in caplog.records:
if record in seen_records:
continue
seen_records.append(record)
if expected_log_message in str(record):
match_record = True
break
time.sleep(0.5)
# If we got the log message, we shouldn't get anything from the event bus
expected_tag = "salt/job/{}/*".format(jid)
event_pattern = (salt_master.id, expected_tag)
events = event_listener.get_events([event_pattern], after_time=start_time)
for event in events:
pytest.fail("This event should't have gone through: {}".format(event))
assert not tempfile.exists(), "Evil file created"
def test_clearfuncs_config(salt_master, clear_channel, user_info):
default_include_dir = pathlib.Path(salt_master.config["default_include"]).parent
good_file_path = (
pathlib.Path(salt_master.config_dir) / default_include_dir / "good.conf"
)
evil_file_path = pathlib.Path(salt_master.config_dir) / "evil.conf"
assert not good_file_path.exists()
assert not evil_file_path.exists()
# assert good behavior
good_msg = {
"key": user_info.key,
"cmd": "wheel",
"fun": "config.update_config",
"file_name": "good",
"yaml_contents": "win: true",
}
ret = clear_channel.send(good_msg, timeout=5)
assert "Wrote" in ret["data"]["return"]
assert good_file_path.exists()
good_file_path.unlink()
try:
evil_msg = {
"key": user_info.key,
"cmd": "wheel",
"fun": "config.update_config",
"file_name": "../evil",
"yaml_contents": "win: true",
}
ret = clear_channel.send(evil_msg, timeout=5)
assert not evil_file_path.exists(), "Wrote file via directory traversal"
assert ret["data"]["return"] == "Invalid path"
finally:
if evil_file_path.exists():
evil_file_path.unlink()
def test_fileroots_write(clear_channel, user_info, salt_master):
state_tree_root_dir = pathlib.Path(salt_master.config["file_roots"]["base"][0])
good_target = state_tree_root_dir / "good.txt"
target_dir = state_tree_root_dir.parent
bad_target = target_dir / "pwn.txt"
# Good behaviour
try:
good_msg = {
"key": user_info.key,
"cmd": "wheel",
"fun": "file_roots.write",
"data": "win",
"path": "good.txt",
"saltenv": "base",
}
ret = clear_channel.send(good_msg, timeout=5)
assert good_target.exists()
finally:
if good_target.exists():
good_target.unlink()
# Bad behaviour
try:
bad_msg = {
"key": user_info.key,
"cmd": "wheel",
"fun": "file_roots.write",
"data": "win",
"path": os.path.join("..", "pwn.txt"),
"saltenv": "base",
}
clear_channel.send(bad_msg, timeout=5)
assert not bad_target.exists(), "Wrote file via directory traversal"
finally:
if bad_target.exists():
bad_target.unlink()
def test_fileroots_read(clear_channel, user_info, salt_master):
state_tree_root_dir = pathlib.Path(salt_master.config["file_roots"]["base"][0])
# We can't use pathlib.Path.relative_to as it does not behave the same as os.path.relpath
# readpath = user_info.key_path.relative_to(state_tree_root_dir)
readpath = os.path.relpath(str(user_info.key_path), str(state_tree_root_dir))
relative_key_path = state_tree_root_dir / readpath
log.debug("Master root_dir: %s", salt_master.config["root_dir"])
log.debug("File Root: %s", state_tree_root_dir)
log.debug("Key Path: %s", user_info.key_path)
log.debug("Read Path: %s", readpath)
log.debug("Relative Key Path: %s", relative_key_path)
log.debug("Absolute Read Path: %s", relative_key_path.resolve())
# If this assertion fails the test may need to be re-written
assert relative_key_path.resolve() == user_info.key_path
msg = {
"key": user_info.key,
"cmd": "wheel",
"fun": "file_roots.read",
"path": readpath,
"saltenv": "base",
}
ret = clear_channel.send(msg, timeout=5)
try:
# When vulnerable this assertion will fail.
assert (
list(ret["data"]["return"][0].items())[0][1] != user_info.key
), "Read file via directory traversal"
except IndexError:
pass
# If the vulnerability is fixed, no data will be returned.
assert ret["data"]["return"] == []
def test_token(salt_master, salt_minion, clear_channel):
tokensdir = pathlib.Path(salt_master.config["cachedir"]) / "tokens"
assert tokensdir.is_dir()
msg = {
"arg": [],
"cmd": "get_token",
"token": str(pathlib.Path("..") / "minions" / salt_minion.id / "data.p"),
}
ret = clear_channel.send(msg, timeout=5)
assert "pillar" not in ret, "Read minion data via directory traversal"
# --- end of tests/pytests/integration/master/test_clear_funcs.py (saltstack/salt) ---
# --- next file: fir_irma/decorators.py (gcrahay/fir_irma_plugin) ---
from functools import wraps
from uuid import UUID
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.utils.decorators import available_attrs
from django.utils.six.moves.urllib.parse import urlparse
from django.shortcuts import resolve_url, redirect
from fir_irma.models import IrmaScan
from fir_irma.utils import process_error, ERROR_NOT_FOUND, ERROR_UNAUTHORIZED
def user_is_owner_or_privileged(login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that checks that the user is the owner of the scan or privileged,
redirecting to the log-in page if necessary. The request must have a scan_id parameter.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if request.user.is_authenticated():
if 'scan_id' in kwargs:
scan_id = UUID(kwargs.get('scan_id'))
try:
scan = IrmaScan.objects.get(irma_scan=scan_id)
except IrmaScan.DoesNotExist:
return process_error(request, error=ERROR_NOT_FOUND)
if (request.user == scan.user and request.user.has_perm('fir_irma.scan_files')) or \
request.user.has_perm('fir_irma.read_all_results'):
kwargs['scan'] = scan
return view_func(request, *args, **kwargs)
elif settings.IRMA_ANONYMOUS_SCAN and settings.IRMA_IS_STANDALONE:
if 'scan_id' in kwargs:
scan_id = UUID(kwargs.get('scan_id'))
client_ip = get_ip(request)
try:
scan = IrmaScan.objects.get(irma_scan=scan_id, client_ip=client_ip)
kwargs['scan'] = scan
return view_func(request, *args, **kwargs)
except IrmaScan.DoesNotExist:
return process_error(request, error=ERROR_NOT_FOUND)
path = request.build_absolute_uri()
resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
_wrapped_view.csrf_exempt = True
return _wrapped_view
return decorator
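# A minimal usage sketch (view name hypothetical):
#
#     @user_is_owner_or_privileged()
#     def scan_detail(request, scan_id, scan=None):
#         # 'scan' is injected by the decorator once access is granted
#         ...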
def login_and_perm_required(perm, login_url=None, unprivileged_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that checks that the user is authenticated and has permission,
redirecting to the log-in page if necessary.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if request.user.is_authenticated():
if not isinstance(perm, (list, tuple)):
perms = (perm, )
else:
perms = perm
if request.user.has_perms(perms):
return view_func(request, *args, **kwargs)
if unprivileged_url is not None:
return redirect(unprivileged_url)
return process_error(request, error=ERROR_UNAUTHORIZED)
elif settings.IRMA_ANONYMOUS_SCAN and settings.IRMA_IS_STANDALONE:
return view_func(request, *args, **kwargs)
else:
path = request.build_absolute_uri()
resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
_wrapped_view.csrf_exempt = True
return _wrapped_view
return decorator
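# A minimal usage sketch (view name hypothetical; the permission string is one
# used elsewhere in this module):
#
#     @login_and_perm_required('fir_irma.scan_files')
#     def launch_scan(request):
#         ...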
# --- end of fir_irma/decorators.py (gcrahay/fir_irma_plugin) ---
# --- next file: findaconf/blueprints/files/views.py (cuducos/findaconf) ---
from cairosvg import svg2png
from colour import Color
from flask import (abort, Blueprint, send_from_directory, render_template,
Response)
from findaconf import app
from random import choice, randrange
files = Blueprint('file_routes', __name__, static_folder='')
@files.route('/poster.png', methods=['GET'])
def poster():
# random background color
rand_rgb = tuple([(randrange(97, 160) / 255.0) for i in range(3)])
bg = Color(rgb=rand_rgb)
# get foreground
fg = Color(bg.hex)
variation = randrange(15, 60)
fg.hue = choice([variation, variation * -1])
# random alpha
alpha = randrange(4, 7) / 10.0
# create image
svg = render_template('poster.svg', bg=bg.hex, fg=fg.hex, alpha=alpha)
return Response(svg2png(bytestring=svg), mimetype='image/png')
@files.route('/favicon.ico')
def favicon():
imgs_path = app.config['SITE_STATIC'].child('favicons')
return send_from_directory(imgs_path, 'favicon.ico')
@files.route('/robots.txt')
def robots():
return send_from_directory(app.config['SITE_STATIC'], 'robots.txt')
@files.route('/assets/foundation-icons.<extension>')
@files.route('/assets/webassets-external/foundation-icons.<extension>')
def foundation_icon(extension):
bower_path = app.config['BASEDIR'].child('findaconf', 'bower')
directory = 'foundation-icon-fonts'
file_name = 'foundation-icons.{}'.format(extension)
if bower_path.child(directory, file_name).exists():
return send_from_directory(bower_path.child(directory), file_name)
else:
abort(404)
# --- end of findaconf/blueprints/files/views.py (cuducos/findaconf) ---
# --- next file: dbe/todo/migrations/0004_auto__add_project__add_tag__add_type__add_field_item_type__add_field_i.py (pythonbyexample/PBE) ---
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Project'
db.create_table('todo_project', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('project', self.gf('django.db.models.fields.CharField')(max_length=60)),
))
db.send_create_signal('todo', ['Project'])
# Adding model 'Tag'
db.create_table('todo_tag', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('tag', self.gf('django.db.models.fields.CharField')(max_length=30)),
))
db.send_create_signal('todo', ['Tag'])
# Adding model 'Type'
db.create_table('todo_type', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('type', self.gf('django.db.models.fields.CharField')(max_length=30)),
))
db.send_create_signal('todo', ['Type'])
# Adding field 'Item.type'
db.add_column('todo_item', 'type', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='items', null=True, to=orm['todo.Type']), keep_default=False)
# Adding field 'Item.project'
db.add_column('todo_item', 'project', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='items', null=True, to=orm['todo.Project']), keep_default=False)
# Adding M2M table for field tags on 'Item'
db.create_table('todo_item_tags', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('item', models.ForeignKey(orm['todo.item'], null=False)),
('tag', models.ForeignKey(orm['todo.tag'], null=False))
))
db.create_unique('todo_item_tags', ['item_id', 'tag_id'])
def backwards(self, orm):
# Deleting model 'Project'
db.delete_table('todo_project')
# Deleting model 'Tag'
db.delete_table('todo_tag')
# Deleting model 'Type'
db.delete_table('todo_type')
# Deleting field 'Item.type'
db.delete_column('todo_item', 'type_id')
# Deleting field 'Item.project'
db.delete_column('todo_item', 'project_id')
# Removing M2M table for field tags on 'Item'
db.delete_table('todo_item_tags')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'todo.datetime': {
'Meta': {'object_name': 'DateTime'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'todo.item': {
'Meta': {'object_name': 'Item'},
'created': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['todo.DateTime']", 'null': 'True', 'blank': 'True'}),
'difficulty': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'notes': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '3000', 'blank': 'True'}),
'onhold': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'progress': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'items'", 'null': 'True', 'to': "orm['todo.Project']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'items'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['todo.Tag']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'items'", 'null': 'True', 'to': "orm['todo.Type']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'items'", 'null': 'True', 'to': "orm['auth.User']"})
},
'todo.project': {
'Meta': {'object_name': 'Project'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'todo.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'todo.type': {
'Meta': {'object_name': 'Type'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['todo']
# --- end of dbe/todo/migrations/0004_auto__add_project__add_tag__add_type__add_field_item_type__add_field_i.py (pythonbyexample/PBE) ---
# --- next file: geophys_utils/_datetime_utils.py (alex-ip/geophys_utils) ---
'''
Created on 26 Oct. 2018
@author: alex
'''
from datetime import datetime
def date_string2datetime(date_string):
'''
Helper function to convert date string in one of several possible formats to a datetime object
@param date_string: date string in one of several possible formats
@return datetime object
'''
DATE_FORMAT_LIST = ['%Y%m%d', '%Y-%m-%d', '%d/%m/%y', '%d/%m/%Y']
# If there is a date_string (a start or end date from argparse), try
# datetime.strptime() with each format in DATE_FORMAT_LIST in turn.
# strptime() raises ValueError when the string doesn't match the format,
# so the first format that parses cleanly wins.
datetime_result = None
if date_string:
for format_string in DATE_FORMAT_LIST:
try:
datetime_result = datetime.strptime(date_string, format_string)
break
except ValueError:
pass
# Return the parsed datetime object, or None if no format matched.
return datetime_result
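# Illustrative behaviour (dates hypothetical):
#
#     date_string2datetime('20181026')    # datetime(2018, 10, 26, 0, 0)
#     date_string2datetime('2018-10-26')  # datetime(2018, 10, 26, 0, 0)
#     date_string2datetime('26/10/18')    # datetime(2018, 10, 26, 0, 0)
#     date_string2datetime('not a date')  # None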
# --- end of geophys_utils/_datetime_utils.py (alex-ip/geophys_utils) ---
# --- next file: boundaries/migrations/0004_auto_20180501_2222.py (hacklabr/geodjango-boundaries) ---
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('boundaries', '0003_auto_20180413_2120'),
]
operations = [
migrations.AddField(
model_name='city',
name='country',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='boundaries.Country'),
preserve_default=False,
),
migrations.AddField(
model_name='city',
name='label',
field=models.CharField(default='Cidade', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='state',
name='label',
field=models.CharField(default='Estado', max_length=255),
preserve_default=False,
),
]
# --- end of boundaries/migrations/0004_auto_20180501_2222.py (hacklabr/geodjango-boundaries) ---
# --- next file: merkle_tree.py (quiver/merkle_tree) ---
import binascii
from botocore.utils import calculate_tree_hash
import click
__version__ = '0.1'
@click.command()
@click.option('--output', type=click.Choice(['ascii', 'binary']),
default='ascii', help='output format of Merkle Tree')
@click.argument('filename', type=click.File('rb'))
def cli(filename, output):
"""calculate Merkle Tree
"""
hash_value = calculate_tree_hash(filename)
if output == 'binary':
hash_value = binascii.unhexlify(hash_value)
click.echo(hash_value)
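# A hedged usage sketch (file name hypothetical); calculate_tree_hash() is
# botocore's SHA-256 tree hash, as used by Amazon Glacier:
#
#     $ python merkle_tree.py backup.tar
#     <64-character hex SHA-256 tree hash>
#     $ python merkle_tree.py --output binary backup.tar   # raw 32-byte digest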
# --- end of merkle_tree.py (quiver/merkle_tree) ---
# --- next file: zproject/wsgi.py (amanharitsh123/zulip) ---
"""
WSGI config for zulip project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
import scripts.lib.setup_path_on_import
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
import django
django.setup() # We need to call setup to load applications.
# Because import_module does not correctly handle safe circular imports we
# need to import zerver.models first before the middleware tries to import it.
import zerver.models
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# --- end of zproject/wsgi.py (amanharitsh123/zulip) ---
# --- next file: main.unittest.py (cropleyb/pentai) ---
import __builtin__
openfiles = set()
oldfile = __builtin__.file
class newfile(oldfile):
def __init__(self, *args):
self.x = args[0]
#print "### OPENING %s ###" % str(self.x)
oldfile.__init__(self, *args)
openfiles.add(self)
def close(self):
#print "### CLOSING %s ###" % str(self.x)
oldfile.close(self)
openfiles.remove(self)
oldopen = __builtin__.open
def newopen(*args):
return newfile(*args)
__builtin__.file = newfile
__builtin__.open = newopen
def printOpenFiles():
print "### %d OPEN FILES: [%s]" % (len(openfiles), ", ".join(f.x for f in openfiles))
'''
# Use this for detailed single case testing
import unittest
import pentai.db.t_op_pos as tmod
suite = unittest.defaultTestLoader.loadTestsFromModule(tmod)
#suite = unittest.TestSuite()
#suite.addTest(tmod.O_PosPersistenceTest('test_add_omgd_to_db'))
unittest.TextTestRunner().run(suite)
'''
import pentai.t_all as t_m
t_m.main()
print "BEFORE CLOSE"
printOpenFiles()
import pentai.db.zodb_dict as z_m
z_m.close()
print "AFTER CLOSE"
printOpenFiles()
# --- end of main.unittest.py (cropleyb/pentai) ---
# --- next file: packages/python/plotly/plotly/validators/layout/ternary/aaxis/_showline.py (plotly/plotly.py) ---
import _plotly_utils.basevalidators
class ShowlineValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="showline", parent_name="layout.ternary.aaxis", **kwargs
):
super(ShowlineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs,
)
# --- end of packages/python/plotly/plotly/validators/layout/ternary/aaxis/_showline.py (plotly/plotly.py) ---
# --- next file: a Glyphs "Nudge-Move" script (its metadata record is not part of this section) ---
from __future__ import print_function, division, unicode_literals
__doc__="""
(GUI) Nudge-moves selected nodes by the values specified in the window. Vanilla required.
"""
import objc
import vanilla
import GlyphsApp
GSSteppingTextField = objc.lookUpClass("GSSteppingTextField")
class ArrowEditText (vanilla.EditText):
nsTextFieldClass = GSSteppingTextField
def _setCallback(self, callback):
super(ArrowEditText, self)._setCallback(callback)
if callback is not None and self._continuous:
self._nsObject.setContinuous_(True)
self._nsObject.setAction_(self._target.action_)
self._nsObject.setTarget_(self._target)
class ParametricEstimated( object ):
def __init__( self ):
# Window 'self.w':
edX = 40
edY = 17
txX = 20
txY = 17
slX = 200
spX = 10
spY = 10
btnY = 17
btnX = 60
windowWidth = spX*3+txX+edX+slX
windowHeight = spY*6+txY*2+btnY*4
windowWidthResize = 500
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Nudge-Move", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight ), # maximum size (for resizing)
autosaveName = "com.Tosche.Nudge-movebyNumericalValue(GUI).mainwindow" # stores last window position and size
)
# UI elements:
self.w.txX = vanilla.TextBox( (spX, spY, txX, txY), "X:", sizeStyle='small')
self.w.txY = vanilla.TextBox( (spX, spY*2+txY, txX, txY), "Y:", sizeStyle='small')
self.w.edX = ArrowEditText( (spX+txX, spY, edX, edY), "10", sizeStyle='small', callback=self.textChange)
self.w.edY = ArrowEditText( (spX+txX, spY*2+txY, edX, edY), "10", sizeStyle='small', callback=self.textChange)
self.w.slX = vanilla.Slider( (spX*2+txX+edX, spY, -spX, edY), sizeStyle='small', minValue=0, maxValue=50, value=10, callback=self.sliderChange)
self.w.slY = vanilla.Slider( (spX*2+txX+edX, spY*2+txY, -spX, edY), sizeStyle='small', minValue=0, maxValue=50, value=10, callback=self.sliderChange)
# Run Button:
self.w.tl = vanilla.SquareButton((spX, spY*3+txY*2, btnX, btnY), "↖", sizeStyle='small', callback=self.nudgeMove )
self.w.l = vanilla.SquareButton((spX, spY*4+txY*2+btnY, btnX, btnY), "←", sizeStyle='small', callback=self.nudgeMove )
self.w.dl = vanilla.SquareButton((spX, spY*5+txY*2+btnY*2, btnX, btnY), "↙", sizeStyle='small', callback=self.nudgeMove )
self.w.t = vanilla.SquareButton((spX*2+btnX, spY*3+txY*2, btnX, btnY), "↑", sizeStyle='small', callback=self.nudgeMove )
self.w.d = vanilla.SquareButton((spX*2+btnX, spY*5+txY*2+btnY*2, btnX, btnY), "↓", sizeStyle='small', callback=self.nudgeMove )
self.w.tr = vanilla.SquareButton((spX*3+btnX*2, spY*3+txY*2, btnX, btnY), "↗", sizeStyle='small', callback=self.nudgeMove )
self.w.r = vanilla.SquareButton((spX*3+btnX*2, spY*4+txY*2+btnY, btnX, btnY), "→", sizeStyle='small', callback=self.nudgeMove )
self.w.dr = vanilla.SquareButton((spX*3+btnX*2, spY*5+txY*2+btnY*2, btnX, btnY), "↘", sizeStyle='small', callback=self.nudgeMove )
self.LoadPreferences()
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def SavePreferences( self, sender ):
try:
Glyphs.defaults["com.tosche.Nudge-movebyNumericalValue(GUI).fieldX"] = self.w.edX.get()
Glyphs.defaults["com.tosche.Nudge-movebyNumericalValue(GUI).fieldY"] = self.w.edY.get()
except:
return False
return True
def LoadPreferences( self ):
try:
self.w.edX.set( Glyphs.defaults["com.tosche.Nudge-movebyNumericalValue(GUI).fieldX"] )
self.w.edY.set( Glyphs.defaults["com.tosche.Nudge-movebyNumericalValue(GUI).fieldY"] )
self.w.slX.set( int(Glyphs.defaults["com.tosche.Nudge-movebyNumericalValue(GUI).fieldX"]) )
self.w.slY.set( int(Glyphs.defaults["com.tosche.Nudge-movebyNumericalValue(GUI).fieldY"]) )
except:
return False
return True
def sliderChange(self, sender):
try:
self.w.edX.set(int(self.w.slX.get()))
self.w.edY.set(int(self.w.slY.get()))
except Exception as e:
Glyphs.showMacroWindow()
print("Nudge-Move By Numerical Value... Error (sliderChange): %s" % e)
def textChange( self, sender ):
try:
edXvalue = int(self.w.edX.get()) if self.w.edX.get() != "" else 0
self.w.slX.set(edXvalue)
edYvalue = int(self.w.edY.get()) if self.w.edY.get() != "" else 0
self.w.slY.set(edYvalue)
except Exception as e:
Glyphs.showMacroWindow()
print("Nudge-Move By Numerical Value... Error (textChange): %s" % e)
def nudge(self, onMv, off1, off2, onSt, offsetX, offsetY):
try:
# onST = starting on-curve
# onMv = moving on-curve
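		# off1/off2 are shifted by a fraction of the offset proportional to how
		# far each handle sits between the two on-curve points, so the curve
		# segment keeps its shape as onMv moves.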
distanceX = onMv.x - onSt.x
distanceX1 = onMv.x - off1.x
distanceX2 = off2.x - onSt.x
if distanceX != 0:
valueX1 = distanceX1/distanceX
valueX2 = distanceX2/distanceX
else:
valueX1 = 0
valueX2 = 0
if distanceX1 != 0:
off1.x += (1-valueX1)*offsetX
else:
off1.x += offsetX
if distanceX2 != 0:
off2.x += (valueX2)*offsetX
			distanceY = onMv.y - onSt.y
			distanceY1 = onMv.y - off1.y
			distanceY2 = off2.y - onSt.y
			if distanceY != 0:
				valueY1 = distanceY1/distanceY
				valueY2 = distanceY2/distanceY
			else:
				# guard against division by zero, mirroring the X axis above
				valueY1 = 0
				valueY2 = 0
			if distanceY1 != 0:
				off1.y += (1-valueY1)*offsetY
			else:
				off1.y += offsetY
			if distanceY2 != 0:
				off2.y += (valueY2)*offsetY
except Exception as e:
pass
# Glyphs.showMacroWindow()
# print("Nudge-move by Numerical Value Error (nudge): %s" % e)
def nudgeMove( self, sender ):
try:
if sender in [self.w.tl, self.w.l, self.w.dl]:
offsetX = -float(self.w.edX.get())
elif sender in [self.w.tr, self.w.r, self.w.dr]:
offsetX = float(self.w.edX.get())
else:
offsetX = 0.0
if sender in [self.w.tl, self.w.t, self.w.tr]:
offsetY = float(self.w.edY.get())
elif sender in [self.w.dl, self.w.d, self.w.dr]:
offsetY = -float(self.w.edY.get())
else:
offsetY = 0.0
		except:
			Glyphs.displayDialog_withTitle_("You seem to have entered a value that is not a number. Period is fine.", "Numbers only!")
			return  # bail out; offsetX/offsetY are undefined at this point
try:
f = Glyphs.font # frontmost font
f.disableUpdateInterface()
for l in f.selectedLayers:
g = l.parent
g.beginUndo()
for p in l.paths:
for n in p.nodes:
if n in l.selection:
nPrev = n.prevNode
if (nPrev != None) and (not nPrev in l.selection):
if nPrev.type == GSOFFCURVE: # if on-curve is the edge of selection
if nPrev.prevNode.type == GSOFFCURVE:
oncurveMv = n
offcurve1 = nPrev
offcurve2 = nPrev.prevNode
oncurveSt = offcurve2.prevNode
elif nPrev.prevNode.type == GSCURVE: # if off-curve is the edge of selection
oncurveMv = n.nextNode
offcurve1 = n
offcurve2 = nPrev
oncurveSt = nPrev.prevNode
							n.x -= offsetX  # the original referenced an undefined `node`; `n` matches the symmetric branch below
							n.y -= offsetY
self.nudge(oncurveMv, offcurve1, offcurve2, oncurveSt, offsetX, offsetY)
nNext = n.nextNode
if (nNext != None) and (not nNext in l.selection):
if nNext.type == GSOFFCURVE: # if on-curve is the edge of selection
if nNext.nextNode.type == GSOFFCURVE:
oncurveMv = n
offcurve1 = nNext
offcurve2 = nNext.nextNode
oncurveSt = offcurve2.nextNode
elif nNext.nextNode.type == GSCURVE: # if off-curve is the edge of selection
nPrev.x -= offsetX
nPrev.y -= offsetY
oncurveMv = nPrev
offcurve1 = n
offcurve2 = nNext
oncurveSt = nNext.nextNode
nPrev.x += offsetX
nPrev.y += offsetY
n.x -= offsetX
n.y -= offsetY
self.nudge(oncurveMv, offcurve1, offcurve2, oncurveSt, offsetX, offsetY)
n.x += offsetX
n.y += offsetY
g.endUndo()
f.enableUpdateInterface()
if not self.SavePreferences( self ):
print("Note: 'Nudge-move by Numerical Value' could not write preferences.")
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Nudge-move by Numerical Value Error: %s" % e)
ParametricEstimated()
|
{
"content_hash": "ab38e24c24ee215d8576037fd6c09697",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 151,
"avg_line_length": 36.189189189189186,
"alnum_prop": 0.6549663928304705,
"repo_name": "Tosche/Glyphs-Scripts",
"id": "d82b7a409766e92e61bf546377205e67bc9eeb43",
"size": "8119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Path/Nudge-Move by Numerical Value.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "195886"
}
],
"symlink_target": ""
}
|
from .utils import *
from .zap import *
|
{
"content_hash": "d5a5daceb4ab3bf78112aa59baf1ebaf",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 20,
"avg_line_length": 20,
"alnum_prop": 0.7,
"repo_name": "musevlt/zap",
"id": "34995e4351765aba87e30937fd1021b8f8dbcbb4",
"size": "40",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zap/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49093"
}
],
"symlink_target": ""
}
|
__author__ = 'popov.sn'
class ContactHelper:
def __init__(self, app):
self.app = app
def add_new_contact(self, contact):
wd = self.app.wd
self.go_to_new_contact_page()
self.fill_contact_form(contact)
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
def fill_contact_form(self, contact):
wd = self.app.wd
self.change_value("firstname", contact.name)
self.change_value("lastname", contact.lastname)
self.change_value("address", contact.addres)
def change_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def modify_first_contact(self, contact):
wd = self.app.wd
wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr[2]/td[8]/a/img").click()
self.fill_contact_form(contact)
wd.find_element_by_name("update").click()
def delete_first_contact(self):
wd = self.app.wd
#select first contact
# wd.find_element_by_css_selector("input[type=\"submit\"]").click()
wd.find_element_by_name("selected[]").click()
#submit "delete"
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
# wd.find_element_by_value("Delete").click()
#commit deletion
wd.switch_to_alert().accept()
def go_to_new_contact_page(self):
# adding new contact
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
    def contact_count(self):
        wd = self.app.wd
        return len(wd.find_elements_by_name("selected[]"))
    def get_contact_list(self):
        wd = self.app.wd
        contacts = []
        for c_element in wd.find_elements_by_css_selector("tr[name]"):
            # WebElement exposes its visible text via the .text property
            c_text = c_element.text
            c_id = c_element.find_element_by_name("selected[]").get_attribute("value")
            contacts.append((c_id, c_text))
        return contacts
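# Usage sketch (added, hypothetical): assumes an application fixture exposing a
# selenium webdriver as `app.wd` and a Contact model with the fields used above.
#
#   helper = ContactHelper(app)
#   helper.add_new_contact(Contact(name="Ann", lastname="Lee", addres="Oak st. 1"))
#   assert helper.contact_count() == 1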
|
{
"content_hash": "b0a9bde87dd9e27e771761f022f23ffd",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 92,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.5925242270419936,
"repo_name": "popovsn777/python_training_2",
"id": "476f661cd7ab51a807a266faddd5bd1c596778b5",
"size": "2167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/contact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11833"
}
],
"symlink_target": ""
}
|
"""
PySCeS - Python Simulator for Cellular Systems
(http://pysces.sourceforge.net)
Copyright (C) B.G. Olivier, J.M. Rohwer, J.-H.S. Hofmeyr
Stellenbosch, 2004-2015.
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa
Author: Brett G. Olivier
PySCeS is Open Source Software distributed under
the GNU GENERAL PUBLIC LICENSE (see docs/GPL)
"""
# Init file used for distutils install
|
{
"content_hash": "9b809b875295ed222dae4431c88cee79",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 56,
"avg_line_length": 31.142857142857142,
"alnum_prop": 0.731651376146789,
"repo_name": "asttra/pysces",
"id": "f7c74a54cd8649d856acedbda8484c1b3677737c",
"size": "436",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pysces/examples/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "78"
},
{
"name": "FORTRAN",
"bytes": "1182461"
},
{
"name": "Papyrus",
"bytes": "5179"
},
{
"name": "Python",
"bytes": "1846699"
}
],
"symlink_target": ""
}
|
from handlers.base_handler import BaseHandler
class CatHandler(BaseHandler):
def get(self):
if self.request.cookies.get('remember'):
self.render("cats/index.html")
else:
self.render("cats/form.html")
def post(self):
if self.request.params.get('remember') == "on":
self.response.set_cookie('remember', "1")
self.redirect("/cats")
class ForgetCatHandler(BaseHandler):
def get(self):
self.response.delete_cookie("remember")
self.redirect("/cats")
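# Routing sketch (added, hypothetical): the redirects above imply mappings of
# roughly this shape; the real registration lives elsewhere in the repo.
#
#   routes = [
#       ("/cats", CatHandler),
#       ("/cats/forget", ForgetCatHandler),
#   ]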
|
{
"content_hash": "4000c6fc0b6acc931e3229ca91ad0be2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 55,
"avg_line_length": 30.166666666666668,
"alnum_prop": 0.6169429097605893,
"repo_name": "xstrengthofonex/code-live-tutorials",
"id": "cacead8374f8c7e6dd37ae151ec2affdb1ae4706",
"size": "543",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python_web_development/authentication/handlers/cat_handlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9305"
},
{
"name": "JavaScript",
"bytes": "38"
},
{
"name": "Python",
"bytes": "51171"
}
],
"symlink_target": ""
}
|
"""Helpers for working with signatures in tf.saved_model.save."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.saved_model import function_serialization
from tensorflow.python.saved_model import revived_types
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training.tracking import base
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
DEFAULT_SIGNATURE_ATTR = "_default_save_signature"
SIGNATURE_ATTRIBUTE_NAME = "signatures"
# Max number of warnings to show if signature contains normalized input names.
_NUM_DISPLAY_NORMALIZED_SIGNATURES = 5
def _get_signature(function):
if (isinstance(function, (defun.Function, def_function.Function)) and
function.input_signature is not None):
function = function._get_concrete_function_garbage_collected() # pylint: disable=protected-access
if not isinstance(function, defun.ConcreteFunction):
return None
return function
def _valid_signature(concrete_function):
"""Returns whether concrete function can be converted to a signature."""
if not concrete_function.outputs:
# Functions without outputs don't make sense as signatures. We just don't
# have any way to run an Operation with no outputs as a SignatureDef in the
# 1.x style.
return False
try:
_validate_inputs(concrete_function)
_normalize_outputs(concrete_function.structured_outputs, "unused", "unused")
except ValueError:
return False
return True
def _validate_inputs(concrete_function):
"""Raises error if input type is tf.Variable."""
if any(isinstance(inp, resource_variable_ops.VariableSpec)
for inp in nest.flatten(
concrete_function.structured_input_signature)):
raise ValueError(("Functions that expect tf.Variable inputs cannot be "
"exported as signatures."))
def _get_signature_name_changes(concrete_function):
"""Checks for user-specified signature input names that are normalized."""
# Map of {user-given name: normalized name} if the names are un-identical.
name_changes = {}
for signature_input_name, graph_input in zip(
concrete_function.function_def.signature.input_arg,
concrete_function.graph.inputs):
try:
user_specified_name = compat.as_str(
graph_input.op.get_attr("_user_specified_name"))
if signature_input_name.name != user_specified_name:
name_changes[user_specified_name] = signature_input_name.name
except ValueError:
# Signature input does not have a user-specified name.
pass
return name_changes
def find_function_to_export(saveable_view):
"""Function to export, None if no suitable function was found."""
# If the user did not specify signatures, check the root object for a function
# that can be made into a signature.
functions = saveable_view.list_functions(saveable_view.root)
signature = functions.get(DEFAULT_SIGNATURE_ATTR, None)
if signature is not None:
return signature
# TODO(andresp): Discuss removing this behaviour. It can lead to WTFs when a
# user decides to annotate more functions with tf.function and suddenly
# serving that model way later in the process stops working.
possible_signatures = []
for function in functions.values():
concrete = _get_signature(function)
if concrete is not None and _valid_signature(concrete):
possible_signatures.append(concrete)
if len(possible_signatures) == 1:
single_function = possible_signatures[0]
signature = _get_signature(single_function)
if signature and _valid_signature(signature):
return signature
return None
def canonicalize_signatures(signatures):
"""Converts `signatures` into a dictionary of concrete functions."""
if signatures is None:
return {}, {}
if not isinstance(signatures, collections_abc.Mapping):
signatures = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signatures}
num_normalized_signatures_counter = 0
concrete_signatures = {}
wrapped_functions = {}
for signature_key, function in signatures.items():
original_function = signature_function = _get_signature(function)
if signature_function is None:
raise ValueError(
("Expected a TensorFlow function to generate a signature for, but "
"got {}. Only `tf.functions` with an input signature or "
"concrete functions can be used as a signature.").format(function))
wrapped_functions[original_function] = signature_function = (
wrapped_functions.get(original_function) or
function_serialization.wrap_cached_variables(original_function))
_validate_inputs(signature_function)
if num_normalized_signatures_counter < _NUM_DISPLAY_NORMALIZED_SIGNATURES:
signature_name_changes = _get_signature_name_changes(signature_function)
if signature_name_changes:
num_normalized_signatures_counter += 1
logging.warning(
"Function `%s` contains input name(s) %s with unsupported "
"characters which will be renamed to %s in the SavedModel.",
compat.as_str(signature_function.graph.name),
", ".join(signature_name_changes.keys()),
", ".join(signature_name_changes.values()))
# Re-wrap the function so that it returns a dictionary of Tensors. This
# matches the format of 1.x-style signatures.
# pylint: disable=cell-var-from-loop
@def_function.function
def signature_wrapper(**kwargs):
structured_outputs = signature_function(**kwargs)
return _normalize_outputs(
structured_outputs, signature_function.name, signature_key)
tensor_spec_signature = {}
if signature_function.structured_input_signature is not None:
# The structured input signature may contain other non-tensor arguments.
inputs = filter(
lambda x: isinstance(x, tensor_spec.TensorSpec),
nest.flatten(signature_function.structured_input_signature,
expand_composites=True))
else:
# Structured input signature isn't always defined for some functions.
inputs = signature_function.inputs
for keyword, inp in zip(
signature_function._arg_keywords, # pylint: disable=protected-access
inputs):
keyword = compat.as_str(keyword)
if isinstance(inp, tensor_spec.TensorSpec):
spec = tensor_spec.TensorSpec(inp.shape, inp.dtype, name=keyword)
else:
spec = tensor_spec.TensorSpec.from_tensor(inp, name=keyword)
tensor_spec_signature[keyword] = spec
final_concrete = signature_wrapper._get_concrete_function_garbage_collected( # pylint: disable=protected-access
**tensor_spec_signature)
# pylint: disable=protected-access
if len(final_concrete._arg_keywords) == 1:
# If there is only one input to the signature, a very common case, then
# ordering is unambiguous and we can let people pass a positional
# argument. Since SignatureDefs are unordered (protobuf "map") multiple
# arguments means we need to be keyword-only.
final_concrete._num_positional_args = 1
else:
final_concrete._num_positional_args = 0
# pylint: enable=protected-access
concrete_signatures[signature_key] = final_concrete
# pylint: enable=cell-var-from-loop
return concrete_signatures, wrapped_functions
def _normalize_outputs(outputs, function_name, signature_key):
"""Construct an output dictionary from unnormalized function outputs."""
# Convert `outputs` to a dictionary (if it's not one already).
if not isinstance(outputs, collections_abc.Mapping):
if not isinstance(outputs, collections_abc.Sequence):
outputs = [outputs]
outputs = {("output_{}".format(output_index)): output
for output_index, output
in enumerate(outputs)}
# Check that the keys of `outputs` are strings and the values are Tensors.
for key, value in outputs.items():
if not isinstance(key, compat.bytes_or_text_types):
raise ValueError(
("Got a dictionary with a non-string key {!r} in the output of the "
"function {} used to generate the SavedModel signature {!r}.")
.format(key, compat.as_str_any(function_name), signature_key))
if not isinstance(value, ops.Tensor):
raise ValueError(
("Got a non-Tensor value {!r} for key {!r} in the output of the "
"function {} used to generate the SavedModel signature {!r}. "
"Outputs for functions used as signatures must be a single Tensor, "
"a sequence of Tensors, or a dictionary from string to Tensor.")
.format(value, key, compat.as_str_any(function_name), signature_key))
return outputs
# _SignatureMap is immutable to ensure that users do not expect changes to be
# reflected in the SavedModel. Using public APIs, tf.saved_model.load() is the
# only way to create a _SignatureMap and there is no way to modify it. So we can
# safely ignore/overwrite ".signatures" attributes attached to objects being
# saved if they contain a _SignatureMap. A ".signatures" attribute containing
# any other type (e.g. a regular dict) will raise an exception asking the user
# to first "del obj.signatures" if they want it overwritten.
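# Illustration (added, hedged) of what the comment above means for users of the
# public API; assumes a model previously saved with signatures:
#
#   loaded = tf.saved_model.load("/tmp/model")   # loaded.signatures is a _SignatureMap
#   fn = loaded.signatures["serving_default"]    # read-only mapping access works
#   loaded.signatures["new"] = fn                # fails: the mapping is immutable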
class _SignatureMap(collections_abc.Mapping, base.Trackable):
"""A collection of SavedModel signatures."""
def __init__(self):
self._signatures = {}
def _add_signature(self, name, concrete_function):
"""Adds a signature to the _SignatureMap."""
# Ideally this object would be immutable, but restore is streaming so we do
# need a private API for adding new signatures to an existing object.
self._signatures[name] = concrete_function
def __getitem__(self, key):
return self._signatures[key]
def __iter__(self):
return iter(self._signatures)
def __len__(self):
return len(self._signatures)
def __repr__(self):
return "_SignatureMap({})".format(self._signatures)
def _list_functions_for_serialization(self, unused_serialization_cache):
return {
key: value for key, value in self.items()
if isinstance(value, (def_function.Function, defun.ConcreteFunction))
}
revived_types.register_revived_type(
"signature_map",
lambda obj: isinstance(obj, _SignatureMap),
versions=[revived_types.VersionedTypeRegistration(
# Standard dependencies are enough to reconstruct the trackable
# items in dictionaries, so we don't need to save any extra information.
object_factory=lambda proto: _SignatureMap(),
version=1,
min_producer_version=1,
min_consumer_version=1,
setter=_SignatureMap._add_signature # pylint: disable=protected-access
)])
def create_signature_map(signatures):
"""Creates an object containing `signatures`."""
signature_map = _SignatureMap()
for name, func in signatures.items():
    # This is true of any signature that came from canonicalize_signatures.
    # These assertions are a sanity check on saving; crashing on load (e.g. in
    # _add_signature) would be more problematic if future export changes
    # violated them.
assert isinstance(func, defun.ConcreteFunction)
assert isinstance(func.structured_outputs, collections_abc.Mapping)
# pylint: disable=protected-access
if len(func._arg_keywords) == 1:
assert 1 == func._num_positional_args
else:
assert 0 == func._num_positional_args
signature_map._add_signature(name, func)
# pylint: enable=protected-access
return signature_map
def validate_saveable_view(saveable_view):
"""Performs signature-related sanity checks on `saveable_view`."""
for name, dep in saveable_view.list_dependencies(
saveable_view.root):
if name == SIGNATURE_ATTRIBUTE_NAME:
if not isinstance(dep, _SignatureMap):
raise ValueError(
("Exporting an object {} which has an attribute named "
"'{signatures}'. This is a reserved attribute used to store "
"SavedModel signatures in objects which come from "
"`tf.saved_model.load`. Delete this attribute "
"(e.g. 'del obj.{signatures}') before saving if this shadowing is "
"acceptable.").format(
saveable_view.root,
signatures=SIGNATURE_ATTRIBUTE_NAME))
break
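# Usage sketch (added, hedged): the public entry point this module serves.
# canonicalize_signatures() above is what normalizes the `signatures` argument
# into concrete functions keyed by signature name.
#
#   class M(tf.Module):
#       @tf.function(input_signature=[tf.TensorSpec([None], tf.float32)])
#       def f(self, x):
#           return {"y": x * 2.0}
#
#   m = M()
#   tf.saved_model.save(m, "/tmp/m", signatures=m.f)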
|
{
"content_hash": "daf6e873d1aa261db26ca638a1a7f007",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 116,
"avg_line_length": 43.06060606060606,
"alnum_prop": 0.705137227304715,
"repo_name": "freedomtan/tensorflow",
"id": "4250efd7c01e7e36e630b34b5f58d2959ea85e4a",
"size": "13478",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/saved_model/signature_serialization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "32479"
},
{
"name": "Batchfile",
"bytes": "38366"
},
{
"name": "C",
"bytes": "1035837"
},
{
"name": "C#",
"bytes": "13395"
},
{
"name": "C++",
"bytes": "99324075"
},
{
"name": "CMake",
"bytes": "107781"
},
{
"name": "Dockerfile",
"bytes": "283435"
},
{
"name": "Go",
"bytes": "2013128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "928595"
},
{
"name": "Jupyter Notebook",
"bytes": "981916"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "4489624"
},
{
"name": "Makefile",
"bytes": "97500"
},
{
"name": "NASL",
"bytes": "8048"
},
{
"name": "Objective-C",
"bytes": "141623"
},
{
"name": "Objective-C++",
"bytes": "360423"
},
{
"name": "PHP",
"bytes": "20570"
},
{
"name": "Pawn",
"bytes": "32277"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42762396"
},
{
"name": "RobotFramework",
"bytes": "2661"
},
{
"name": "Roff",
"bytes": "2515"
},
{
"name": "Ruby",
"bytes": "6723"
},
{
"name": "Shell",
"bytes": "647623"
},
{
"name": "Smarty",
"bytes": "52687"
},
{
"name": "Starlark",
"bytes": "4632847"
},
{
"name": "Swift",
"bytes": "56924"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
import unittest
import jsch
class KeywordAccessTestCase(unittest.TestCase):
def assertRaisesAttributeError(self, message):
regex = '^{0}$'.format(message)
return self.assertRaisesRegex(AttributeError, regex)
class TestAdditionalItemsKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(additional_items=True)
self.assertEqual(True, schema.additional_items)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.additional_items)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.additional_items = True
class TestAdditionalPropertiesKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(additional_properties=True)
self.assertEqual(True, schema.additional_properties)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.additional_properties)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.additional_properties = True
class TestAllOfKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(all_of=[jsch.Schema()])
self.assertEqual([jsch.Schema()], schema.all_of)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.all_of)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.all_of = [jsch.Schema()]
class TestAnyOfKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(any_of=[jsch.Schema()])
self.assertEqual([jsch.Schema()], schema.any_of)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.any_of)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.any_of = [jsch.Schema()]
class TestDefaultKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(default=0)
self.assertEqual(0, schema.default)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.default)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.default = 0
class TestDefinitionsKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(definitions={'name': jsch.Schema()})
self.assertEqual({'name': jsch.Schema()}, schema.definitions)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.definitions)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.definitions = {'name': jsch.Schema()}
class TestDependenciesKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(dependencies={'name': ['age']})
self.assertEqual({'name': ['age']}, schema.dependencies)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.dependencies)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.dependencies = {'name': ['age']}
class TestDescriptionKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(description='Represents a schema')
self.assertEqual('Represents a schema', schema.description)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.description)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.description = 'Represents a schema'
class TestEnumKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(enum=[9, 7, 2])
self.assertEqual([9, 7, 2], schema.enum)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.enum)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.enum = [9, 7, 2]
class TestExclusiveMaximumKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(maximum=9, exclusive_maximum=True)
self.assertEqual(True, schema.exclusive_maximum)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.exclusive_maximum)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.exclusive_maximum = True
class TestExclusiveMinimumKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(minimum=1, exclusive_minimum=True)
self.assertEqual(True, schema.exclusive_minimum)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.exclusive_minimum)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.exclusive_minimum = True
class TestIdKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(id='#def')
self.assertEqual('#def', schema.id)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.id)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.id = '#def'
class TestItemsKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(items=jsch.Schema())
self.assertEqual(jsch.Schema(), schema.items)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.items)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.items = jsch.Schema()
class TestMaxItemsKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(max_items=6)
self.assertEqual(6, schema.max_items)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.max_items)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.max_items = 6
class TestMaxLengthKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(max_length=2)
self.assertEqual(2, schema.max_length)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.max_length)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.max_length = 2
class TestMaxPropertiesKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(max_properties=6)
self.assertEqual(6, schema.max_properties)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.max_properties)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.max_properties = 6
class TestMaximumKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(maximum=8.9)
self.assertEqual(8.9, schema.maximum)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.maximum)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.maximum = 8.9
class TestMinItemsKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(min_items=3)
self.assertEqual(3, schema.min_items)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.min_items)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.min_items = 3
class TestMinLengthKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(min_length=2)
self.assertEqual(2, schema.min_length)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.min_length)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.min_length = 2
class TestMinPropertiesKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(min_properties=4)
self.assertEqual(4, schema.min_properties)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.min_properties)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.min_properties = 4
class TestMinimumKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(minimum=6.2)
self.assertEqual(6.2, schema.minimum)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.minimum)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.minimum = 6.2
class TestMultipleOfKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(multiple_of=4.7)
self.assertEqual(4.7, schema.multiple_of)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.multiple_of)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.multiple_of = 4.7
class TestNotKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(not_=jsch.Schema())
self.assertEqual(jsch.Schema(), schema.not_)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.not_)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.not_ = jsch.Schema()
class TestOneOfKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(one_of=[jsch.Schema()])
self.assertEqual([jsch.Schema()], schema.one_of)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.one_of)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.one_of = [jsch.Schema()]
class TestPatternKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(pattern='[0-9A-Z]')
self.assertEqual('[0-9A-Z]', schema.pattern)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.pattern)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.pattern = '[0-9A-Z]'
class TestPatternPropertiesKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(pattern_properties={'[0-9]': jsch.Schema()})
self.assertEqual({'[0-9]': jsch.Schema()}, schema.pattern_properties)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.pattern_properties)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.pattern_properties = {'[0-9]': jsch.Schema()}
class TestPropertiesKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(properties={'name': jsch.Schema()})
self.assertEqual({'name': jsch.Schema()}, schema.properties)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.properties)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.properties = {'name': jsch.Schema()}
class TestRefKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(ref='#/definitions/person')
self.assertEqual('#/definitions/person', schema.ref)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.ref)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.ref = '#/definitions/person'
class TestRequiredKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(required=['name', 'age'])
self.assertEqual(['name', 'age'], schema.required)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.required)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.required = ['name', 'age']
class TestTitleKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(title='jsch')
self.assertEqual('jsch', schema.title)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.title)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.title = 'jsch'
class TestTypeKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(type='string')
self.assertEqual('string', schema.type)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.type)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.type = 'string'
class TestUniqueItemsKeywordAccess(KeywordAccessTestCase):
def test_read_property(self):
schema = jsch.Schema(unique_items=True)
self.assertEqual(True, schema.unique_items)
def test_read_unassigned_property(self):
schema = jsch.Schema()
self.assertIsNone(schema.unique_items)
def test_set_property(self):
schema = jsch.Schema()
message = "can't set keyword attribute"
with self.assertRaisesAttributeError(message):
schema.unique_items = True
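# Summary sketch (added): the pattern every test above exercises — keywords are
# supplied at construction time and are read-only afterwards.
#
#   schema = jsch.Schema(type='object', required=['name'])
#   schema.type              # 'object'
#   schema.type = 'array'    # AttributeError: can't set keyword attribute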
|
{
"content_hash": "ff86fb440636578558380d43bb05b943",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 77,
"avg_line_length": 32.98272552783109,
"alnum_prop": 0.6656191806331471,
"repo_name": "rob-earwaker/jsch",
"id": "3ded4c8a43ad481f459c89a7dbf49a1cfc0f7efa",
"size": "17184",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_schema_keyword_access.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58620"
}
],
"symlink_target": ""
}
|
import test.test_support, unittest
from sets import Set
import UserDict
class TestMappingProtocol(unittest.TestCase):
# This base class can be used to check that an object conforms to the
# mapping protocol
# Functions that can be useful to override to adapt to dictionary
# semantics
_tested_class = dict # which class is being tested
def _reference(self):
"""Return a dictionary of values which are invariant by storage
in the object under test."""
return {1:2, "key1":"value1", "key2":(1,2,3)}
def _empty_mapping(self):
"""Return an empty mapping object"""
return self._tested_class()
def _full_mapping(self, data):
"""Return a mapping object with the value contained in data
dictionary"""
x = self._empty_mapping()
for key, value in data.items():
x[key] = value
return x
def __init__(self, *args, **kw):
unittest.TestCase.__init__(self, *args, **kw)
self.reference = self._reference().copy()
key, value = self.reference.popitem()
self.other = {key:value}
def test_read(self):
# Test for read only operations on mapping
p = self._empty_mapping()
p1 = dict(p) #workaround for singleton objects
d = self._full_mapping(self.reference)
if d is p:
p = p1
#Indexing
for key, value in self.reference.items():
self.assertEqual(d[key], value)
knownkey = self.other.keys()[0]
self.failUnlessRaises(KeyError, lambda:d[knownkey])
#len
self.assertEqual(len(p), 0)
self.assertEqual(len(d), len(self.reference))
#has_key
for k in self.reference:
self.assert_(d.has_key(k))
self.assert_(k in d)
for k in self.other:
self.failIf(d.has_key(k))
self.failIf(k in d)
#cmp
self.assertEqual(cmp(p,p), 0)
self.assertEqual(cmp(d,d), 0)
self.assertEqual(cmp(p,d), -1)
self.assertEqual(cmp(d,p), 1)
        #__nonzero__
if p: self.fail("Empty mapping must compare to False")
if not d: self.fail("Full mapping must compare to True")
# keys(), items(), iterkeys() ...
def check_iterandlist(iter, lst, ref):
self.assert_(hasattr(iter, 'next'))
self.assert_(hasattr(iter, '__iter__'))
x = list(iter)
self.assert_(Set(x)==Set(lst)==Set(ref))
check_iterandlist(d.iterkeys(), d.keys(), self.reference.keys())
check_iterandlist(iter(d), d.keys(), self.reference.keys())
check_iterandlist(d.itervalues(), d.values(), self.reference.values())
check_iterandlist(d.iteritems(), d.items(), self.reference.items())
#get
key, value = d.iteritems().next()
knownkey, knownvalue = self.other.iteritems().next()
self.assertEqual(d.get(key, knownvalue), value)
self.assertEqual(d.get(knownkey, knownvalue), knownvalue)
self.failIf(knownkey in d)
def test_write(self):
# Test for write operations on mapping
p = self._empty_mapping()
#Indexing
for key, value in self.reference.items():
p[key] = value
self.assertEqual(p[key], value)
for key in self.reference.keys():
del p[key]
self.failUnlessRaises(KeyError, lambda:p[key])
p = self._empty_mapping()
#update
p.update(self.reference)
self.assertEqual(dict(p), self.reference)
d = self._full_mapping(self.reference)
        #setdefault
key, value = d.iteritems().next()
knownkey, knownvalue = self.other.iteritems().next()
self.assertEqual(d.setdefault(key, knownvalue), value)
self.assertEqual(d[key], value)
self.assertEqual(d.setdefault(knownkey, knownvalue), knownvalue)
self.assertEqual(d[knownkey], knownvalue)
#pop
self.assertEqual(d.pop(knownkey), knownvalue)
self.failIf(knownkey in d)
self.assertRaises(KeyError, d.pop, knownkey)
default = 909
d[knownkey] = knownvalue
self.assertEqual(d.pop(knownkey, default), knownvalue)
self.failIf(knownkey in d)
self.assertEqual(d.pop(knownkey, default), default)
#popitem
key, value = d.popitem()
self.failIf(key in d)
self.assertEqual(value, self.reference[key])
p=self._empty_mapping()
self.assertRaises(KeyError, p.popitem)
d0 = {}
d1 = {"one": 1}
d2 = {"one": 1, "two": 2}
d3 = {"one": 1, "two": 3, "three": 5}
d4 = {"one": None, "two": None}
d5 = {"one": 1, "two": 1}
class UserDictTest(TestMappingProtocol):
_tested_class = UserDict.IterableUserDict
def test_all(self):
# Test constructors
u = UserDict.UserDict()
u0 = UserDict.UserDict(d0)
u1 = UserDict.UserDict(d1)
u2 = UserDict.IterableUserDict(d2)
uu = UserDict.UserDict(u)
uu0 = UserDict.UserDict(u0)
uu1 = UserDict.UserDict(u1)
uu2 = UserDict.UserDict(u2)
# keyword arg constructor
self.assertEqual(UserDict.UserDict(one=1, two=2), d2)
# item sequence constructor
self.assertEqual(UserDict.UserDict([('one',1), ('two',2)]), d2)
self.assertEqual(UserDict.UserDict(dict=[('one',1), ('two',2)]), d2)
# both together
self.assertEqual(UserDict.UserDict([('one',1), ('two',2)], two=3, three=5), d3)
# alternate constructor
self.assertEqual(UserDict.UserDict.fromkeys('one two'.split()), d4)
self.assertEqual(UserDict.UserDict().fromkeys('one two'.split()), d4)
self.assertEqual(UserDict.UserDict.fromkeys('one two'.split(), 1), d5)
self.assertEqual(UserDict.UserDict().fromkeys('one two'.split(), 1), d5)
self.assert_(u1.fromkeys('one two'.split()) is not u1)
self.assert_(isinstance(u1.fromkeys('one two'.split()), UserDict.UserDict))
self.assert_(isinstance(u2.fromkeys('one two'.split()), UserDict.IterableUserDict))
# Test __repr__
# zyasoft - the below is not necessarily true, we cannot
# depend on the ordering of how the string is constructed;
# unless we require that it be sorted, or otherwise ordered in
# some consistent fashion
# for repr, we can use eval, so that's what we will do here
# self.assertEqual(str(u0), str(d0))
# self.assertEqual(repr(u1), repr(d1))
# self.assertEqual(`u2`, `d2`)
self.assertEqual(eval(repr(u1)), eval(repr(d1)))
self.assertEqual(eval(`u2`), eval(`d2`))
# end zyasoft ~
# Test __cmp__ and __len__
all = [d0, d1, d2, u, u0, u1, u2, uu, uu0, uu1, uu2]
for a in all:
for b in all:
self.assertEqual(cmp(a, b), cmp(len(a), len(b)))
# Test __getitem__
self.assertEqual(u2["one"], 1)
self.assertRaises(KeyError, u1.__getitem__, "two")
# Test __setitem__
u3 = UserDict.UserDict(u2)
u3["two"] = 2
u3["three"] = 3
# Test __delitem__
del u3["three"]
self.assertRaises(KeyError, u3.__delitem__, "three")
# Test clear
u3.clear()
self.assertEqual(u3, {})
# Test copy()
u2a = u2.copy()
self.assertEqual(u2a, u2)
u2b = UserDict.UserDict(x=42, y=23)
u2c = u2b.copy() # making a copy of a UserDict is special cased
self.assertEqual(u2b, u2c)
class MyUserDict(UserDict.UserDict):
def display(self): print self
m2 = MyUserDict(u2)
m2a = m2.copy()
self.assertEqual(m2a, m2)
# SF bug #476616 -- copy() of UserDict subclass shared data
m2['foo'] = 'bar'
self.assertNotEqual(m2a, m2)
# zyasoft - changed the following three assertions to use sets
# to remove order dependency
# Test keys, items, values
self.assertEqual(set(u2.keys()), set(d2.keys()))
self.assertEqual(set(u2.items()), set(d2.items()))
self.assertEqual(set(u2.values()), set(d2.values()))
# Test has_key and "in".
for i in u2.keys():
self.assert_(u2.has_key(i))
self.assert_(i in u2)
self.assertEqual(u1.has_key(i), d1.has_key(i))
self.assertEqual(i in u1, i in d1)
self.assertEqual(u0.has_key(i), d0.has_key(i))
self.assertEqual(i in u0, i in d0)
# Test update
t = UserDict.UserDict()
t.update(u2)
self.assertEqual(t, u2)
class Items:
def items(self):
return (("x", 42), ("y", 23))
t = UserDict.UserDict()
t.update(Items())
self.assertEqual(t, {"x": 42, "y": 23})
# Test get
for i in u2.keys():
self.assertEqual(u2.get(i), u2[i])
self.assertEqual(u1.get(i), d1.get(i))
self.assertEqual(u0.get(i), d0.get(i))
# Test "in" iteration.
for i in xrange(20):
u2[i] = str(i)
ikeys = []
for k in u2:
ikeys.append(k)
keys = u2.keys()
self.assertEqual(Set(ikeys), Set(keys))
# Test setdefault
t = UserDict.UserDict()
self.assertEqual(t.setdefault("x", 42), 42)
self.assert_(t.has_key("x"))
self.assertEqual(t.setdefault("x", 23), 42)
# Test pop
t = UserDict.UserDict(x=42)
self.assertEqual(t.pop("x"), 42)
self.assertRaises(KeyError, t.pop, "x")
self.assertEqual(t.pop("x", 1), 1)
t["x"] = 42
self.assertEqual(t.pop("x", 1), 42)
# Test popitem
t = UserDict.UserDict(x=42)
self.assertEqual(t.popitem(), ("x", 42))
self.assertRaises(KeyError, t.popitem)
##########################
# Test Dict Mixin
class SeqDict(UserDict.DictMixin):
"""Dictionary lookalike implemented with lists.
Used to test and demonstrate DictMixin
"""
def __init__(self):
self.keylist = []
self.valuelist = []
def __getitem__(self, key):
try:
i = self.keylist.index(key)
except ValueError:
raise KeyError
return self.valuelist[i]
def __setitem__(self, key, value):
try:
i = self.keylist.index(key)
self.valuelist[i] = value
except ValueError:
self.keylist.append(key)
self.valuelist.append(value)
def __delitem__(self, key):
try:
i = self.keylist.index(key)
except ValueError:
raise KeyError
self.keylist.pop(i)
self.valuelist.pop(i)
def keys(self):
return list(self.keylist)
class UserDictMixinTest(TestMappingProtocol):
_tested_class = SeqDict
def test_all(self):
## Setup test and verify working of the test class
# check init
s = SeqDict()
# exercise setitem
s[10] = 'ten'
s[20] = 'twenty'
s[30] = 'thirty'
# exercise delitem
del s[20]
# check getitem and setitem
self.assertEqual(s[10], 'ten')
# check keys() and delitem
self.assertEqual(s.keys(), [10, 30])
## Now, test the DictMixin methods one by one
# has_key
self.assert_(s.has_key(10))
self.assert_(not s.has_key(20))
# __contains__
self.assert_(10 in s)
self.assert_(20 not in s)
# __iter__
self.assertEqual([k for k in s], [10, 30])
# __len__
self.assertEqual(len(s), 2)
# iteritems
self.assertEqual(list(s.iteritems()), [(10,'ten'), (30, 'thirty')])
# iterkeys
self.assertEqual(list(s.iterkeys()), [10, 30])
# itervalues
self.assertEqual(list(s.itervalues()), ['ten', 'thirty'])
# values
self.assertEqual(s.values(), ['ten', 'thirty'])
# items
self.assertEqual(s.items(), [(10,'ten'), (30, 'thirty')])
# get
self.assertEqual(s.get(10), 'ten')
self.assertEqual(s.get(15,'fifteen'), 'fifteen')
self.assertEqual(s.get(15), None)
# setdefault
self.assertEqual(s.setdefault(40, 'forty'), 'forty')
self.assertEqual(s.setdefault(10, 'null'), 'ten')
del s[40]
# pop
self.assertEqual(s.pop(10), 'ten')
self.assert_(10 not in s)
s[10] = 'ten'
self.assertEqual(s.pop("x", 1), 1)
s["x"] = 42
self.assertEqual(s.pop("x", 1), 42)
# popitem
k, v = s.popitem()
self.assert_(k not in s)
s[k] = v
# clear
s.clear()
self.assertEqual(len(s), 0)
# empty popitem
self.assertRaises(KeyError, s.popitem)
# update
s.update({10: 'ten', 20:'twenty'})
self.assertEqual(s[10], 'ten')
self.assertEqual(s[20], 'twenty')
# cmp
self.assertEqual(s, {10: 'ten', 20:'twenty'})
t = SeqDict()
t[20] = 'twenty'
t[10] = 'ten'
self.assertEqual(s, t)
def test_main():
test.test_support.run_unittest(
TestMappingProtocol,
UserDictTest,
UserDictMixinTest
)
if __name__ == "__main__":
test_main()
|
{
"content_hash": "9618ed0b464df5a15b2ab848809f022e",
"timestamp": "",
"source": "github",
"line_count": 415,
"max_line_length": 91,
"avg_line_length": 32.23132530120482,
"alnum_prop": 0.555622009569378,
"repo_name": "babble/babble",
"id": "41b9669effdbdaac38ce160ab8f691aea3ad7244",
"size": "13429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "include/jython/Lib/test/test_userdict.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3378"
},
{
"name": "Groovy",
"bytes": "16151"
},
{
"name": "Java",
"bytes": "7316421"
},
{
"name": "JavaScript",
"bytes": "644844"
},
{
"name": "Python",
"bytes": "10107943"
},
{
"name": "Ruby",
"bytes": "4961765"
},
{
"name": "Shell",
"bytes": "2575"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import sys
from typing import List
from sinter._csv_out import CSV_HEADER
from sinter._existing_data import ExistingData
def main_combine(*, command_line_args: List[str]):
if command_line_args:
total = ExistingData()
for path in command_line_args:
total += ExistingData.from_file(path)
else:
total = ExistingData.from_file(sys.stdin)
print(CSV_HEADER)
for value in total.data.values():
print(value.to_csv_line())
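# Usage sketch (added, hedged): this module backs sinter's `combine`
# subcommand, which merges CSV sample statistics, e.g.
#
#   sinter combine stats_a.csv stats_b.csv > combined.csv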
|
{
"content_hash": "b5b5a2dff0465af33efd002d51b77a03",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 50,
"avg_line_length": 26.5,
"alnum_prop": 0.6666666666666666,
"repo_name": "quantumlib/Stim",
"id": "f61e3086699aed851332d83ffcb0a86eb36ab052",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "glue/sample/src/sinter/_main_combine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4500"
},
{
"name": "C++",
"bytes": "2703579"
},
{
"name": "CMake",
"bytes": "4590"
},
{
"name": "HTML",
"bytes": "8333"
},
{
"name": "JavaScript",
"bytes": "14013"
},
{
"name": "Python",
"bytes": "877557"
},
{
"name": "Shell",
"bytes": "4765"
},
{
"name": "Starlark",
"bytes": "3470"
}
],
"symlink_target": ""
}
|
from generateData import *
from gmm3 import *
X0train = generateDataCat('data/train.txt',0)
X1train = generateDataCat('data/train.txt',1)
# training
k0 = 4
k1 = 4
gmm0 = GMM(k0,X0train)
# train GMM0
gmm0.gmm(X0train,k0)
gmm1 = GMM(k1,X1train)
gmm1.gmm(X1train,k1)
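# Note: one GMM is fit per class; generateResult is assumed to label each
# sample by whichever model assigns it the higher likelihood.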
X0dev = generateDataCat('data/dev.txt',0)
X1dev = generateDataCat('data/dev.txt',1)
Xdev = generateData('data/dev.txt')
ansDev = generateAns('data/dev.txt')
redev = generateResult(gmm0,gmm1,Xdev)
devacc = evaluate(redev,ansDev)
print 'Accuracy on Dev dataset: ', devacc
# generate test result
Xtest = generateData('data/test.txt')
retest = generateResult(gmm0,gmm1,Xtest)
Xtest_arr = np.array(Xtest)  # convert once, not on every loop iteration
row, col = np.shape(Xtest_arr)
print "row, col:", row, col
outf = open('result/result0.txt','w')
for i in range(row):
    line = ""
    for j in range(col):
        line += str(Xtest_arr[i][j])
        line += " "
    line += " "
    line += str(retest[i]+1)
    line += '\n'
    outf.write(line)
outf.close()
"""
test for change-case in atom
hello/word
"""
|
{
"content_hash": "f62433c76becd0ba492ba9f898c9f798",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 45,
"avg_line_length": 20.375,
"alnum_prop": 0.6840490797546013,
"repo_name": "yysherlock/gmm-classifier",
"id": "7e7b52b8d31f1a879e14ff67cdfb2851d2f2bdb7",
"size": "978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classifier.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11143"
}
],
"symlink_target": ""
}
|
"""
run with: sudo fig run web nosetests geolocator/app/tests/test_weighter.py
"""
from app.weighter import *
from app.geolocator import *
import unittest
from sqlalchemy import text
from nose.tools import nottest
class LocationAdminNamesTestCase(unittest.TestCase):
"""
Tests app.weighter.LocationAdminNames
"""
# ----------------------- Before/After ----------------------- #
def setUp(self):
"""
Executed at the start of every test
Instantiates a new instance of weighter.LocationAdminNames()
"""
self.names = LocationAdminNames()
return
def tearDown(self):
"""
Executed at the end of every test
"""
self.names = None
return
# ----------------------- Helpers ----------------------- #
# ----------------------- Tests ----------------------- #
def test__init__pass(self):
"""
Ensures that the weighter.LocationAdminNames successfully initializes
"""
assert isinstance(self.names, LocationAdminNames)
assert self.names.geonameid == -1
assert self.names.name is None
assert self.names.admin4name is None
assert self.names.admin3name is None
assert self.names.admin2name is None
assert self.names.admin1name is None
assert self.names.countryname is None
def test__list__pass(self):
"""
Tests app.weighter.LocationAdminNames.list
"""
expected = ['May', 'the', 'Force', 'be', 'with you!']
self.names.admin4name = expected[0]
self.names.admin3name = expected[1]
self.names.admin2name = expected[2]
self.names.admin1name = expected[3]
self.names.countryname = expected[4]
actual = self.names.list()
assert expected == actual
def test__match__admin1name(self):
"""
Tests app.weighter.LocationAdminNames.match where name matches
admin1name
"""
NAME = 'Taiwan'
self.names.admin1name = NAME
assert self.names.match(NAME)
def test__match__admin2name(self):
"""
Tests app.weighter.LocationAdminNames.match where name matches
admin2name
"""
NAME = 'Yellow'
        self.names.admin2name = NAME
assert self.names.match(NAME)
def test__match__admin3name(self):
"""
Tests app.weighter.LocationAdminNames.match where name matches
admin3name
"""
NAME = 'Blue'
        self.names.admin3name = NAME
assert self.names.match(NAME)
def test__match__admin4name(self):
"""
Tests app.weighter.LocationAdminNames.match where name matches
admin4name
"""
NAME = 'Red'
        self.names.admin4name = NAME
assert self.names.match(NAME)
def test__match__countryname(self):
"""
Tests app.weighter.LocationAdminNames.match where name matches
countryname
"""
NAME = 'China'
        self.names.countryname = NAME
assert self.names.match(NAME)
def test__eq__pass(self):
"""
Tests app.weighter.LocationAdminNames.__eq__ with two equal objects
"""
A4 = 'apple'
A2 = 'peanut butter'
NAME = 'Joe'
c1 = LocationAdminNames()
c1.admin4name = A4
c1.admin2name = A2
c1.name = NAME
c2 = LocationAdminNames()
c2.admin4name = A4
c2.admin2name = A2
c2.name = NAME
assert c1 == c2
def test__eq__fail(self):
"""
Tests app.weighter.LocationAdminNames.__eq__ with two different objects
"""
A4 = 'apple'
A2 = 'peanut butter'
NAME = 'Frank'
c1 = LocationAdminNames()
c1.admin4name = A4
c1.admin2name = A2
c1.name = 'Joe'
c2 = LocationAdminNames()
c2.admin4name = A4
c2.admin2name = A2
c2.name = NAME
assert c1 != c2
def test__repr__good(self):
"""
Tests app.weighter.LocationAdminNames.__repr__
"""
self.names.name = 'Banana'
self.names.countryname = 'United States'
self.names.admin3name = 'Apple'
# test if any exceptions fire
s = self.names.__repr__()
assert s is not None
assert isinstance(s, str)
class LocationAdminCodesTestCase(unittest.TestCase):
"""
Tests app.weighter.LocationAdminCodes
"""
# ----------------------- Before/After ----------------------- #
def setUp(self):
"""
Executed at the start of every test
Instantiates a new instance of weighter.LocationAdminCodes()
"""
self.codes = LocationAdminCodes()
return
def tearDown(self):
"""
Executed at the end of every test
"""
self.codes = None
return
# ----------------------- Helpers ----------------------- #
# ----------------------- Tests ----------------------- #
def test__init__pass(self):
"""
Ensures that the weighter.LocationAdminCodes successfully initializes
"""
assert isinstance(self.codes, LocationAdminCodes)
assert self.codes.geonameid == -1
assert self.codes.name is None
assert self.codes.featurecode is None
assert self.codes.featureclass is None
assert self.codes.admin4code is None
assert self.codes.admin3code is None
assert self.codes.admin2code is None
assert self.codes.admin1code is None
assert self.codes.countrycode is None
def test__eq__pass(self):
"""
Tests app.weighter.LocationAdminCodes.__eq__ with two equal objects
"""
A4 = 'apple'
A2 = 'peanut butter'
FC = '5'
NAME = 'Joe'
c1 = LocationAdminCodes()
c1.admin4code = A4
c1.admin2code = A2
c1.featurecode = FC
c1.name = NAME
c2 = LocationAdminCodes()
c2.admin4code = A4
c2.admin2code = A2
c2.featurecode = FC
c2.name = NAME
assert c1 == c2
def test__eq__fail(self):
"""
Tests app.weighter.LocationAdminCodes.__eq__ with two different objects
"""
A4 = 'apple'
A2 = 'peanut butter'
FC = '5'
NAME = 'Joe'
c1 = LocationAdminCodes()
c1.admin4code = A4
c1.admin2code = A4
c1.featurecode = FC
c1.name = FC
c2 = LocationAdminCodes()
c2.admin4code = A2
c2.admin2code = A4
c2.featurecode = NAME
c2.name = NAME
assert c1 != c2
def test__repr__good(self):
"""
Tests app.weighter.LocationAdminCodes.__repr__
"""
self.codes.name = 'Banana'
self.codes.featurecode = 'PPL'
self.codes.featureclass = 'P'
self.codes.countrycode = 'US'
# test if any exceptions fire
s = self.codes.__repr__()
assert s is not None
assert isinstance(s, str)
class QueryTestCase(unittest.TestCase):
"""
Tests app.weighter.Query
"""
# ----------------------- Before/After ----------------------- #
def setUp(self):
self.basic_query = self.init_basic()
return
def tearDown(self):
self.db = None
return
# ----------------------- Helpers ----------------------- #
def init_basic(self):
return Query(selects=[], froms=[], wheres=[])
# ----------------------- Tests ----------------------- #
def test__init__success(self):
"""
successful initialization
"""
list1 = [1, 2, 3]
list2 = ['fea', 'gagea', 'afeagt3']
# --- 1 ---
q = Query(selects=list1, froms=list2)
assert q.selects == list1
assert q.froms == list2
assert q.wheres is None
# --- 2 ---
q = Query(selects=list1, froms=list2, wheres=list1)
assert q.selects == list1
assert q.froms == list2
assert q.wheres == list1
def test__init__no_list(self):
"""
        selects should be a list
        froms should be a list
        wheres should be None or a list
"""
not_a_list = 1
list1 = ['test list1', 'banana', 'apple']
list2 = ['test list2', 'orange']
self.assertRaises(TypeError, Query, not_a_list, list2, list1)
self.assertRaises(TypeError, Query, list1, not_a_list, list2)
self.assertRaises(TypeError, Query, list1, list2, not_a_list)
q = Query(selects=list1, froms=list2, wheres=None)
assert q.wheres is None
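    # The tests below pin down Query.expand_list's observed contract: it
    # joins a list's items into one string, ', ' is the default separator,
    # and a None or empty separator concatenates the items directly.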
def test__expand_list__success(self):
result = self.basic_query.expand_list(['1', '2', '3', '4'])
assert result == '1, 2, 3, 4'
def test__expand_list__empty_list(self):
result = self.basic_query.expand_list([])
assert result == ''
def test__expand_list__none_param(self):
result = self.basic_query.expand_list(None)
assert result == ''
def test__expand_list__no_params(self):
self.assertRaises(TypeError, self.basic_query.expand_list)
def test__expand_list__list_of_length_1(self):
        result = self.basic_query.expand_list(['1'])
assert result == '1'
def test__expand_list__3_params(self):
self.assertRaises(
TypeError,
self.basic_query.expand_list, ['1', '2'], 'param2', 'param3')
def test__expand_list__unique_separator(self):
result = self.basic_query.expand_list(['1', '2', '3', '4'], '--- ')
assert result == '1--- 2--- 3--- 4'
def test__expand_list__none_separator(self):
result = self.basic_query.expand_list(['1', '2', '3', '4'], None)
assert result == '1234'
def test__expand_list__empty_separator(self):
result = self.basic_query.expand_list(['1', '2', '3', '4'], '')
assert result == '1234'
def test__add_sql__success(self):
s1 = 'With a love like that'
s2 = 'You know should be glad!'
self.basic_query._add_sql(s1)
assert self.basic_query.sql == s1
self.basic_query._add_sql(s2)
assert self.basic_query.sql == (s1 + ' ' + s2)
def test__add_sql__empty(self):
s1 = ''
self.basic_query._add_sql(s1)
assert self.basic_query.sql == s1
self.basic_query._add_sql(s1)
assert self.basic_query.sql == ''
def test__add_sql__none_param(self):
self.assertRaises(TypeError, self.basic_query._add_sql, None)
def test__add_sql__no_params(self):
self.assertRaises(TypeError, self.basic_query._add_sql)
def test__add_sql__two_params(self):
self.assertRaises(
TypeError,
self.basic_query._add_sql, 'param1', 'param2')
def test__to_sql__success_with_wheres(self):
SELECTS = ['s0', 's1']
FROMS = ['f0', 'f1']
WHERES = ['w0=w1', 'w1<=2']
q = Query(selects=SELECTS, froms=FROMS, wheres=WHERES)
expected = (
'select %s, %s from %s, %s where %s AND %s'
% (SELECTS[0], SELECTS[1], FROMS[0], FROMS[1], WHERES[0],
WHERES[1]))
expected = text(expected)
actual = q.to_sql()
assert expected.text == actual.text
def test__to_sql__success_without_wheres(self):
SELECTS = ['s0', 's1']
FROMS = ['f0', 'f1']
q = Query(selects=SELECTS, froms=FROMS)
expected = (
'select %s, %s from %s, %s'
% (SELECTS[0], SELECTS[1], FROMS[0], FROMS[1]))
expected = text(expected)
actual = q.to_sql()
assert expected.text == actual.text
def test__repr__(self):
"""
Tests Query.__repr__
"""
q = Query(selects=['selects1, selects2'], froms=['from1', 'from2'])
s = q.__repr__()
        # the test fails on the __repr__ call above if it raises an exception
assert isinstance(s, str)
class AdminNameGetterTestCase(unittest.TestCase):
"""
Tests for app.weighter.AdminNameGetter
"""
# ----------------------- Before/After ----------------------- #
def setUp(self):
"""
Executed at the start of every test
"""
self.getter = None
return
def tearDown(self):
"""
Executed at the end of every test
"""
self.getter = None
return
# ----------------------- Helpers ----------------------- #
def init(self, admincodes=LocationAdminCodes()):
self.getter = AdminNameGetter(admincodes)
return
@nottest
def test_sql_func(self, func, attribute, value):
"""
Tests any of the AdminNameGetter's _sql_* functions
:param function func: function to test
:param str attribute: attribute to plug into sql
:param str value: value to plug into sql
"""
expected = "l.%s = '%s'" % (attribute, value)
actual = func()
assert expected == actual
# ----------------------- Tests ----------------------- #
def test__init__pass(self):
"""
Ensures that the app.weighter.AdminNameGetter successfully initializes
"""
CODE = LocationAdminCodes()
CODE.admin2code = 'testing'
self.init(CODE)
assert isinstance(self.getter, AdminNameGetter)
assert self.getter.codes == CODE
def test__sql_admin4code__pass(self):
"""
Tests AdminNameGetter._sql_admin4code
"""
attribute = 'admin4code'
value = 'banana'
# make codes
codes = LocationAdminCodes()
codes.admin4code = value
# init getter
self.init(codes)
# get func
func = self.getter._sql_admin4code
# run test
self.test_sql_func(func, attribute, value)
def test__sql_admin3code__pass(self):
"""
Tests AdminNameGetter._sql_admin3code
"""
attribute = 'admin3code'
value = 'world'
# make codes
codes = LocationAdminCodes()
codes.admin3code = value
# init getter
self.init(codes)
# get func
func = self.getter._sql_admin3code
# run test
self.test_sql_func(func, attribute, value)
def test__sql_admin2code__pass(self):
"""
Tests AdminNameGetter._sql_admin2code
"""
attribute = 'admin2code'
value = 'hello'
# make codes
codes = LocationAdminCodes()
codes.admin2code = value
# init getter
self.init(codes)
# get func
func = self.getter._sql_admin2code
# run test
self.test_sql_func(func, attribute, value)
def test__sql_admin1code__pass(self):
"""
Tests AdminNameGetter._sql_admin1code
"""
attribute = 'admin1code'
value = 'ice cream'
# make codes
codes = LocationAdminCodes()
codes.admin1code = value
# init getter
self.init(codes)
# get func
func = self.getter._sql_admin1code
# run test
self.test_sql_func(func, attribute, value)
def test__sql_countrycode__pass(self):
"""
Tests AdminNameGetter._sql_countrycode
"""
attribute = 'countrycode'
value = 'country 1'
# make codes
codes = LocationAdminCodes()
codes.countrycode = value
# init getter
self.init(codes)
# get func
func = self.getter._sql_countrycode
# run test
self.test_sql_func(func, attribute, value)
def test__sql_featurecode__pass(self):
"""
Tests AdminNameGetter._sql_featurecode
"""
attribute = 'featurecode'
# init getter
self.init()
# get value
INDEX = 1
        value = self.getter.ADMIN_FEATURE_CODES[INDEX]
# run test
expected = "l.%s = '%s'" % (attribute, value)
actual = self.getter._sql_featurecode(INDEX)
assert expected == actual
def test__countryname__pass(self):
"""
Tests AdminNameGetter._countryname
"""
# make codes
codes = LocationAdminCodes()
codes.countrycode = 'US'
# init getter
self.init(codes)
expected = 'United States'
actual = self.getter._countryname()
print actual
print expected
assert expected == actual
def test__admin1name__pass(self):
"""
Tests AdminNameGetter._admin1name
"""
# make codes
codes = LocationAdminCodes()
codes.admin1code = 'AZ'
codes.countrycode = 'US'
# init getter
self.init(codes)
expected = 'Arizona'
actual = self.getter._admin1name()
print actual
print expected
assert expected == actual
def test__admin2name__pass(self):
"""
Tests AdminNameGetter._admin2name
"""
# make codes
codes = LocationAdminCodes()
codes.admin4code = '8658294'
codes.admin3code = '8644152'
codes.admin2code = 'D8'
codes.admin1code = 'C'
codes.countrycode = 'UG'
# init getter
self.init(codes)
expected = 'Mityana District'
actual = self.getter._admin2name()
print actual
print expected
assert expected == actual
def test__admin3name__pass(self):
"""
Tests AdminNameGetter._admin3name
"""
# make codes
codes = LocationAdminCodes()
codes.admin4code = '8658294'
codes.admin3code = '8644152'
codes.admin2code = 'D8'
codes.admin1code = 'C'
codes.countrycode = 'UG'
# init getter
self.init(codes)
expected = 'Mityana'
actual = self.getter._admin3name()
print actual
print expected
assert expected == actual
def test__admin4name__pass(self):
"""
Tests AdminNameGetter._admin4name
"""
# make codes
codes = LocationAdminCodes()
codes.admin4code = '8658294'
codes.admin3code = '8644152'
codes.admin2code = 'D8'
codes.admin1code = 'C'
codes.countrycode = 'UG'
# init getter
self.init(codes)
expected = 'Mityana Town Council'
actual = self.getter._admin4name()
print actual
print expected
assert expected == actual
def test__adminnames__pass_acc3(self):
"""
Tests AdminNameGetter.adminnames with accuracy of 3
"""
ADM2 = '237'
ADM1 = 'GA'
COCO = 'US'
codes = LocationAdminCodes()
codes.featurecode = 'PPL'
        codes.featureclass = 'P'
codes.admin4code = None
codes.admin3code = None
codes.admin2code = ADM2
codes.admin1code = ADM1
codes.countrycode = COCO
self.init(codes)
expected = LocationAdminNames()
expected.admin4name = None
expected.admin3name = None
expected.admin2name = 'Putnam County'
expected.admin1name = 'Georgia'
expected.countryname = 'United States'
actual = self.getter.adminnames()
print 'expected -> ' + str(expected)
print 'actual -> ' + str(actual)
assert expected == actual
def test__adminnames__pass_acc5(self):
"""
Tests AdminNameGetter.adminnames with accuracy of 5
"""
codes = LocationAdminCodes()
codes.featurecode = 'PPL'
        codes.featureclass = 'P'
codes.admin4code = '8658294'
codes.admin3code = '8644152'
codes.admin2code = 'D8'
codes.admin1code = 'C'
codes.countrycode = 'UG'
self.init(codes)
expected = LocationAdminNames()
expected.admin4name = "Mityana Town Council"
expected.admin3name = 'Mityana'
expected.admin2name = 'Mityana District'
expected.admin1name = 'Central Region'
expected.countryname = 'Uganda'
actual = self.getter.adminnames()
print 'expected -> ' + str(expected)
print 'actual -> ' + str(actual)
assert expected == actual
def test__repr__(self):
"""
Tests AdminNameGetter.__repr__
"""
ADM2 = '237'
ADM1 = 'GA'
COCO = 'US'
codes = LocationAdminCodes()
codes.featurecode = 'PPL'
        codes.featureclass = 'P'
codes.admin4code = None
codes.admin3code = None
codes.admin2code = ADM2
codes.admin1code = ADM1
codes.countrycode = COCO
self.init(codes)
# test if any exceptions fire
s = self.getter.__repr__()
assert s is not None
assert isinstance(s, str)
class WeightifierTestCase(unittest.TestCase):
"""
Tests for app.weighter.Weightifier
"""
# ----------------------- Before/After ----------------------- #
def setUp(self):
"""
Executed at the start of every test
Instantiates a new instance of weighter.Weightifier()
"""
self.weightifier = Weightifier()
return
def tearDown(self):
"""
Executed at the end of every test
"""
self.weightifier = None
return
# ----------------------- Helpers ----------------------- #
def make_admin_codes_query_row(self, geonameid, name, featurecode,
featureclass, countrycode=None,
admin1code=None, admin2code=None,
admin3code=None, admin4code=None):
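        """
        Builds a fake result row in the column order produced by
        Weightifier._make_admin_codes_query: geonameid, name, featurecode,
        featureclass, then whichever codes are supplied, from countrycode
        down to admin4code.
        """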
row = [geonameid, name, featurecode, featureclass]
if countrycode is not None:
row.append(countrycode)
if admin1code is not None:
row.append(admin1code)
if admin2code is not None:
row.append(admin2code)
if admin3code is not None:
row.append(admin3code)
if admin4code is not None:
row.append(admin4code)
return row
def _make_wrap(self, name, lat=0, lon=0, weight=0, countryname='None',
admin1name='None', admin2name='None', admin3name='None',
admin4name='None'):
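        """
        Convenience factory: wraps a minimal Location (placeholder '???'
        fields) in a LocationWrap with the given weight and admin names.
        """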
return LocationWrap(
Location(
name,
-1,
name,
'???',
'???',
'???',
'???',
lat,
lon,
0),
weight=weight,
adminnames=LocationAdminNames(
countryname=countryname,
admin1name=admin1name,
admin2name=admin2name,
admin3name=admin3name,
admin4name=admin4name))
# ----------------------- Tests ----------------------- #
def test__init(self):
"""
Ensures that the weighter.Weightifier successfully initializes
"""
assert isinstance(self.weightifier, Weightifier)
def test__make_sql(self):
"""
Tests Weightifier._make_sql
"""
selects = "o.orange, a.apple, b.banana"
fromm = "FROM orange o, apple a, banana b"
where = "o.orange == a.apple"
expected = text(selects + '\n' + fromm + '\n' + where)
actual = self.weightifier._make_sql(selects, fromm, where)
assert expected.text == actual.text
def test__make_admin_codes_query__acc1(self):
"""
Tests Weightifier._make_admin_codes_query with accuracy of 1
"""
geonameid = 9001
query = ("SELECT l.geonameid, l.name, l.featurecode, l.featureclass, "
"l.countrycode"
"\nFROM raw_locations l\n"
"WHERE l.geonameid = '%s'" % str(geonameid))
expected = text(query)
actual = self.weightifier._make_admin_codes_query(geonameid, 1)
print expected.text
print actual.text
assert expected.text == actual.text
def test__make_admin_codes_query__acc2(self):
"""
Tests Weightifier._make_admin_codes_query with accuracy of 2
"""
geonameid = 9001
query = ("SELECT l.geonameid, l.name, l.featurecode, l.featureclass, "
"l.countrycode, l.admin1code"
"\nFROM raw_locations l\n"
"WHERE l.geonameid = '%s'" % str(geonameid))
expected = text(query)
actual = self.weightifier._make_admin_codes_query(geonameid, 2)
assert expected.text == actual.text
def test__make_admin_codes_query__acc3(self):
"""
Tests Weightifier._make_admin_codes_query with accuracy of 3
"""
geonameid = 9001
query = ("SELECT l.geonameid, l.name, l.featurecode, l.featureclass, "
"l.countrycode, l.admin1code, l.admin2code"
"\nFROM raw_locations l\n"
"WHERE l.geonameid = '%s'" % str(geonameid))
expected = text(query)
actual = self.weightifier._make_admin_codes_query(geonameid, 3)
assert expected.text == actual.text
def test__make_admin_codes_query__acc4(self):
"""
Tests Weightifier._make_admin_codes_query with accuracy of 4
"""
geonameid = 9001
query = ("SELECT l.geonameid, l.name, l.featurecode, l.featureclass, "
"l.countrycode, l.admin1code, l.admin2code, l.admin3code"
"\nFROM raw_locations l\n"
"WHERE l.geonameid = '%s'" % str(geonameid))
expected = text(query)
actual = self.weightifier._make_admin_codes_query(geonameid, 4)
assert expected.text == actual.text
def test__make_admin_codes_query__acc5(self):
"""
Tests Weightifier._make_admin_codes_query with accuracy of 5
"""
geonameid = 9001
query = ("SELECT l.geonameid, l.name, l.featurecode, l.featureclass, "
"l.countrycode, l.admin1code, l.admin2code, l.admin3code, "
"l.admin4code\nFROM raw_locations l\n"
"WHERE l.geonameid = '%s'" % str(geonameid))
expected = text(query)
actual = self.weightifier._make_admin_codes_query(geonameid, 5)
print expected.text
print actual.text
assert expected.text == actual.text
def test__make_admin_codes__pass_0(self):
"""
Tests weighter.Weightifier._make_admin_codes with accuracy of 0
"""
GEONAMEID = 5308655
NAME = 'Phoenix'
CODE = 'PPLA'
CLASS = 'P'
row = self.make_admin_codes_query_row(
geonameid=GEONAMEID,
name=NAME,
featurecode=CODE,
featureclass=CLASS)
expected = LocationAdminCodes()
expected.geonameid = GEONAMEID
expected.name = NAME
expected.featurecode = CODE
expected.featureclass = CLASS
actual = self.weightifier._make_admin_codes(row)
assert expected == actual
def test__make_admin_codes__pass_5(self):
"""
Tests weighter.Weightifier._make_admin_codes with accuracy of 5
"""
GEONAMEID = 5308655
NAME = 'Phoenix'
CODE = 'PPLA'
CLASS = 'P'
ADMIN4 = ''
ADMIN3 = ''
ADMIN2 = 'Maricopa County'
ADMIN1 = 'Arizona'
COUNTRY = 'United States'
row = self.make_admin_codes_query_row(
geonameid=GEONAMEID,
name=NAME,
featurecode=CODE,
featureclass=CLASS,
countrycode=COUNTRY,
admin1code=ADMIN1,
admin2code=ADMIN2,
admin3code=ADMIN3,
admin4code=ADMIN4)
expected = LocationAdminCodes()
expected.geonameid = GEONAMEID
expected.name = NAME
expected.featurecode = CODE
expected.featureclass = CLASS
expected.countrycode = COUNTRY
expected.admin1code = ADMIN1
expected.admin2code = ADMIN2
expected.admin3code = ADMIN3
expected.admin4code = ADMIN4
actual = self.weightifier._make_admin_codes(row)
print 'actual -> %s' % str(actual)
print 'expected -> %s' % str(expected)
assert expected == actual
def test__get_admin_codes__pass(self):
"""
Tests weighter.Weightifier._get_admin_codes with accuracy of 5
"""
expected = LocationAdminCodes()
expected.geonameid = '8658294'
expected.name = 'Mityana Town Council'
expected.featurecode = 'ADM4'
expected.featureclass = 'A'
expected.admin4code = '8658294'
expected.admin3code = '8644152'
expected.admin2code = 'D8'
expected.admin1code = 'C'
expected.countrycode = 'UG'
actual = self.weightifier._get_admin_codes(8658294, 5)
print expected
print actual
assert expected == actual
def test__get_admin_names__pass(self):
"""
Tests weighter.Weightifier._get_admin_names with accuracy of 5
"""
codes = LocationAdminCodes()
codes.admin4code = '8658294'
codes.admin3code = '8644152'
codes.admin2code = 'D8'
codes.admin1code = 'C'
codes.countrycode = 'UG'
expected = LocationAdminNames()
expected.admin4name = "Mityana Town Council"
expected.admin3name = 'Mityana'
expected.admin2name = 'Mityana District'
expected.admin1name = 'Central Region'
expected.countryname = 'Uganda'
actual = self.weightifier._get_admin_names(codes)
assert expected == actual
def test__gather_all_names__pass(self):
"""
        Tests weighter.Weightifier.gather_all_names with accuracy of 4
"""
geolocator = Geolocator()
container = geolocator._build_container(['Phoenix'])
# expected has expected admin names (of accuracy 4)
# with locations "Arizona" and "Phoenix"
expected = LocationHitsContainer()
expected.append(LocationHits('Phoenix', [
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
-34.96862,
139.18517,
0),
adminnames=LocationAdminNames(
countryname='Australia',
admin1name='State of South Australia',
admin2name='Mid Murray',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
49.09979,
-118.58562,
0),
adminnames=LocationAdminNames(
countryname='Canada',
admin1name='British Columbia',
admin2name=None,
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
37.78651,
20.89943,
0),
adminnames=LocationAdminNames(
countryname='Greece',
admin1name='Ionian Islands',
admin2name='Noms Zaknthou',
admin3name='Dimos Zakynthos',
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
6.90348,
-58.45244,
0),
adminnames=LocationAdminNames(
countryname='Guyana',
admin1name='Essequibo Islands-West Demerara Region',
admin2name=None,
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
6.46198,
-57.64923,
0),
adminnames=LocationAdminNames(
countryname='Guyana',
admin1name='Mahaica-Berbice Region',
admin2name=None,
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
-7.78178,
110.37814,
0),
adminnames=LocationAdminNames(
countryname='Indonesia',
admin1name='Daerah Istimewa Yogyakarta',
admin2name=None,
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
18.42516,
-77.72388,
0),
adminnames=LocationAdminNames(
countryname='Jamaica',
admin1name='Parish of Trelawny',
admin2name=None,
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
18.36667,
-78.28333,
0),
adminnames=LocationAdminNames(
countryname='Jamaica',
admin1name='Parish of Hanover',
admin2name=None,
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
35.09843,
129.02964,
0),
adminnames=LocationAdminNames(
countryname='South Korea',
admin1name='Busan',
admin2name=None,
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
-20.28667,
57.50222,
0),
adminnames=LocationAdminNames(
countryname='Mauritius',
admin1name='Plaines Wilhems District',
admin2name=None,
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
46.20074,
21.28622,
0),
adminnames=LocationAdminNames(
countryname='Romania',
admin1name='Arad',
admin2name=None,
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
1.30082,
103.84015,
0),
adminnames=LocationAdminNames(
countryname='Singapore',
admin1name=None,
admin2name=None,
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
33.36597,
-83.27766,
0),
adminnames=LocationAdminNames(
countryname='United States',
admin1name='Georgia',
admin2name='Putnam County',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
29.64605,
-89.93979,
0),
adminnames=LocationAdminNames(
countryname='United States',
admin1name='Louisiana',
admin2name='Plaquemines Parish',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
39.51649,
-76.61608,
0),
adminnames=LocationAdminNames(
countryname='United States',
admin1name='Maryland',
admin2name='Baltimore County',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
32.58125,
-90.56287,
0),
adminnames=LocationAdminNames(
countryname='United States',
admin1name='Mississippi',
admin2name='Yazoo County',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
34.2935,
-78.05833,
0),
adminnames=LocationAdminNames(
countryname='United States',
admin1name='North Carolina',
admin2name='Brunswick County',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
34.07735,
-82.11095,
0),
adminnames=LocationAdminNames(
countryname='United States',
admin1name='South Carolina',
admin2name='Greenwood County',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
29.3819,
-98.54168,
0),
adminnames=LocationAdminNames(
countryname='United States',
admin1name='Texas',
admin2name='Bexar County',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
41.61115,
-87.63477,
0),
adminnames=LocationAdminNames(
countryname='United States',
admin1name='Illinois',
admin2name='Cook County',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
47.38881,
-88.27761,
0),
adminnames=LocationAdminNames(
countryname='United States',
admin1name='Michigan',
admin2name='Keweenaw County',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
40.47788,
-74.31293,
0),
adminnames=LocationAdminNames(
countryname='United States',
admin1name='New Jersey',
admin2name='Middlesex County',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
40.52983,
-74.34098,
0),
adminnames=LocationAdminNames(
countryname='United States',
admin1name='New Jersey',
admin2name='Middlesex County',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
43.23118,
-76.30076,
0),
adminnames=LocationAdminNames(
countryname='United States',
admin1name='New York',
admin2name='Oswego County',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
33.44838,
-112.07404,
0),
adminnames=LocationAdminNames(
countryname='United States',
admin1name='Arizona',
admin2name='Maricopa County',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
39.92689,
-112.1105,
0),
adminnames=LocationAdminNames(
countryname='United States',
admin1name='Utah',
admin2name='Juab County',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
42.27541,
-122.81809,
0),
adminnames=LocationAdminNames(
countryname='United States',
admin1name='Oregon',
admin2name='Jackson County',
admin3name=None,
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
-29.71667,
31.01667,
0),
adminnames=LocationAdminNames(
countryname='South Africa',
admin1name='Province of KwaZulu-Natal',
admin2name='eThekwini Metropolitan Municipality',
admin3name='Ethekwini',
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
-29.70428,
30.9761,
0),
adminnames=LocationAdminNames(
countryname='South Africa',
admin1name='Province of KwaZulu-Natal',
admin2name='eThekwini Metropolitan Municipality',
admin3name='Ethekwini',
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
-26.07098,
29.20451,
0),
adminnames=LocationAdminNames(
countryname='South Africa',
admin1name='Mpumalanga',
admin2name='Nkangala District Municipality',
admin3name='Emalahleni',
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
-28.3,
26.81667,
0),
adminnames=LocationAdminNames(
countryname='South Africa',
admin1name='Free State',
admin2name='Lejweleputswa District Municipality',
admin3name='Masilonyana',
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
-25.71076,
23.04885,
0),
adminnames=LocationAdminNames(
countryname='South Africa',
admin1name='Province of North West',
admin2name='Dr Ruth Segomotsi Mompati District'
' Municipality',
admin3name='Kagisano/Molopo',
admin4name=None)),
LocationWrap(
Location(
'Phoenix',
-1,
'Phoenix',
'???',
'???',
'???',
'???',
-16.71667,
29.78333,
0),
adminnames=LocationAdminNames(
countryname='Zimbabwe',
admin1name=None,
admin2name=None,
admin3name=None,
admin4name=None))]))
# actual is application-generated admin names
actual = self.weightifier.gather_all_names(container, 4)
        # only one location ("Phoenix"), so only one LocationHits entry
assert len(expected.hits[0]) == len(actual.hits[0])
        # We can't compare expected to actual directly: _build_container
        # populates all of the LocationWrap.Location fields with data we
        # don't have here, so we compare only the fields we control:
        # name, weight, and adminnames.
for i in xrange(len(expected.hits[0])):
print i
# debug
try:
print 'expected -----------------------'
print expected.hits[0].locations[i].location.name
print expected.hits[0].locations[i]._weight
print expected.hits[0].locations[i].adminnames
print 'actual -------------------------'
print actual.hits[0].locations[i].location.name
print actual.hits[0].locations[i]._weight
print actual.hits[0].locations[i].adminnames
except UnicodeEncodeError:
print ('Unicode error for %s' %
expected.hits[0].locations[i].location.name)
# check name
assert (expected.hits[0].locations[i].location.name ==
actual.hits[0].locations[i].location.name)
# check weight
assert (expected.hits[0].locations[i]._weight ==
actual.hits[0].locations[i]._weight)
# check adminnames
assert (expected.hits[0].locations[i].adminnames ==
actual.hits[0].locations[i].adminnames)
return
def test__filter_by_weight(self):
"""
Tests weighter._filter_by_weight
"""
Hits1 = 'Hits1'
Hits2 = 'Hits2'
L2 = LocationWrap(
Location(
'L2',
-1,
'L2',
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=16,
adminnames=LocationAdminNames(
countryname='None',
admin1name='None',
admin2name='None',
admin3name='None',
admin4name='None'))
L10 = LocationWrap(
Location(
'L10',
-1,
'L10',
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=1,
adminnames=LocationAdminNames(
countryname='None',
admin1name='None',
admin2name='None',
admin3name='None',
admin4name='None'))
container = LocationHitsContainer()
# Hits1 - max weight is 16 from L2
container.append(LocationHits(Hits1, [
LocationWrap(
Location(
'L1',
-1,
'L1',
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=7,
adminnames=LocationAdminNames(
countryname='None',
admin1name='None',
admin2name='None',
admin3name='None',
admin4name='None')),
L2,
LocationWrap(
Location(
'L3',
-1,
'L3',
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=3,
adminnames=LocationAdminNames(
countryname='None',
admin1name='None',
admin2name='None',
admin3name='None',
admin4name='None'))]))
# Hits2 - max weight is 1 from L10
container.append(LocationHits(Hits2, [
L10,
LocationWrap(
Location(
'L11',
-1,
'L11',
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=0,
adminnames=LocationAdminNames(
countryname='None',
admin1name='None',
admin2name='None',
admin3name='None',
admin4name='None'))]))
expected = LocationHitsContainer()
expected.append(LocationHits(Hits1, [L2]))
expected.append(LocationHits(Hits2, [L10]))
actual = self.weightifier._filter_by_weight(container)
assert expected == actual
def test__back_weight1(self):
"""
        Tests weighter.Weightifier._back_weight (1)
"""
tagged_location = 'Camelot'
L2_NAME = 'A funny place'
COUNTRYNAME = 'England'
A1 = 'Some Province'
Hits1 = 'Hits1'
L2 = LocationWrap(
Location(
tagged_location,
-1,
L2_NAME,
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=16,
adminnames=LocationAdminNames(
countryname=COUNTRYNAME,
admin1name=A1,
admin2name=tagged_location,
admin3name='None',
admin4name='None'))
hits = LocationHits(Hits1, [
LocationWrap(
Location(
tagged_location,
-1,
tagged_location,
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=7,
adminnames=LocationAdminNames(
countryname='None',
admin1name='None',
admin2name='None',
admin3name='None',
admin4name='None')),
L2,
LocationWrap(
Location(
tagged_location,
-1,
tagged_location,
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=3,
adminnames=LocationAdminNames(
countryname='None',
admin1name='None',
admin2name='None',
admin3name='None',
admin4name='None'))])
matched_location_name = "King Arthur's Castle"
matched_location = LocationWrap(
Location(
matched_location_name,
-1,
matched_location_name,
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=16,
adminnames=LocationAdminNames(
countryname=COUNTRYNAME,
admin1name=A1,
admin2name=tagged_location,
admin3name='None',
admin4name='None'))
expected = [L2_NAME]
actual = self.weightifier._back_weight(
hits,
tagged_location,
matched_location)
print expected
print actual
assert expected == actual
def test__back_weight2(self):
"""
        Tests weighter.Weightifier._back_weight (2)
"""
tagged_location = 'Camelot'
L2_NAME = 'A funny place'
L3_NAME = 'some random name'
COUNTRYNAME = 'England'
A1 = 'Some Province'
Hits1 = 'Hits1'
L2 = LocationWrap(
Location(
tagged_location,
-1,
L2_NAME,
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=16,
adminnames=LocationAdminNames(
countryname=COUNTRYNAME,
admin1name=A1,
admin2name=tagged_location,
admin3name='None',
admin4name='None'))
L3 = LocationWrap(
Location(
tagged_location,
-1,
L3_NAME,
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=3,
adminnames=LocationAdminNames(
countryname=COUNTRYNAME,
admin1name=A1,
admin2name='some random A2',
admin3name='None',
admin4name='None'))
hits = LocationHits(Hits1, [
LocationWrap(
Location(
tagged_location,
-1,
tagged_location,
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=7,
adminnames=LocationAdminNames(
countryname=COUNTRYNAME,
admin1name='not A funny place',
admin2name='None',
admin3name='None',
admin4name='None')),
L2,
L3])
matched_location_name = "King Arthur's Castle"
matched_location = LocationWrap(
Location(
matched_location_name,
-1,
matched_location_name,
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=16,
adminnames=LocationAdminNames(
countryname=COUNTRYNAME,
admin1name=A1,
admin2name=tagged_location,
admin3name='None',
admin4name='None'))
expected = [L2_NAME]
actual = self.weightifier._back_weight(
hits,
tagged_location,
matched_location)
print expected
print actual
assert expected == actual
def test__back_weight__acc5(self):
"""
        Tests weighter.Weightifier._back_weight with accuracy of 5
"""
L2_NAME = 'A funny place'
L3_NAME = 'some random name'
COUNTRYNAME = 'England'
A1 = 'Some Province'
A2 = "King Arthur's Castle"
A3 = 'The Round Table'
Hits1 = 'Hits1'
tagged_location = A3
# this is the location that will be back weighted
L2 = LocationWrap(
Location(
A3,
-1,
L2_NAME,
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=16,
adminnames=LocationAdminNames(
countryname=COUNTRYNAME,
admin1name=A1,
admin2name=A2,
admin3name=A3,
admin4name='None'))
L3 = LocationWrap(
Location(
tagged_location,
-1,
L3_NAME,
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=3,
adminnames=LocationAdminNames(
countryname=COUNTRYNAME,
admin1name=A1,
admin2name='some random A2',
admin3name='None',
admin4name='None'))
# these are the hits that were all retrieved for The Round Table
hits = LocationHits(Hits1, [
LocationWrap(
Location(
tagged_location,
-1,
tagged_location,
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=7,
adminnames=LocationAdminNames(
countryname=COUNTRYNAME,
admin1name='not A funny place',
admin2name='None',
admin3name='None',
admin4name='None')),
L2,
L3])
# this is the location that contains a reference to The Round Table
matched_location_name = "King Arthur's Chair"
matched_location = LocationWrap(
Location(
matched_location_name,
-1,
matched_location_name,
'???',
'???',
'???',
'???',
-35.72259,
-65.31972,
0),
weight=16,
adminnames=LocationAdminNames(
countryname=COUNTRYNAME,
admin1name=A1,
admin2name=A2,
admin3name=A3,
admin4name=matched_location_name))
expected = [L2_NAME]
actual = self.weightifier._back_weight(
hits,
tagged_location,
matched_location)
print expected
print actual
assert expected == actual
def test__weightify(self):
"""
Tests weighter.weightify
"""
# build container
# this data was taken from a test run
# with "I live in Phoenix, Arizona"
AZ = 'Arizona'
AZ_LAT = 34.5003
AZ_LON = -111.50098
PHX = 'PHOENIX'
PHX_LAT = 33.44838
PHX_LON = -112.07404
US = 'United States'
AZ_WRAP = self._make_wrap(
name=AZ,
lat=AZ_LAT,
lon=AZ_LON,
weight=0,
countryname=US,
admin1name=AZ)
PHX_WRAP = self._make_wrap(
name=PHX,
lat=PHX_LAT,
lon=PHX_LON,
weight=0,
countryname=US,
admin1name=AZ)
container = LocationHitsContainer()
container.append(LocationHits(AZ, [
LocationWrap(
Location(
AZ,
-1,
AZ,
'???',
'???',
'???',
'???',
32.78904,
-92.95766,
0),
weight=0,
adminnames=LocationAdminNames(
countryname=US,
admin1name='Louisiana',
admin2name='Claiborne Parish',
admin3name='None',
admin4name='None')),
LocationWrap(
Location(
AZ,
-1,
AZ,
'???',
'???',
'???',
'???',
30.78963,
-95.468,
0),
weight=0,
adminnames=LocationAdminNames(
countryname=US,
admin1name='Texas',
admin2name='Walker County',
admin3name='None',
admin4name='None')),
LocationWrap(
Location(
AZ,
-1,
AZ,
'???',
'???',
'???',
'???',
41.81443,
-96.13363,
0),
weight=0,
adminnames=LocationAdminNames(
countryname=US,
admin1name='Nebraska',
admin2name='Burt County',
admin3name='None',
admin4name='None')),
AZ_WRAP]))
# Hits2 - max weight is 1 from L10
container.append(LocationHits(PHX, [PHX_WRAP]))
AZ_WRAP = self._make_wrap(
name=AZ,
lat=AZ_LAT,
lon=AZ_LON,
weight=1,
countryname=US,
admin1name=AZ)
PHX_WRAP = self._make_wrap(
name=PHX,
lat=PHX_LAT,
lon=PHX_LON,
weight=1,
countryname=US,
admin1name=AZ)
expected = LocationHitsContainer()
expected.append(LocationHits(AZ, [AZ_WRAP]))
expected.append(LocationHits(PHX, [PHX_WRAP]))
actual = self.weightifier.weightify(container)
print expected
print actual
assert expected == actual
def test__repr(self):
"""
Tests weighter.Weightifier.__repr__ to ensure that a str is returned
and that no errors are generated
"""
s = self.weightifier.__repr__()
        # the test fails on the __repr__ call above if it raises an exception
assert isinstance(s, str)
# def test_admin4named_locations():
# """
# Returns a giant list of all locations in geonames that have
# a populated admin4name field
# """
# accuracy = 7
# select = ('SELECT l.geonameid, l.name, l.featurecode, l.featureclass')
# if accuracy > 0:
# select += ', l.countrycode'
# if accuracy > 1:
# select += ', l.admin1code'
# if accuracy > 2:
# select += ', l.admin2code'
# if accuracy > 3:
# select += ', l.admin3code'
# if accuracy > 4:
# select += ', l.admin4code'
# fromm = 'FROM raw_locations l'
# where = 'WHERE NOT l.admin4code = \'\''
# sql = text('%s\n%s\n%s' % (select, fromm, where))
# # should return only 1
# result = db.engine.execute(sql)
# admincodes = []
# for row in result:
# admincodes.append(row)
# print admincodes
# assert False
|
{
"content_hash": "298bb13e5e243db69fe579e9c9110a36",
"timestamp": "",
"source": "github",
"line_count": 2161,
"max_line_length": 79,
"avg_line_length": 31.92133271633503,
"alnum_prop": 0.4299237482241744,
"repo_name": "gios-asu/text-geolocator",
"id": "1892a55377f11fa9d8fcd48b0b935fa3e30f4e40",
"size": "69007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geolocator/app/tests/test_weighter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6103"
},
{
"name": "HTML",
"bytes": "2038239"
},
{
"name": "Java",
"bytes": "3830"
},
{
"name": "JavaScript",
"bytes": "15796"
},
{
"name": "Mako",
"bytes": "431"
},
{
"name": "Python",
"bytes": "220910"
},
{
"name": "Shell",
"bytes": "1762"
}
],
"symlink_target": ""
}
|
"""
Django settings for SED project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'rmxn_z8zon22frz-yi65qjob*xf3fqpj^$$1!v4#3qf4a7i4a0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'website.apps.WebsiteConfig',
'widget_tweaks',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'SED.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'SED.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# MEDIA URL for storing files
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
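# NOTE: Django's development server does not serve MEDIA_ROOT automatically;
# the usual pattern is to append
#     static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# from django.conf.urls.static to the project's urlpatterns.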
# Login/logout URLs
LOGIN_URL = '/website/account/login/'
LOGOUT_URL = '/website/account/logout/'
|
{
"content_hash": "65c977646ca9698e671e3daf51ac9e9e",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 91,
"avg_line_length": 26.143939393939394,
"alnum_prop": 0.6879165459287163,
"repo_name": "abulbakha/sed",
"id": "ed71abf75a4407cdb7a37863f41a934c210a8685",
"size": "3451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SED/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3684"
},
{
"name": "HTML",
"bytes": "113583"
},
{
"name": "JavaScript",
"bytes": "6673"
},
{
"name": "Python",
"bytes": "58638"
}
],
"symlink_target": ""
}
|
from bs4 import BeautifulSoup
import urllib2
url = "http://www.portal.state.pa.us/portal/server.pt/community/2014/21611/february_2014_warn_notices/1725222"
content = urllib2.urlopen(url).read()
soup = BeautifulSoup(content, 'html.parser')
for link in soup.find_all('a'):
    print(link.get('href'))
for table in soup.find_all('tr'):
    print(table.get_text())
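# A hedged follow-up sketch: collect each table row's cell text into a list
# (this assumes the notices sit in <td> cells; the live page's structure has
# not been verified here).
for row in soup.find_all('tr'):
    cells = [td.get_text(strip=True) for td in row.find_all('td')]
    if cells:
        print(cells)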
|
{
"content_hash": "c3530a7372681ac21f72aee055df6653",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 110,
"avg_line_length": 25.928571428571427,
"alnum_prop": 0.7327823691460055,
"repo_name": "frankcash/Misc",
"id": "1ed8fb579d1011445f5a1793a216f93c740a41fb",
"size": "363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/scrapePaLinks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "1568"
},
{
"name": "C",
"bytes": "1344"
},
{
"name": "C++",
"bytes": "4322"
},
{
"name": "CSS",
"bytes": "7097"
},
{
"name": "Clojure",
"bytes": "2395"
},
{
"name": "HTML",
"bytes": "19939"
},
{
"name": "Haskell",
"bytes": "5429"
},
{
"name": "Java",
"bytes": "5909"
},
{
"name": "JavaScript",
"bytes": "52548"
},
{
"name": "Python",
"bytes": "15219"
},
{
"name": "Scala",
"bytes": "1538"
}
],
"symlink_target": ""
}
|
"""
flexstation.py
.. moduleauthor:: Guillaume Prevost <guillaume.prevost@rmit.edu.au>
"""
import logging
from itertools import izip_longest, imap
from struct import unpack
from tardis.tardis_portal.models import Schema, DatafileParameterSet
from tardis.tardis_portal.models import ParameterName, DatafileParameter
from os import path
from struct import *
import binascii
import pdb
import time
import string
from datetime import date
logger = logging.getLogger(__name__)
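# Registration sketch (hedged): in legacy MyTardis deployments, post-save
# filters like this one are typically wired up from the Django settings
# module, with the two list items feeding the filter's 'name' and 'schema'
# constructor arguments. The module path and schema URL below are
# illustrative assumptions, not values taken from this repository:
#
#     POST_SAVE_FILTERS = [
#         ("tardis.tardis_portal.filters.flexstation.FlexstationFilter",
#          ["Flexstation", "http://example.org/schemas/flexstation/1"]),
#     ]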
class FlexstationFilter(object):
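    # NUMBER_OF_ROWS: presumably the row count of a standard 96-well plate
    # (rows A-H). HEADER_END_DELIMITER: the byte pattern terminating the PDA
    # header -- two little-endian 32-bit integers with value 0x48.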
NUMBER_OF_ROWS = 8
HEADER_END_DELIMITER = "\x48\x00\x00\x00\x48\x00\x00\x00"
def __init__(self, name, schema):
"""This filter extract meta-data from file under the PDA format (proprietary format generated by SoftMax Pro)
:param name: the short name of the schema.
:type name: str
:param schema: the name of the schema to load the PDA meta-data into.
:type schema: str
"""
self.name = name
self.schema = schema
self.paramnames = (
{'name': 'softmax_version', 'full_name': 'SoftMax software version', 'data_type': ParameterName.STRING}, # Version of the SoftMax software
{'name': 'experiment_name', 'full_name': 'Experiment Name', 'data_type': ParameterName.STRING}, # Name of the experiment
{'name': 'analysis_notes', 'full_name': 'Analysis Notes', 'data_type': ParameterName.STRING}, # Note about the experiment
{'name': 'instrument_info', 'full_name': 'Instrument Info', 'data_type': ParameterName.STRING}, # Details of the instrument used.
{'name': 'plate_read_time', 'full_name': 'Plate Read Time', 'data_type': ParameterName.DATETIME}, # Date and time the plate read was done.
#{'name': 'section_type', 'full_name': 'Section Type', 'data_type': ParameterName.STRING}, # Section Kind, either Plate or Cuvette.
#{'name': 'export_format', 'full_name': 'Export Format', 'data_type': ParameterName.STRING}, # PlateFormat or TimeFormat, set in Preferences
{'name': 'strips', 'full_name': 'Strips Read', 'data_type': ParameterName.STRING}, #
{'name': 'read_type', 'full_name': 'Read Type', 'data_type': ParameterName.STRING}, # Endpoint, Kinetic, Spectrum, Well Scan, or Flex
{'name': 'data_mode', 'full_name': 'Data Mode', 'data_type': ParameterName.STRING}, # For absorbance plates: Absorbance or % Transmittance. For others: Fluorescence, Luminescence, or Time Resolved Fluorescence.
{'name': 'data_type', 'full_name': 'Data Type', 'data_type': ParameterName.STRING}, # Raw or Reduced.
{'name': 'trans', 'full_name': 'Trans', 'data_type': ParameterName.STRING}, # example: (H=80u,R=4, V=20u@15)
{'name': 'kinetic_points', 'full_name': 'Kinetic Points', 'data_type': ParameterName.NUMERIC}, # Reduced plates and Endpoint plates = 1 Kinetic / Spectrum plates: number of reads
{'name': 'kinetic_flex_read_time', 'full_name': 'Kinetic/Flex Time', 'data_type': ParameterName.NUMERIC}, # Kinetic/Flex read time in seconds.
{'name': 'kinetic_flex_interval', 'full_name': 'Kinetic/Flex Interval', 'data_type': ParameterName.NUMERIC}, # Kinetic or Flex read interval in seconds
{'name': 'well_scan_read_pattern', 'full_name': 'Well Scan Read Pattern', 'data_type': ParameterName.STRING}, # Well Scan read pattern (horizontal, vertical, X or fill)
{'name': 'well_scan_density', 'full_name': 'Well Scan Density', 'data_type': ParameterName.STRING}, # Well scan density
#{'name': 'start_wavelength', 'full_name': 'Start Wavelength', 'data_type': ParameterName.STRING}, # Spectrum start wavelength; otherwise blank field.
#{'name': 'end_wavelength', 'full_name': 'End Wavelength', 'data_type': ParameterName.STRING}, # Spectrum end wavelength; otherwise blank field.
#{'name': 'wavelength_step', 'full_name': 'Wavelength Step', 'data_type': ParameterName.STRING}, # Spectrum wavelength step; otherwise blank field.
{'name': 'number_of_wavelengths', 'full_name': 'Number of Wavelengths', 'data_type': ParameterName.NUMERIC}, # Number of wavelengths in Endpoint, Kinetic, Well Scan, Flex reads; for Spectrum; blank field.
{'name': 'read_wavelength', 'full_name': 'Read Wavelength', 'data_type': ParameterName.STRING}, # For Absorbance / Luminescence; wavelengths read, separated by a space; for Spectrum; blank field; for Fluorescence: emission wavelengths; for Ex / Em sweep: blank field.
{'name': 'number_of_wells_or_cuvette', 'full_name': 'Number of Wells or Cuvette', 'data_type': ParameterName.NUMERIC}, # Number of wells in a plate (24, 96, 384, etc.) or number of cuvettes read for a CuvetteSet.
{'name': 'excitation_wavelengths', 'full_name': 'Excitation Wavelengths', 'data_type': ParameterName.STRING}, # Excitation wavelengths, separated by spaces; For Luminescence or sweeps, this is a blank field (the emSweep excitation wavelength is in field 16).
{'name': 'read_per_well', 'full_name': 'Read Per Well', 'data_type': ParameterName.NUMERIC}, # Number of times a well is read for a single reading.
{'name': 'pmt_settings', 'full_name': 'PMT Settings', 'data_type': ParameterName.STRING}, # Automatic, High, Medium, or Low
#{'name': 'start_integration_settings', 'full_name': 'Start Integration Time'}, # Time to start integration for Time Resolved Fluorescence; otherwise, blank field.
#{'name': 'end_integration_time', 'full_name': 'End Integration Time'}, # Time to end integration for Time Resolved Fluorescence; otherwise, blank field.
)
def __call__(self, sender, **kwargs):
"""Post-save callback entry point.
:param sender: The model class.
:param instance: The actual instance being saved.
:param created: A boolean; True if a new record was created.
:type created: bool
"""
try:
print("Running Flexstation filter...")
instance = kwargs.get('instance') # get the file in the database
filepath = instance.get_absolute_filepath() # get the real location of the file
mimetype = instance.get_mimetype() # get the file type
logger.info(filepath)
logger.info(mimetype)
# exit if we're not looking at a PDA file
            if mimetype != 'application/octet-stream' or not filepath.endswith((".pda", ".PDA")):
return None
# get or create the schema to hold these parameters
schema = self.getSchema()
# get or create the parameter definitions if they don't exist
pn = self.getOrCreateParameterNames(schema, self.paramnames)
# set the metadata (a dictionary of dictionaries)
metadata = self.extractMetadata(filepath)
self.saveFlexstationMetadata(instance, schema, metadata) # save this metadata to a file
except Exception as e:
# if anything goes wrong, log it in tardis.log and exit
print(e)
logger.info(e)
return None
def extractMetadata(self, target):
"""Extracts the metadata from a PDA file (binary produced by SoftMax Pro)
:param target: the path of the PDA file to extract metadata from
:type target: str
:returns metadata: the dictionary of the extracted metadata
:type metadata: dict
"""
metadata = {}
with open(target, 'rb') as f:
# Checks the version number, abort if different from the expected one
self.readStringUntilDelimiter(f)
f.read(1)
pdaVersion = self.readStringUntilDelimiter(f).strip()
if not pdaVersion.startswith("5."):
print("Unsupported PDA file version '{0}' (minimum v5). Metadata can't be extracted.".format(pdaVersion))
return {}
metadata['softmax_version'] = pdaVersion
f.seek(f.tell() + 1)
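            # the header stores the dataset count on a 'key=N' line; keep
            # the integer after '=' (with trailing '\r' and spaces stripped)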
numberOfDatasets = self.readStringUntilDelimiter(f)
            numberOfDatasets = int(numberOfDatasets.split("=")[1].strip("\r "))
            # Scan forward past every occurrence of the header's end
            # delimiter; the dataset records begin right after the last one
fileIndexSave = f.tell()
while self.readStringUntilStringDelimiter(f, self.HEADER_END_DELIMITER) != None:
fileIndexSave = f.tell()
f.seek(fileIndexSave)
i = 0
while (i < numberOfDatasets):
self.readDataset(f, metadata)
i += 1
return metadata
def readDataset(self, f, metadata):
"""Extracts the metadata from a PDA file (binary produced by SoftMax Pro)
:param f: the opened PDA file to read
:type f: file
:param metadata: the dictionary to add metadata into
:type metadata: dict
:returns metadata: the dictionary of the extracted metadata
        :type metadata: dict
"""
# Experiment Name
try:
experimentName = self.readExperimentSection(f)
            if experimentName is not None:
                metadata['experiment_name'] = experimentName
            else:
                raise ValueError('experiment section not found')
except:
print('Failed to extract experiment name from PDA file.')
logger.error('Failed to extract experiment name from PDA file.')
fileIndexSave = f.tell()
        while True:
            f.seek(f.tell() + 4)  # skip a 4-byte number
tmplGroupTitle = self.readTmplGroup(f)
if (tmplGroupTitle != None):
fileIndexSave = f.tell()
else:
f.seek(fileIndexSave)
            f.seek(f.tell() + 4)  # skip a 4-byte number
tmplSampleTitle = self.readTmplSample(f)
if (tmplSampleTitle != None):
fileIndexSave = f.tell()
else:
f.seek(fileIndexSave)
numberHex = binascii.hexlify(f.read(4))
number = int(numberHex, 16)
if (number != 0):
f.seek(fileIndexSave)
if ('analysis_notes' not in metadata):
                f.seek(f.tell() + 4)  # skip 4 bytes on the 1st occurrence of an analysis section
analysisName, analysisContent = self.readAnalysisSection(f)
if (analysisName == None or analysisContent == None):
f.seek(fileIndexSave)
break
elif analysisContent:
fileIndexSave = f.tell()
if ('analysis_notes' in metadata):
metadata['analysis_notes'] = str.format("{0}. {1}: {2}", metadata['analysis_notes'], \
analysisName, analysisContent)
else:
metadata['analysis_notes'] = str.format("{0}: {1}", analysisName, analysisContent)
# Number of Wells
try:
fileIndexSave = f.tell()
numberOfWells = self.readWells(f)
if (numberOfWells):
metadata['number_of_wells_or_cuvette'] = numberOfWells
except:
f.seek(fileIndexSave)
print('Failed to extract number of wells or cuvettes from PDA file.')
logger.error('Failed to extract number of wells or cuvettes from PDA file.')
fileIndexSave = f.tell()
        while True:
self.skipIfNumber(f, [0, 1, 2])
tmplGroupTitle = self.readTmplGroup(f)
if (tmplGroupTitle != None):
fileIndexSave = f.tell()
else:
f.seek(fileIndexSave)
self.skipIfNumber(f, [0, 1, 2])
tmplSampleTitle = self.readTmplSample(f)
if (tmplSampleTitle != None):
fileIndexSave = f.tell()
else:
f.seek(fileIndexSave)
self.skipIfNumber(f, [0, 2])
#if ('analysis_notes' not in metadata):
#f.seek(f.tell() + 4) # skips 8 bytes on the 1st occurence of an analysis section
analysisName, analysisContent = self.readAnalysisSection(f)
if (analysisName == None or analysisContent == None):
f.seek(fileIndexSave)
break
fileIndexSave = f.tell()
if analysisContent:
if ('analysis_notes' in metadata):
metadata['analysis_notes'] = str.format("{0}. {1}: {2}", metadata['analysis_notes'], \
analysisName, analysisContent)
else:
metadata['analysis_notes'] = str.format("{0}: {1}", analysisName, analysisContent)
# Plate Section
try:
self.skipIfNumber(f, [0])
self.skipIfNumber(f, [6])
fileIndexSave = f.tell()
plateName = self.readPlateSection(f)
            if plateName is None:
                raise ValueError('plate section not found')
except:
f.seek(fileIndexSave)
print('Failed to read plate section from PDA file.')
logger.error('Failed to read plate section from PDA file.')
# Plate Data
numberOfColumns = 0
try:
fileIndexSave = f.tell()
firstReadColumn, numberOfColumns, readNumber, wavelengthsNumber, emValues, readDuration, readInterval, exValues, trans = self.readPlateData(f)
if numberOfColumns > 1:
metadata['strips'] = str.format("{0}-{1}", firstReadColumn, firstReadColumn + numberOfColumns - 1)
else:
metadata['strips'] = str.format("{0}", firstReadColumn)
if (wavelengthsNumber):
metadata['number_of_wavelengths'] = wavelengthsNumber
if (readNumber):
metadata['kinetic_points'] = readNumber
if (readDuration):
metadata['kinetic_flex_read_time'] = readDuration
if (readInterval):
metadata['kinetic_flex_interval'] = readInterval
            # TODO: set these according to the read type
#metadata['well_scan_read_pattern'] = ''
#metadata['well_scan_density'] = ''
if (emValues):
metadata['read_wavelength'] = emValues
if (exValues):
metadata['excitation_wavelengths'] = exValues
if (trans):
metadata['trans'] = trans
except:
f.seek(fileIndexSave)
print('Failed to extract plate data from PDA file.')
logger.error('Failed to extract plate data from PDA file.')
# Plate Descriptor
try:
fileIndexSave = f.tell()
numberOfPlates = self.readPlateDescriptor(f)
            if numberOfPlates is None:
                raise ValueError('plate descriptor not found')
except:
f.seek(fileIndexSave)
print('Failed to read plate descriptor from PDA file.')
logger.error('Failed to read plate descriptor from PDA file.')
# Flex Sites (Actual Data)
try:
fileIndexSave = f.tell()
numberOfFlexSites = self.readFlexSites(f, numberOfColumns)
            if numberOfFlexSites is None or numberOfFlexSites == 0:
                raise ValueError('no flex sites found')
except:
f.seek(fileIndexSave)
print('Failed to read flex sites from PDA file.')
logger.error('Failed to read flex sites from PDA file.')
# Plate Body
try:
fileIndexSave = f.tell()
wavelength, wavelengthCombination, formula, unknown, instrumentInfos = self.readCalcPlateBody(f)
if (wavelengthCombination):
metadata['wavelength_combination'] = wavelengthCombination
if (instrumentInfos):
metadata['instrument_info'] = instrumentInfos
        except Exception:
f.seek(fileIndexSave)
print('Failed to read plate body from PDA file.')
logger.error('Failed to read plate body from PDA file.')
# TODO: find and extract these metadata
#metadata['plate_read_time'] = date.today()
# Section Kind ? either Plate or Cuvette.
#metadata['read_type'] = 'Endpoint, Kinetic, Spectrum, Well Scan, or Flex'
#metadata['data_mode'] = 'For absorbance plates: Absorbance or % Transmittance. For others: Fluorescence, Luminescence, or Time Resolved Fluorescence.'
#metadata['data_type'] = 'Raw or Reduced'
#metadata['read_per_well'] = 6
#metadata['pmt_settings'] = 'Automatic, High, Medium, or Low'
return metadata
def readExperimentSection(self, f):
"""Reads an 'ExperimentSection' structure
:param f: the opened PDA file to read
:type f: file
:returns experimentName: the name of the experiment section
:type experimentName: str
"""
structureName = self.readStructureName(f)
if structureName != "CSExperimentSection":
return
experimentName = self.readStringUntilDelimiter(f)
f.seek(f.tell() + 34)
return (experimentName)
def readTmplGroup(self, f):
"""Reads a 'TmplGroup' structure
:param f: the opened PDA file to read
:type f: file
:returns tmplGroupTitle: the template group title
:type tmplGroupTitle: str
"""
structureName = self.readStructureName(f)
if structureName != "CSTmplGroup":
return
tmplGroupTitle = self.readStringUntilDelimiter(f)
f.seek(f.tell() + 8)
descriptorUnit = self.readStringUntilDelimiter(f)
f.seek(f.tell() + 4)
descriptorTitle = self.readStringUntilDelimiter(f)
f.seek(f.tell() + 21)
return (tmplGroupTitle)
def readTmplSample(self, f):
"""Reads a 'TmplSample' structure
:param f: the opened PDA file to read
:type f: file
:returns tmplSampleTitle: the template sample title
:type tmplSampleTitle: str
"""
structureName = self.readStructureName(f)
if structureName != "CSTmplSample":
return
tmplSampleTitle = self.readStringUntilDelimiter(f)
f.seek(f.tell() + 24)
return (tmplSampleTitle)
def readAnalysisSection(self, f):
"""Reads a 'AnalysisSection' structure
:param f: the opened PDA file to read
:type f: file
:returns analysisName: the name of the analysis section
:type analysisName: str
:returns analysisContent: the content of the analysis section
:type analysisContent: str
"""
structureName = self.readStructureName(f)
if structureName != "CSAnalysisSection":
return (None, None)
analysisName = self.readStringUntilDelimiter(f)
f.seek(f.tell() + 28) # skip 28 bytes
analysisContent = self.readStringWithLengthPrefix(f, 4)
#f.seek(f.tell() + 58)
delimiter = ""
i = 0
while (i < 32):
delimiter = delimiter + "\xFF"
i += 1
self.readStringUntilStringDelimiter(f, delimiter)
return (analysisName, analysisContent)
def readWells(self, f):
"""Reads several 'Well' structures, based on the number of wells
:param f: the opened PDA file to read
:type f: file
:returns numberOfWells: the number of wells read
:type numberOfWells: int
"""
numberOfWellsHex = binascii.hexlify(f.read(4))
numberOfWells = int(numberOfWellsHex, 16)
if (numberOfWells == None):
return (None)
        i = 0
while i < numberOfWells:
wellName, rowNumber, columnNumber, plateNumber = self.readWell(f)
i += 1
return (numberOfWells)
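    # Illustrative byte layout (hypothetical values): the well count is a
    # 4-byte big-endian integer read via hexlify, e.g. the bytes
    # 00 00 00 60 give int('00000060', 16) == 96, i.e. a 96-well plate.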
def readWell(self, f):
"""Reads a 'Well' structure
:param f: the opened PDA file to read
:type f: file
:returns wellName: the name of the well
:type wellName: str
:returns rowNumber: the number of the row where this well is located
:type rowNumber: int
:returns columnNumber: the number of the column where this well is located
:type columnNumber: int
:returns plateNumber: the number of the plate where this well is located
:type plateNumber: int
"""
structureName = self.readStructureName(f)
if structureName != "CSWell":
return
wellName = self.readStringUntilDelimiter(f)
hex = binascii.hexlify(f.read(2))
rowNumber = int(hex, 16)
hex = binascii.hexlify(f.read(2))
columnNumber = int(hex, 16)
f.seek(f.tell() + 6) # skip 2 + 2 + 2
plateNumber = self.readStringUntilDelimiter(f)
f.seek(f.tell() + 4) # skip 4
return (wellName, rowNumber, columnNumber, plateNumber)
def readPlateSection(self, f):
"""Reads a 'PlateSection' structure
:param f: the opened PDA file to read
:type f: file
:returns plateName: the name of the plate section
:type plateName: string
"""
structureName = self.readStructureName(f)
if structureName != "CSPlateSection":
return (None)
plateName = self.readStringUntilDelimiter(f)
f.seek(f.tell() + 4) # skip 4
return (plateName)
def readPlateData(self, f):
"""Reads a 'PlateData' structure
:param f: the opened PDA file to read
:type f: file
:returns firstReadColumn: the first column of wells read
:type firstReadColumn: int
:returns numberOfColumns: the number of columns read (counting from the first column read)
:type numberOfColumns: int
:returns readNumber: the number of reads
:type readNumber: int
:returns wavelengthsNumber: the number of wavelengths
:type wavelengthsNumber: int
:returns emValues: the values of the different wavelengths, separated by spaces
:type emValues: str
:returns readDuration: the duration of the read
:type readDuration: int
:returns readInterval: the interval between each read
:type readInterval: int
:returns exValues: the values of the excitation wavelengths, separated by spaces
:type exValues: str
:returns trans: the values of trans, formatted as 'H={0}µ, R={1}, V={2}µ, @{3}'
:type trans: str
"""
structureName = self.readStructureName(f)
if structureName != "CSPlateData":
return
f.seek(f.tell() + 6)
firstReadColumnHex = binascii.hexlify(f.read(2))
firstReadColumn = int(firstReadColumnHex, 16)
numberOfColumnsHex = binascii.hexlify(f.read(2))
numberOfColumns = int(numberOfColumnsHex, 16)
readNumberHex = binascii.hexlify(f.read(4))
readNumber = int(readNumberHex, 16)
wavelengthsNumberHex = binascii.hexlify(f.read(4))
wavelengthsNumber = int(wavelengthsNumberHex, 16)
i = 0
emValues = None
while (i < wavelengthsNumber):
emWaveValueHex = binascii.hexlify(f.read(4))
emWaveValue = int(emWaveValueHex, 16)
f.seek(f.tell() + 1)
if (emValues != None):
emValues = str.format("{0} {1}", emValues, emWaveValue)
else:
emValues = str.format("{0}", emWaveValue)
i += 1
f.seek(f.tell() + 4)
        # the read duration and interval are big-endian doubles
        readDuration = unpack('!d', f.read(8))[0]
        readInterval = unpack('!d', f.read(8))[0]
f.seek(f.tell() + 170)
i = 0
exValues = None
while (i < wavelengthsNumber):
exWaveValueHex = binascii.hexlify(f.read(4))
exWaveValue = int(exWaveValueHex, 16)
f.seek(f.tell() + 4)
if (exValues != None):
exValues = str.format("{0} {1}", exValues, exWaveValue)
else:
exValues = str.format("{0}", exWaveValue)
i += 1
f.seek(f.tell() + 659)
i = 0
trans = None
while (i < wavelengthsNumber):
transRHex = binascii.hexlify(f.read(4))
transR = int(transRHex, 16)
transAtHex = binascii.hexlify(f.read(4))
transAt = int(transAtHex, 16)
            transV = unpack('!d', f.read(8))[0]
transHHex = binascii.hexlify(f.read(4))
transH = int(transHHex, 16)
f.seek(f.tell() + 16)
formattedTrans = str.format("Trans{0}: H={1}\xb5, R={2}, V={3}\xb5, \x40{4}", (i + 1), transH, transR, transV, transAt)
if (trans != None):
trans = str.format("{0}. {1}", trans, formattedTrans)
else:
trans = str.format("{0}", formattedTrans)
i += 1
f.seek(f.tell() + 75)
return (firstReadColumn, numberOfColumns, readNumber, wavelengthsNumber, emValues, readDuration, readInterval, exValues, trans)
def readPlateDescriptor(self, f):
"""Reads a 'PlateDescriptor' structure
:param f: the opened PDA file to read
:type f: file
:returns numberOfPlates: the number of plates
:type numberOfPlates: int
"""
structureName = self.readStructureName(f)
if structureName != "CSPlateDescriptor":
return (None)
f.seek(f.tell() + 1)
numberOfPlatesHex = binascii.hexlify(f.read(4))
numberOfPlates = int(numberOfPlatesHex, 16)
        i = 0
while i < numberOfPlates:
f.seek(f.tell() + 4)
            temperature = unpack('!f', f.read(4))[0]  # read (and discard) the plate temperature
i += 1
f.seek(f.tell() + 27)
return (numberOfPlates)
def readFlexSites(self, f, numberOfColumns):
"""Reads several 'FlexSite' structures, based on the number of rows and columns
:param f: the opened PDA file to read
:type f: file
:param numberOfColumns: the number of columns read
:type numberOfColumns: int
"""
        i = 0
while i < (numberOfColumns * self.NUMBER_OF_ROWS):
id = self.readFlexSite(f)
if (id == None):
return (None)
i += 1
f.seek(f.tell() + 1)
return (i)
def readFlexSite(self, f):
"""Reads a 'FlexSite' structure
:param f: the opened PDA file to read
:type f: file
:returns id: the id of the flex site
:type id: int
"""
structureName = self.readStructureName(f)
if structureName != "CSFlexSite":
return
dataChunkNumberHex = binascii.hexlify(f.read(4))
dataChunkNumber = int(dataChunkNumberHex, 16)
readNumberHex = binascii.hexlify(f.read(4))
readNumber = int(readNumberHex, 16)
idHex = binascii.hexlify(f.read(4))
id = int(idHex, 16)
dataChunkLengthHex = binascii.hexlify(f.read(4))
dataChunkLength = int(dataChunkLengthHex, 16)
if (dataChunkNumber == None or dataChunkLength == None):
return (None)
i = 0
while (i < dataChunkNumber):
f.seek(f.tell() + dataChunkLength)
i += 1
return (id)
def readCalcPlateBody(self, f):
"""Reads a 'CalcPlateBody' structure
:param f: the opened PDA file to read
:type f: file
:returns wavelength: the wavelength of the plate body
:type wavelength: str
:returns wavelengthCombination: the wavelength combination
:type wavelengthCombination: str
:returns formula: the formula used for combining wavelengths
:type formula: str
:returns unknown: --
:type unknown: str
:returns instrumentInfos: the information about the plate reader instrument
:type instrumentInfos: str
"""
structureName = self.readStructureName(f)
if structureName != "CSCalcPlateBody":
return
f.seek(f.tell() + 23)
wavelength = self.readStringUntilDelimiter(f)
wavelengthCombination = self.readStringUntilDelimiter(f)
formula = self.readStringUntilDelimiter(f)
f.seek(f.tell() + 175)
unknown = self.readStringUntilDelimiter(f)
instrumentInfos = self.readStringUntilDelimiter(f)
return (wavelength, wavelengthCombination, formula, unknown, instrumentInfos)
def readMorphPlateTable(self, f):
"""Reads a 'MorphPlateTable' structure
:param f: the opened PDA file to read
:type f: file
"""
structureName = self.readStructureName(f)
if structureName != "CSMorphPlateTable":
return
f.seek(f.tell() + 77)
return (None)
def readStringUntilDelimiter(self, f, delimiter="\x00"):
"""Reads a string from binary until a single-character delimiter is reached
:param f: the opened PDA file to read
:type f: file
:param delimiter: the delimiter which stops the reading if encountered (default '\x00')
:type delimiter: str
:returns result: the string that was read, without the end delimiter. None if the delimiter was never found
:type result: str
"""
if (f == None):
return None
result = ''
data = f.read(1)
while data != delimiter:
if (not data): # the end of the file has been reached without finding the delimiter
return None
result += str(unpack("c", data)[0])
data = f.read(1)
return (result)
def readStringUntilStringDelimiter(self, f, delimiter="\x00"):
"""Reads a string from binary until a string delimiter is reached (accepts a multiple-characters delimiter)
:param f: the opened PDA file to read
:type f: file
:param delimiter: the delimiter which stops the reading if encountered (default "\x00")
:type delimiter: str
:returns result: the string that was read, without the end delimiter. None if the delimiter was never found.
:type result: str
"""
if (f == None):
return None
result = ''
data = f.read(1)
while data:
result += str(unpack("c", data)[0])
if result.endswith(delimiter):
return (result.replace(delimiter, ""))
data = f.read(1)
return (None) # reached the end of the file without finding the delimiter
def readStructureName(self, f):
"""Reads a structure name in the PDA format
:param f: the opened PDA file to read
:type f: file
:returns structureName: the name of the structure
        :type structureName: str
"""
structureName = self.readStringWithLengthPrefix(f, 1)
return (structureName)
def readStringWithLengthPrefix(self, f, numberOfByteForPrefix):
"""Reads a string prefixed by its length as a n-byte number
:param f: the opened PDA file to read
:type f: file
:param numberOfByteForPrefix: the number of bytes on which the prefix is encoded
:type numberOfByteForPrefix: int
:returns stringContent: the string read, of length defined by the prefix
:type stringContent: str
"""
if (f == None or numberOfByteForPrefix == None or numberOfByteForPrefix <= 0):
return None
# reads the length of the string, on n-bytes
stringLengthHex = binascii.hexlify(f.read(numberOfByteForPrefix))
stringLength = int(stringLengthHex, 16)
# reads the string itself
stringContent = f.read(stringLength)
return (stringContent)
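    # Illustrative byte layout (hypothetical values): with a 1-byte length
    # prefix, the bytes 06 43 53 57 65 6C 6C decode as prefix 0x06 followed by
    # six ASCII bytes, so readStringWithLengthPrefix(f, 1) returns 'CSWell'.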
def skipIfNumber(self, f, numbers):
"""Skips a number
:param f: the opened hexadecimal file to read
:type f: file
:param numbers: a dictionnary of the numbers that should be skipped
:type numbers: dict
"""
        if f is None or not numbers:
return None
fileIndexSave = f.tell()
numberHex = binascii.hexlify(f.read(4))
number = int(numberHex, 16)
if (number not in numbers):
f.seek(fileIndexSave)
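    # Example: skipIfNumber(f, [0, 1, 2]) consumes the next 4 bytes only if
    # they encode 0, 1 or 2; otherwise the file position is rewound, so callers
    # can probe for optional padding words without losing their place.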
def saveFlexstationMetadata(self, instance, schema, metadata):
"""Saves or overwrites the datafile's metadata to a Dataset_Files parameter set in the database.
"""
logger.info('Saving Metadata')
parameters = self.getParameters(schema, metadata)
if not parameters:
return None
try:
ps = DatafileParameterSet.objects.get(schema=schema,
dataset_file=instance)
return ps # if already exists then just return it
except DatafileParameterSet.DoesNotExist:
ps = DatafileParameterSet(schema=schema,
dataset_file=instance)
ps.save()
for p in parameters:
if p.name in metadata:
dfp = DatafileParameter(parameterset=ps,
name=p)
if p.isNumeric():
if metadata[p.name] != '':
dfp.numerical_value = metadata[p.name]
dfp.save()
else:
dfp.string_value = metadata[p.name].decode('cp1252')
dfp.save()
return ps
def getParameters(self, schema, metadata):
"""Get a list of parameters for this schema
:param schema: the schema under which the meta-data will be saved
:type schema: Schema
        :param metadata: the dictionary of meta-data to be saved
        :type metadata: dict
        :returns parameters: a list of the parameters that will be saved
        :type parameters: list
"""
param_objects = ParameterName.objects.filter(schema=schema)
parameters = []
for p in metadata:
parameter = filter(lambda x: x.name == p, param_objects)
if parameter:
parameters.append(parameter[0])
continue
# detect type of parameter
datatype = ParameterName.STRING
# Int test
try:
int(metadata[p])
except ValueError:
pass
except TypeError:
pass
else:
datatype = ParameterName.NUMERIC
# Float test
try:
float(metadata[p])
except ValueError:
pass
except TypeError:
pass
else:
datatype = ParameterName.NUMERIC
return parameters
def getSchema(self):
"""Returns the schema object that the parameter set will use.
"""
try:
return Schema.objects.get(namespace__exact=self.schema)
except Schema.DoesNotExist:
schema = Schema(namespace=self.schema, name=self.name, type=Schema.DATAFILE)
schema.save()
return schema
def getOrCreateParameterNames(self, schema, paramnames):
""" Takes a list of paramnames (defined in the __init__ method) to get or create new parameter names objects
"""
pn_objects = []
for paramname in paramnames:
param_objects = ParameterName.objects.filter(schema=schema, name=paramname['name'])
if len(param_objects) == 0:
pn = ParameterName(schema=schema, name=paramname['name'], full_name=paramname['full_name'],
data_type=paramname['data_type'])
pn.save()
else:
pn = param_objects[0]
pn_objects.append(pn)
return pn_objects
def make_filter(name='', schema=''):
''' Instantiate and return the FlexstationFilter class
:param name: the name of the filter
:param schema: the short name of the schema to use for this filter
:return: a new instance of the FlexstationFilter class
'''
if not name:
raise ValueError("FlexstationFilter requires a name to be specified")
if not schema:
raise ValueError("FlexstationFilter requires a schema to be specified")
return FlexstationFilter(name, schema)
make_filter.__doc__ = FlexstationFilter.__doc__
|
{
"content_hash": "051a690eaf031a9abb4dc880ec6086d3",
"timestamp": "",
"source": "github",
"line_count": 908,
"max_line_length": 279,
"avg_line_length": 41.01211453744494,
"alnum_prop": 0.586589328392277,
"repo_name": "guillaumeprevost/hiri-tardis-filter",
"id": "316d2419884684d3d016cb51e03b3ea2cd711368",
"size": "38982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filters/flexstation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63485"
}
],
"symlink_target": ""
}
|
catNames = []
while True:
print('Enter the name of cat ' + str(len(catNames) + 1) + ' (Or enter nothing to stop.):')
name = input()
if name == '':
break
catNames = catNames + [name] # list concatenation
print('The cat names are:')
for name in catNames:
print(' ' + name)
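# Note: `catNames = catNames + [name]` builds a new list on every iteration;
# the more common idiom, `catNames.append(name)`, mutates the existing list in
# place and is equivalent here.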
|
{
"content_hash": "950c8c7f85bf1cd0ec02df8bb23a1f6a",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 94,
"avg_line_length": 30.1,
"alnum_prop": 0.584717607973422,
"repo_name": "vdrey/Toolbox",
"id": "ced2f3bb9d3dde93908930f41e84de47fa36665b",
"size": "301",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/AutomateBoringStuff/automate_online-materials/allMyCats2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1004"
},
{
"name": "HTML",
"bytes": "317"
},
{
"name": "Python",
"bytes": "372332"
}
],
"symlink_target": ""
}
|
import os
import sys
sys.path.insert(0, os.path.abspath('../../accelerometer/'))
# -- Project information -----------------------------------------------------
project = u'accelerometer'
copyright = u'2018, Aiden Doherty'
author = u'Aiden Doherty'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u'2.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {
'css_files': [
'_static/theme_overrides.css', # override wide tables in RTD theme
],
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'accelerometerdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'accelerometer.tex', u'accelerometer Documentation',
u'Aiden Doherty', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'accelerometer', u'accelerometer Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'accelerometer', u'accelerometer Documentation',
author, 'accelerometer', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
|
{
"content_hash": "3f322326583d23a3224dffd2a71428a3",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 78,
"avg_line_length": 28.910714285714285,
"alnum_prop": 0.6403129503808935,
"repo_name": "aidendoherty/biobankAccelerometerAnalysis",
"id": "f37d73e8d0fca8aa3cb68394c9b37e04d29dfad2",
"size": "5421",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Java",
"bytes": "55686"
},
{
"name": "Python",
"bytes": "72280"
}
],
"symlink_target": ""
}
|
"""SciPy: Scientific Library for Python
SciPy (pronounced "Sigh Pie") is open-source software for mathematics,
science, and engineering. The SciPy library
depends on NumPy, which provides convenient and fast N-dimensional
array manipulation. The SciPy library is built to work with NumPy
arrays, and provides many user-friendly and efficient numerical
routines such as routines for numerical integration and optimization.
Together, they run on all popular operating systems, are quick to
install, and are free of charge. NumPy and SciPy are easy to use,
but powerful enough to be depended upon by some of the world's
leading scientists and engineers. If you need to manipulate
numbers on a computer and display or publish the results,
give SciPy a try!
"""
DOCLINES = __doc__.split("\n")
import os
import sys
import subprocess
import textwrap
import warnings
if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[0:2] < (3, 2):
raise RuntimeError("Python version 2.6, 2.7 or >= 3.2 required.")
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.2
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
MAJOR = 0
MINOR = 19
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
# BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be
# properly updated when the contents of directories change (true for distutils,
# not sure about setuptools).
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
# This is a bit hackish: we are setting a global variable so that the main
# scipy __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet. While ugly, it's
# a lot more robust than what was previously being used.
builtins.__SCIPY_SETUP__ = True
def get_version_info():
# Adding the git rev number needs to be done inside
# write_version_py(), otherwise the import of scipy.version messes
# up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('scipy/version.py'):
# must be a source distribution, use existing version file
# load it as a separate module to not load scipy/__init__.py
import imp
version = imp.load_source('scipy.version', 'scipy/version.py')
GIT_REVISION = version.git_revision
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
return FULLVERSION, GIT_REVISION
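# For example, with MAJOR/MINOR/MICRO = 0/19/0 and ISRELEASED = False, a git
# checkout at a revision starting with 1a2b3c4 (hypothetical) yields
# FULLVERSION == '0.19.0.dev0+1a2b3c4'.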
def write_version_py(filename='scipy/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM SCIPY SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
FULLVERSION, GIT_REVISION = get_version_info()
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
try:
from sphinx.setup_command import BuildDoc
HAVE_SPHINX = True
except ImportError:
HAVE_SPHINX = False
if HAVE_SPHINX:
class ScipyBuildDoc(BuildDoc):
"""Run in-place build before Sphinx doc build"""
def run(self):
ret = subprocess.call([sys.executable, sys.argv[0], 'build_ext', '-i'])
if ret != 0:
raise RuntimeError("Building Scipy failed!")
BuildDoc.run(self)
def check_submodules():
""" verify that the submodules are checked out and clean
use `git submodule update --init`; on failure
"""
if not os.path.exists('.git'):
return
with open('.gitmodules') as f:
for l in f:
if 'path' in l:
p = l.split('=')[-1].strip()
if not os.path.exists(p):
raise ValueError('Submodule %s missing' % p)
proc = subprocess.Popen(['git', 'submodule', 'status'],
stdout=subprocess.PIPE)
status, _ = proc.communicate()
status = status.decode("ascii", "replace")
for line in status.splitlines():
if line.startswith('-') or line.startswith('+'):
raise ValueError('Submodule not clean: %s' % line)
from distutils.command.sdist import sdist
class sdist_checked(sdist):
""" check submodules on sdist to prevent incomplete tarballs """
def run(self):
check_submodules()
sdist.run(self)
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
p = subprocess.call([sys.executable,
os.path.join(cwd, 'tools', 'cythonize.py'),
'scipy'],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def parse_setuppy_commands():
"""Check the commands and respond appropriately. Disable broken commands.
    Return a boolean for whether to run the build (avoid parsing Cython and
    template files if False).
"""
if len(sys.argv) < 2:
# User forgot to give an argument probably, let setuptools handle that.
return True
info_commands = ['--help-commands', '--name', '--version', '-V',
'--fullname', '--author', '--author-email',
'--maintainer', '--maintainer-email', '--contact',
'--contact-email', '--url', '--license', '--description',
'--long-description', '--platforms', '--classifiers',
'--keywords', '--provides', '--requires', '--obsoletes']
# Add commands that do more than print info, but also don't need Cython and
# template parsing.
info_commands.extend(['egg_info', 'install_egg_info', 'rotate'])
for command in info_commands:
if command in sys.argv[1:]:
return False
# Note that 'alias', 'saveopts' and 'setopt' commands also seem to work
# fine as they are, but are usually used together with one of the commands
# below and not standalone. Hence they're not added to good_commands.
good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py',
'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm',
'bdist_wininst', 'bdist_msi', 'bdist_mpkg',
'build_sphinx')
for command in good_commands:
if command in sys.argv[1:]:
return True
# The following commands are supported, but we need to show more
# useful messages to the user
if 'install' in sys.argv[1:]:
print(textwrap.dedent("""
Note: if you need reliable uninstall behavior, then install
with pip instead of using `setup.py install`:
- `pip install .` (from a git repo or downloaded source
release)
- `pip install scipy` (last SciPy release on PyPI)
"""))
return True
    if '--help' in sys.argv[1:] or '-h' in sys.argv[1:]:
print(textwrap.dedent("""
SciPy-specific help
-------------------
To install SciPy from here with reliable uninstall, we recommend
that you use `pip install .`. To install the latest SciPy release
from PyPI, use `pip install scipy`.
For help with build/installation issues, please ask on the
scipy-user mailing list. If you are sure that you have run
into a bug, please report it at https://github.com/scipy/scipy/issues.
Setuptools commands help
------------------------
"""))
return False
# The following commands aren't supported. They can only be executed when
# the user explicitly adds a --force command-line argument.
bad_commands = dict(
test="""
`setup.py test` is not supported. Use one of the following
instead:
- `python runtests.py` (to build and test)
- `python runtests.py --no-build` (to test installed scipy)
- `>>> scipy.test()` (run tests for installed scipy
from within an interpreter)
""",
upload="""
`setup.py upload` is not supported, because it's insecure.
Instead, build what you want to upload and upload those files
            with `twine upload -s <filenames>`.
""",
upload_docs="`setup.py upload_docs` is not supported",
easy_install="`setup.py easy_install` is not supported",
clean="""
`setup.py clean` is not supported, use one of the following instead:
- `git clean -xdf` (cleans all files)
- `git clean -Xdf` (cleans all versioned files, doesn't touch
files that aren't checked into the git repo)
""",
check="`setup.py check` is not supported",
register="`setup.py register` is not supported",
bdist_dumb="`setup.py bdist_dumb` is not supported",
bdist="`setup.py bdist` is not supported",
flake8="`setup.py flake8` is not supported, use flake8 standalone",
)
bad_commands['nosetests'] = bad_commands['test']
for command in ('upload_docs', 'easy_install', 'bdist', 'bdist_dumb',
'register', 'check', 'install_data', 'install_headers',
'install_lib', 'install_scripts', ):
bad_commands[command] = "`setup.py %s` is not supported" % command
for command in bad_commands.keys():
if command in sys.argv[1:]:
print(textwrap.dedent(bad_commands[command]) +
"\nAdd `--force` to your command to use it anyway if you "
"must (unsupported).\n")
sys.exit(1)
# If we got here, we didn't detect what setup.py command was given
warnings.warn("Unrecognized setuptools command, proceeding with "
"generating Cython sources and expanding templates")
return True
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('scipy')
config.add_data_files(('scipy', '*.txt'))
config.get_version('scipy/version.py')
return config
def setup_package():
# Rewrite the version file every time
write_version_py()
cmdclass = {'sdist': sdist_checked}
if HAVE_SPHINX:
cmdclass['build_sphinx'] = ScipyBuildDoc
# Figure out whether to add ``*_requires = ['numpy']``.
# We don't want to do that unconditionally, because we risk updating
# an installed numpy which fails too often. Just if it's not installed, we
# may give it a try. See gh-3379.
try:
import numpy
except ImportError: # We do not have numpy installed
build_requires = ['numpy>=1.7.1']
else:
# If we're building a wheel, assume there already exist numpy wheels
# for this platform, so it is safe to add numpy to build requirements.
# See gh-5184.
build_requires = (['numpy>=1.7.1'] if 'bdist_wheel' in sys.argv[1:]
else [])
metadata = dict(
name='scipy',
maintainer="SciPy Developers",
maintainer_email="scipy-dev@scipy.org",
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
url="https://www.scipy.org",
download_url="https://github.com/scipy/scipy/releases",
license='BSD',
cmdclass=cmdclass,
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
test_suite='nose.collector',
setup_requires=build_requires,
install_requires=build_requires,
)
if "--force" in sys.argv:
run_build = True
else:
# Raise errors for unsupported commands, improve help output, etc.
run_build = parse_setuppy_commands()
# This import is here because it needs to be done before importing setup()
# from numpy.distutils, but after the MANIFEST removing and sdist import
# higher up in this file.
from setuptools import setup
if run_build:
from numpy.distutils.core import setup
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
# Generate Cython sources, unless building from source release
generate_cython()
metadata['configuration'] = configuration
else:
# Don't import numpy here - non-build actions are required to succeed
# without Numpy for example when pip is used to install Scipy when
# Numpy is not yet present in the system.
# Version number is added to metadata inside configuration() if build
# is run.
metadata['version'] = get_version_info()[0]
setup(**metadata)
if __name__ == '__main__':
setup_package()
|
{
"content_hash": "d6b2b1c7d263bebb842501ca8e96da53",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 85,
"avg_line_length": 35.97342995169082,
"alnum_prop": 0.6082723427113409,
"repo_name": "gdooper/scipy",
"id": "6df8dbb2c0e84300a460c74685b40e359d96843f",
"size": "14915",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4117257"
},
{
"name": "C++",
"bytes": "479346"
},
{
"name": "FORTRAN",
"bytes": "5574493"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "10806134"
},
{
"name": "Shell",
"bytes": "2218"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
"""
Tracking of rotating point.
Rotation speed is constant.
Both state and measurements vectors are 1D (a point angle),
Measurement is the real point angle + gaussian noise.
The real and the estimated points are connected with yellow line segment,
the real and the measured points are connected with red line segment.
(if Kalman filter works correctly,
the yellow segment should be shorter than the red one).
Pressing any key (except ESC) will reset the tracking with a different speed.
Pressing ESC will stop the program.
"""
# Python 2/3 compatibility
import sys
PY3 = sys.version_info[0] == 3
if PY3:
long = int
import cv2
from math import cos, sin, sqrt
import numpy as np
if __name__ == "__main__":
img_height = 500
img_width = 500
kalman = cv2.KalmanFilter(2, 1, 0)
code = long(-1)
cv2.namedWindow("Kalman")
while True:
state = 0.1 * np.random.randn(2, 1)
kalman.transitionMatrix = np.array([[1., 1.], [0., 1.]])
kalman.measurementMatrix = 1. * np.ones((1, 2))
kalman.processNoiseCov = 1e-5 * np.eye(2)
kalman.measurementNoiseCov = 1e-1 * np.ones((1, 1))
kalman.errorCovPost = 1. * np.ones((2, 2))
kalman.statePost = 0.1 * np.random.randn(2, 1)
while True:
            def calc_point(angle):
                return (np.around(img_width/2 + img_width/3*cos(angle), 0).astype(int),
                        np.around(img_height/2 - img_width/3*sin(angle), 0).astype(int))
state_angle = state[0, 0]
state_pt = calc_point(state_angle)
prediction = kalman.predict()
predict_angle = prediction[0, 0]
predict_pt = calc_point(predict_angle)
measurement = kalman.measurementNoiseCov * np.random.randn(1, 1)
# generate measurement
measurement = np.dot(kalman.measurementMatrix, state) + measurement
measurement_angle = measurement[0, 0]
measurement_pt = calc_point(measurement_angle)
# plot points
def draw_cross(center, color, d):
cv2.line(img,
(center[0] - d, center[1] - d), (center[0] + d, center[1] + d),
color, 1, cv2.LINE_AA, 0)
cv2.line(img,
(center[0] + d, center[1] - d), (center[0] - d, center[1] + d),
color, 1, cv2.LINE_AA, 0)
img = np.zeros((img_height, img_width, 3), np.uint8)
draw_cross(np.int32(state_pt), (255, 255, 255), 3)
draw_cross(np.int32(measurement_pt), (0, 0, 255), 3)
draw_cross(np.int32(predict_pt), (0, 255, 0), 3)
cv2.line(img, state_pt, measurement_pt, (0, 0, 255), 3, cv2.LINE_AA, 0)
cv2.line(img, state_pt, predict_pt, (0, 255, 255), 3, cv2.LINE_AA, 0)
kalman.correct(measurement)
process_noise = sqrt(kalman.processNoiseCov[0,0]) * np.random.randn(2, 1)
state = np.dot(kalman.transitionMatrix, state) + process_noise
cv2.imshow("Kalman", img)
code = cv2.waitKey(100)
if code != -1:
break
if code in [27, ord('q'), ord('Q')]:
break
cv2.destroyWindow("Kalman")
|
{
"content_hash": "fee3516940c528cb97355d08aad0c785",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 88,
"avg_line_length": 34.694736842105264,
"alnum_prop": 0.5640169902912622,
"repo_name": "zzjkf2009/Midterm_Astar",
"id": "96a6fa6fe48d2f2d27c55aac698f8ae9ec5fb650",
"size": "3318",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "opencv/samples/python/kalman.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7310"
},
{
"name": "C",
"bytes": "1157337"
},
{
"name": "C#",
"bytes": "42254"
},
{
"name": "C++",
"bytes": "28411379"
},
{
"name": "CMake",
"bytes": "810399"
},
{
"name": "CSS",
"bytes": "4784"
},
{
"name": "Clojure",
"bytes": "1487"
},
{
"name": "Cuda",
"bytes": "1699447"
},
{
"name": "HLSL",
"bytes": "3314"
},
{
"name": "HTML",
"bytes": "220169"
},
{
"name": "Java",
"bytes": "831255"
},
{
"name": "JavaScript",
"bytes": "113900"
},
{
"name": "Makefile",
"bytes": "2690"
},
{
"name": "Objective-C",
"bytes": "44625"
},
{
"name": "Objective-C++",
"bytes": "211774"
},
{
"name": "Perl",
"bytes": "15867"
},
{
"name": "PowerShell",
"bytes": "14589"
},
{
"name": "Python",
"bytes": "911972"
},
{
"name": "Scala",
"bytes": "5683"
},
{
"name": "Shell",
"bytes": "16088"
},
{
"name": "TeX",
"bytes": "34757"
}
],
"symlink_target": ""
}
|
import os
import urllib
import urllib2
import elementtree.ElementTree as ET
import time
from datetime import datetime, timedelta
try:
from hashlib import md5
except ImportError:
from md5 import md5
__all__ = ['ApiClient']
def _local_date(string):
dt = datetime.strptime(string[0:25], '%a, %d %b %Y %H:%M:%S')
return dt + timedelta(hours=6) + timedelta(seconds=_local_time_offset())
def _local_time_offset():
"""Return offset of local zone from GMT"""
if time.localtime().tm_isdst and time.daylight:
return -time.altzone
else:
return -time.timezone
def _date(string):
    # NOTE: completed from a truncated source; assumes the same timestamp
    # format parsed by _local_date above, without the local-time adjustment.
    return datetime.strptime(string[0:25], '%a, %d %b %Y %H:%M:%S')
def _boolstr(string):
return bool(int(string))
class ToodledoError(Exception):
    ''' Error returned from the Toodledo API server'''
def __init__(self, error_msg):
self.msg = error_msg
def __str__(self):
return "Toodledo server returned error: %s" % self.msg
class ToodledoData(object):
_typemap = {
'server': {
'unixtime': int,
'date': _local_date,
'tokenexpires': float
},
'folder': {
'id': int,
'archived': _boolstr,
'private': _boolstr,
'order': int
},
'context': {
'id': int,
'def': _boolstr,
},
'goal': {
'id': int,
'level': int,
'contributes': int,
'archived': _boolstr
},
'account': {
'userid': str,
'alias': str,
'pro': _boolstr,
'dateformat': int,
'timezone': int,
'hidemonths': int,
'hotlistpriority': int,
'hotlistduedate': int,
'lastaddedit': str,
'lastdelete': str,
'lastfolderedit': str,
'lastcontextedit': str,
'lastgoaledit': str,
'lastnotebookedit': str,
},
'task': {
'id': int,
'parent': int,
'children': int,
'title': unicode,
'tag': str,
'folder': int,
'context': str,
'goal': str,
'added': str,
'modified': str,
'startdate': str,
'starttime': str,
'duedate': str,
'duetime': str,
'completed': str,
'reminder': int,
'repeat': int,
'rep_advanced': str,
'status': int,
'star': _boolstr,
'stamp': str,
'priority': int,
'length': int,
'timer': int,
'note': unicode
},
'note': {
'id': int,
'folder': int,
'added': str,
'modified': str,
'title': str,
'text': str,
'private': _boolstr,
'stamp': str,
},
}
def __init__(self,node=None):
typemap = ToodledoData._typemap[node.tag]
for elem in node.getchildren():
self.__dict__[elem.tag] = typemap[elem.tag](elem.text)
for a in node.attrib:
self.__dict__[a] = typemap[a](node.attrib[a])
        if node.text and not node.text.isspace():
self.title = node.text
def __repr__(self):
return str(self.__dict__)
class PoodledoError(Exception):
def __init__(self, msg):
self.msg = msg
def __repr__(self):
return 'PoodledoError("%s")' % self.msg
def __str__(self):
return self.msg
def check_api_key(f):
    ''' A decorator that makes the decorated function check for an API key'''
def fn(*args, **kwargs):
self = args[0]
# check if key is set to a value
if 'key' in kwargs and kwargs['key'] is not None:
return f(*args, **kwargs)
else:
# try to get the key from the ApiClient
if self.key is not None:
kwargs['key'] = self.key
return f(*args, **kwargs)
else:
raise PoodledoError('need API key to call function %s' % f.__name__)
return fn
def returns_list(f):
def fn(self, **kwargs):
return [ ToodledoData(elem) for elem in f(self, **kwargs) ]
return fn
def returns_item(f):
def fn(self, **kwargs):
return ToodledoData(f(self, **kwargs))
return fn
class ApiClient(object):
''' Toodledo API client'''
_SERVICE_URL = 'http://www.toodledo.com/api.php?'
def __init__(self, key=None, application_id="poodledo"):
''' Initializes a new ApiClient w/o auth credentials'''
self._urlopener = urllib2.build_opener()
self.key = key
self.token = None
self.userid = None
self.application_id = application_id
def set_urlopener(self, opener):
self._urlopener = opener
def _create_url(self,**kwargs):
''' Creates a request url by appending key-value pairs to the SERVICE_URL'''
url = ApiClient._SERVICE_URL
# add args to url (key1=value1;key2=value2)
# trailing underscores are stripped from keys to allow keys like pass_
url += ';'.join(key.rstrip('_') + '=' + urllib2.quote(str(kwargs[key])) for key in sorted(kwargs))
return url
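    # Illustrative (hypothetical values):
    #   _create_url(method='getToken', userid='abc123')
    # yields 'http://www.toodledo.com/api.php?method=getToken;userid=abc123'
    # (keys are sorted, so 'method' precedes 'userid').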
def _check_for_error(self, node):
if node.tag == 'error':
raise ToodledoError(node.text)
def _call(self, **kwargs):
url = self._create_url(**kwargs)
stream = self._urlopener.open(url)
root_node = ET.parse(stream).getroot()
self._check_for_error(root_node)
return root_node
def authenticate(self, email, passwd):
'''
Uses credentials to get userid, token and auth key.
Returns the auth key, which can be cached and used later in the constructor in
order to skip authenticate()
'''
self.userid = self.getUserid(email,passwd)
self.token = self.getToken()
self.key = self._generateKey(self.userid, self.token, passwd)
return self.key
@property
def isAuthenticated(self):
        return self.key is not None
def _generateKey(self, userid, token, passwd):
''' Generates a key as specified in the API docs'''
return md5(md5(passwd).hexdigest() + token + userid).hexdigest()
def getUserid(self, email, passwd):
userid = self._call(method='getUserid', email=email, pass_=passwd).text
if userid == '1':
raise ToodledoError('invalid username/password')
return userid
def getToken(self, userid=None):
if userid is None:
if self.userid is not None:
userid = self.userid
else:
                raise PoodledoError('need a userid to request a token; call authenticate() first')
return self._call(method='getToken', userid=userid, appid=self.application_id).text
@check_api_key
@returns_item
def getServerInfo(self, key=None):
return self._call(method='getServerInfo', key=key)
@check_api_key
@returns_item
def getAccountInfo(self, key=None):
return self._call(method='getAccountInfo', key=key)
@check_api_key
@returns_list
def getFolders(self, key=None):
return self._call(method='getFolders', key=key)
@check_api_key
@returns_list
def getContexts(self, key=None):
return self._call(method='getContexts', key=key)
@check_api_key
@returns_list
def getGoals(self, key=None):
return self._call(method='getGoals', key=key)
@check_api_key
@returns_list
def getTasks(self, key=None, **kwargs):
return self._call(method='getTasks', key=key, **kwargs)
@check_api_key
@returns_list
def getDeleted(self, after, key=None ):
return self._call(method='getDeleted', key=key, after=after)
@check_api_key
def addTask(self,key=None,**kwargs):
return self._call(method='addTask', key=key, **kwargs).text
@check_api_key
def addContext(self,key=None,**kwargs):
return self._call(method='addContext', key=key, **kwargs).text
@check_api_key
def addGoal(self,key=None,**kwargs):
return self._call(method='addGoal', key=key, **kwargs).text
@check_api_key
def addFolder(self,key=None,**kwargs):
return self._call(method='addFolder', key=key, **kwargs).text
@check_api_key
def deleteFolder(self, id_, key=None):
return self._call(method='deleteFolder', id_=id_, key=key).text
@check_api_key
def deleteContext(self, id_, key=None):
return self._call(method='deleteContext', id_=id_, key=key).text
@check_api_key
def deleteGoal(self, id_, key=None):
return self._call(method='deleteGoal', id_=id_, key=key).text
@check_api_key
def deleteTask(self, id_, key=None):
return self._call(method='deleteTask', id_=id_, key=key).text
@check_api_key
def editTask(self, id_, key=None, **kwargs):
return self._call(method='editTask', id_=id_, key=key, **kwargs).text
@check_api_key
def editFolder(self, id_, key=None, **kwargs):
return self._call(method='editFolder', id_=id_, key=key, **kwargs).text
def createAccount(self, email, pass_):
'''Create a new account
Returns:
            userid - 15 or 16 character hexadecimal string
'''
        return self._call(method='createAccount', email=email, pass_=pass_).text
# Notes API #######################################
@check_api_key
@returns_list
def getNotes(self, key=None):
return self._call(method='getNotes', key=key)
@check_api_key
@returns_list
def getDeletedNotes(self, after, key=None ):
return self._call(method='getDeletedNotes', key=key, after=after)
@check_api_key
def addNote(self,key=None,**kwargs):
return self._call(method='addNote', key=key, **kwargs).text
@check_api_key
def deleteNote(self, id_, key=None):
return self._call(method='deleteNote', id_=id_, key=key).text
@check_api_key
def editNote(self, id_, key=None, **kwargs):
return self._call(method='editNote', id_=id_, key=key, **kwargs).text
|
{
"content_hash": "76877b2c030d5794d961028304c58699",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 106,
"avg_line_length": 30.264367816091955,
"alnum_prop": 0.5399734143562476,
"repo_name": "felixr/poodledo",
"id": "1b7cbecf518ad09fdfc9894d7f11cdc5edcbd1e0",
"size": "10579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poodledo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13409"
}
],
"symlink_target": ""
}
|
"""Tests for jax_cfd.equations."""
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
from jax_cfd.base import advection
from jax_cfd.base import boundaries
from jax_cfd.base import diffusion
from jax_cfd.base import equations
from jax_cfd.base import finite_differences as fd
from jax_cfd.base import funcutils
from jax_cfd.base import grids
from jax_cfd.base import pressure
from jax_cfd.base import test_util
from jax_cfd.base import time_stepping
import numpy as np
def zero_velocity_field(grid: grids.Grid) -> grids.GridVariableVector:
"""Returns an all-zero periodic velocity fields."""
return tuple(
grids.GridVariable(grids.GridArray(jnp.zeros(grid.shape), o, grid),
boundaries.periodic_boundary_conditions(grid.ndim))
for o in grid.cell_faces)
def sinusoidal_velocity_field(grid: grids.Grid) -> grids.GridVariableVector:
"""Returns a divergence-free velocity flow on `grid`."""
mesh_size = jnp.array(grid.shape) * jnp.array(grid.step)
vs = tuple(
jnp.sin(2. * np.pi * g / s) for g, s in zip(grid.mesh(), mesh_size))
return tuple(
grids.GridVariable(grids.GridArray(v, o, grid),
boundaries.periodic_boundary_conditions(grid.ndim))
for v, o in zip(vs[1:] + vs[:1], grid.cell_faces))
def gaussian_force_field(grid):
"""Returns a 'Gaussian-shaped' force field in the 'x' direction."""
mesh = grid.mesh()
mesh_size = jnp.array(grid.shape) * jnp.array(grid.step)
offsets = grid.cell_faces
v = [grids.GridArray(
jnp.exp(-sum([jnp.square(x / s - .5)
for x, s in zip(mesh, mesh_size)]) * 100.),
offsets[0], grid)]
for j in range(1, grid.ndim):
v.append(grids.GridArray(jnp.zeros(grid.shape), offsets[j], grid))
return tuple(v)
def gaussian_forcing(v: grids.GridVariableVector) -> grids.GridArrayVector:
"""Returns Gaussian field forcing."""
grid = grids.consistent_grid(*v)
return gaussian_force_field(grid)
def momentum(v: grids.GridVariableVector, density: float):
"""Returns the momentum due to velocity field `v`."""
grid = grids.consistent_grid(*v)
return jnp.array([u.data for u in v]).sum() * density * jnp.array(
grid.step).prod()
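# momentum() above is a midpoint-rule approximation of the integral of
# rho * u over the domain: the sum of all velocity samples times the density
# times the cell volume prod(grid.step).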
def _convect_upwind(v: grids.GridVariableVector) -> grids.GridArrayVector:
return tuple(advection.advect_upwind(u, v) for u in v)
class SemiImplicitNavierStokesTest(test_util.TestCase):
@parameterized.named_parameters(
dict(testcase_name='semi_implicit_sinusoidal_velocity_base',
velocity=sinusoidal_velocity_field,
forcing=None,
shape=(100, 100),
step=(1., 1.),
density=1.,
viscosity=1e-4,
convect=None,
pressure_solve=pressure.solve_cg,
time_stepper=time_stepping.forward_euler,
dt=1e-3,
time_steps=1000,
divergence_atol=1e-3,
momentum_atol=2e-3),
dict(testcase_name='semi_implicit_gaussian_force_upwind',
velocity=zero_velocity_field,
forcing=gaussian_forcing,
shape=(40, 40, 40),
step=(1., 1., 1.),
density=1.,
viscosity=None,
convect=_convect_upwind,
pressure_solve=pressure.solve_cg,
time_stepper=time_stepping.midpoint_rk2,
dt=1e-3,
time_steps=100,
divergence_atol=1e-4,
momentum_atol=2e-4),
dict(testcase_name='semi_implicit_sinusoidal_velocity_fast_diag',
velocity=sinusoidal_velocity_field,
forcing=None,
shape=(100, 100),
step=(1., 1.),
density=1.,
viscosity=1e-4,
convect=advection.convect_linear,
pressure_solve=pressure.solve_fast_diag,
time_stepper=time_stepping.classic_rk4,
dt=1e-3,
time_steps=1000,
divergence_atol=1e-3,
momentum_atol=1e-3),
)
def test_divergence_and_momentum(
self, velocity, forcing, shape, step, density, viscosity, convect,
pressure_solve, time_stepper, dt, time_steps, divergence_atol,
momentum_atol,
):
grid = grids.Grid(shape, step)
navier_stokes = equations.semi_implicit_navier_stokes(
density,
viscosity,
dt,
grid,
convect=convect,
pressure_solve=pressure_solve,
forcing=forcing,
time_stepper=time_stepper,
)
v_initial = velocity(grid)
v_final = funcutils.repeated(navier_stokes, time_steps)(v_initial)
divergence = fd.divergence(v_final)
self.assertLess(jnp.max(divergence.data), divergence_atol)
initial_momentum = momentum(v_initial, density)
final_momentum = momentum(v_final, density)
if forcing is not None:
expected_change = (
jnp.array([f.data for f in forcing(v_initial)]).sum() *
jnp.array(grid.step).prod() * dt * time_steps)
else:
expected_change = 0
self.assertAllClose(
initial_momentum + expected_change, final_momentum, atol=momentum_atol)
class ImplicitDiffusionNavierStokesTest(test_util.TestCase):
@parameterized.named_parameters(
dict(
testcase_name='implicit_sinusoidal_velocity_base',
velocity=sinusoidal_velocity_field,
forcing=None,
shape=(100, 100),
step=(1., 1.),
density=1.,
viscosity=1e-4,
convect=None,
diffusion_solve=diffusion.solve_cg,
pressure_solve=pressure.solve_cg,
dt=1e-3,
time_steps=1000,
divergence_atol=1e-3,
momentum_atol=3e-3),
dict(
testcase_name='implicit_gaussian_force_upwind',
velocity=zero_velocity_field,
forcing=gaussian_forcing,
shape=(40, 40, 40),
step=(1., 1., 1.),
density=1.,
viscosity=1e-4,
convect=_convect_upwind,
diffusion_solve=diffusion.solve_cg,
pressure_solve=pressure.solve_cg,
dt=1e-3,
time_steps=100,
divergence_atol=1e-4,
momentum_atol=9e-4),
dict(
testcase_name='implicit_sinusoidal_velocity_fast_diag',
velocity=sinusoidal_velocity_field,
forcing=None,
shape=(100, 100),
step=(1., 1.),
density=1.,
viscosity=1e-4,
convect=advection.convect_linear,
diffusion_solve=diffusion.solve_fast_diag,
pressure_solve=pressure.solve_fast_diag,
dt=1e-3,
time_steps=1000,
divergence_atol=1e-3,
momentum_atol=1e-3),
)
def test_divergence_and_momentum(
self, velocity, forcing, shape, step, density, viscosity, convect,
diffusion_solve, pressure_solve, dt, time_steps, divergence_atol,
momentum_atol,
):
grid = grids.Grid(shape, step)
navier_stokes = equations.implicit_diffusion_navier_stokes(
density,
viscosity,
dt,
grid,
convect=convect,
diffusion_solve=diffusion_solve,
pressure_solve=pressure_solve,
forcing=forcing)
v_initial = velocity(grid)
v_final = funcutils.repeated(navier_stokes, time_steps)(v_initial)
divergence = fd.divergence(v_final)
self.assertLess(jnp.max(divergence.data), divergence_atol)
initial_momentum = momentum(v_initial, density)
final_momentum = momentum(v_final, density)
if forcing is not None:
expected_change = (
jnp.array([f.data for f in forcing(v_initial)]).sum() *
jnp.array(grid.step).prod() * dt * time_steps)
else:
expected_change = 0
self.assertAllClose(
initial_momentum + expected_change, final_momentum, atol=momentum_atol)
if __name__ == '__main__':
absltest.main()
|
{
"content_hash": "05b16e298dc2a25f9dcd1502df15dbe0",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 79,
"avg_line_length": 33.029535864978904,
"alnum_prop": 0.621486969851814,
"repo_name": "google/jax-cfd",
"id": "79bfeafe8cb54e97d91290bfe5d9458da367069e",
"size": "8404",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jax_cfd/base/equations_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "7029140"
},
{
"name": "Python",
"bytes": "715552"
}
],
"symlink_target": ""
}
|
import numpy
import matplotlib.pyplot as plot
import time
import instrument
""" Example program to plot the Y-T data from Channel 1"""
# Initialize our scope
test = instrument.RigolScope("/dev/usbtmc0")
# Stop data acquisition
test.write(":STOP")
# start a single acquisition
#
test.write(":SINGLE")
# wait to complete
time.sleep(1)
# Grab the data from channel 1
test.write(":WAV:POIN:MODE NOR")
test.write(":WAV:POIN 600")
test.write(":WAV:DATA? CHAN1")
rawdata = test.read(9000)
data = numpy.frombuffer(rawdata, 'B')
# Get the voltage scale
test.write(":CHAN1:SCAL?")
voltscale = float(test.read(20))
# And the voltage offset
test.write(":CHAN1:OFFS?")
voltoffset = float(test.read(20))
# Walk through the data, and map it to actual voltages
# First invert the data (ya rly)
data = data * -1 + 255
# Now, we know from experimentation that the scope display range is actually
# 30-229. So shift by 130 - the voltage offset in counts, then scale to
# get the actual voltage.
data = (data - 130.0 - voltoffset/voltscale*25) / 25 * voltscale
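# Worked example (assumed settings): with voltscale = 1.0 V/div and
# voltoffset = 0 V, a raw count of 155 maps to (155 - 130) / 25 * 1.0 = 1.0 V,
# i.e. one division above the centre line (25 counts per division).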
# Get the timescale
test.write(":TIM:SCAL?")
timescale = float(test.read(20))
# Get the timescale offset
test.write(":TIM:OFFS?")
timeoffset = float(test.read(20))
# Now, generate a time axis. The scope display range is 0-600, with 300 being
# time zero.
time = numpy.arange(-300.0/50*timescale, 300.0/50*timescale, timescale/50.0)
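# Worked example (illustrative): the 600-point record spans 12 divisions of 50
# points, so with timescale = 1e-3 s/div the axis runs from -6 ms to +6 ms in
# 20 us steps.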
# If we generated too many points due to overflow, crop the length of time.
print "time size: " ,time.size
print "data size: " , data.size
if (data.size > 600):
data = data[0:600:1]
if (time.size > data.size):
time = time[0:600:1]
# See if we should use a different time axis
if (time[599] < 1e-3):
time = time * 1e6
tUnit = "uS"
elif (time[599] < 1):
time = time * 1e3
tUnit = "mS"
else:
tUnit = "S"
# Start data acquisition again, and put the scope back in local mode
test.write(":RUN")
test.write(":KEY:FORC")
# Plot the data
plot.plot(time, data)
plot.title("Oscilloscope Channel 1")
plot.ylabel("Voltage (V)")
plot.xlabel("Time (" + tUnit + ")")
plot.xlim(time[0], time[599])
plot.show()
|
{
"content_hash": "e6af449a6a2acd68ae3c7d50560a99b2",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 78,
"avg_line_length": 24.74418604651163,
"alnum_prop": 0.6879699248120301,
"repo_name": "dboonz/DSO1002A",
"id": "fac04ce3da3ffbb8f776c01a9372220b833c69de",
"size": "2147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/capture_channel_1.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5404"
}
],
"symlink_target": ""
}
|
import logging
import urllib.parse
from .stealth_engine import StealthEngine
log = logging.getLogger(__name__)
class AttackException(Exception):
pass
class Attack(object):
attack_name = None
def __init__(self, config_entry):
self.attack = None
for k, v in config_entry.items():
setattr(self, k, v)
self._confirm_config()
self._se = StealthEngine(config_entry)
def _confirm_config(self):
req = ['attribute', 'base_url']
for r in req:
if not getattr(self, r, False):
raise AttackException('Missing configuration {} required for attack.'.format(r))
def next_url(self):
'''Yield the next URL statement in the attack. May block if stealth engine is enabled.'''
for sql in self.next_sql():
params = urllib.parse.urlencode({self.attribute: sql})
url = '{}?{}'.format(self.base_url, params)
yield url
            self._se.wait_interval()
def handle_response(self, response, sql):
'''Handle the response. Return non-zero to stop processing. If non-zero,
you can specify an error string.'''
log.debug('Ignoring response in Attack base class.')
return 0, ''
def __repr__(self):
return '<Attack(name={})>'.format(self.attack)
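# Illustrative sketch, not part of the original module: concrete attacks are
# expected to subclass Attack and implement next_sql(), yielding SQL payloads
# that next_url() URL-encodes into the configured 'attribute' query parameter.
# The subclass and payload below are hypothetical.
#
#   class UnionAttack(Attack):
#       attack_name = 'union'
#       def next_sql(self):
#           yield "' UNION SELECT 1 --"
#
#   # next_url() then yields e.g. http://target/page?id=%27+UNION+SELECT+1+--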
|
{
"content_hash": "44cb349f534d4c962a7f812713cfeb52",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 97,
"avg_line_length": 31,
"alnum_prop": 0.6031507876969242,
"repo_name": "glawler/rabidsqrl",
"id": "5fb84fdef2318b766b75340fd51de1d97afbfe9e",
"size": "1333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rabidsqrl/attack.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PHP",
"bytes": "37937"
},
{
"name": "Python",
"bytes": "18875"
}
],
"symlink_target": ""
}
|
import torch
import progressbar
import math
import random
def normalize_with_singularity(x):
r"""
Normalize the given vector across the third dimension.
Extend all vectors by eps=1e-12 to put the null vector at the maximal
cosine distance from any non-null vector.
"""
S, H = x.size()
norm_x = (x**2).sum(dim=1, keepdim=True)
x /= torch.sqrt(norm_x)
zero_vals = (norm_x == 0).view(S)
x[zero_vals] = 1 / math.sqrt(H)
border_vect = torch.zeros((S, 1),
dtype=x.dtype,
device=x.device) + 1e-12
border_vect[zero_vals] = -2*1e12
return torch.cat([x, border_vect], dim=1)
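# Illustrative check (an assumption, not part of the original module): every
# non-null row comes back with unit norm plus the extra epsilon coordinate,
# so a (S, H) input yields a (S, H + 1) output.
#
#   x = torch.randn(5, 4)
#   y = normalize_with_singularity(x.clone())
#   assert y.shape == (5, 5)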
def load_item_file(path_item_file):
r""" Load a .item file indicating the triplets for the ABX score. The
    input file must have the following format:
line 0 : whatever (not read)
line > 0: #file_ID onset offset #phone prev-phone next-phone speaker
        onset : beginning of the triplet (in s)
        offset : end of the triplet (in s)
"""
with open(path_item_file, 'r') as file:
data = file.readlines()[1:]
data = [x.replace('\n', '') for x in data]
out = {}
phone_match = {}
speaker_match = {}
context_match = {}
for line in data:
items = line.split()
assert(len(items) == 7)
fileID = items[0]
if fileID not in out:
out[fileID] = []
onset, offset = float(items[1]), float(items[2])
context = '+'.join([items[4], items[5]])
phone = items[3]
speaker = items[6]
if phone not in phone_match:
s = len(phone_match)
phone_match[phone] = s
phone_id = phone_match[phone]
if context not in context_match:
s = len(context_match)
context_match[context] = s
context_id = context_match[context]
if speaker not in speaker_match:
s = len(speaker_match)
speaker_match[speaker] = s
speaker_id = speaker_match[speaker]
out[fileID].append([onset, offset, context_id, phone_id, speaker_id])
return out, context_match, phone_match, speaker_match
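# Hypothetical .item line (illustrative, matching the format described above):
#   file001 0.25 0.48 ae sil t spk01
# i.e. phone 'ae' between 0.25 s and 0.48 s of file001, with previous phone
# 'sil', next phone 't', spoken by speaker 'spk01'.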
def get_features_group(in_data, index_order):
in_index = list(range(len(in_data)))
in_index.sort(key=lambda x: [in_data[x][i] for i in index_order])
out_groups = []
last_values = [in_data[in_index[0]][i] for i in index_order]
i_s = 0
curr_group = [[] for i in index_order]
n_orders = len(index_order) - 1
tmp = [in_data[i] for i in in_index]
for index, item in enumerate(tmp):
for order_index, order in enumerate(index_order):
if item[order] != last_values[order_index]:
curr_group[-1].append((i_s, index))
for i in range(n_orders, order_index, -1):
curr_group[i-1].append(curr_group[i])
curr_group[i] = []
if order_index == 0:
out_groups += curr_group[0]
curr_group[0] = []
last_values = [item[i] for i in index_order]
i_s = index
break
if i_s < len(in_data):
curr_group[-1].append((i_s, len(in_data)))
for i in range(n_orders, 0, -1):
curr_group[i-1].append(curr_group[i])
out_groups += curr_group[0]
return in_index, out_groups
class ABXFeatureLoader:
def __init__(self,
path_item_file,
seqList,
featureMaker,
stepFeature,
normalize):
r"""
Args:
path_item_file (str): path to the .item files containing the ABX
triplets
seqList (list): list of items (fileID, path) where fileID refers to
the file's ID as used in path_item_file, and path
is the actual path to the input audio sequence
featureMaker (function): either a function or a callable object.
Takes a path as input and outputs the
feature sequence corresponding to the
given file.
            normalize (bool): if True all input features will be normalized
across the channels dimension.
Note:
You can use this dataset with pre-computed features. For example, if
you have a collection of features files in the torch .pt format then
you can just set featureMaker = torch.load.
"""
files_data, self.context_match, self.phone_match, self.speaker_match = \
load_item_file(path_item_file)
self.seqNorm = True
self.stepFeature = stepFeature
self.loadFromFileData(files_data, seqList, featureMaker, normalize)
def loadFromFileData(self, files_data, seqList, feature_maker, normalize):
# self.features[i]: index_start, size, context_id, phone_id, speaker_id
self.features = []
self.INDEX_CONTEXT = 2
self.INDEX_PHONE = 3
self.INDEX_SPEAKER = 4
data = []
totSize = 0
print("Building the input features...")
bar = progressbar.ProgressBar(maxval=len(seqList))
bar.start()
for index, vals in enumerate(seqList):
fileID, file_path = vals
bar.update(index)
if fileID not in files_data:
continue
features = feature_maker(file_path)
if normalize:
features = normalize_with_singularity(features)
features = features.detach().cpu()
phone_data = files_data[fileID]
for phone_start, phone_end, context_id, phone_id, speaker_id in phone_data:
index_start = max(
0, int(math.ceil(self.stepFeature * phone_start - 0.5)))
index_end = min(features.size(0),
int(math.floor(self.stepFeature * phone_end - 0.5)))
if index_start >= features.size(0) or index_end <= index_start:
continue
loc_size = index_end - index_start
self.features.append([totSize, loc_size, context_id,
phone_id, speaker_id])
data.append(features[index_start:index_end])
totSize += loc_size
bar.finish()
print("...done")
self.data = torch.cat(data, dim=0)
self.feature_dim = self.data.size(1)
def get_data_device(self):
return self.data.device
def cuda(self):
self.data = self.data.cuda()
def cpu(self):
self.data = self.data.cpu()
def get_max_group_size(self, i_group, i_sub_group):
id_start, id_end = self.group_index[i_group][i_sub_group]
return max([self.features[i][1] for i in range(id_start, id_end)])
def get_ids(self, index):
context_id, phone_id, speaker_id = self.features[index][2:]
return context_id, phone_id, speaker_id
def __getitem__(self, index):
i_data, out_size, context_id, phone_id, speaker_id = self.features[index]
return self.data[i_data:(i_data + out_size)], out_size, (context_id, phone_id, speaker_id)
def __len__(self):
return len(self.features)
def get_n_speakers(self):
return len(self.speaker_match)
def get_n_context(self):
return len(self.context_match)
def get_n_phone(self):
return len(self.phone_match)
def get_n_groups(self):
return len(self.group_index)
def get_n_sub_group(self, index_sub_group):
return len(self.group_index[index_sub_group])
def get_iterator(self, mode, max_size_group):
if mode == 'within':
return ABXWithinGroupIterator(self, max_size_group)
if mode == 'across':
return ABXAcrossGroupIterator(self, max_size_group)
raise ValueError(f"Invalid mode: {mode}")
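# Illustrative usage (an assumption, not part of the original module; the
# stepFeature value is hypothetical):
#   loader = ABXFeatureLoader(path_item_file, seqList, torch.load, 100, True)
#   for coords, a, b, x in loader.get_iterator('within', max_size_group=32):
#       ...  # score the (A, B, X) triplet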
class ABXIterator:
r"""
Base class building ABX's triplets.
"""
def __init__(self, abxDataset, max_size_group):
self.max_size_group = max_size_group
self.dataset = abxDataset
self.len = 0
self.index_csp, self.groups_csp = \
get_features_group(abxDataset.features,
[abxDataset.INDEX_CONTEXT,
abxDataset.INDEX_SPEAKER,
abxDataset.INDEX_PHONE])
def get_group(self, i_start, i_end):
data = []
max_size = 0
to_take = list(range(i_start, i_end))
if i_end - i_start > self.max_size_group:
to_take = random.sample(to_take, k=self.max_size_group)
for i in to_take:
loc_data, loc_size, loc_id = self.dataset[self.index_csp[i]]
max_size = max(loc_size, max_size)
data.append(loc_data)
N = len(to_take)
out_data = torch.zeros(N, max_size,
self.dataset.feature_dim,
device=self.dataset.get_data_device())
out_size = torch.zeros(N, dtype=torch.long,
device=self.dataset.get_data_device())
for i in range(N):
size = data[i].size(0)
out_data[i, :size] = data[i]
out_size[i] = size
return out_data, out_size, loc_id
def __len__(self):
return self.len
def get_board_size(self):
r"""
Get the output dimension of the triplet's space.
"""
pass
class ABXWithinGroupIterator(ABXIterator):
r"""
Iterator giving the triplets for the ABX within score.
"""
def __init__(self, abxDataset, max_size_group):
super(ABXWithinGroupIterator, self).__init__(abxDataset,
max_size_group)
self.symmetric = True
for context_group in self.groups_csp:
for speaker_group in context_group:
if len(speaker_group) > 1:
for i_start, i_end in speaker_group:
if i_end - i_start > 1:
self.len += (len(speaker_group) - 1)
def __iter__(self):
for i_c, context_group in enumerate(self.groups_csp):
for i_s, speaker_group in enumerate(context_group):
n_phones = len(speaker_group)
if n_phones == 1:
continue
for i_a in range(n_phones):
i_start_a, i_end_a = self.groups_csp[i_c][i_s][i_a]
if i_end_a - i_start_a == 1:
continue
for i_b in range(n_phones):
if i_b == i_a:
continue
i_start_b, i_end_b = self.groups_csp[i_c][i_s][i_b]
data_b, size_b, id_b = self.get_group(i_start_b,
i_end_b)
data_a, size_a, id_a = self.get_group(i_start_a,
i_end_a)
out_coords = id_a[2], id_a[1], id_b[1], id_a[0]
yield out_coords, (data_a, size_a), (data_b, size_b), \
(data_a, size_a)
def get_board_size(self):
return (self.dataset.get_n_speakers(),
self.dataset.get_n_phone(),
self.dataset.get_n_phone(),
self.dataset.get_n_context())
class ABXAcrossGroupIterator(ABXIterator):
r"""
Iterator giving the triplets for the ABX across score.
"""
def __init__(self, abxDataset, max_size_group):
super(ABXAcrossGroupIterator, self).__init__(abxDataset,
max_size_group)
self.symmetric = False
self.get_speakers_from_cp = {}
self.max_x = 5
for context_group in self.groups_csp:
for speaker_group in context_group:
for i_start, i_end in speaker_group:
c_id, p_id, s_id = self.dataset.get_ids(
self.index_csp[i_start])
if c_id not in self.get_speakers_from_cp:
self.get_speakers_from_cp[c_id] = {}
if p_id not in self.get_speakers_from_cp[c_id]:
self.get_speakers_from_cp[c_id][p_id] = {}
self.get_speakers_from_cp[c_id][p_id][s_id] = (
i_start, i_end)
for context_group in self.groups_csp:
for speaker_group in context_group:
if len(speaker_group) > 1:
for i_start, i_end in speaker_group:
c_id, p_id, s_id = self.dataset.get_ids(
self.index_csp[i_start])
self.len += (len(speaker_group) - 1) * (min(self.max_x,
len(self.get_speakers_from_cp[c_id][p_id]) - 1))
def get_other_speakers_in_group(self, i_start_group):
c_id, p_id, s_id = self.dataset.get_ids(self.index_csp[i_start_group])
return [v for k, v in self.get_speakers_from_cp[c_id][p_id].items() if k != s_id]
def get_abx_triplet(self, i_a, i_b, i_x):
i_start_a, i_end_a = i_a
data_a, size_a, id_a = self.get_group(i_start_a, i_end_a)
i_start_b, i_end_b = i_b
data_b, size_b, id_b = self.get_group(i_start_b, i_end_b)
i_start_x, i_end_x = i_x
data_x, size_x, id_x = self.get_group(i_start_x, i_end_x)
out_coords = id_a[2], id_a[1], id_b[1], id_a[0], id_x[2]
return out_coords, (data_a, size_a), (data_b, size_b), \
(data_x, size_x)
def __iter__(self):
for i_c, context_group in enumerate(self.groups_csp):
for i_s, speaker_group in enumerate(context_group):
n_phones = len(speaker_group)
if n_phones == 1:
continue
for i_a in range(n_phones):
i_start_a, i_end_a = self.groups_csp[i_c][i_s][i_a]
ref = self.get_other_speakers_in_group(i_start_a)
if len(ref) > self.max_x:
speakers_a = random.sample(ref, k=self.max_x)
else:
speakers_a = ref
for i_start_x, i_end_x in speakers_a:
for i_b in range(n_phones):
if i_b == i_a:
continue
i_start_b, i_end_b = self.groups_csp[i_c][i_s][i_b]
yield self.get_abx_triplet((i_start_a, i_end_a), (i_start_b, i_end_b), (i_start_x, i_end_x))
def get_board_size(self):
return (self.dataset.get_n_speakers(),
self.dataset.get_n_phone(),
self.dataset.get_n_phone(),
self.dataset.get_n_context(),
self.dataset.get_n_speakers())
|
{
"content_hash": "5dd6754a3b2bdee3c88513ec4bc3a3d5",
"timestamp": "",
"source": "github",
"line_count": 429,
"max_line_length": 120,
"avg_line_length": 35.47785547785548,
"alnum_prop": 0.5128777923784494,
"repo_name": "facebookresearch/libri-light",
"id": "51d46325e3a93ff6a724168119c43c35439bc76c",
"size": "15291",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "eval/ABX_src/abx_iterators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "3385"
},
{
"name": "Python",
"bytes": "207420"
},
{
"name": "Shell",
"bytes": "396"
}
],
"symlink_target": ""
}
|
"""The CPIO directory implementation."""
from dfvfs.path import cpio_path_spec
from dfvfs.vfs import directory
class CPIODirectory(directory.Directory):
"""File system directory that uses CPIOArchiveFile."""
def _EntriesGenerator(self):
"""Retrieves directory entries.
    Since a directory can contain a vast number of entries, using
a generator is more memory efficient.
Yields:
CPIOPathSpec: path specification.
"""
location = getattr(self.path_spec, 'location', None)
if location and location.startswith(self._file_system.PATH_SEPARATOR):
cpio_archive_file = self._file_system.GetCPIOArchiveFile()
sub_directories = set()
for cpio_archive_file_entry in cpio_archive_file.GetFileEntries(
path_prefix=location[1:]):
path = cpio_archive_file_entry.path
if not path or path == location:
continue
prefix, suffix = self._file_system.GetPathSegmentAndSuffix(
location[1:], path)
if not suffix:
path_spec_location = self._file_system.JoinPath([path])
yield cpio_path_spec.CPIOPathSpec(
location=path_spec_location, parent=self.path_spec.parent)
elif prefix not in sub_directories:
sub_directories.add(prefix)
# Include prefixes as virtual sub directories.
path_spec_location = self._file_system.JoinPath([prefix])
yield cpio_path_spec.CPIOPathSpec(
location=path_spec_location, parent=self.path_spec.parent)
|
{
"content_hash": "3fd867ffd78c2e55b786a92272dde023",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 74,
"avg_line_length": 33.93333333333333,
"alnum_prop": 0.6719056974459725,
"repo_name": "log2timeline/dfvfs",
"id": "f9e5615b9d5cd2a74ccc7f87b0b2d145b335f8ea",
"size": "1551",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "dfvfs/vfs/cpio_directory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14212"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2176548"
},
{
"name": "Shell",
"bytes": "19355"
}
],
"symlink_target": ""
}
|
from measurements import smooth_gesture_util
from telemetry.core.platform import tracing_category_filter
from telemetry.core.platform import tracing_options
from telemetry.timeline.model import TimelineModel
from telemetry.page.actions import action_runner
from telemetry.value import trace
from telemetry.web_perf import timeline_interaction_record as tir_module
RUN_SMOOTH_ACTIONS = 'RunSmoothAllActions'
class TimelineController(object):
def __init__(self):
super(TimelineController, self).__init__()
self.trace_categories = None
self._model = None
self._renderer_process = None
self._smooth_records = []
self._interaction = None
def SetUp(self, page, tab):
"""Starts gathering timeline data.
"""
    # Resets these member variables in case this object is reused.
self._model = None
self._renderer_process = None
if not tab.browser.platform.tracing_controller.IsChromeTracingSupported():
raise Exception('Not supported')
category_filter = tracing_category_filter.TracingCategoryFilter(
filter_string=self.trace_categories)
for delay in page.GetSyntheticDelayCategories():
category_filter.AddSyntheticDelay(delay)
options = tracing_options.TracingOptions()
options.enable_chrome_trace = True
tab.browser.platform.tracing_controller.Start(options, category_filter)
def Start(self, tab):
# Start the smooth marker for all actions.
runner = action_runner.ActionRunner(tab)
self._interaction = runner.BeginInteraction(
RUN_SMOOTH_ACTIONS, is_smooth=True)
def Stop(self, tab, results):
# End the smooth marker for all actions.
self._interaction.End()
# Stop tracing.
timeline_data = tab.browser.platform.tracing_controller.Stop()
results.AddValue(trace.TraceValue(
results.current_page, timeline_data))
self._model = TimelineModel(timeline_data)
self._renderer_process = self._model.GetRendererProcessFromTabId(tab.id)
renderer_thread = self.model.GetRendererThreadFromTabId(tab.id)
run_smooth_actions_record = None
self._smooth_records = []
for event in renderer_thread.async_slices:
if not tir_module.IsTimelineInteractionRecord(event.name):
continue
r = tir_module.TimelineInteractionRecord.FromAsyncEvent(event)
if r.label == RUN_SMOOTH_ACTIONS:
assert run_smooth_actions_record is None, (
'TimelineController cannot issue more than 1 %s record' %
RUN_SMOOTH_ACTIONS)
run_smooth_actions_record = r
elif r.is_smooth:
self._smooth_records.append(
smooth_gesture_util.GetAdjustedInteractionIfContainGesture(
self.model, r))
    # If there are no other smooth records, we make measurements on the time
    # range marked by timeline_controller itself.
    # TODO(nednguyen): when crbug.com/239179 is marked fixed, make sure that
    # page sets are responsible for issuing the markers themselves.
if len(self._smooth_records) == 0 and run_smooth_actions_record:
self._smooth_records = [run_smooth_actions_record]
def CleanUp(self, tab):
if tab.browser.platform.tracing_controller.is_tracing_running:
tab.browser.platform.tracing_controller.Stop()
@property
def model(self):
return self._model
@property
def renderer_process(self):
return self._renderer_process
@property
def smooth_records(self):
return self._smooth_records
|
{
"content_hash": "ac4ead537188c525a6eac02a266399b2",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 78,
"avg_line_length": 36.23157894736842,
"alnum_prop": 0.720801859384079,
"repo_name": "sgraham/nope",
"id": "2348ec4a89d9f5db5850c3362b8f02b0ab9ff507",
"size": "3604",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "tools/perf/measurements/timeline_controller.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "39967"
},
{
"name": "C",
"bytes": "4061434"
},
{
"name": "C++",
"bytes": "279546186"
},
{
"name": "CMake",
"bytes": "27212"
},
{
"name": "CSS",
"bytes": "919339"
},
{
"name": "Emacs Lisp",
"bytes": "988"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "15989749"
},
{
"name": "Java",
"bytes": "7541683"
},
{
"name": "JavaScript",
"bytes": "32372588"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "40513"
},
{
"name": "Objective-C",
"bytes": "1584184"
},
{
"name": "Objective-C++",
"bytes": "8249988"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "169060"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "427339"
},
{
"name": "Python",
"bytes": "8346306"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "844553"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
"""
Author: P Shreyas Shetty
Implementation of Newton-Raphson method for solving equations of kind
f(x) = 0. It is an iterative method in which the solution is refined by the expression
x[n+1] = x[n] - f(x[n])/f'(x[n])
If no solution exists, then either the solution will not be found when the iteration
limit is reached or the gradient f'(x[n]) approaches zero. In both cases, an exception
is raised. If the iteration limit is reached, try increasing maxiter.
"""
import math as m
def calc_derivative(f, a, h=0.001):
"""
Calculates derivative at point a for function f using finite difference
method
"""
return (f(a + h) - f(a - h)) / (2 * h)
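# Example (illustrative): the central difference of x**2 at a = 3 is
# ((3 + h)**2 - (3 - h)**2) / (2 * h) = 6 exactly, for any step h.
#   calc_derivative(lambda x: x**2, 3)  # -> 6.0 (up to float rounding)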
def newton_raphson(f, x0=0, maxiter=100, step=0.0001, maxerror=1e-6, logsteps=False):
a = x0 # set the initial guess
steps = [a]
error = abs(f(a))
f1 = lambda x: calc_derivative(f, x, h=step) # noqa: E731 Derivative of f(x)
    for _ in range(maxiter):
        if f1(a) == 0:
            raise ValueError("No converging solution found")
        a = a - f(a) / f1(a)  # Calculate the next estimate
        error = abs(f(a))  # Update the error for the new estimate
        if logsteps:
            steps.append(a)
        if error < maxerror:
            break
else:
raise ValueError("Iteration limit reached, no converging solution found")
if logsteps:
# If logstep is true, then log intermediate steps
return a, error, steps
return a, error
if __name__ == "__main__":
from matplotlib import pyplot as plt
f = lambda x: m.tanh(x) ** 2 - m.exp(3 * x) # noqa: E731
solution, error, steps = newton_raphson(
f, x0=10, maxiter=1000, step=1e-6, logsteps=True
)
plt.plot([abs(f(x)) for x in steps])
plt.xlabel("step")
plt.ylabel("error")
plt.show()
print(f"solution = {{{solution:f}}}, error = {{{error:f}}}")
|
{
"content_hash": "b90eea83d9ff9250ee6218e8826dd1c3",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 87,
"avg_line_length": 33.77777777777778,
"alnum_prop": 0.6080043859649122,
"repo_name": "wuweilin/python",
"id": "f2b7cb9766d23ef8a1acb5b90b3841722be60257",
"size": "1824",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "maths/newton_raphson.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import sys
import os
import traceback
import paramiko
remote_machines = os.environ['REMOTE_MACHINES']
remote_machines = remote_machines.split(', ')
print 'remote_machines', remote_machines
remote_dir = os.environ['REMOTE_DIR']
remote_dir = remote_dir.split(', ')
print 'remote_dir', remote_dir
remote_ip = os.environ['REMOTE_IP']
remote_ip = remote_ip.split(', ')
print 'remote_ip', remote_ip
remote_user = os.environ['REMOTE_USER']
remote_user = remote_user.split(', ')
print 'remote_user', remote_user
remote_port = os.environ['REMOTE_PORT']
remote_port = remote_port.split(', ')
print 'remote_port', int(remote_port[0])
all_machines = {}
def set_default_value_to_all_machines():
for idx, item in enumerate(remote_machines):
all_machines[item]={}
all_machines[item]['ip']=remote_ip[idx]
all_machines[item]['dir']=remote_dir[idx]
all_machines[item]['port']=remote_port[idx]
all_machines[item]['user']=remote_user[idx]
all_machines[item]['pwd']=os.environ[item+'_PWD']
def reboot_machine(machine, item):
print 'machine:',machine, 'item:', item
remote_ip = machine['ip']
remote_port = machine['port']
remote_user = machine['user']
remote_pwd = machine['pwd']
remote_dir = os.path.join(machine['dir'],'workspace','reboot', 'reboot.py')
print remote_dir
ssh = paramiko.SSHClient()
print 'ssh:',ssh
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(remote_ip, int(remote_port), remote_user, remote_pwd)
# excute reboot.py on machine
cmd = 'python '+remote_dir
if item == 'win32_win7':
cmd = 'cmd.exe /c "python '+remote_dir+'"'
stdin, stdout, stderr = ssh.exec_command(cmd)
print stdout.readlines()
ssh.close()
print 'OK'
def main():
print 'in main'
set_default_value_to_all_machines()
for item in all_machines:
reboot_machine( all_machines[item], item )
# -------------- main --------------
if __name__ == '__main__':
sys_ret = 0
try:
sys_ret = main()
except:
traceback.print_exc()
sys_ret = 1
finally:
sys.exit(sys_ret)
|
{
"content_hash": "6ff5d4868e36948ab177750dfe7c60d1",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 79,
"avg_line_length": 29.375,
"alnum_prop": 0.6345153664302601,
"repo_name": "hsiaoyi/Melo",
"id": "a35ce631e4e2af91e0e74d503ee234a9c06dbd13",
"size": "2133",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "cocos2d/tools/jenkins-scripts/utils/reboot.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2503"
},
{
"name": "C",
"bytes": "1827127"
},
{
"name": "C++",
"bytes": "36482540"
},
{
"name": "CMake",
"bytes": "296648"
},
{
"name": "CSS",
"bytes": "231"
},
{
"name": "Csound Document",
"bytes": "16092"
},
{
"name": "GLSL",
"bytes": "121145"
},
{
"name": "HTML",
"bytes": "10731"
},
{
"name": "Java",
"bytes": "767793"
},
{
"name": "JavaScript",
"bytes": "8001654"
},
{
"name": "Lua",
"bytes": "3236083"
},
{
"name": "Makefile",
"bytes": "101126"
},
{
"name": "Objective-C",
"bytes": "3064201"
},
{
"name": "Objective-C++",
"bytes": "767382"
},
{
"name": "PLSQL",
"bytes": "26674"
},
{
"name": "Python",
"bytes": "841204"
},
{
"name": "Shell",
"bytes": "54857"
}
],
"symlink_target": ""
}
|
"""
py.test will automatically detect this file.
"""
import random
import pytest
from . import utils
from librmm_cffi import librmm_config as rmm_cfg
rmm_cfg.use_pool_allocator = True
from librmm_cffi import librmm as rmm
# Setup a fixture that is executed once for every test session to set
# a constant seed for the RNG.
@pytest.fixture(scope='session', autouse=True)
def rand_seed():
    # To check whether this fixture is applied, run with `py.test -s` and
    # ensure the following message is printed.
print("Seeding np.random")
utils.seed_rand()
random.seed(0)
# Set up a fixture for the RMM memory manager to initialize and finalize it before
# and after tests.
#@pytest.fixture(scope="session", autouse=True)
#def rmm():
# librmm.initialize()
# yield librmm
# librmm.finalize()
|
{
"content_hash": "7bf2d1ba8f5bcb83b0af176f402a778a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 82,
"avg_line_length": 28.857142857142858,
"alnum_prop": 0.7215346534653465,
"repo_name": "gpuopenanalytics/libgdf",
"id": "8fb8a1ad4729b8b165e3d2e20f6470b354b6caf2",
"size": "808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libgdf/python/tests/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "10998"
},
{
"name": "C++",
"bytes": "155270"
},
{
"name": "CMake",
"bytes": "23819"
},
{
"name": "Cuda",
"bytes": "868108"
},
{
"name": "Python",
"bytes": "43667"
},
{
"name": "Shell",
"bytes": "4599"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('draftin', '0010_auto_20160904_1549'),
]
operations = [
migrations.AddField(
model_name='draft',
name='canonical_url',
field=models.URLField(blank=True, default=''),
),
migrations.AlterField(
model_name='draft',
name='created_at',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='draft',
name='draft_id',
field=models.IntegerField(blank=True, editable=False, null=True),
),
]
|
{
"content_hash": "ae8fb6bc6d170e7ddd593544ea0c4732",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 77,
"avg_line_length": 26.25,
"alnum_prop": 0.5619047619047619,
"repo_name": "whatisjasongoldstein/django-draftin",
"id": "0b1c20e92f41841fa82ed27cf3300c34eb6d3627",
"size": "807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "draftin/migrations/0011_auto_20160905_2144.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25614"
}
],
"symlink_target": ""
}
|
"""The IPython Core Notification Center.
See docs/source/development/notification_blueprint.txt for an overview of the
notification module.
"""
__docformat__ = "restructuredtext en"
#-----------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
# Tell nose to skip the testing of this module
__test__ = {}
class NotificationCenter(object):
"""Synchronous notification center
Examples
--------
>>> import IPython.kernel.core.notification as notification
>>> def callback(theType, theSender, args={}):
... print theType,theSender,args
...
>>> notification.sharedCenter.add_observer(callback, 'NOTIFICATION_TYPE', None)
>>> notification.sharedCenter.post_notification('NOTIFICATION_TYPE', object()) # doctest:+ELLIPSIS
NOTIFICATION_TYPE ...
"""
def __init__(self):
super(NotificationCenter, self).__init__()
self._init_observers()
def _init_observers(self):
"""Initialize observer storage"""
self.registered_types = set() #set of types that are observed
self.registered_senders = set() #set of senders that are observed
self.observers = {} #map (type,sender) => callback (callable)
def post_notification(self, theType, sender, **kwargs):
"""Post notification (type,sender,**kwargs) to all registered
observers.
Implementation notes:
* If no registered observers, performance is O(1).
        * Notification order is undefined.
* Notifications are posted synchronously.
"""
        if theType is None or sender is None:
raise Exception("NotificationCenter.post_notification requires \
type and sender.")
# If there are no registered observers for the type/sender pair
if((theType not in self.registered_types and
None not in self.registered_types) or
(sender not in self.registered_senders and
None not in self.registered_senders)):
return
for o in self._observers_for_notification(theType, sender):
o(theType, sender, args=kwargs)
def _observers_for_notification(self, theType, sender):
"""Find all registered observers that should recieve notification"""
keys = (
(theType,sender),
(theType, None),
(None, sender),
(None,None)
)
obs = set()
for k in keys:
obs.update(self.observers.get(k, set()))
return obs
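    # e.g. a notification posted as ('TYPE', sender) reaches observers
    # registered under ('TYPE', sender), ('TYPE', None), (None, sender)
    # and (None, None).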
def add_observer(self, callback, theType, sender):
"""Add an observer callback to this notification center.
The given callback will be called upon posting of notifications of
the given type/sender and will receive any additional kwargs passed
to post_notification.
Parameters
----------
observerCallback : callable
Callable. Must take at least two arguments::
observerCallback(type, sender, args={})
theType : hashable
The notification type. If None, all notifications from sender
will be posted.
sender : hashable
The notification sender. If None, all notifications of theType
will be posted.
"""
        assert callback is not None
self.registered_types.add(theType)
self.registered_senders.add(sender)
self.observers.setdefault((theType,sender), set()).add(callback)
def remove_all_observers(self):
"""Removes all observers from this notification center"""
self._init_observers()
sharedCenter = NotificationCenter()
|
{
"content_hash": "14fc8b5e37fe4e5aea81e2b54c7edf44",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 102,
"avg_line_length": 35.3089430894309,
"alnum_prop": 0.5477780336173153,
"repo_name": "toomoresuch/pysonengine",
"id": "ee9701eccb9ed31cb0df1556598a825b003c0644",
"size": "4362",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "eggs/ipython-0.10.1-py2.6.egg/IPython/kernel/core/notification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "401941"
},
{
"name": "JavaScript",
"bytes": "628757"
},
{
"name": "Python",
"bytes": "12919662"
},
{
"name": "Shell",
"bytes": "416"
},
{
"name": "VimL",
"bytes": "4587"
}
],
"symlink_target": ""
}
|
from zerver.lib.onboarding import create_if_missing_realm_internal_bots
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import Realm, UserProfile
class TestRealmInternalBotCreation(ZulipTestCase):
def test_create_if_missing_realm_internal_bots(self) -> None:
realm_internal_bots_dict = [
{"var_name": "TEST_BOT", "email_template": "test-bot@%s", "name": "Test Bot"}
]
def check_test_bot_exists() -> bool:
all_realms_count = Realm.objects.count()
all_test_bot_count = UserProfile.objects.filter(
email="test-bot@zulip.com",
).count()
return all_realms_count == all_test_bot_count
self.assertFalse(check_test_bot_exists())
with self.settings(REALM_INTERNAL_BOTS=realm_internal_bots_dict):
create_if_missing_realm_internal_bots()
self.assertTrue(check_test_bot_exists())
|
{
"content_hash": "9dae0405f2a2c40e51380fcf792a62eb",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 89,
"avg_line_length": 42.40909090909091,
"alnum_prop": 0.6516613076098606,
"repo_name": "zulip/zulip",
"id": "9ef66210b3c920c66a8961ab4e545d32a7871023",
"size": "933",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "zerver/tests/test_onboarding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "509211"
},
{
"name": "Dockerfile",
"bytes": "4219"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "696430"
},
{
"name": "Handlebars",
"bytes": "384277"
},
{
"name": "JavaScript",
"bytes": "4098367"
},
{
"name": "Perl",
"bytes": "10163"
},
{
"name": "Puppet",
"bytes": "112433"
},
{
"name": "Python",
"bytes": "10336945"
},
{
"name": "Ruby",
"bytes": "3166"
},
{
"name": "Shell",
"bytes": "147162"
},
{
"name": "TypeScript",
"bytes": "286785"
}
],
"symlink_target": ""
}
|
import rospy
import math
from tf import transformations
from geometry_msgs.msg import PoseWithCovarianceStamped
class PoseSetter(rospy.SubscribeListener):
def __init__(self, pose):
self.pose = pose
def peer_subscribe(self, topic_name, topic_publish, peer_publish):
p = PoseWithCovarianceStamped()
p.header.frame_id = "map"
p.pose.pose.position.x = self.pose[0]
p.pose.pose.position.y = self.pose[1]
(p.pose.pose.orientation.x,
p.pose.pose.orientation.y,
p.pose.pose.orientation.z,
p.pose.pose.orientation.w) = transformations.quaternion_from_euler(0, 0, self.pose[2])
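        # The covariance is a row-major 6x6 matrix over (x, y, z, roll, pitch,
        # yaw), so index 6*i+i is the variance of component i: here a sigma of
        # 0.5 m for x and y, and pi/12 rad for yaw.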
p.pose.covariance[6*0+0] = 0.5 * 0.5
p.pose.covariance[6*1+1] = 0.5 * 0.5
p.pose.covariance[6*3+3] = math.pi/12.0 * math.pi/12.0
peer_publish(p)
if __name__ == '__main__':
pose = map(float, rospy.myargv()[1:])
rospy.init_node('pose_setter', anonymous=True)
pub = rospy.Publisher("initialpose", PoseWithCovarianceStamped, PoseSetter(pose))
rospy.spin()
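# Example invocation (illustrative; the package name is an assumption):
#   rosrun amcl set_pose.py 1.0 2.0 0.5
# publishes an initial pose at x = 1.0 m, y = 2.0 m, yaw = 0.5 rad on the
# "initialpose" topic once a subscriber connects.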
|
{
"content_hash": "aab7802a2f7cb7c28bf2a9cce1fd5341",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 95,
"avg_line_length": 34.096774193548384,
"alnum_prop": 0.641438032166509,
"repo_name": "OSUrobotics/privacy-interfaces",
"id": "997a0ffccac41b70c17b2b119693e881e02e6619",
"size": "1080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "navigation/navigation/amcl/test/set_pose.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "83936"
},
{
"name": "C++",
"bytes": "1360235"
},
{
"name": "CMake",
"bytes": "46381"
},
{
"name": "Matlab",
"bytes": "2021"
},
{
"name": "Objective-C",
"bytes": "316"
},
{
"name": "Python",
"bytes": "364838"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('skalvi', '0010_participatein_user_profile_id'),
('skalvi', '0012_merge_20170313_1449'),
]
operations = [
]
|
{
"content_hash": "d888a8434611504262396174046388a9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 57,
"avg_line_length": 19.642857142857142,
"alnum_prop": 0.6436363636363637,
"repo_name": "ung-it/UngIT",
"id": "7ca6de41fc2353a4d4b12d6116ad035d9cb84792",
"size": "348",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "skalvi/migrations/0013_merge_20170314_1250.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "17551"
},
{
"name": "HTML",
"bytes": "74613"
},
{
"name": "JavaScript",
"bytes": "124785"
},
{
"name": "Python",
"bytes": "97261"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AccessRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('user_count', models.IntegerField()),
('view_count', models.IntegerField()),
],
options={
'verbose_name': 'Access Record',
'verbose_name_plural': 'Access Record',
},
),
migrations.CreateModel(
name='Host',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('nagios_name', models.CharField(blank=True, max_length=64, null=True, verbose_name='Nagios Host ID')),
('ip', models.GenericIPAddressField(blank=True, null=True)),
('internal_ip', models.GenericIPAddressField(blank=True, null=True)),
('user', models.CharField(max_length=64)),
('password', models.CharField(max_length=128)),
('ssh_port', models.IntegerField(blank=True, null=True)),
('status', models.SmallIntegerField(choices=[(0, 'Normal'), (1, 'Down'), (2, 'No Connect'), (3, 'Error')])),
('brand', models.CharField(choices=[('DELL', 'DELL'), ('HP', 'HP'), ('Other', 'Other')], max_length=64)),
('model', models.CharField(max_length=64)),
('cpu', models.CharField(max_length=64)),
('core_num', models.SmallIntegerField(choices=[(2, b'2 Cores'), (4, b'4 Cores'), (6, b'6 Cores'), (8, b'8 Cores'), (10, b'10 Cores'), (12, b'12 Cores'), (14, b'14 Cores'), (16, b'16 Cores'), (18, b'18 Cores'), (20, b'20 Cores'), (22, b'22 Cores'), (24, b'24 Cores'), (26, b'26 Cores'), (28, b'28 Cores')])),
('hard_disk', models.IntegerField()),
('memory', models.IntegerField()),
('system', models.CharField(choices=[('CentOS', 'CentOS'), ('FreeBSD', 'FreeBSD'), ('Ubuntu', 'Ubuntu')], max_length=32, verbose_name='System OS')),
('system_version', models.CharField(max_length=32)),
('system_arch', models.CharField(choices=[('x86_64', 'x86_64'), ('i386', 'i386')], max_length=32)),
('create_time', models.DateField()),
('guarantee_date', models.DateField()),
('service_type', models.CharField(choices=[(b'moniter', 'Moniter'), (b'lvs', 'LVS'), (b'db', 'Database'), (b'analysis', 'Analysis'), (b'admin', 'Admin'), (b'storge', 'Storge'), (b'web', 'WEB'), (b'email', 'Email'), (b'mix', 'Mix')], max_length=32)),
('description', models.TextField()),
],
options={
'verbose_name': 'Host',
'verbose_name_plural': 'Host',
},
),
migrations.CreateModel(
name='HostGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32)),
('description', models.TextField()),
('hosts', models.ManyToManyField(blank=True, related_name='groups', to='app.Host', verbose_name='Hosts')),
],
options={
'verbose_name': 'Host Group',
'verbose_name_plural': 'Host Group',
},
),
migrations.CreateModel(
name='IDC',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('description', models.TextField()),
('contact', models.CharField(max_length=32)),
('telphone', models.CharField(max_length=32)),
('address', models.CharField(max_length=128)),
('customer_id', models.CharField(max_length=128)),
('create_time', models.DateField(auto_now=True)),
],
options={
'verbose_name': 'IDC',
'verbose_name_plural': 'IDC',
},
),
migrations.CreateModel(
name='MaintainLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('maintain_type', models.CharField(max_length=32)),
('hard_type', models.CharField(max_length=16)),
('time', models.DateTimeField()),
('operator', models.CharField(max_length=16)),
('note', models.TextField()),
('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Host')),
],
options={
'verbose_name': 'Maintain Log',
'verbose_name_plural': 'Maintain Log',
},
),
migrations.AddField(
model_name='host',
name='idc',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.IDC'),
),
]
|
{
"content_hash": "fd0547652fb8806792e877f5031f23b4",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 323,
"avg_line_length": 50.27272727272727,
"alnum_prop": 0.516998191681736,
"repo_name": "pobear/django-xadmin",
"id": "a1cacdfe6fdbc88d52d5c69f497953275dac3bf5",
"size": "5602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo_app/app/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "23733"
},
{
"name": "HTML",
"bytes": "95259"
},
{
"name": "JavaScript",
"bytes": "65236"
},
{
"name": "Python",
"bytes": "425488"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
from blight_risk_prediction import util, colmap
def make_owner_features(db_connection):
"""
Get the number of years that a home was owned by a corporation in last three years.
Input:
db_connection: connection to postgres database. "set schema ..." must have been called on this connection
to select the correct schema from which to load inspections
Output:
A pandas dataframe, with one row per inspection and one column per feature.
"""
query = ("SELECT inspections.inspection_date, taxes.* "
"FROM parcels_inspections AS inspections "
"JOIN public.taxes_owners AS taxes "
"ON inspections.parcel_id = taxes.parcel_id")
df = pd.read_sql(query, con=db_connection)
for year in colmap.insp_years:
indices_this_year = df.inspection_date.dt.year == int(year)
year_range = range(int(year[2:]) - 3, int(year[2:]))
if int(year) == 2008:
num_org = df[["owner_2007", "parcel_id"]]
elif int(year) == 2009:
num_org = df[["owner_2007", "owner_2008"]]
elif int(year) >= 2010:
num_org = df[['owner_20' + str(y).zfill(2) for y in year_range]]
try:
computation = num_org.apply(lambda row: len(row[row == 'ORGANIZATION']), axis=1)
df.ix[indices_this_year, "owner_ner"] = computation
        except Exception:
df.ix[indices_this_year, "owner_ner"] = np.nan
return df
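# Illustrative intent (an assumption about the data layout): for an inspection
# dated 2012, the window covers owner_2009..owner_2011, and "owner_ner" counts
# in how many of those years the owner column equals 'ORGANIZATION'.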
|
{
"content_hash": "0dbc0d8d07924b252df08b2c74284fb3",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 109,
"avg_line_length": 35.16279069767442,
"alnum_prop": 0.6084656084656085,
"repo_name": "dssg/cincinnati2015-public",
"id": "8b1527c7a39b0d2d2e74d80035fa2f30da2b84e1",
"size": "1535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blight_risk_prediction/features/ner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3983"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Jupyter Notebook",
"bytes": "120725"
},
{
"name": "Python",
"bytes": "154987"
}
],
"symlink_target": ""
}
|
import mock
from oslo_serialization import jsonutils
import six
from heat.common import exception
from heat.engine import constraints
from heat.engine.hot import functions as hot_funcs
from heat.engine.hot import parameters as hot_param
from heat.engine import parameters
from heat.engine import plugin_manager
from heat.engine import properties
from heat.engine import resources
from heat.engine import support
from heat.engine import translation
from heat.tests import common
class PropertySchemaTest(common.HeatTestCase):
def test_schema_all(self):
d = {
'type': 'string',
'description': 'A string',
'default': 'wibble',
'required': False,
'update_allowed': False,
'immutable': False,
'constraints': [
{'length': {'min': 4, 'max': 8}},
]
}
s = properties.Schema(properties.Schema.STRING, 'A string',
default='wibble',
constraints=[constraints.Length(4, 8)])
self.assertEqual(d, dict(s))
def test_schema_list_schema(self):
d = {
'type': 'list',
'description': 'A list',
'schema': {
'*': {
'type': 'string',
'description': 'A string',
'default': 'wibble',
'required': False,
'update_allowed': False,
'immutable': False,
'constraints': [
{'length': {'min': 4, 'max': 8}},
]
}
},
'required': False,
'update_allowed': False,
'immutable': False,
}
s = properties.Schema(properties.Schema.STRING, 'A string',
default='wibble',
constraints=[constraints.Length(4, 8)])
l = properties.Schema(properties.Schema.LIST, 'A list', schema=s)
self.assertEqual(d, dict(l))
def test_schema_map_schema(self):
d = {
'type': 'map',
'description': 'A map',
'schema': {
'Foo': {
'type': 'string',
'description': 'A string',
'default': 'wibble',
'required': False,
'update_allowed': False,
'immutable': False,
'constraints': [
{'length': {'min': 4, 'max': 8}},
]
}
},
'required': False,
'update_allowed': False,
'immutable': False,
}
s = properties.Schema(properties.Schema.STRING, 'A string',
default='wibble',
constraints=[constraints.Length(4, 8)])
m = properties.Schema(properties.Schema.MAP, 'A map',
schema={'Foo': s})
self.assertEqual(d, dict(m))
def test_schema_nested_schema(self):
d = {
'type': 'list',
'description': 'A list',
'schema': {
'*': {
'type': 'map',
'description': 'A map',
'schema': {
'Foo': {
'type': 'string',
'description': 'A string',
'default': 'wibble',
'required': False,
'update_allowed': False,
'immutable': False,
'constraints': [
{'length': {'min': 4, 'max': 8}},
]
}
},
'required': False,
'update_allowed': False,
'immutable': False,
}
},
'required': False,
'update_allowed': False,
'immutable': False,
}
s = properties.Schema(properties.Schema.STRING, 'A string',
default='wibble',
constraints=[constraints.Length(4, 8)])
m = properties.Schema(properties.Schema.MAP, 'A map',
schema={'Foo': s})
l = properties.Schema(properties.Schema.LIST, 'A list', schema=m)
self.assertEqual(d, dict(l))
def test_all_resource_schemata(self):
for resource_type in resources.global_env().get_types():
for schema in six.itervalues(getattr(resource_type,
'properties_schema',
{})):
properties.Schema.from_legacy(schema)
def test_from_legacy_idempotency(self):
s = properties.Schema(properties.Schema.STRING)
self.assertTrue(properties.Schema.from_legacy(s) is s)
def test_from_legacy_minimal_string(self):
s = properties.Schema.from_legacy({
'Type': 'String',
})
self.assertEqual(properties.Schema.STRING, s.type)
self.assertIsNone(s.description)
self.assertIsNone(s.default)
self.assertFalse(s.required)
self.assertEqual(0, len(s.constraints))
def test_from_legacy_string(self):
s = properties.Schema.from_legacy({
'Type': 'String',
'Description': 'a string',
'Default': 'wibble',
'Implemented': False,
'MinLength': 4,
'MaxLength': 8,
'AllowedValues': ['blarg', 'wibble'],
'AllowedPattern': '[a-z]*',
})
self.assertEqual(properties.Schema.STRING, s.type)
self.assertEqual('a string', s.description)
self.assertEqual('wibble', s.default)
self.assertFalse(s.required)
self.assertEqual(3, len(s.constraints))
self.assertFalse(s.immutable)
def test_from_legacy_min_length(self):
s = properties.Schema.from_legacy({
'Type': 'String',
'MinLength': 4,
})
self.assertEqual(1, len(s.constraints))
c = s.constraints[0]
self.assertIsInstance(c, constraints.Length)
self.assertEqual(4, c.min)
self.assertIsNone(c.max)
def test_from_legacy_max_length(self):
s = properties.Schema.from_legacy({
'Type': 'String',
'MaxLength': 8,
})
self.assertEqual(1, len(s.constraints))
c = s.constraints[0]
self.assertIsInstance(c, constraints.Length)
self.assertIsNone(c.min)
self.assertEqual(8, c.max)
def test_from_legacy_minmax_length(self):
s = properties.Schema.from_legacy({
'Type': 'String',
'MinLength': 4,
'MaxLength': 8,
})
self.assertEqual(1, len(s.constraints))
c = s.constraints[0]
self.assertIsInstance(c, constraints.Length)
self.assertEqual(4, c.min)
self.assertEqual(8, c.max)
def test_from_legacy_minmax_string_length(self):
s = properties.Schema.from_legacy({
'Type': 'String',
'MinLength': '4',
'MaxLength': '8',
})
self.assertEqual(1, len(s.constraints))
c = s.constraints[0]
self.assertIsInstance(c, constraints.Length)
self.assertEqual(4, c.min)
self.assertEqual(8, c.max)
def test_from_legacy_min_value(self):
s = properties.Schema.from_legacy({
'Type': 'Integer',
'MinValue': 4,
})
self.assertEqual(1, len(s.constraints))
c = s.constraints[0]
self.assertIsInstance(c, constraints.Range)
self.assertEqual(4, c.min)
self.assertIsNone(c.max)
def test_from_legacy_max_value(self):
s = properties.Schema.from_legacy({
'Type': 'Integer',
'MaxValue': 8,
})
self.assertEqual(1, len(s.constraints))
c = s.constraints[0]
self.assertIsInstance(c, constraints.Range)
self.assertIsNone(c.min)
self.assertEqual(8, c.max)
def test_from_legacy_minmax_value(self):
s = properties.Schema.from_legacy({
'Type': 'Integer',
'MinValue': 4,
'MaxValue': 8,
})
self.assertEqual(1, len(s.constraints))
c = s.constraints[0]
self.assertIsInstance(c, constraints.Range)
self.assertEqual(4, c.min)
self.assertEqual(8, c.max)
def test_from_legacy_minmax_string_value(self):
s = properties.Schema.from_legacy({
'Type': 'Integer',
'MinValue': '4',
'MaxValue': '8',
})
self.assertEqual(1, len(s.constraints))
c = s.constraints[0]
self.assertIsInstance(c, constraints.Range)
self.assertEqual(4, c.min)
self.assertEqual(8, c.max)
def test_from_legacy_allowed_values(self):
s = properties.Schema.from_legacy({
'Type': 'String',
'AllowedValues': ['blarg', 'wibble'],
})
self.assertEqual(1, len(s.constraints))
c = s.constraints[0]
self.assertIsInstance(c, constraints.AllowedValues)
self.assertEqual(('blarg', 'wibble'), c.allowed)
def test_from_legacy_allowed_pattern(self):
s = properties.Schema.from_legacy({
'Type': 'String',
'AllowedPattern': '[a-z]*',
})
self.assertEqual(1, len(s.constraints))
c = s.constraints[0]
self.assertIsInstance(c, constraints.AllowedPattern)
self.assertEqual('[a-z]*', c.pattern)
def test_from_legacy_list(self):
l = properties.Schema.from_legacy({
'Type': 'List',
'Default': ['wibble'],
'Schema': {
'Type': 'String',
'Default': 'wibble',
'MaxLength': 8,
}
})
self.assertEqual(properties.Schema.LIST, l.type)
self.assertEqual(['wibble'], l.default)
ss = l.schema[0]
self.assertEqual(properties.Schema.STRING, ss.type)
self.assertEqual('wibble', ss.default)
def test_from_legacy_map(self):
l = properties.Schema.from_legacy({
'Type': 'Map',
'Schema': {
'foo': {
'Type': 'String',
'Default': 'wibble',
}
}
})
self.assertEqual(properties.Schema.MAP, l.type)
ss = l.schema['foo']
self.assertEqual(properties.Schema.STRING, ss.type)
self.assertEqual('wibble', ss.default)
def test_from_legacy_invalid_key(self):
self.assertRaises(exception.InvalidSchemaError,
properties.Schema.from_legacy,
{'Type': 'String', 'Foo': 'Bar'})
def test_from_string_param(self):
description = "WebServer EC2 instance type"
allowed_values = ["t1.micro", "m1.small", "m1.large", "m1.xlarge",
"m2.xlarge", "m2.2xlarge", "m2.4xlarge",
"c1.medium", "c1.xlarge", "cc1.4xlarge"]
constraint_desc = "Must be a valid EC2 instance type."
param = parameters.Schema.from_dict('name', {
"Type": "String",
"Description": description,
"Default": "m1.large",
"AllowedValues": allowed_values,
"ConstraintDescription": constraint_desc,
})
schema = properties.Schema.from_parameter(param)
self.assertEqual(properties.Schema.STRING, schema.type)
self.assertEqual(description, schema.description)
self.assertIsNone(schema.default)
self.assertFalse(schema.required)
self.assertEqual(1, len(schema.constraints))
allowed_constraint = schema.constraints[0]
self.assertEqual(tuple(allowed_values), allowed_constraint.allowed)
self.assertEqual(constraint_desc, allowed_constraint.description)
props = properties.Properties({'test': schema}, {})
props.validate()
def test_from_string_allowed_pattern(self):
description = "WebServer EC2 instance type"
allowed_pattern = "[A-Za-z0-9.]*"
constraint_desc = "Must contain only alphanumeric characters."
param = parameters.Schema.from_dict('name', {
"Type": "String",
"Description": description,
"Default": "m1.large",
"AllowedPattern": allowed_pattern,
"ConstraintDescription": constraint_desc,
})
schema = properties.Schema.from_parameter(param)
self.assertEqual(properties.Schema.STRING, schema.type)
self.assertEqual(description, schema.description)
self.assertIsNone(schema.default)
self.assertFalse(schema.required)
self.assertEqual(1, len(schema.constraints))
allowed_constraint = schema.constraints[0]
self.assertEqual(allowed_pattern, allowed_constraint.pattern)
self.assertEqual(constraint_desc, allowed_constraint.description)
props = properties.Properties({'test': schema}, {})
props.validate()
def test_from_string_multi_constraints(self):
description = "WebServer EC2 instance type"
allowed_pattern = "[A-Za-z0-9.]*"
constraint_desc = "Must contain only alphanumeric characters."
param = parameters.Schema.from_dict('name', {
"Type": "String",
"Description": description,
"Default": "m1.large",
"MinLength": "7",
"AllowedPattern": allowed_pattern,
"ConstraintDescription": constraint_desc,
})
schema = properties.Schema.from_parameter(param)
self.assertEqual(properties.Schema.STRING, schema.type)
self.assertEqual(description, schema.description)
self.assertIsNone(schema.default)
self.assertFalse(schema.required)
self.assertEqual(2, len(schema.constraints))
len_constraint = schema.constraints[0]
allowed_constraint = schema.constraints[1]
self.assertEqual(7, len_constraint.min)
self.assertIsNone(len_constraint.max)
self.assertEqual(allowed_pattern, allowed_constraint.pattern)
self.assertEqual(constraint_desc, allowed_constraint.description)
props = properties.Properties({'test': schema}, {})
props.validate()
def test_from_param_string_min_len(self):
param = parameters.Schema.from_dict('name', {
"Description": "WebServer EC2 instance type",
"Type": "String",
"Default": "m1.large",
"MinLength": "7",
})
schema = properties.Schema.from_parameter(param)
self.assertFalse(schema.required)
self.assertIsNone(schema.default)
self.assertEqual(1, len(schema.constraints))
len_constraint = schema.constraints[0]
self.assertEqual(7, len_constraint.min)
self.assertIsNone(len_constraint.max)
props = properties.Properties({'test': schema}, {})
props.validate()
def test_from_param_string_max_len(self):
param = parameters.Schema.from_dict('name', {
"Description": "WebServer EC2 instance type",
"Type": "String",
"Default": "m1.large",
"MaxLength": "11",
})
schema = properties.Schema.from_parameter(param)
self.assertFalse(schema.required)
self.assertIsNone(schema.default)
self.assertEqual(1, len(schema.constraints))
len_constraint = schema.constraints[0]
self.assertIsNone(len_constraint.min)
self.assertEqual(11, len_constraint.max)
props = properties.Properties({'test': schema}, {})
props.validate()
def test_from_param_string_min_max_len(self):
param = parameters.Schema.from_dict('name', {
"Description": "WebServer EC2 instance type",
"Type": "String",
"Default": "m1.large",
"MinLength": "7",
"MaxLength": "11",
})
schema = properties.Schema.from_parameter(param)
self.assertFalse(schema.required)
self.assertIsNone(schema.default)
self.assertEqual(1, len(schema.constraints))
len_constraint = schema.constraints[0]
self.assertEqual(7, len_constraint.min)
self.assertEqual(11, len_constraint.max)
props = properties.Properties({'test': schema}, {})
props.validate()
def test_from_param_no_default(self):
param = parameters.Schema.from_dict('name', {
"Description": "WebServer EC2 instance type",
"Type": "String",
})
schema = properties.Schema.from_parameter(param)
self.assertTrue(schema.required)
self.assertIsNone(schema.default)
self.assertEqual(0, len(schema.constraints))
self.assertFalse(schema.allow_conversion)
props = properties.Properties({'name': schema}, {'name': 'm1.large'})
props.validate()
def test_from_number_param_min(self):
param = parameters.Schema.from_dict('name', {
"Type": "Number",
"Default": "42",
"MinValue": "10",
})
schema = properties.Schema.from_parameter(param)
self.assertEqual(properties.Schema.NUMBER, schema.type)
self.assertIsNone(schema.default)
self.assertFalse(schema.required)
self.assertEqual(1, len(schema.constraints))
value_constraint = schema.constraints[0]
self.assertEqual(10, value_constraint.min)
self.assertIsNone(value_constraint.max)
props = properties.Properties({'test': schema}, {})
props.validate()
def test_from_number_param_max(self):
param = parameters.Schema.from_dict('name', {
"Type": "Number",
"Default": "42",
"MaxValue": "100",
})
schema = properties.Schema.from_parameter(param)
self.assertEqual(properties.Schema.NUMBER, schema.type)
self.assertIsNone(schema.default)
self.assertFalse(schema.required)
self.assertEqual(1, len(schema.constraints))
value_constraint = schema.constraints[0]
self.assertIsNone(value_constraint.min)
self.assertEqual(100, value_constraint.max)
props = properties.Properties({'test': schema}, {})
props.validate()
def test_from_number_param_min_max(self):
param = parameters.Schema.from_dict('name', {
"Type": "Number",
"Default": "42",
"MinValue": "10",
"MaxValue": "100",
})
schema = properties.Schema.from_parameter(param)
self.assertEqual(properties.Schema.NUMBER, schema.type)
self.assertIsNone(schema.default)
self.assertFalse(schema.required)
self.assertEqual(1, len(schema.constraints))
value_constraint = schema.constraints[0]
self.assertEqual(10, value_constraint.min)
self.assertEqual(100, value_constraint.max)
props = properties.Properties({'test': schema}, {})
props.validate()
def test_from_number_param_allowed_vals(self):
constraint_desc = "The quick brown fox jumps over the lazy dog."
param = parameters.Schema.from_dict('name', {
"Type": "Number",
"Default": "42",
"AllowedValues": ["10", "42", "100"],
"ConstraintDescription": constraint_desc,
})
schema = properties.Schema.from_parameter(param)
self.assertEqual(properties.Schema.NUMBER, schema.type)
self.assertIsNone(schema.default)
self.assertFalse(schema.required)
self.assertEqual(1, len(schema.constraints))
self.assertFalse(schema.allow_conversion)
allowed_constraint = schema.constraints[0]
self.assertEqual(('10', '42', '100'), allowed_constraint.allowed)
self.assertEqual(constraint_desc, allowed_constraint.description)
props = properties.Properties({'test': schema}, {})
props.validate()
def test_from_list_param(self):
param = parameters.Schema.from_dict('name', {
"Type": "CommaDelimitedList",
"Default": "foo,bar,baz"
})
schema = properties.Schema.from_parameter(param)
self.assertEqual(properties.Schema.LIST, schema.type)
self.assertIsNone(schema.default)
self.assertFalse(schema.required)
self.assertTrue(schema.allow_conversion)
props = properties.Properties({'test': schema}, {})
props.validate()
def test_from_json_param(self):
param = parameters.Schema.from_dict('name', {
"Type": "Json",
"Default": {"foo": "bar", "blarg": "wibble"}
})
schema = properties.Schema.from_parameter(param)
self.assertEqual(properties.Schema.MAP, schema.type)
self.assertIsNone(schema.default)
self.assertFalse(schema.required)
self.assertTrue(schema.allow_conversion)
props = properties.Properties({'test': schema}, {})
props.validate()
def test_no_mismatch_in_update_policy(self):
manager = plugin_manager.PluginManager('heat.engine.resources')
resource_mapping = plugin_manager.PluginMapping('resource')
res_plugin_mappings = resource_mapping.load_all(manager)
all_resources = {}
for mapping in res_plugin_mappings:
name, cls = mapping
all_resources[name] = cls
def check_update_policy(resource_type, prop_key, prop, update=False):
if prop.update_allowed:
update = True
sub_schema = prop.schema
if sub_schema:
for sub_prop_key, sub_prop in six.iteritems(sub_schema):
if not update:
self.assertEqual(update, sub_prop.update_allowed,
"Mismatch in update policies: "
"resource %(res)s, properties "
"'%(prop)s' and '%(nested_prop)s'." %
{'res': resource_type,
'prop': prop_key,
'nested_prop': sub_prop_key})
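                    # A '*' sub-key is a wildcard schema covering every
                    # entry, so the parent property name is carried through;
                    # otherwise recurse with the named sub-property.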
if sub_prop_key == '*':
check_update_policy(resource_type, prop_key,
sub_prop, update)
else:
check_update_policy(resource_type, sub_prop_key,
sub_prop, update)
for resource_type, resource_class in six.iteritems(all_resources):
props_schemata = properties.schemata(
resource_class.properties_schema)
for prop_key, prop in six.iteritems(props_schemata):
check_update_policy(resource_type, prop_key, prop)
class PropertyTest(common.HeatTestCase):
def test_required_default(self):
p = properties.Property({'Type': 'String'})
self.assertFalse(p.required())
def test_required_false(self):
p = properties.Property({'Type': 'String', 'Required': False})
self.assertFalse(p.required())
def test_required_true(self):
p = properties.Property({'Type': 'String', 'Required': True})
self.assertTrue(p.required())
def test_implemented_default(self):
p = properties.Property({'Type': 'String'})
self.assertTrue(p.implemented())
def test_implemented_false(self):
p = properties.Property({'Type': 'String', 'Implemented': False})
self.assertFalse(p.implemented())
def test_implemented_true(self):
p = properties.Property({'Type': 'String', 'Implemented': True})
self.assertTrue(p.implemented())
def test_no_default(self):
p = properties.Property({'Type': 'String'})
self.assertFalse(p.has_default())
def test_default(self):
p = properties.Property({'Type': 'String', 'Default': 'wibble'})
self.assertEqual('wibble', p.default())
def test_type(self):
p = properties.Property({'Type': 'String'})
self.assertEqual('String', p.type())
def test_bad_type(self):
self.assertRaises(exception.InvalidSchemaError,
properties.Property, {'Type': 'Fish'})
def test_bad_key(self):
self.assertRaises(exception.InvalidSchemaError,
properties.Property,
{'Type': 'String', 'Foo': 'Bar'})
def test_string_pattern_good(self):
schema = {'Type': 'String',
'AllowedPattern': '[a-z]*'}
p = properties.Property(schema)
self.assertEqual('foo', p.get_value('foo', True))
def test_string_pattern_bad_prefix(self):
schema = {'Type': 'String',
'AllowedPattern': '[a-z]*'}
p = properties.Property(schema)
self.assertRaises(exception.StackValidationFailed,
p.get_value, '1foo', True)
def test_string_pattern_bad_suffix(self):
schema = {'Type': 'String',
'AllowedPattern': '[a-z]*'}
p = properties.Property(schema)
self.assertRaises(exception.StackValidationFailed,
p.get_value, 'foo1', True)
def test_string_value_list_good(self):
schema = {'Type': 'String',
'AllowedValues': ['foo', 'bar', 'baz']}
p = properties.Property(schema)
self.assertEqual('bar', p.get_value('bar', True))
def test_string_value_list_bad(self):
schema = {'Type': 'String',
'AllowedValues': ['foo', 'bar', 'baz']}
p = properties.Property(schema)
self.assertRaises(exception.StackValidationFailed,
p.get_value, 'blarg', True)
def test_string_maxlength_good(self):
schema = {'Type': 'String',
'MaxLength': '5'}
p = properties.Property(schema)
self.assertEqual('abcd', p.get_value('abcd', True))
def test_string_exceeded_maxlength(self):
schema = {'Type': 'String',
'MaxLength': '5'}
p = properties.Property(schema)
self.assertRaises(exception.StackValidationFailed,
p.get_value, 'abcdef', True)
def test_string_length_in_range(self):
schema = {'Type': 'String',
'MinLength': '5',
'MaxLength': '10'}
p = properties.Property(schema)
self.assertEqual('abcdef', p.get_value('abcdef', True))
def test_string_minlength_good(self):
schema = {'Type': 'String',
'MinLength': '5'}
p = properties.Property(schema)
self.assertEqual('abcde', p.get_value('abcde', True))
def test_string_smaller_than_minlength(self):
schema = {'Type': 'String',
'MinLength': '5'}
p = properties.Property(schema)
self.assertRaises(exception.StackValidationFailed,
p.get_value, 'abcd', True)
def test_int_good(self):
schema = {'Type': 'Integer',
'MinValue': 3,
'MaxValue': 3}
p = properties.Property(schema)
self.assertEqual(3, p.get_value(3, True))
def test_int_bad(self):
schema = {'Type': 'Integer'}
p = properties.Property(schema)
        # Python 3.4.3 raises a slightly different error message,
        # so accept both variants via a regex.
self.assertRaisesRegex(
TypeError, r"int\(\) argument must be a string"
"(, a bytes-like object)?"
" or a number, not 'list'", p.get_value, [1])
def test_str_from_int(self):
schema = {'Type': 'String'}
p = properties.Property(schema)
self.assertEqual('3', p.get_value(3))
def test_str_from_bool(self):
schema = {'Type': 'String'}
p = properties.Property(schema)
self.assertEqual('True', p.get_value(True))
def test_int_from_str_good(self):
schema = {'Type': 'Integer'}
p = properties.Property(schema)
self.assertEqual(3, p.get_value('3'))
def test_int_from_str_bad(self):
schema = {'Type': 'Integer'}
p = properties.Property(schema)
ex = self.assertRaises(TypeError, p.get_value, '3a')
self.assertEqual("Value '3a' is not an integer", six.text_type(ex))
def test_integer_low(self):
schema = {'Type': 'Integer',
'MinValue': 4}
p = properties.Property(schema)
self.assertRaises(exception.StackValidationFailed, p.get_value, 3,
True)
def test_integer_high(self):
schema = {'Type': 'Integer',
'MaxValue': 2}
p = properties.Property(schema)
self.assertRaises(exception.StackValidationFailed, p.get_value, 3,
True)
def test_integer_value_list_good(self):
schema = {'Type': 'Integer',
'AllowedValues': [1, 3, 5]}
p = properties.Property(schema)
        self.assertEqual(5, p.get_value(5, True))
def test_integer_value_list_bad(self):
schema = {'Type': 'Integer',
'AllowedValues': [1, 3, 5]}
p = properties.Property(schema)
self.assertRaises(exception.StackValidationFailed, p.get_value, 2,
True)
def test_number_good(self):
schema = {'Type': 'Number',
'MinValue': '3',
'MaxValue': '3'}
p = properties.Property(schema)
self.assertEqual(3, p.get_value(3, True))
def test_numbers_from_strings(self):
"""Numbers can be converted from strings."""
schema = {'Type': 'Number',
'MinValue': '3',
'MaxValue': '3'}
p = properties.Property(schema)
self.assertEqual(3, p.get_value('3'))
def test_number_value_list_good(self):
schema = {'Type': 'Number',
'AllowedValues': [1, 3, 5]}
p = properties.Property(schema)
self.assertEqual(5, p.get_value('5', True))
def test_number_value_list_bad(self):
schema = {'Type': 'Number',
'AllowedValues': ['1', '3', '5']}
p = properties.Property(schema)
self.assertRaises(exception.StackValidationFailed,
p.get_value, '2', True)
def test_number_low(self):
schema = {'Type': 'Number',
'MinValue': '4'}
p = properties.Property(schema)
self.assertRaises(exception.StackValidationFailed,
p.get_value, '3', True)
def test_number_high(self):
schema = {'Type': 'Number',
'MaxValue': '2'}
p = properties.Property(schema)
self.assertRaises(exception.StackValidationFailed,
p.get_value, '3', True)
def test_boolean_true(self):
p = properties.Property({'Type': 'Boolean'})
self.assertIs(True, p.get_value('True'))
self.assertIs(True, p.get_value('true'))
self.assertIs(True, p.get_value(True))
def test_boolean_false(self):
p = properties.Property({'Type': 'Boolean'})
self.assertIs(False, p.get_value('False'))
self.assertIs(False, p.get_value('false'))
self.assertIs(False, p.get_value(False))
def test_boolean_invalid_string(self):
p = properties.Property({'Type': 'Boolean'})
self.assertRaises(ValueError, p.get_value, 'fish')
def test_boolean_invalid_int(self):
p = properties.Property({'Type': 'Boolean'})
self.assertRaises(TypeError, p.get_value, 5)
def test_list_string(self):
p = properties.Property({'Type': 'List'})
self.assertRaises(TypeError, p.get_value, 'foo')
def test_list_good(self):
p = properties.Property({'Type': 'List'})
self.assertEqual(['foo', 'bar'], p.get_value(['foo', 'bar']))
def test_list_dict(self):
p = properties.Property({'Type': 'List'})
self.assertRaises(TypeError, p.get_value, {'foo': 'bar'})
def test_list_is_delimited(self):
p = properties.Property({'Type': 'List'})
self.assertRaises(TypeError, p.get_value, 'foo,bar')
p.schema.allow_conversion = True
self.assertEqual(['foo', 'bar'], p.get_value('foo,bar'))
self.assertEqual(['foo'], p.get_value('foo'))
def test_map_path(self):
p = properties.Property({'Type': 'Map'}, name='test', path='parent')
self.assertEqual('parent.test', p.path)
def test_list_path(self):
p = properties.Property({'Type': 'List'}, name='0', path='parent')
self.assertEqual('parent.0', p.path)
def test_list_maxlength_good(self):
schema = {'Type': 'List',
'MaxLength': '3'}
p = properties.Property(schema)
self.assertEqual(['1', '2'], p.get_value(['1', '2'], True))
def test_list_exceeded_maxlength(self):
schema = {'Type': 'List',
'MaxLength': '2'}
p = properties.Property(schema)
self.assertRaises(exception.StackValidationFailed,
p.get_value, ['1', '2', '3'], True)
def test_list_length_in_range(self):
schema = {'Type': 'List',
'MinLength': '2',
'MaxLength': '4'}
p = properties.Property(schema)
self.assertEqual(['1', '2', '3'], p.get_value(['1', '2', '3'], True))
def test_list_minlength_good(self):
schema = {'Type': 'List',
'MinLength': '3'}
p = properties.Property(schema)
self.assertEqual(['1', '2', '3'], p.get_value(['1', '2', '3'], True))
def test_list_smaller_than_minlength(self):
schema = {'Type': 'List',
'MinLength': '4'}
p = properties.Property(schema)
self.assertRaises(exception.StackValidationFailed,
p.get_value, ['1', '2', '3'], True)
def test_map_list_default(self):
schema = {'Type': 'Map',
'Default': ['foo', 'bar']}
p = properties.Property(schema)
p.schema.allow_conversion = True
self.assertEqual(jsonutils.dumps(['foo', 'bar']),
p.get_value(None))
def test_map_list_default_empty(self):
schema = {'Type': 'Map',
'Default': []}
p = properties.Property(schema)
p.schema.allow_conversion = True
self.assertEqual(jsonutils.dumps([]), p.get_value(None))
def test_map_list_no_default(self):
schema = {'Type': 'Map'}
p = properties.Property(schema)
p.schema.allow_conversion = True
self.assertEqual({}, p.get_value(None))
def test_map_string(self):
p = properties.Property({'Type': 'Map'})
self.assertRaises(TypeError, p.get_value, 'foo')
def test_map_list(self):
p = properties.Property({'Type': 'Map'})
self.assertRaises(TypeError, p.get_value, ['foo'])
def test_map_allow_conversion(self):
p = properties.Property({'Type': 'Map'})
p.schema.allow_conversion = True
self.assertEqual('foo', p.get_value('foo'))
self.assertEqual(jsonutils.dumps(['foo']), p.get_value(['foo']))
def test_map_schema_good(self):
map_schema = {'valid': {'Type': 'Boolean'}}
p = properties.Property({'Type': 'Map', 'Schema': map_schema})
self.assertEqual({'valid': True}, p.get_value({'valid': 'TRUE'}))
def test_map_schema_bad_data(self):
map_schema = {'valid': {'Type': 'Boolean'}}
p = properties.Property({'Type': 'Map', 'Schema': map_schema})
ex = self.assertRaises(exception.StackValidationFailed,
p.get_value, {'valid': 'fish'}, True)
self.assertEqual('Property error: valid: "fish" is not a '
'valid boolean', six.text_type(ex))
def test_map_schema_missing_data(self):
map_schema = {'valid': {'Type': 'Boolean'}}
p = properties.Property({'Type': 'Map', 'Schema': map_schema})
self.assertEqual({'valid': None}, p.get_value({}))
def test_map_schema_missing_required_data(self):
map_schema = {'valid': {'Type': 'Boolean', 'Required': True}}
p = properties.Property({'Type': 'Map', 'Schema': map_schema})
ex = self.assertRaises(exception.StackValidationFailed,
p.get_value, {}, True)
self.assertEqual('Property error: Property valid not assigned',
six.text_type(ex))
def test_list_schema_good(self):
map_schema = {'valid': {'Type': 'Boolean'}}
list_schema = {'Type': 'Map', 'Schema': map_schema}
p = properties.Property({'Type': 'List', 'Schema': list_schema})
self.assertEqual([{'valid': True},
{'valid': False}],
p.get_value([{'valid': 'TRUE'},
{'valid': 'False'}]))
def test_list_schema_bad_data(self):
map_schema = {'valid': {'Type': 'Boolean'}}
list_schema = {'Type': 'Map', 'Schema': map_schema}
p = properties.Property({'Type': 'List', 'Schema': list_schema})
ex = self.assertRaises(exception.StackValidationFailed,
p.get_value,
[{'valid': 'True'}, {'valid': 'fish'}], True)
self.assertEqual('Property error: [1].valid: "fish" is not '
'a valid boolean', six.text_type(ex))
def test_list_schema_int_good(self):
list_schema = {'Type': 'Integer'}
p = properties.Property({'Type': 'List', 'Schema': list_schema})
self.assertEqual([1, 2, 3], p.get_value([1, 2, 3]))
def test_list_schema_int_bad_data(self):
list_schema = {'Type': 'Integer'}
p = properties.Property({'Type': 'List', 'Schema': list_schema})
ex = self.assertRaises(exception.StackValidationFailed,
p.get_value, [42, 'fish'], True)
self.assertEqual("Property error: [1]: Value 'fish' is not "
"an integer", six.text_type(ex))
class PropertiesTest(common.HeatTestCase):
def setUp(self):
super(PropertiesTest, self).setUp()
schema = {
'int': {'Type': 'Integer'},
'string': {'Type': 'String'},
'required_int': {'Type': 'Integer', 'Required': True},
'bad_int': {'Type': 'Integer'},
'missing': {'Type': 'Integer'},
'defaulted': {'Type': 'Integer', 'Default': 1},
'default_override': {'Type': 'Integer', 'Default': 1},
'default_bool': {'Type': 'Boolean', 'Default': 'false'},
}
data = {
'int': 21,
'string': 'foo',
'bad_int': 'foo',
'default_override': 21,
}
def double(d):
return d * 2
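        # 'double' acts as the value resolver: user-supplied values pass
        # through it before coercion, so the tests below see doubled values
        # (e.g. 'int': 21 reads back as 42, 'string': 'foo' as 'foofoo').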
self.props = properties.Properties(schema, data, double, 'wibble')
def test_integer_good(self):
self.assertEqual(42, self.props['int'])
def test_string_good(self):
self.assertEqual('foofoo', self.props['string'])
def test_bool_not_str(self):
self.assertFalse(self.props['default_bool'])
def test_missing_required(self):
self.assertRaises(ValueError, self.props.get, 'required_int')
@mock.patch.object(translation.Translation, 'has_translation')
@mock.patch.object(translation.Translation, 'translate')
def test_required_with_translate_no_value(self, m_t, m_ht):
m_t.return_value = None
m_ht.return_value = True
self.assertRaises(ValueError, self.props.get, 'required_int')
def test_integer_bad(self):
self.assertRaises(ValueError, self.props.get, 'bad_int')
def test_missing(self):
self.assertIsNone(self.props['missing'])
def test_default(self):
self.assertEqual(1, self.props['defaulted'])
def test_default_override(self):
self.assertEqual(42, self.props['default_override'])
def test_get_user_value(self):
self.assertIsNone(self.props.get_user_value('defaulted'))
self.assertEqual(42, self.props.get_user_value('default_override'))
def test_get_user_value_key_error(self):
ex = self.assertRaises(KeyError, self.props.get_user_value, 'foo')
# Note we have to use args here: https://bugs.python.org/issue2651
self.assertEqual('Invalid Property foo',
six.text_type(ex.args[0]))
def test_bad_key(self):
self.assertEqual('wibble', self.props.get('foo', 'wibble'))
def test_key_error(self):
ex = self.assertRaises(KeyError, self.props.__getitem__, 'foo')
# Note we have to use args here: https://bugs.python.org/issue2651
self.assertEqual('Invalid Property foo',
six.text_type(ex.args[0]))
def test_none_string(self):
schema = {'foo': {'Type': 'String'}}
props = properties.Properties(schema, {'foo': None})
self.assertEqual('', props['foo'])
def test_none_integer(self):
schema = {'foo': {'Type': 'Integer'}}
props = properties.Properties(schema, {'foo': None})
self.assertEqual(0, props['foo'])
def test_none_number(self):
schema = {'foo': {'Type': 'Number'}}
props = properties.Properties(schema, {'foo': None})
self.assertEqual(0, props['foo'])
def test_none_boolean(self):
schema = {'foo': {'Type': 'Boolean'}}
props = properties.Properties(schema, {'foo': None})
self.assertIs(False, props['foo'])
def test_none_map(self):
schema = {'foo': {'Type': 'Map'}}
props = properties.Properties(schema, {'foo': None})
self.assertEqual({}, props['foo'])
def test_none_list(self):
schema = {'foo': {'Type': 'List'}}
props = properties.Properties(schema, {'foo': None})
self.assertEqual([], props['foo'])
def test_none_default_string(self):
schema = {'foo': {'Type': 'String', 'Default': 'bar'}}
props = properties.Properties(schema, {'foo': None})
self.assertEqual('bar', props['foo'])
def test_none_default_integer(self):
schema = {'foo': {'Type': 'Integer', 'Default': 42}}
props = properties.Properties(schema, {'foo': None})
self.assertEqual(42, props['foo'])
schema = {'foo': {'Type': 'Integer', 'Default': 0}}
props = properties.Properties(schema, {'foo': None})
self.assertEqual(0, props['foo'])
schema = {'foo': {'Type': 'Integer', 'Default': -273}}
props = properties.Properties(schema, {'foo': None})
self.assertEqual(-273, props['foo'])
def test_none_default_number(self):
schema = {'foo': {'Type': 'Number', 'Default': 42.0}}
props = properties.Properties(schema, {'foo': None})
self.assertEqual(42.0, props['foo'])
schema = {'foo': {'Type': 'Number', 'Default': 0.0}}
props = properties.Properties(schema, {'foo': None})
self.assertEqual(0.0, props['foo'])
schema = {'foo': {'Type': 'Number', 'Default': -273.15}}
props = properties.Properties(schema, {'foo': None})
self.assertEqual(-273.15, props['foo'])
def test_none_default_boolean(self):
schema = {'foo': {'Type': 'Boolean', 'Default': True}}
props = properties.Properties(schema, {'foo': None})
self.assertIs(True, props['foo'])
def test_none_default_map(self):
schema = {'foo': {'Type': 'Map', 'Default': {'bar': 'baz'}}}
props = properties.Properties(schema, {'foo': None})
self.assertEqual({'bar': 'baz'}, props['foo'])
def test_none_default_list(self):
schema = {'foo': {'Type': 'List', 'Default': ['one', 'two']}}
props = properties.Properties(schema, {'foo': None})
self.assertEqual(['one', 'two'], props['foo'])
def test_resolve_returns_none(self):
schema = {'foo': {'Type': 'String', "MinLength": "5"}}
def test_resolver(prop):
return None
self.patchobject(properties.Properties,
'_find_deps_any_in_init').return_value = True
props = properties.Properties(schema,
{'foo': 'get_attr: [db, value]'},
test_resolver)
try:
self.assertIsNone(props.validate())
except exception.StackValidationFailed:
self.fail("Constraints should not have been evaluated.")
def test_resolve_ref_with_constraints(self):
# create test custom constraint
class IncorrectConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (Exception,)
def validate_with_client(self, client, value):
raise Exception("Test exception")
class TestCustomConstraint(constraints.CustomConstraint):
@property
def custom_constraint(self):
return IncorrectConstraint()
# create schema with test constraint
schema = {
'foo': properties.Schema(
properties.Schema.STRING,
constraints=[TestCustomConstraint('test_constraint')]
)
}
# define parameters for function
def test_resolver(prop):
return 'None'
class rsrc(object):
action = INIT = "INIT"
class DummyStack(dict):
pass
stack = DummyStack(another_res=rsrc())
# define properties with function and constraint
props = properties.Properties(
schema,
{'foo': hot_funcs.GetResource(
stack, 'get_resource', 'another_res')},
test_resolver)
try:
self.assertIsNone(props.validate())
except exception.StackValidationFailed:
self.fail("Constraints should not have been evaluated.")
def test_schema_from_params(self):
params_snippet = {
"DBUsername": {
"Type": "String",
"Description": "The WordPress database admin account username",
"Default": "admin",
"MinLength": "1",
"AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*",
"NoEcho": "true",
"MaxLength": "16",
"ConstraintDescription": ("must begin with a letter and "
"contain only alphanumeric "
"characters.")
},
"KeyName": {
"Type": "String",
"Description": ("Name of an existing EC2 KeyPair to enable "
"SSH access to the instances")
},
"LinuxDistribution": {
"Default": "F17",
"Type": "String",
"Description": "Distribution of choice",
"AllowedValues": [
"F18",
"F17",
"U10",
"RHEL-6.1",
"RHEL-6.2",
"RHEL-6.3"
]
},
"DBPassword": {
"Type": "String",
"Description": "The WordPress database admin account password",
"Default": "admin",
"MinLength": "1",
"AllowedPattern": "[a-zA-Z0-9]*",
"NoEcho": "true",
"MaxLength": "41",
"ConstraintDescription": ("must contain only alphanumeric "
"characters.")
},
"DBName": {
"AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*",
"Type": "String",
"Description": "The WordPress database name",
"MaxLength": "64",
"Default": "wordpress",
"MinLength": "1",
"ConstraintDescription": ("must begin with a letter and "
"contain only alphanumeric "
"characters.")
},
"InstanceType": {
"Default": "m1.large",
"Type": "String",
"ConstraintDescription": "must be a valid EC2 instance type.",
"Description": "WebServer EC2 instance type",
"AllowedValues": [
"t1.micro",
"m1.small",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge"
]
},
"DBRootPassword": {
"Type": "String",
"Description": "Root password for MySQL",
"Default": "admin",
"MinLength": "1",
"AllowedPattern": "[a-zA-Z0-9]*",
"NoEcho": "true",
"MaxLength": "41",
"ConstraintDescription": ("must contain only alphanumeric "
"characters.")
}
}
expected = {
"DBUsername": {
"type": "string",
"description": "The WordPress database admin account username",
"required": False,
'update_allowed': True,
'immutable': False,
"constraints": [
{"length": {"min": 1, "max": 16},
"description": "must begin with a letter and contain "
"only alphanumeric characters."},
{"allowed_pattern": "[a-zA-Z][a-zA-Z0-9]*",
"description": "must begin with a letter and contain "
"only alphanumeric characters."},
]
},
"LinuxDistribution": {
"type": "string",
"description": "Distribution of choice",
"required": False,
'update_allowed': True,
'immutable': False,
"constraints": [
{"allowed_values": ["F18", "F17", "U10",
"RHEL-6.1", "RHEL-6.2", "RHEL-6.3"]}
]
},
"InstanceType": {
"type": "string",
"description": "WebServer EC2 instance type",
"required": False,
'update_allowed': True,
'immutable': False,
"constraints": [
{"allowed_values": ["t1.micro",
"m1.small",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge"],
"description": "must be a valid EC2 instance type."},
]
},
"DBRootPassword": {
"type": "string",
"description": "Root password for MySQL",
"required": False,
'update_allowed': True,
'immutable': False,
"constraints": [
{"length": {"min": 1, "max": 41},
"description": "must contain only alphanumeric "
"characters."},
{"allowed_pattern": "[a-zA-Z0-9]*",
"description": "must contain only alphanumeric "
"characters."},
]
},
"KeyName": {
"type": "string",
"description": ("Name of an existing EC2 KeyPair to enable "
"SSH access to the instances"),
"required": True,
'update_allowed': True,
'immutable': False,
},
"DBPassword": {
"type": "string",
"description": "The WordPress database admin account password",
"required": False,
'update_allowed': True,
'immutable': False,
"constraints": [
{"length": {"min": 1, "max": 41},
"description": "must contain only alphanumeric "
"characters."},
{"allowed_pattern": "[a-zA-Z0-9]*",
"description": "must contain only alphanumeric "
"characters."},
]
},
"DBName": {
"type": "string",
"description": "The WordPress database name",
"required": False,
'update_allowed': True,
'immutable': False,
"constraints": [
{"length": {"min": 1, "max": 64},
"description": "must begin with a letter and contain "
"only alphanumeric characters."},
{"allowed_pattern": "[a-zA-Z][a-zA-Z0-9]*",
"description": "must begin with a letter and contain "
"only alphanumeric characters."},
]
},
}
params = dict((n, parameters.Schema.from_dict(n, s)) for n, s
in params_snippet.items())
props_schemata = properties.Properties.schema_from_params(params)
self.assertEqual(expected,
dict((n, dict(s)) for n, s in props_schemata.items()))
def test_schema_from_hot_params(self):
params_snippet = {
"KeyName": {
"type": "string",
"description": ("Name of an existing EC2 KeyPair to enable "
"SSH access to the instances")
},
"InstanceType": {
"default": "m1.large",
"type": "string",
"description": "WebServer EC2 instance type",
"constraints": [
{"allowed_values": ["t1.micro", "m1.small", "m1.large",
"m1.xlarge", "m2.xlarge", "m2.2xlarge",
"m2.4xlarge", "c1.medium", "c1.xlarge",
"cc1.4xlarge"],
"description": "Must be a valid EC2 instance type."}
]
},
"LinuxDistribution": {
"default": "F17",
"type": "string",
"description": "Distribution of choice",
"constraints": [
{"allowed_values": ["F18", "F17", "U10", "RHEL-6.1",
"RHEL-6.2", "RHEL-6.3"],
"description": "Must be a valid Linux distribution"}
]
},
"DBName": {
"type": "string",
"description": "The WordPress database name",
"default": "wordpress",
"constraints": [
{"length": {"min": 1, "max": 64},
"description": "Length must be between 1 and 64"},
{"allowed_pattern": "[a-zA-Z][a-zA-Z0-9]*",
"description": ("Must begin with a letter and contain "
"only alphanumeric characters.")}
]
},
"DBUsername": {
"type": "string",
"description": "The WordPress database admin account username",
"default": "admin",
"hidden": "true",
"constraints": [
{"length": {"min": 1, "max": 16},
"description": "Length must be between 1 and 16"},
{"allowed_pattern": "[a-zA-Z][a-zA-Z0-9]*",
"description": ("Must begin with a letter and only "
"contain alphanumeric characters")}
]
},
"DBPassword": {
"type": "string",
"description": "The WordPress database admin account password",
"default": "admin",
"hidden": "true",
"constraints": [
{"length": {"min": 1, "max": 41},
"description": "Length must be between 1 and 41"},
{"allowed_pattern": "[a-zA-Z0-9]*",
"description": ("Must contain only alphanumeric "
"characters")}
]
},
"DBRootPassword": {
"type": "string",
"description": "Root password for MySQL",
"default": "admin",
"hidden": "true",
"constraints": [
{"length": {"min": 1, "max": 41},
"description": "Length must be between 1 and 41"},
{"allowed_pattern": "[a-zA-Z0-9]*",
"description": ("Must contain only alphanumeric "
"characters")}
]
}
}
expected = {
"KeyName": {
"type": "string",
"description": ("Name of an existing EC2 KeyPair to enable "
"SSH access to the instances"),
"required": True,
'update_allowed': True,
'immutable': False,
},
"InstanceType": {
"type": "string",
"description": "WebServer EC2 instance type",
"required": False,
'update_allowed': True,
'immutable': False,
"constraints": [
{"allowed_values": ["t1.micro", "m1.small", "m1.large",
"m1.xlarge", "m2.xlarge", "m2.2xlarge",
"m2.4xlarge", "c1.medium", "c1.xlarge",
"cc1.4xlarge"],
"description": "Must be a valid EC2 instance type."},
]
},
"LinuxDistribution": {
"type": "string",
"description": "Distribution of choice",
"required": False,
'update_allowed': True,
'immutable': False,
"constraints": [
{"allowed_values": ["F18", "F17", "U10",
"RHEL-6.1", "RHEL-6.2", "RHEL-6.3"],
"description": "Must be a valid Linux distribution"}
]
},
"DBName": {
"type": "string",
"description": "The WordPress database name",
"required": False,
'update_allowed': True,
'immutable': False,
"constraints": [
{"length": {"min": 1, "max": 64},
"description": "Length must be between 1 and 64"},
{"allowed_pattern": "[a-zA-Z][a-zA-Z0-9]*",
"description": ("Must begin with a letter and contain "
"only alphanumeric characters.")},
]
},
"DBUsername": {
"type": "string",
"description": "The WordPress database admin account username",
"required": False,
'update_allowed': True,
'immutable': False,
"constraints": [
{"length": {"min": 1, "max": 16},
"description": "Length must be between 1 and 16"},
{"allowed_pattern": "[a-zA-Z][a-zA-Z0-9]*",
"description": ("Must begin with a letter and only "
"contain alphanumeric characters")},
]
},
"DBPassword": {
"type": "string",
"description": "The WordPress database admin account password",
"required": False,
'update_allowed': True,
'immutable': False,
"constraints": [
{"length": {"min": 1, "max": 41},
"description": "Length must be between 1 and 41"},
{"allowed_pattern": "[a-zA-Z0-9]*",
"description": ("Must contain only alphanumeric "
"characters")},
]
},
"DBRootPassword": {
"type": "string",
"description": "Root password for MySQL",
"required": False,
'update_allowed': True,
'immutable': False,
"constraints": [
{"length": {"min": 1, "max": 41},
"description": "Length must be between 1 and 41"},
{"allowed_pattern": "[a-zA-Z0-9]*",
"description": ("Must contain only alphanumeric "
"characters")},
]
}
}
params = dict((n, hot_param.HOTParamSchema.from_dict(n, s)) for n, s
in params_snippet.items())
props_schemata = properties.Properties.schema_from_params(params)
self.assertEqual(expected,
dict((n, dict(s)) for n, s in props_schemata.items()))
def test_compare_same(self):
schema = {'foo': {'Type': 'Integer'}}
props_a = properties.Properties(schema, {'foo': 1})
props_b = properties.Properties(schema, {'foo': 1})
self.assertFalse(props_a != props_b)
def test_compare_different(self):
schema = {'foo': {'Type': 'Integer'}}
props_a = properties.Properties(schema, {'foo': 0})
props_b = properties.Properties(schema, {'foo': 1})
self.assertTrue(props_a != props_b)
class PropertiesValidationTest(common.HeatTestCase):
def test_required(self):
schema = {'foo': {'Type': 'String', 'Required': True}}
props = properties.Properties(schema, {'foo': 'bar'})
self.assertIsNone(props.validate())
def test_missing_required(self):
schema = {'foo': {'Type': 'String', 'Required': True}}
props = properties.Properties(schema, {})
self.assertRaises(exception.StackValidationFailed, props.validate)
def test_missing_unimplemented(self):
schema = {'foo': {'Type': 'String', 'Implemented': False}}
props = properties.Properties(schema, {})
self.assertIsNone(props.validate())
def test_present_unimplemented(self):
schema = {'foo': {'Type': 'String', 'Implemented': False}}
props = properties.Properties(schema, {'foo': 'bar'})
self.assertRaises(exception.StackValidationFailed, props.validate)
def test_missing(self):
schema = {'foo': {'Type': 'String'}}
props = properties.Properties(schema, {})
self.assertIsNone(props.validate())
def test_unknown_typo(self):
schema = {'foo': {'Type': 'String'}}
props = properties.Properties(schema, {'food': 42})
self.assertRaises(exception.StackValidationFailed, props.validate)
def test_list_instead_string(self):
schema = {'foo': {'Type': 'String'}}
props = properties.Properties(schema, {'foo': ['foo', 'bar']})
ex = self.assertRaises(exception.StackValidationFailed, props.validate)
self.assertIn('Property error: foo: Value must be a string',
six.text_type(ex))
def test_dict_instead_string(self):
schema = {'foo': {'Type': 'String'}}
props = properties.Properties(schema, {'foo': {'foo': 'bar'}})
ex = self.assertRaises(exception.StackValidationFailed, props.validate)
self.assertIn('Property error: foo: Value must be a string',
six.text_type(ex))
def test_none_string(self):
schema = {'foo': {'Type': 'String'}}
props = properties.Properties(schema, {'foo': None})
self.assertIsNone(props.validate())
def test_none_integer(self):
schema = {'foo': {'Type': 'Integer'}}
props = properties.Properties(schema, {'foo': None})
self.assertIsNone(props.validate())
def test_none_number(self):
schema = {'foo': {'Type': 'Number'}}
props = properties.Properties(schema, {'foo': None})
self.assertIsNone(props.validate())
def test_none_boolean(self):
schema = {'foo': {'Type': 'Boolean'}}
props = properties.Properties(schema, {'foo': None})
self.assertIsNone(props.validate())
def test_none_map(self):
schema = {'foo': {'Type': 'Map'}}
props = properties.Properties(schema, {'foo': None})
self.assertIsNone(props.validate())
def test_none_list(self):
schema = {'foo': {'Type': 'List'}}
props = properties.Properties(schema, {'foo': None})
self.assertIsNone(props.validate())
def test_none_default_string(self):
schema = {'foo': {'Type': 'String', 'Default': 'bar'}}
props = properties.Properties(schema, {'foo': None})
self.assertIsNone(props.validate())
def test_none_default_integer(self):
schema = {'foo': {'Type': 'Integer', 'Default': 42}}
props = properties.Properties(schema, {'foo': None})
self.assertIsNone(props.validate())
def test_none_default_number(self):
schema = {'foo': {'Type': 'Number', 'Default': 42.0}}
props = properties.Properties(schema, {'foo': None})
self.assertIsNone(props.validate())
def test_none_default_boolean(self):
schema = {'foo': {'Type': 'Boolean', 'Default': True}}
props = properties.Properties(schema, {'foo': None})
self.assertIsNone(props.validate())
def test_none_default_map(self):
schema = {'foo': {'Type': 'Map', 'Default': {'bar': 'baz'}}}
props = properties.Properties(schema, {'foo': None})
self.assertIsNone(props.validate())
def test_none_default_list(self):
schema = {'foo': {'Type': 'List', 'Default': ['one', 'two']}}
props = properties.Properties(schema, {'foo': None})
self.assertIsNone(props.validate())
def test_schema_to_template_nested_map_map_schema(self):
nested_schema = {'Key': {'Type': 'String',
'Required': True},
'Value': {'Type': 'String',
'Default': 'fewaf'}}
schema = {'foo': {'Type': 'Map', 'Schema': nested_schema}}
prop_expected = {'foo': {'Ref': 'foo'}}
param_expected = {'foo': {'Type': 'Json'}}
(parameters,
props) = properties.Properties.schema_to_parameters_and_properties(
schema)
self.assertEqual(param_expected, parameters)
self.assertEqual(prop_expected, props)
def test_schema_to_template_nested_map_list_map_schema(self):
key_schema = {'bar': {'Type': 'Number'}}
nested_schema = {'Key': {'Type': 'Map', 'Schema': key_schema},
'Value': {'Type': 'String',
'Required': True}}
schema = {'foo': {'Type': 'List', 'Schema': {'Type': 'Map',
'Schema': nested_schema}}}
prop_expected = {'foo': {'Fn::Split': [",", {'Ref': 'foo'}]}}
param_expected = {'foo': {'Type': 'CommaDelimitedList'}}
(parameters,
props) = properties.Properties.schema_to_parameters_and_properties(
schema)
self.assertEqual(param_expected, parameters)
self.assertEqual(prop_expected, props)
def test_schema_object_to_template_nested_map_list_map_schema(self):
key_schema = {'bar': properties.Schema(properties.Schema.NUMBER)}
nested_schema = {
'Key': properties.Schema(properties.Schema.MAP, schema=key_schema),
'Value': properties.Schema(properties.Schema.STRING, required=True)
}
schema = {
'foo': properties.Schema(properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema=nested_schema))
}
prop_expected = {'foo': {'Fn::Split': [",", {'Ref': 'foo'}]}}
param_expected = {'foo': {'Type': 'CommaDelimitedList'}}
(parameters,
props) = properties.Properties.schema_to_parameters_and_properties(
schema)
self.assertEqual(param_expected, parameters)
self.assertEqual(prop_expected, props)
def test_schema_invalid_parameters_stripped(self):
schema = {'foo': {'Type': 'String',
'Required': True,
'Implemented': True}}
prop_expected = {'foo': {'Ref': 'foo'}}
param_expected = {'foo': {'Type': 'String'}}
(parameters,
props) = properties.Properties.schema_to_parameters_and_properties(
schema)
self.assertEqual(param_expected, parameters)
self.assertEqual(prop_expected, props)
def test_schema_support_status(self):
schema = {
'foo_sup': properties.Schema(
properties.Schema.STRING,
default='foo'
),
'bar_dep': properties.Schema(
properties.Schema.STRING,
default='bar',
support_status=support.SupportStatus(
support.DEPRECATED,
'Do not use this ever')
)
}
props = properties.Properties(schema, {})
self.assertEqual(support.SUPPORTED,
props.props['foo_sup'].support_status().status)
self.assertEqual(support.DEPRECATED,
props.props['bar_dep'].support_status().status)
self.assertEqual('Do not use this ever',
props.props['bar_dep'].support_status().message)
def test_nested_properties_schema_invalid_property_in_list(self):
child_schema = {'Key': {'Type': 'String',
'Required': True},
'Value': {'Type': 'Boolean',
'Default': True}}
list_schema = {'Type': 'Map', 'Schema': child_schema}
schema = {'foo': {'Type': 'List', 'Schema': list_schema}}
valid_data = {'foo': [{'Key': 'Test'}]}
props = properties.Properties(schema, valid_data)
self.assertIsNone(props.validate())
invalid_data = {'foo': [{'Key': 'Test', 'bar': 'baz'}]}
props = properties.Properties(schema, invalid_data)
ex = self.assertRaises(exception.StackValidationFailed,
props.validate)
self.assertEqual('Property error: foo[0]: Unknown Property bar',
six.text_type(ex))
def test_nested_properties_schema_invalid_property_in_map(self):
child_schema = {'Key': {'Type': 'String',
'Required': True},
'Value': {'Type': 'Boolean',
'Default': True}}
map_schema = {'boo': {'Type': 'Map', 'Schema': child_schema}}
schema = {'foo': {'Type': 'Map', 'Schema': map_schema}}
valid_data = {'foo': {'boo': {'Key': 'Test'}}}
props = properties.Properties(schema, valid_data)
self.assertIsNone(props.validate())
invalid_data = {'foo': {'boo': {'Key': 'Test', 'bar': 'baz'}}}
props = properties.Properties(schema, invalid_data)
ex = self.assertRaises(exception.StackValidationFailed,
props.validate)
self.assertEqual('Property error: foo.boo: Unknown Property bar',
six.text_type(ex))
def test_more_nested_properties_schema_invalid_property_in_list(self):
nested_child_schema = {'Key': {'Type': 'String',
'Required': True}}
child_schema = {'doo': {'Type': 'Map', 'Schema': nested_child_schema}}
list_schema = {'Type': 'Map', 'Schema': child_schema}
schema = {'foo': {'Type': 'List', 'Schema': list_schema}}
valid_data = {'foo': [{'doo': {'Key': 'Test'}}]}
props = properties.Properties(schema, valid_data)
self.assertIsNone(props.validate())
invalid_data = {'foo': [{'doo': {'Key': 'Test', 'bar': 'baz'}}]}
props = properties.Properties(schema, invalid_data)
ex = self.assertRaises(exception.StackValidationFailed,
props.validate)
self.assertEqual('Property error: foo[0].doo: Unknown Property bar',
six.text_type(ex))
def test_more_nested_properties_schema_invalid_property_in_map(self):
nested_child_schema = {'Key': {'Type': 'String',
'Required': True}}
child_schema = {'doo': {'Type': 'Map', 'Schema': nested_child_schema}}
map_schema = {'boo': {'Type': 'Map', 'Schema': child_schema}}
schema = {'foo': {'Type': 'Map', 'Schema': map_schema}}
valid_data = {'foo': {'boo': {'doo': {'Key': 'Test'}}}}
props = properties.Properties(schema, valid_data)
self.assertIsNone(props.validate())
invalid_data = {'foo': {'boo': {'doo': {'Key': 'Test', 'bar': 'baz'}}}}
props = properties.Properties(schema, invalid_data)
ex = self.assertRaises(exception.StackValidationFailed,
props.validate)
self.assertEqual('Property error: foo.boo.doo: Unknown Property bar',
six.text_type(ex))
def test_schema_to_template_empty_schema(self):
schema = {}
(parameters,
props) = properties.Properties.schema_to_parameters_and_properties(
schema)
self.assertEqual({}, parameters)
self.assertEqual({}, props)
def test_update_allowed_and_immutable_contradict(self):
self.assertRaises(exception.InvalidSchemaError,
properties.Schema,
properties.Schema.STRING,
update_allowed=True,
immutable=True)
|
{
"content_hash": "46c2d2278ae09744397851eacce6bf17",
"timestamp": "",
"source": "github",
"line_count": 1923,
"max_line_length": 79,
"avg_line_length": 39.269890795631824,
"alnum_prop": 0.5152020763811642,
"repo_name": "noironetworks/heat",
"id": "cf5dd14ece1e9575fc0f90dd07ac082b3242c2ad",
"size": "76091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/test_properties.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8804896"
},
{
"name": "Shell",
"bytes": "64533"
}
],
"symlink_target": ""
}
|
import json
import numpy as np
def get_json_data(filename):
with open(filename) as data_file:
return json.load(data_file)
class Batch:
    def __init__(self, data, batch_size=None):
        self.data = np.array(data)
        self.batch_size = batch_size
        self.shuffle().split().batch()
def shuffle(self):
np.random.shuffle(self.data)
return self
    def split(self, train_percent=.6, validate_percent=.2, seed=None):
        # Note: 'seed' is currently unused; shuffling happens in shuffle().
        m = len(self.data)
        train_end = int(train_percent * m)
        validate_end = int(validate_percent * m) + train_end
        # np.split also returns a trailing empty segment; keep the first three.
        split = np.split(self.data, [train_end, validate_end, m])
        self.train, self.validate, self.test = split[0], split[1], split[2]
        return self
    def batch(self):
        # Note: batch_size is treated as the *number* of batches, since
        # np.split() interprets an integer argument as a section count.
        if self.batch_size is None:
            # Guard the default: fall back to a single batch.
            self.batch_size = 1
        length = len(self.train)
        rest = length % self.batch_size
        if rest != 0:
            # Split the evenly divisible prefix into equal batches, then
            # distribute the leftover rows one per batch across the front.
            mark = length - rest
            left = np.split(self.train[:mark], self.batch_size)
            right = np.array(self.train[mark:])
            self.batches = left
            for i in range(len(right)):
                self.batches[i] = np.append(left[i], [right[i]], axis=0)
        else:
            self.batches = np.split(self.train, self.batch_size)
        return self
data = get_json_data('pareto.json')
batch = Batch(data, 11)
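# Usage sketch (assumes 'pareto.json' holds a 2-D JSON array of rows):
# batch.batches now holds 11 numpy arrays covering the training split, e.g.
#     for mini_batch in batch.batches:
#         process(mini_batch)  # 'process' is a hypothetical consumer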
|
{
"content_hash": "557978c26736371dc8e0afb3e844ff50",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 76,
"avg_line_length": 31.625,
"alnum_prop": 0.5764163372859025,
"repo_name": "ansteh/strapping",
"id": "116092105af4f37b7537c61048b08f027302085e",
"size": "1518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/batch-by-split.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "224"
},
{
"name": "HTML",
"bytes": "6160"
},
{
"name": "JavaScript",
"bytes": "69936"
},
{
"name": "Python",
"bytes": "4639"
}
],
"symlink_target": ""
}
|
"""Implementation of the review subsystem."""
__author__ = [
'johncox@google.com (John Cox)',
]
import datetime
import random
from models import counters
from models import custom_modules
from models import entities
from models import student_work
from models import utils
import models.review
from modules.review import domain
from modules.review import peer
from google.appengine.ext import db
# In-process increment-only performance counters.
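# Each counter is bumped via its inc() method at the matching code path,
# e.g. COUNTER_ADD_REVIEWER_START.inc() at the top of Manager.add_reviewer().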
COUNTER_ADD_REVIEWER_BAD_SUMMARY_KEY = counters.PerfCounter(
'gcb-pr-add-reviewer-bad-summary-key',
'number of times add_reviewer() failed due to a bad review summary key')
COUNTER_ADD_REVIEWER_SET_ASSIGNER_KIND_HUMAN = counters.PerfCounter(
'gcb-pr-add-reviewer-set-assigner-kind-human',
("number of times add_reviewer() changed an existing step's assigner_kind "
'to ASSIGNER_KIND_HUMAN'))
COUNTER_ADD_REVIEWER_CREATE_REVIEW_STEP = counters.PerfCounter(
'gcb-pr-add-reviewer-create-review-step',
'number of times add_reviewer() created a new review step')
COUNTER_ADD_REVIEWER_EXPIRED_STEP_REASSIGNED = counters.PerfCounter(
'gcb-pr-add-reviewer-expired-step-reassigned',
'number of times add_reviewer() reassigned an expired step')
COUNTER_ADD_REVIEWER_FAILED = counters.PerfCounter(
'gcb-pr-add-reviewer-failed',
'number of times add_reviewer() had a fatal error')
COUNTER_ADD_REVIEWER_REMOVED_STEP_UNREMOVED = counters.PerfCounter(
'gcb-pr-add-reviewer-removed-step-unremoved',
'number of times add_reviewer() unremoved a removed review step')
COUNTER_ADD_REVIEWER_START = counters.PerfCounter(
'gcb-pr-add-reviewer-start',
'number of times add_reviewer() has started processing')
COUNTER_ADD_REVIEWER_SUCCESS = counters.PerfCounter(
'gcb-pr-add-reviewer-success',
'number of times add_reviewer() completed successfully')
COUNTER_ADD_REVIEWER_UNREMOVED_STEP_FAILED = counters.PerfCounter(
'gcb-pr-add-reviewer-unremoved-step-failed',
('number of times add_reviewer() failed on an unremoved step with a fatal '
'error'))
COUNTER_ASSIGNMENT_CANDIDATES_QUERY_RESULTS_RETURNED = counters.PerfCounter(
'gcb-pr-assignment-candidates-query-results-returned',
('number of results returned by the query returned by '
'get_assignment_candidates_query()'))
COUNTER_DELETE_REVIEWER_ALREADY_REMOVED = counters.PerfCounter(
'gcb-pr-review-delete-reviewer-already-removed',
('number of times delete_reviewer() called on review step with removed '
'already True'))
COUNTER_DELETE_REVIEWER_FAILED = counters.PerfCounter(
'gcb-pr-review-delete-reviewer-failed',
'number of times delete_reviewer() had a fatal error')
COUNTER_DELETE_REVIEWER_START = counters.PerfCounter(
'gcb-pr-review-delete-reviewer-start',
'number of times delete_reviewer() has started processing')
COUNTER_DELETE_REVIEWER_STEP_MISS = counters.PerfCounter(
'gcb-pr-review-delete-reviewer-step-miss',
'number of times delete_reviewer() found a missing review step')
COUNTER_DELETE_REVIEWER_SUCCESS = counters.PerfCounter(
'gcb-pr-review-delete-reviewer-success',
'number of times delete_reviewer() completed successfully')
COUNTER_DELETE_REVIEWER_SUMMARY_MISS = counters.PerfCounter(
'gcb-pr-review-delete-reviewer-summary-miss',
'number of times delete_reviewer() found a missing review summary')
COUNTER_EXPIRE_REVIEW_CANNOT_TRANSITION = counters.PerfCounter(
'gcb-pr-expire-review-cannot-transition',
('number of times expire_review() was called on a review step that could '
'not be transitioned to REVIEW_STATE_EXPIRED'))
COUNTER_EXPIRE_REVIEW_FAILED = counters.PerfCounter(
'gcb-pr-expire-review-failed',
'number of times expire_review() had a fatal error')
COUNTER_EXPIRE_REVIEW_START = counters.PerfCounter(
'gcb-pr-expire-review-start',
'number of times expire_review() has started processing')
COUNTER_EXPIRE_REVIEW_STEP_MISS = counters.PerfCounter(
'gcb-pr-expire-review-step-miss',
'number of times expire_review() found a missing review step')
COUNTER_EXPIRE_REVIEW_SUCCESS = counters.PerfCounter(
'gcb-pr-expire-review-success',
'number of times expire_review() completed successfully')
COUNTER_EXPIRE_REVIEW_SUMMARY_MISS = counters.PerfCounter(
'gcb-pr-expire-review-summary-miss',
'number of times expire_review() found a missing review summary')
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_EXPIRE = counters.PerfCounter(
'gcb-pr-expire-old-reviews-for-unit-expire',
'number of records expire_old_reviews_for_unit() has expired')
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SKIP = counters.PerfCounter(
'gcb-pr-expire-old-reviews-for-unit-skip',
('number of times expire_old_reviews_for_unit() skipped a record due to an '
'error'))
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_START = counters.PerfCounter(
'gcb-pr-expire-old-reviews-for-unit-start',
'number of times expire_old_reviews_for_unit() has started processing')
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SUCCESS = counters.PerfCounter(
'gcb-pr-expire-old-reviews-for-unit-success',
'number of times expire_old_reviews_for_unit() completed successfully')
COUNTER_EXPIRY_QUERY_KEYS_RETURNED = counters.PerfCounter(
'gcb-pr-expiry-query-keys-returned',
'number of keys returned by the query returned by get_expiry_query()')
COUNTER_GET_NEW_REVIEW_ALREADY_ASSIGNED = counters.PerfCounter(
'gcb-pr-get-new-review-already-assigned',
('number of times get_new_review() rejected a candidate because the '
'reviewer is already assigned to or has already completed it'))
COUNTER_GET_NEW_REVIEW_ASSIGNMENT_ATTEMPTED = counters.PerfCounter(
'gcb-pr-get-new-review-assignment-attempted',
'number of times get_new_review() attempted to assign a candidate')
COUNTER_GET_NEW_REVIEW_CANNOT_UNREMOVE_COMPLETED = counters.PerfCounter(
'gcb-pr-get-new-review-cannot-unremove-completed',
('number of times get_new_review() failed because the reviewer already had '
'a completed, removed review step'))
COUNTER_GET_NEW_REVIEW_FAILED = counters.PerfCounter(
'gcb-pr-get-new-review-failed',
'number of times get_new_review() had a fatal error')
COUNTER_GET_NEW_REVIEW_NOT_ASSIGNABLE = counters.PerfCounter(
'gcb-pr-get-new-review-none-assignable',
'number of times get_new_review() failed to find an assignable review')
COUNTER_GET_NEW_REVIEW_REASSIGN_EXISTING = counters.PerfCounter(
'gcb-pr-get-new-review-reassign-existing',
('number of times get_new_review() unremoved and reassigned an existing '
'review step'))
COUNTER_GET_NEW_REVIEW_START = counters.PerfCounter(
'gcb-pr-get-new-review-start',
'number of times get_new_review() has started processing')
COUNTER_GET_NEW_REVIEW_SUCCESS = counters.PerfCounter(
'gcb-pr-get-new-review-success',
'number of times get_new_review() found and assigned a new review')
COUNTER_GET_NEW_REVIEW_SUMMARY_CHANGED = counters.PerfCounter(
'gcb-pr-get-new-review-summary-changed',
('number of times get_new_review() rejected a candidate because the review '
'summary changed during processing'))
COUNTER_GET_REVIEW_STEP_KEYS_BY_KEYS_RETURNED = counters.PerfCounter(
'gcb-pr-get-review-step-keys-by-keys-returned',
'number of keys get_review_step_keys_by() returned')
COUNTER_GET_REVIEW_STEP_KEYS_BY_FAILED = counters.PerfCounter(
'gcb-pr-get-review-step-keys-by-failed',
'number of times get_review_step_keys_by() had a fatal error')
COUNTER_GET_REVIEW_STEP_KEYS_BY_START = counters.PerfCounter(
'gcb-pr-get-review-step-keys-by-start',
'number of times get_review_step_keys_by() started processing')
COUNTER_GET_REVIEW_STEP_KEYS_BY_SUCCESS = counters.PerfCounter(
'gcb-pr-get-review-step-keys-by-success',
'number of times get_review_step_keys_by() completed successfully')
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_FAILED = counters.PerfCounter(
'gcb-pr-get-submission-and-review-step-keys-failed',
'number of times get_submission_and_review_step_keys() had a fatal error')
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_RETURNED = counters.PerfCounter(
'gcb-pr-get-submission-and-review-step-keys-keys-returned',
'number of keys get_submission_and_review_step_keys() returned')
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_START = counters.PerfCounter(
'gcb-pr-get-submission-and-review-step-keys-start',
('number of times get_submission_and_review_step_keys() has begun '
'processing'))
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUBMISSION_MISS = (
counters.PerfCounter(
'gcb-pr-get-submission-and-review-step-keys-submission-miss',
('number of times get_submission_and_review_step_keys() failed to find '
'a submission_key')))
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUCCESS = counters.PerfCounter(
    'gcb-pr-get-submission-and-review-step-keys-success',
    ('number of times get_submission_and_review_step_keys() completed '
'successfully'))
COUNTER_START_REVIEW_PROCESS_FOR_ALREADY_STARTED = counters.PerfCounter(
'gcb-pr-start-review-process-for-already-started',
('number of times start_review_process_for() called when review already '
'started'))
COUNTER_START_REVIEW_PROCESS_FOR_FAILED = counters.PerfCounter(
'gcb-pr-start-review-process-for-failed',
'number of times start_review_process_for() had a fatal error')
COUNTER_START_REVIEW_PROCESS_FOR_START = counters.PerfCounter(
'gcb-pr-start-review-process-for-start',
'number of times start_review_process_for() has started processing')
COUNTER_START_REVIEW_PROCESS_FOR_SUCCESS = counters.PerfCounter(
'gcb-pr-start-review-process-for-success',
'number of times start_review_process_for() completed successfully')
COUNTER_WRITE_REVIEW_COMPLETED_ASSIGNED_STEP = counters.PerfCounter(
'gcb-pr-write-review-completed-assigned-step',
'number of times write_review() transitioned an assigned step to completed')
COUNTER_WRITE_REVIEW_COMPLETED_EXPIRED_STEP = counters.PerfCounter(
'gcb-pr-write-review-completed-expired-step',
'number of times write_review() transitioned an expired step to completed')
COUNTER_WRITE_REVIEW_CREATED_NEW_REVIEW = counters.PerfCounter(
'gcb-pr-write-review-created-new-review',
'number of times write_review() created a new review')
COUNTER_WRITE_REVIEW_FAILED = counters.PerfCounter(
'gcb-pr-write-review-failed',
'number of times write_review() had a fatal error')
COUNTER_WRITE_REVIEW_REVIEW_MISS = counters.PerfCounter(
'gcb-pr-write-review-review-miss',
'number of times write_review() found a missing review')
COUNTER_WRITE_REVIEW_START = counters.PerfCounter(
'gcb-pr-write-review-start',
'number of times write_review() started processing')
COUNTER_WRITE_REVIEW_STEP_MISS = counters.PerfCounter(
'gcb-pr-write-review-step-miss',
'number of times write_review() found a missing review step')
COUNTER_WRITE_REVIEW_SUMMARY_MISS = counters.PerfCounter(
'gcb-pr-write-review-summary-miss',
'number of times write_review() found a missing review summary')
COUNTER_WRITE_REVIEW_SUCCESS = counters.PerfCounter(
'gcb-pr-write-review-success',
'number of times write_review() completed successfully')
COUNTER_WRITE_REVIEW_UPDATED_EXISTING_REVIEW = counters.PerfCounter(
'gcb-pr-write-review-updated-existing-review',
'number of times write_review() updated an existing review')
# Number of entities to fetch when querying for all review steps that meet
# given criteria. Ideally we'd cursor through results rather than setting a
# ceiling, but for now let's allow as many removed results as unremoved.
_REVIEW_STEP_QUERY_LIMIT = 2 * domain.MAX_UNREMOVED_REVIEW_STEPS
class Manager(object):
"""Object that manages the review subsystem."""
@classmethod
def add_reviewer(cls, unit_id, submission_key, reviewee_key, reviewer_key):
"""Adds a reviewer for a submission.
If there is no pre-existing review step, one will be created.
Attempting to add an existing unremoved step in REVIEW_STATE_ASSIGNED or
REVIEW_STATE_COMPLETED is an error.
If there is an existing unremoved review in REVIEW_STATE_EXPIRED, it
will be put in REVIEW_STATE_ASSIGNED. If there is a removed review in
REVIEW_STATE_ASSIGNED or REVIEW_STATE_EXPIRED, it will be put in
REVIEW_STATE_ASSIGNED and unremoved. If it is in REVIEW_STATE_COMPLETED,
it will be unremoved but its state will not change. In all these cases
the assigner kind will be set to ASSIGNER_KIND_HUMAN.
Args:
unit_id: string. Unique identifier for a unit.
submission_key: db.Key of models.student_work.Submission. The
submission being registered.
reviewee_key: db.Key of models.models.Student. The student who
authored the submission.
reviewer_key: db.Key of models.models.Student. The student to add as
a reviewer.
Raises:
domain.TransitionError: if there is a pre-existing review step found
in domain.REVIEW_STATE_ASSIGNED|COMPLETED.
Returns:
db.Key of written review step.
"""
try:
COUNTER_ADD_REVIEWER_START.inc()
key = cls._add_reviewer(
unit_id, submission_key, reviewee_key, reviewer_key)
COUNTER_ADD_REVIEWER_SUCCESS.inc()
return key
except Exception as e:
COUNTER_ADD_REVIEWER_FAILED.inc()
raise e
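    # Illustrative sketch (comments only) of the transitions documented above,
    # with hypothetical keys; not part of the original module:
    #   Manager.add_reviewer(unit_id, submission_key, reviewee_key, reviewer_key)
    #     no pre-existing step               -> new step in REVIEW_STATE_ASSIGNED
    #     unremoved step, EXPIRED            -> reassigned to REVIEW_STATE_ASSIGNED
    #     unremoved step, ASSIGNED/COMPLETED -> raises domain.TransitionError
    #     removed step                       -> unremoved; EXPIRED -> ASSIGNED,
    #                                           COMPLETED keeps its state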
@classmethod
@db.transactional(xg=True)
def _add_reviewer(cls, unit_id, submission_key, reviewee_key, reviewer_key):
found = peer.ReviewStep.get_by_key_name(
peer.ReviewStep.key_name(submission_key, reviewer_key))
if not found:
return cls._add_new_reviewer(
unit_id, submission_key, reviewee_key, reviewer_key)
else:
return cls._add_reviewer_update_step(found)
@classmethod
def _add_new_reviewer(
cls, unit_id, submission_key, reviewee_key, reviewer_key):
summary = peer.ReviewSummary(
assigned_count=1, reviewee_key=reviewee_key,
submission_key=submission_key, unit_id=unit_id)
# Synthesize summary key to avoid a second synchronous put op.
summary_key = db.Key.from_path(
peer.ReviewSummary.kind(),
peer.ReviewSummary.key_name(submission_key))
step = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_summary_key=summary_key, reviewee_key=reviewee_key,
reviewer_key=reviewer_key, state=domain.REVIEW_STATE_ASSIGNED,
submission_key=submission_key, unit_id=unit_id)
step_key, written_summary_key = entities.put([step, summary])
if summary_key != written_summary_key:
COUNTER_ADD_REVIEWER_BAD_SUMMARY_KEY.inc()
raise AssertionError(
'Synthesized invalid review summary key %s' % repr(summary_key))
COUNTER_ADD_REVIEWER_CREATE_REVIEW_STEP.inc()
return step_key
@classmethod
def _add_reviewer_update_step(cls, step):
should_increment_human = False
should_increment_reassigned = False
should_increment_unremoved = False
summary = entities.get(step.review_summary_key)
if not summary:
COUNTER_ADD_REVIEWER_BAD_SUMMARY_KEY.inc()
raise AssertionError(
'Found invalid review summary key %s' % repr(
step.review_summary_key))
if not step.removed:
if step.state == domain.REVIEW_STATE_EXPIRED:
should_increment_reassigned = True
step.state = domain.REVIEW_STATE_ASSIGNED
summary.decrement_count(domain.REVIEW_STATE_EXPIRED)
summary.increment_count(domain.REVIEW_STATE_ASSIGNED)
elif (step.state == domain.REVIEW_STATE_ASSIGNED or
step.state == domain.REVIEW_STATE_COMPLETED):
COUNTER_ADD_REVIEWER_UNREMOVED_STEP_FAILED.inc()
raise domain.TransitionError(
'Unable to add new reviewer to step %s' % (
repr(step.key())),
step.state, domain.REVIEW_STATE_ASSIGNED)
else:
should_increment_unremoved = True
step.removed = False
if step.state != domain.REVIEW_STATE_EXPIRED:
summary.increment_count(step.state)
else:
should_increment_reassigned = True
step.state = domain.REVIEW_STATE_ASSIGNED
summary.decrement_count(domain.REVIEW_STATE_EXPIRED)
summary.increment_count(domain.REVIEW_STATE_ASSIGNED)
if step.assigner_kind != domain.ASSIGNER_KIND_HUMAN:
should_increment_human = True
step.assigner_kind = domain.ASSIGNER_KIND_HUMAN
step_key = entities.put([step, summary])[0]
if should_increment_human:
COUNTER_ADD_REVIEWER_SET_ASSIGNER_KIND_HUMAN.inc()
if should_increment_reassigned:
COUNTER_ADD_REVIEWER_EXPIRED_STEP_REASSIGNED.inc()
if should_increment_unremoved:
COUNTER_ADD_REVIEWER_REMOVED_STEP_UNREMOVED.inc()
return step_key
@classmethod
def delete_reviewer(cls, review_step_key):
"""Deletes the given review step.
We do not physically delete the review step; we mark it as removed,
        meaning it will be ignored by most queries and the associated review
summary will have its corresponding state count decremented. Calling
this method on a removed review step is an error.
Args:
review_step_key: db.Key of models.student_work.ReviewStep. The
review step to delete.
Raises:
domain.RemovedError: if called on a review step that has already
been marked removed.
KeyError: if there is no review step with the given key, or if the
step references a review summary that does not exist.
Returns:
db.Key of deleted review step.
"""
try:
COUNTER_DELETE_REVIEWER_START.inc()
key = cls._mark_review_step_removed(review_step_key)
COUNTER_DELETE_REVIEWER_SUCCESS.inc()
return key
except Exception as e:
COUNTER_DELETE_REVIEWER_FAILED.inc()
raise e
@classmethod
@db.transactional(xg=True)
def _mark_review_step_removed(cls, review_step_key):
step = entities.get(review_step_key)
if not step:
COUNTER_DELETE_REVIEWER_STEP_MISS.inc()
raise KeyError(
'No review step found with key %s' % repr(review_step_key))
if step.removed:
COUNTER_DELETE_REVIEWER_ALREADY_REMOVED.inc()
raise domain.RemovedError(
'Cannot remove step %s' % repr(review_step_key), step.removed)
summary = entities.get(step.review_summary_key)
if not summary:
COUNTER_DELETE_REVIEWER_SUMMARY_MISS.inc()
raise KeyError(
'No review summary found with key %s' % repr(
step.review_summary_key))
step.removed = True
summary.decrement_count(step.state)
return entities.put([step, summary])[0]
@classmethod
def expire_review(cls, review_step_key):
"""Puts a review step in state REVIEW_STATE_EXPIRED.
Args:
review_step_key: db.Key of models.student_work.ReviewStep. The
review step to expire.
Raises:
domain.RemovedError: if called on a step that is removed.
domain.TransitionError: if called on a review step that cannot be
transitioned to REVIEW_STATE_EXPIRED (that is, it is already in
REVIEW_STATE_COMPLETED or REVIEW_STATE_EXPIRED).
KeyError: if there is no review with the given key, or the step
references a review summary that does not exist.
Returns:
db.Key of the expired review step.
"""
try:
COUNTER_EXPIRE_REVIEW_START.inc()
key = cls._transition_state_to_expired(review_step_key)
COUNTER_EXPIRE_REVIEW_SUCCESS.inc()
return key
except Exception as e:
COUNTER_EXPIRE_REVIEW_FAILED.inc()
raise e
@classmethod
@db.transactional(xg=True)
def _transition_state_to_expired(cls, review_step_key):
step = entities.get(review_step_key)
if not step:
COUNTER_EXPIRE_REVIEW_STEP_MISS.inc()
raise KeyError(
'No review step found with key %s' % repr(review_step_key))
if step.removed:
COUNTER_EXPIRE_REVIEW_CANNOT_TRANSITION.inc()
raise domain.RemovedError(
'Cannot transition step %s' % repr(review_step_key),
step.removed)
if step.state in (
domain.REVIEW_STATE_COMPLETED, domain.REVIEW_STATE_EXPIRED):
COUNTER_EXPIRE_REVIEW_CANNOT_TRANSITION.inc()
raise domain.TransitionError(
'Cannot transition step %s' % repr(review_step_key),
step.state, domain.REVIEW_STATE_EXPIRED)
summary = entities.get(step.review_summary_key)
if not summary:
COUNTER_EXPIRE_REVIEW_SUMMARY_MISS.inc()
raise KeyError(
'No review summary found with key %s' % repr(
step.review_summary_key))
summary.decrement_count(step.state)
step.state = domain.REVIEW_STATE_EXPIRED
summary.increment_count(step.state)
return entities.put([step, summary])[0]
@classmethod
def expire_old_reviews_for_unit(cls, review_window_mins, unit_id):
"""Finds and expires all old review steps for a single unit.
Args:
review_window_mins: int. Number of minutes before we expire reviews
assigned by domain.ASSIGNER_KIND_AUTO.
unit_id: string. Id of the unit to restrict the query to.
Returns:
2-tuple of list of db.Key of peer.ReviewStep. 0th element is keys
that were written successfully; 1st element is keys that we failed
to update.
"""
query = cls.get_expiry_query(review_window_mins, unit_id)
mapper = utils.QueryMapper(
query, counter=COUNTER_EXPIRY_QUERY_KEYS_RETURNED, report_every=100)
expired_keys = []
exception_keys = []
def map_fn(review_step_key, expired_keys, exception_keys):
try:
expired_keys.append(cls.expire_review(review_step_key))
except: # All errors are the same. pylint: disable-msg=bare-except
# Skip. Either the entity was updated between the query and
# the update, meaning we don't need to expire it; or we ran into
# a transient datastore error, meaning we'll expire it next
# time.
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SKIP.inc()
exception_keys.append(review_step_key)
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_START.inc()
mapper.run(map_fn, expired_keys, exception_keys)
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_EXPIRE.inc(
increment=len(expired_keys))
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SUCCESS.inc()
return expired_keys, exception_keys
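    # Hedged usage sketch (comments only; the values are hypothetical): a cron
    # handler might call
    #   expired, failed = Manager.expire_old_reviews_for_unit(
    #       review_window_mins=60, unit_id='3')
    # and simply re-run later for any keys left in `failed`.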
@classmethod
def get_assignment_candidates_query(cls, unit_id):
"""Gets query that returns candidates for new review assignment.
New assignment candidates are scoped to a unit. We prefer first items
that have the smallest number of completed reviews, then those that have
the smallest number of assigned reviews, then those that were created
most recently.
The results of the query are user-independent.
Args:
unit_id: string. Id of the unit to restrict the query to.
Returns:
db.Query that will return [peer.ReviewSummary].
"""
return peer.ReviewSummary.all(
).filter(
peer.ReviewSummary.unit_id.name, unit_id
).order(
peer.ReviewSummary.completed_count.name
).order(
peer.ReviewSummary.assigned_count.name
).order(
peer.ReviewSummary.create_date.name)
@classmethod
def get_expiry_query(
cls, review_window_mins, unit_id, now_fn=datetime.datetime.now):
"""Gets a db.Query that returns review steps to mark expired.
Results are items that were assigned by machine, are currently assigned,
are not removed, were last updated more than review_window_mins ago,
and are ordered by change date ascending.
Args:
review_window_mins: int. Number of minutes before we expire reviews
assigned by domain.ASSIGNER_KIND_AUTO.
unit_id: string. Id of the unit to restrict the query to.
now_fn: function that returns the current UTC datetime. Injectable
for tests only.
Returns:
db.Query.
"""
get_before = now_fn() - datetime.timedelta(
minutes=review_window_mins)
return peer.ReviewStep.all(keys_only=True).filter(
peer.ReviewStep.unit_id.name, unit_id,
).filter(
peer.ReviewStep.assigner_kind.name, domain.ASSIGNER_KIND_AUTO
).filter(
peer.ReviewStep.state.name, domain.REVIEW_STATE_ASSIGNED
).filter(
peer.ReviewStep.removed.name, False
).filter(
'%s <=' % peer.ReviewStep.change_date.name, get_before
).order(
peer.ReviewStep.change_date.name)
@classmethod
def get_new_review(
cls, unit_id, reviewer_key, candidate_count=20, max_retries=5):
"""Attempts to assign a review to a reviewer.
We prioritize possible reviews by querying review summary objects,
finding those that best satisfy cls.get_assignment_candidates_query.
To minimize write contention, we nontransactionally grab candidate_count
candidates from the head of the query results. Post-query we filter out
any candidates that are for the prospective reviewer's own work.
Then we randomly select one. We transactionally attempt to assign that
review. If assignment fails because the candidate is updated between
selection and assignment or the assignment is for a submission the
reviewer already has or has already done, we remove the candidate from
the list. We then retry assignment up to max_retries times. If we run
out of retries or candidates, we raise domain.NotAssignableError.
        This is a naive implementation: it scales only to a relatively low
        rate of new review assignments per second, and it can raise
        domain.NotAssignableError even when assignable reviews do exist.
Args:
unit_id: string. The unit to assign work from.
reviewer_key: db.Key of models.models.Student. The reviewer to
attempt to assign the review to.
candidate_count: int. The number of candidate keys to fetch and
attempt to assign from. Increasing this decreases the chance
that we will have write contention on reviews, but it costs 1 +
num_results datastore reads and can get expensive for large
courses.
max_retries: int. Number of times to retry failed assignment
attempts. Careful not to set this too high as a) datastore
throughput is slow and latency from this method is user-facing,
and b) if you encounter a few failures it is likely that all
candidates are now failures, so each retry past the first few is
of questionable value.
Raises:
domain.NotAssignableError: if no review can currently be assigned
for the given unit_id.
Returns:
db.Key of peer.ReviewStep. The newly created assigned review step.
"""
try:
COUNTER_GET_NEW_REVIEW_START.inc()
# Filter out candidates that are for submissions by the reviewer.
raw_candidates = cls.get_assignment_candidates_query(unit_id).fetch(
candidate_count)
COUNTER_ASSIGNMENT_CANDIDATES_QUERY_RESULTS_RETURNED.inc(
increment=len(raw_candidates))
candidates = [
candidate for candidate in raw_candidates
if candidate.reviewee_key != reviewer_key]
retries = 0
while True:
if not candidates or retries >= max_retries:
COUNTER_GET_NEW_REVIEW_NOT_ASSIGNABLE.inc()
raise domain.NotAssignableError(
'No reviews assignable for unit %s and reviewer %s' % (
unit_id, repr(reviewer_key)))
candidate = cls._choose_assignment_candidate(candidates)
candidates.remove(candidate)
assigned_key = cls._attempt_review_assignment(
candidate.key(), reviewer_key, candidate.change_date)
if not assigned_key:
retries += 1
else:
COUNTER_GET_NEW_REVIEW_SUCCESS.inc()
return assigned_key
        except Exception as e:
COUNTER_GET_NEW_REVIEW_FAILED.inc()
raise e
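    # Hedged usage sketch (comments only; arguments are hypothetical):
    #   try:
    #       step_key = Manager.get_new_review('unit-1', reviewer_key)
    #   except domain.NotAssignableError:
    #       step_key = None  # nothing assignable right now; try again later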
@classmethod
def _choose_assignment_candidate(cls, candidates):
"""Seam that allows different choice functions in tests."""
return random.choice(candidates)
@classmethod
@db.transactional(xg=True)
def _attempt_review_assignment(
cls, review_summary_key, reviewer_key, last_change_date):
COUNTER_GET_NEW_REVIEW_ASSIGNMENT_ATTEMPTED.inc()
summary = entities.get(review_summary_key)
if not summary:
raise KeyError('No review summary found with key %s' % repr(
review_summary_key))
if summary.change_date != last_change_date:
# The summary has changed since we queried it. We cannot know for
# sure what the edit was, but let's skip to the next one because it
# was probably a review assignment.
COUNTER_GET_NEW_REVIEW_SUMMARY_CHANGED.inc()
return
step = peer.ReviewStep.get_by_key_name(
peer.ReviewStep.key_name(summary.submission_key, reviewer_key))
if not step:
step = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_summary_key=summary.key(),
reviewee_key=summary.reviewee_key, reviewer_key=reviewer_key,
state=domain.REVIEW_STATE_ASSIGNED,
submission_key=summary.submission_key, unit_id=summary.unit_id)
else:
if step.state == domain.REVIEW_STATE_COMPLETED:
# Reviewer has previously done this review and the review
# has been deleted. Skip to the next one.
COUNTER_GET_NEW_REVIEW_CANNOT_UNREMOVE_COMPLETED.inc()
return
if step.removed:
# We can reassign the existing review step.
COUNTER_GET_NEW_REVIEW_REASSIGN_EXISTING.inc()
step.removed = False
step.assigner_kind = domain.ASSIGNER_KIND_AUTO
step.state = domain.REVIEW_STATE_ASSIGNED
else:
                # Reviewer has already reviewed or is already assigned to
                # review this submission, so we cannot reassign the step.
COUNTER_GET_NEW_REVIEW_ALREADY_ASSIGNED.inc()
return
summary.increment_count(domain.REVIEW_STATE_ASSIGNED)
return entities.put([step, summary])[0]
@classmethod
def get_review_step_keys_by(cls, unit_id, reviewer_key):
"""Gets the keys of all review steps in a unit for a reviewer.
Note that keys for review steps marked removed are included in the
result set.
Args:
unit_id: string. Id of the unit to restrict the query to.
reviewer_key: db.Key of models.models.Student. The author of the
requested reviews.
Returns:
[db.Key of peer.ReviewStep].
"""
COUNTER_GET_REVIEW_STEP_KEYS_BY_START.inc()
try:
query = peer.ReviewStep.all(keys_only=True).filter(
peer.ReviewStep.reviewer_key.name, reviewer_key
).filter(
peer.ReviewStep.unit_id.name, unit_id
).order(
peer.ReviewStep.create_date.name,
)
            keys = query.fetch(_REVIEW_STEP_QUERY_LIMIT)
except Exception as e:
COUNTER_GET_REVIEW_STEP_KEYS_BY_FAILED.inc()
raise e
COUNTER_GET_REVIEW_STEP_KEYS_BY_SUCCESS.inc()
COUNTER_GET_REVIEW_STEP_KEYS_BY_KEYS_RETURNED.inc(increment=len(keys))
return keys
@classmethod
def get_review_steps_by_keys(cls, keys):
"""Gets review steps by their keys.
Args:
keys: [db.Key of peer.ReviewStep]. Keys to fetch.
Returns:
            [domain.ReviewStep or None]. Missed keys yield None at the
            corresponding position in the result list.
"""
return [
cls._make_domain_review_step(model) for model in entities.get(keys)]
@classmethod
def _make_domain_review_step(cls, model):
if model is None:
return
return domain.ReviewStep(
assigner_kind=model.assigner_kind, change_date=model.change_date,
create_date=model.create_date, key=model.key(),
removed=model.removed, review_key=model.review_key,
review_summary_key=model.review_summary_key,
reviewee_key=model.reviewee_key, reviewer_key=model.reviewer_key,
state=model.state, submission_key=model.submission_key,
unit_id=model.unit_id
)
@classmethod
def get_reviews_by_keys(cls, keys):
"""Gets reviews by their keys.
Args:
keys: [db.Key of review.Review]. Keys to fetch.
Returns:
            [domain.Review or None]. Missed keys yield None at the
            corresponding position in the result list.
"""
return [cls._make_domain_review(model) for model in entities.get(keys)]
@classmethod
def _make_domain_review(cls, model):
if model is None:
return
return domain.Review(contents=model.contents, key=model.key())
@classmethod
def get_submission_and_review_step_keys(cls, unit_id, reviewee_key):
"""Gets the submission key/review step keys for the given pair.
Note that keys for review steps marked removed are included in the
result set.
Args:
unit_id: string. Id of the unit to restrict the query to.
reviewee_key: db.Key of models.models.Student. The student who
authored the submission.
Raises:
domain.ConstraintError: if multiple review summary keys were found
for the given unit_id, reviewee_key pair.
KeyError: if there is no review summary for the given unit_id,
reviewee pair.
Returns:
(db.Key of Submission, [db.Key of peer.ReviewStep]) if submission
found for given unit_id, reviewee_key pair; None otherwise.
"""
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_START.inc()
try:
submission_key = db.Key.from_path(
student_work.Submission.kind(),
student_work.Submission.key_name(unit_id, reviewee_key))
submission = entities.get(submission_key)
if not submission:
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUBMISSION_MISS.inc(
)
return
step_keys_query = peer.ReviewStep.all(
keys_only=True
).filter(
peer.ReviewStep.submission_key.name, submission_key
)
step_keys = step_keys_query.fetch(_REVIEW_STEP_QUERY_LIMIT)
results = (submission_key, step_keys)
except Exception as e:
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_FAILED.inc()
raise e
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUCCESS.inc()
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_RETURNED.inc(
increment=len(step_keys))
return results
@classmethod
def get_submissions_by_keys(cls, keys):
"""Gets submissions by their keys.
Args:
keys: [db.Key of review.Submission]. Keys to fetch.
Returns:
            [domain.Submission or None]. Missed keys yield None at the
            corresponding position in the result list.
"""
return [
cls._make_domain_submission(model) for model in entities.get(keys)]
@classmethod
def _make_domain_submission(cls, model):
if model is None:
return
return domain.Submission(contents=model.contents, key=model.key())
@classmethod
def start_review_process_for(cls, unit_id, submission_key, reviewee_key):
"""Registers a new submission with the review subsystem.
Once registered, reviews can be assigned against a given submission,
either by humans or by machine. No reviews are assigned during
registration -- this method merely makes them assignable.
Args:
unit_id: string. Unique identifier for a unit.
submission_key: db.Key of models.student_work.Submission. The
submission being registered.
reviewee_key: db.Key of models.models.Student. The student who
authored the submission.
Raises:
db.BadValueError: if passed args are invalid.
domain.ReviewProcessAlreadyStartedError: if the review process has
already been started for this student's submission.
Returns:
db.Key of created ReviewSummary.
"""
try:
COUNTER_START_REVIEW_PROCESS_FOR_START.inc()
key = cls._create_review_summary(
reviewee_key, submission_key, unit_id)
COUNTER_START_REVIEW_PROCESS_FOR_SUCCESS.inc()
return key
except Exception as e:
COUNTER_START_REVIEW_PROCESS_FOR_FAILED.inc()
raise e
@classmethod
@db.transactional(xg=True)
def _create_review_summary(cls, reviewee_key, submission_key, unit_id):
collision = peer.ReviewSummary.get_by_key_name(
peer.ReviewSummary.key_name(submission_key))
if collision:
COUNTER_START_REVIEW_PROCESS_FOR_ALREADY_STARTED.inc()
raise domain.ReviewProcessAlreadyStartedError()
return peer.ReviewSummary(
reviewee_key=reviewee_key, submission_key=submission_key,
unit_id=unit_id,
).put()
@classmethod
def write_review(
cls, review_step_key, review_payload, mark_completed=True):
"""Writes a review, updating associated internal state.
If the passed step already has a review, that review will be updated. If
it does not have a review, a new one will be created with the passed
payload.
Args:
review_step_key: db.Key of peer.ReviewStep. The key of the review
step to update.
review_payload: string. New contents of the review.
mark_completed: boolean. If True, set the state of the review to
domain.REVIEW_STATE_COMPLETED. If False, leave the state as it
was.
Raises:
domain.ConstraintError: if no review found for the review step.
domain.RemovedError: if the step for the review is removed.
domain.TransitionError: if mark_completed was True but the step was
already in domain.REVIEW_STATE_COMPLETED.
KeyError: if no review step was found with review_step_key.
Returns:
db.Key of peer.ReviewStep: key of the written review step.
"""
COUNTER_WRITE_REVIEW_START.inc()
try:
step_key = cls._update_review_contents_and_change_state(
review_step_key, review_payload, mark_completed)
except Exception as e:
COUNTER_WRITE_REVIEW_FAILED.inc()
raise e
COUNTER_WRITE_REVIEW_SUCCESS.inc()
return step_key
@classmethod
@db.transactional(xg=True)
def _update_review_contents_and_change_state(
cls, review_step_key, review_payload, mark_completed):
should_increment_created_new_review = False
should_increment_updated_existing_review = False
should_increment_assigned_to_completed = False
should_increment_expired_to_completed = False
step = entities.get(review_step_key)
if not step:
COUNTER_WRITE_REVIEW_STEP_MISS.inc()
raise KeyError(
'No review step found with key %s' % repr(review_step_key))
elif step.removed:
raise domain.RemovedError(
'Unable to process step %s' % repr(step.key()), step.removed)
elif mark_completed and step.state == domain.REVIEW_STATE_COMPLETED:
raise domain.TransitionError(
'Unable to transition step %s' % repr(step.key()),
step.state, domain.REVIEW_STATE_COMPLETED)
if step.review_key:
review_to_update = entities.get(step.review_key)
if review_to_update:
should_increment_updated_existing_review = True
else:
review_to_update = student_work.Review(
contents=review_payload, reviewer_key=step.reviewer_key,
unit_id=step.unit_id)
step.review_key = db.Key.from_path(
student_work.Review.kind(),
student_work.Review.key_name(step.unit_id, step.reviewer_key))
should_increment_created_new_review = True
if not review_to_update:
COUNTER_WRITE_REVIEW_REVIEW_MISS.inc()
raise domain.ConstraintError(
'No review found with key %s' % repr(step.review_key))
summary = entities.get(step.review_summary_key)
if not summary:
COUNTER_WRITE_REVIEW_SUMMARY_MISS.inc()
raise domain.ConstraintError(
'No review summary found with key %s' % repr(
step.review_summary_key))
review_to_update.contents = review_payload
updated_step_key = None
if not mark_completed:
_, updated_step_key = entities.put([review_to_update, step])
else:
if step.state == domain.REVIEW_STATE_ASSIGNED:
should_increment_assigned_to_completed = True
elif step.state == domain.REVIEW_STATE_EXPIRED:
should_increment_expired_to_completed = True
summary.decrement_count(step.state)
step.state = domain.REVIEW_STATE_COMPLETED
summary.increment_count(step.state)
_, updated_step_key, _ = entities.put(
[review_to_update, step, summary])
if should_increment_created_new_review:
COUNTER_WRITE_REVIEW_CREATED_NEW_REVIEW.inc()
elif should_increment_updated_existing_review:
COUNTER_WRITE_REVIEW_UPDATED_EXISTING_REVIEW.inc()
if should_increment_assigned_to_completed:
COUNTER_WRITE_REVIEW_COMPLETED_ASSIGNED_STEP.inc()
elif should_increment_expired_to_completed:
COUNTER_WRITE_REVIEW_COMPLETED_EXPIRED_STEP.inc()
return updated_step_key
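# The function below is an illustrative, hedged sketch of the full peer review
# lifecycle using the Manager API defined above. It is never called; the
# argument values are assumed to be valid keys produced elsewhere.
def _example_peer_review_flow(unit_id, submission_key, reviewee_key, reviewer_key):
    # 1. Register the submission so that reviews become assignable.
    summary_key = Manager.start_review_process_for(
        unit_id, submission_key, reviewee_key)
    # 2. Assign a review to a specific human reviewer...
    step_key = Manager.add_reviewer(
        unit_id, submission_key, reviewee_key, reviewer_key)
    # ...or let the subsystem pick a submission for the reviewer, which may
    # raise domain.NotAssignableError if nothing is currently assignable.
    try:
        auto_step_key = Manager.get_new_review(unit_id, reviewer_key)
    except domain.NotAssignableError:
        auto_step_key = None
    # 3. Write review contents and mark the step completed.
    Manager.write_review(step_key, 'free-text review payload')
    return summary_key, step_key, auto_step_key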
custom_module = None
def register_module():
"""Registers this module in the registry."""
import modules.dashboard # pylint: disable-msg=g-import-not-at-top
from modules.review import stats # pylint: disable-msg=g-import-not-at-top
from modules.review import cron # pylint: disable-msg=g-import-not-at-top
# register custom dashboard section
modules.dashboard.dashboard.DashboardRegistry.add_custom_analytics_section(
stats.PeerReviewStatsHandler)
# register this peer review implementation
models.review.ReviewsProcessor.set_peer_matcher(Manager)
# register cron handler
cron_handlers = [(
'/cron/expire_old_assigned_reviews',
cron.ExpireOldAssignedReviewsHandler)]
global custom_module
custom_module = custom_modules.Module(
'Peer Review Engine',
'A set of classes for managing peer review process.',
cron_handlers, [])
return custom_module
|
{
"content_hash": "dfd8b956cdb5a42b66940c90c1427a22",
"timestamp": "",
"source": "github",
"line_count": 1066,
"max_line_length": 80,
"avg_line_length": 42.37523452157598,
"alnum_prop": 0.645333392366953,
"repo_name": "graemian/ami-mooc-pilot",
"id": "95865b297bb467393e65bd2e5a97e860408ccd97",
"size": "45770",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "modules/review/review.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "196951"
},
{
"name": "Perl",
"bytes": "12672"
},
{
"name": "Python",
"bytes": "950098"
}
],
"symlink_target": ""
}
|
from setuptools import setup
import asdoc2dash
def join_files(*files):
    content = ""
    for name in files:
        # A context manager closes the file even if read() raises.
        with open(name) as f:
            content += f.read() + "\n"
    return content
setup(
name = 'asdoc2dash',
version = asdoc2dash.__version__,
description = asdoc2dash.__doc__.strip(),
long_description = join_files("README.rst", "CHANGES.rst"),
url = asdoc2dash.__homepage__,
license = asdoc2dash.__license__,
author = asdoc2dash.__author__,
author_email = asdoc2dash.__email__,
packages = ["asdoc2dash"],
entry_points = {
"console_scripts": ["asdoc2dash = asdoc2dash.asdoc2dash:main"]
},
install_requires = open("requirements.txt").read().splitlines(),
platforms = 'any',
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Topic :: Documentation',
'Topic :: Software Development',
'Topic :: Software Development :: Documentation',
'Topic :: Text Processing'
]
)
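# Hedged usage note (comments only): with a standard setuptools environment,
# this package would typically be built/installed with, e.g.:
#   python setup.py sdist   # build a source distribution
#   pip install .           # install from a checkout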
|
{
"content_hash": "a980ad4ed93a95bb63add5f0cf6f0c45",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 70,
"avg_line_length": 30.128205128205128,
"alnum_prop": 0.5914893617021276,
"repo_name": "ton1517/asdoc2dash",
"id": "990b628b70254a7c4d5e9ee381409ac20ac6ff3d",
"size": "1175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14869"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
CONF = cfg.CONF
class FakeVolume(object):
def __init__(self, **kwargs):
self.id = kwargs.pop('id', 'fake_vol_id')
self.status = kwargs.pop('status', 'available')
self.device = kwargs.pop('device', '')
for key, value in kwargs.items():
setattr(self, key, value)
def __getitem__(self, attr):
return getattr(self, attr)
class FakeVolumeSnapshot(object):
def __init__(self, **kwargs):
self.id = kwargs.pop('id', 'fake_volsnap_id')
self.status = kwargs.pop('status', 'available')
for key, value in kwargs.items():
setattr(self, key, value)
def __getitem__(self, attr):
return getattr(self, attr)
class API(object):
"""Fake Volume API."""
def get(self, *args, **kwargs):
pass
def create_snapshot_force(self, *args, **kwargs):
pass
def get_snapshot(self, *args, **kwargs):
pass
def delete_snapshot(self, *args, **kwargs):
pass
def create(self, *args, **kwargs):
pass
def extend(self, *args, **kwargs):
pass
def get_all(self, search_opts):
pass
def delete(self, volume_id):
pass
def get_all_snapshots(self, search_opts):
pass
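# Hedged usage sketch (comments only; values are hypothetical): tests that
# consume these fakes can construct and index them like the real objects:
#   vol = FakeVolume(id='vol-1', status='in-use')
#   vol.status        # -> 'in-use'
#   vol['status']     # -> 'in-use', via __getitem__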
|
{
"content_hash": "e0323d58f45f2de0e47f3fe8f66db3f5",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 55,
"avg_line_length": 22.982142857142858,
"alnum_prop": 0.567987567987568,
"repo_name": "NetApp/manila",
"id": "c4f87b34581dc437f5d377db5f3b01992d4317e0",
"size": "1923",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "manila/tests/fake_volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "8111068"
},
{
"name": "Shell",
"bytes": "91643"
}
],
"symlink_target": ""
}
|
__all__ = [
'HTML5TreeBuilder',
]
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import NamespacedAttribute
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
"""Use html5lib to build a tree."""
features = ['html5lib', PERMISSIVE, HTML_5, HTML]
def prepare_markup(self, markup, user_specified_encoding):
# Store the user-specified encoding for use later on.
self.user_specified_encoding = user_specified_encoding
yield (markup, None, None, False)
# These methods are defined by Beautiful Soup.
def feed(self, markup):
if self.soup.parse_only is not None:
warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
parser = html5lib.HTMLParser(tree=self.create_treebuilder)
doc = parser.parse(markup, encoding=self.user_specified_encoding)
# Set the character encoding detected by the tokenizer.
if isinstance(markup, unicode):
# We need to special-case this because html5lib sets
# charEncoding to UTF-8 if it gets Unicode input.
doc.original_encoding = None
else:
doc.original_encoding = parser.tokenizer.stream.charEncoding[0]
def create_treebuilder(self, namespaceHTMLElements):
self.underlying_builder = TreeBuilderForHtml5lib(
self.soup, namespaceHTMLElements)
return self.underlying_builder
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
def __init__(self, soup, namespaceHTMLElements):
self.soup = soup
super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
def documentClass(self):
self.soup.reset()
return Element(self.soup, self.soup, None)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = Doctype.for_name_and_ids(name, publicId, systemId)
self.soup.object_was_parsed(doctype)
def elementClass(self, name, namespace):
tag = self.soup.new_tag(name, namespace)
return Element(tag, self.soup, namespace)
def commentClass(self, data):
return TextNode(Comment(data), self.soup)
    def fragmentClass(self):
        # Imported here rather than at module level to avoid a circular
        # import between bs4 and its builders.
        from bs4 import BeautifulSoup
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)
def appendChild(self, node):
# XXX This code is not covered by the BS4 tests.
self.soup.append(node.element)
def getDocument(self):
return self.soup
def getFragment(self):
return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
def __init__(self, element):
self.element = element
self.attrs = dict(self.element.attrs)
def __iter__(self):
return list(self.attrs.items()).__iter__()
def __setitem__(self, name, value):
"set attr", name, value
self.element[name] = value
def items(self):
return list(self.attrs.items())
def keys(self):
return list(self.attrs.keys())
def __len__(self):
return len(self.attrs)
def __getitem__(self, name):
return self.attrs[name]
def __contains__(self, name):
return name in list(self.attrs.keys())
class Element(html5lib.treebuilders._base.Node):
def __init__(self, element, soup, namespace):
html5lib.treebuilders._base.Node.__init__(self, element.name)
self.element = element
self.soup = soup
self.namespace = namespace
def appendChild(self, node):
string_child = child = None
if isinstance(node, basestring):
# Some other piece of code decided to pass in a string
# instead of creating a TextElement object to contain the
# string.
string_child = child = node
elif isinstance(node, Tag):
# Some other piece of code decided to pass in a Tag
# instead of creating an Element object to contain the
# Tag.
child = node
elif node.element.__class__ == NavigableString:
string_child = child = node.element
else:
child = node.element
if (string_child and self.element.contents
and self.element.contents[-1].__class__ == NavigableString):
# We are appending a string onto another string.
# TODO This has O(n^2) performance, for input like
# "a</a>a</a>a</a>..."
old_element = self.element.contents[-1]
new_element = self.soup.new_string(old_element + string_child)
old_element.replace_with(new_element)
self.soup._most_recent_element = new_element
else:
if isinstance(node, basestring):
# Create a brand new NavigableString from this string.
child = self.soup.new_string(node)
self.soup.object_was_parsed(child, parent=self.element)
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes is not None and len(attributes) > 0:
converted_attributes = []
for name, value in list(attributes.items()):
if isinstance(name, tuple):
new_name = NamespacedAttribute(*name)
del attributes[name]
attributes[new_name] = value
self.soup.builder._replace_cdata_list_attribute_values(
self.name, attributes)
for name, value in attributes.items():
self.element[name] = value
# The attributes may contain variables that need substitution.
# Call set_up_substitutions manually.
#
# The Tag constructor called this method when the Tag was created,
# but we just set/changed the attributes, so call it again.
self.soup.builder.set_up_substitutions(self.element)
attributes = property(getAttributes, setAttributes)
    def insertText(self, data, insertBefore=None):
        if insertBefore:
            # Wrap the string in a TextNode and insert the node itself;
            # insertBefore() expects an object with an .element attribute.
            text = TextNode(self.soup.new_string(data), self.soup)
            self.insertBefore(text, insertBefore)
        else:
            self.appendChild(data)
def insertBefore(self, node, refNode):
index = self.element.index(refNode.element)
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[index-1].__class__ == NavigableString):
# (See comments in appendChild)
old_node = self.element.contents[index-1]
new_str = self.soup.new_string(old_node + node.element)
old_node.replace_with(new_str)
else:
self.element.insert(index, node.element)
node.parent = self
def removeChild(self, node):
        node.element.extract()
def reparentChildren(self, newParent):
while self.element.contents:
child = self.element.contents[0]
child.extract()
newParent.appendChild(child)
def cloneNode(self):
tag = self.soup.new_tag(self.element.name, self.namespace)
node = Element(tag, self.soup, self.namespace)
        for key, value in self.attributes:
node.attributes[key] = value
return node
def hasContent(self):
return self.element.contents
def getNameTuple(self):
        if self.namespace is None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TextNode(Element):
def __init__(self, element, soup):
html5lib.treebuilders._base.Node.__init__(self, None)
self.element = element
self.soup = soup
def cloneNode(self):
raise NotImplementedError
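# Hedged usage sketch (comments only): with bs4 and html5lib installed, this
# builder is selected through the ordinary BeautifulSoup entry point, e.g.:
#   from bs4 import BeautifulSoup
#   soup = BeautifulSoup('<p>unclosed<b>markup', 'html5lib')
# html5lib parses permissively, the way a browser would.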
|
{
"content_hash": "a7a307cdf3eaf6147b54eba631df8b85",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 159,
"avg_line_length": 35.44303797468354,
"alnum_prop": 0.6202380952380953,
"repo_name": "eclipselu/ppurl-alfred-workflow",
"id": "6ed505550e6dcc3f0263230c4513a646298ca69b",
"size": "8400",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "src/bs4/builder/_html5lib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "274428"
}
],
"symlink_target": ""
}
|
# [START dialogflow_v3beta1_generated_Flows_DeleteFlow_async]
from google.cloud import dialogflowcx_v3beta1
async def sample_delete_flow():
# Create a client
client = dialogflowcx_v3beta1.FlowsAsyncClient()
# Initialize request argument(s)
request = dialogflowcx_v3beta1.DeleteFlowRequest(
name="name_value",
)
# Make the request
await client.delete_flow(request=request)
# [END dialogflow_v3beta1_generated_Flows_DeleteFlow_async]
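# Hedged usage sketch (comments only): the coroutine above can be driven with
# asyncio, assuming application default credentials are configured:
#   import asyncio
#   asyncio.run(sample_delete_flow())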
|
{
"content_hash": "aa056f79a007bf9b979add7674f77ba3",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 59,
"avg_line_length": 24.235294117647058,
"alnum_prop": 0.7233009708737864,
"repo_name": "googleapis/python-dialogflow-cx",
"id": "62341ff64555bc4f4a7d339eb9ece94bcc8bca02",
"size": "1797",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/dialogflow_v3beta1_generated_flows_delete_flow_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "10904903"
},
{
"name": "Shell",
"bytes": "30681"
}
],
"symlink_target": ""
}
|
"""
Test exceptions in LTI provider.
"""
import logging
from django.http.response import Http404
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.urls.base import reverse
import mock
from mock import Mock
import pytest
from bridge_lti.provider import learner_flow
from module.tests.test_views import BridgeTestCase
log = logging.getLogger(__name__)
class RaisedExceptionUsesCustomTemplateTest(BridgeTestCase):
"""
    Test the LTI provider's handling of incorrect input.
"""
def setUp(self):
super().setUp()
self.rf = RequestFactory()
self.correct_kw = {'collection_order_slug': self.collection_order1.slug}
self.not_correct_kw = {'collection_order_slug': self.collection_order1.slug + "_wrong"}
self.url = reverse('lti:launch', kwargs=self.correct_kw)
self.not_correct_url = reverse('lti:launch', kwargs=self.not_correct_kw)
@override_settings(DEBUG=False)
def test_learner_flow_with_incorrect_collection_order_slug(self):
"""
        Check that calling the learner_flow function with an incorrect
        collection_order_slug raises the proper exception.
"""
request = self.rf.post(self.url)
with pytest.raises(Http404):
learner_flow(
request,
lti_lms_platform=None,
tool_provider=None,
collection_order_slug=self.not_correct_kw["collection_order_slug"]
)
@mock.patch('lti.contrib.django.DjangoToolProvider.from_django_request')
@override_settings(DEBUG=False)
def test_client_post_with_incorrect_collection_order_slug_test(self, from_django_request):
"""
        Test that a POST request with incorrect data results in a 404 error
        rendered with the correct template.
"""
is_valid_request = Mock(return_value=False)
tool_provider = Mock(is_valid_request=is_valid_request)
from_django_request.return_value = tool_provider
response = self.client.post(self.not_correct_url)
self.assertTemplateUsed(response, '404.html')
from_django_request.assert_called_once()
tool_provider.is_valid_request.assert_called_once()
|
{
"content_hash": "ee61751559bd136fae005615c6351f61",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 112,
"avg_line_length": 36.24590163934426,
"alnum_prop": 0.681592039800995,
"repo_name": "harvard-vpal/bridge-adaptivity",
"id": "335b99b11c093df66cc624aec3eb02db3c645021",
"size": "2211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bridge_adaptivity/bridge_lti/tests/test_exeptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2514"
},
{
"name": "Dockerfile",
"bytes": "1586"
},
{
"name": "HTML",
"bytes": "70921"
},
{
"name": "JavaScript",
"bytes": "29636"
},
{
"name": "Makefile",
"bytes": "1614"
},
{
"name": "Python",
"bytes": "315506"
},
{
"name": "Shell",
"bytes": "242"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('tokens', '0003_auto_20170224_0334'),
]
operations = [
migrations.AlterField(
model_name='abstracttoken',
name='token',
field=models.TextField(default=uuid.uuid4, max_length=32, unique=True, verbose_name='Token'),
),
]
|
{
"content_hash": "74e9a3c27852f14906c5ed0225e49fef",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 105,
"avg_line_length": 23.68421052631579,
"alnum_prop": 0.6222222222222222,
"repo_name": "sebastienbarbier/723e_server",
"id": "c4fbf6339d687a25af5841d45bd0120c3683eece",
"size": "523",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "seven23/models/tokens/migrations/0004_auto_20170815_1138.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "182572"
},
{
"name": "Makefile",
"bytes": "352"
},
{
"name": "Python",
"bytes": "99185"
}
],
"symlink_target": ""
}
|
import os
import sys
# Replace {{HDFS_MASTER}} with hdfs://freestyle
# Replace {{HDFS_PORT}} with 8020
# Replace {{EDGES_PATH}} and {{VERTICES_PATH}} with what you want to copy, e.g. /home/icg27/lj_edges/ and /home/icg27/lj_pr/ . They are paths on HDFS.
# For testing you can comment out: ascii_to_binary("/tmp{{VERTICES_PATH}}input", ver_file);
# Then you can just make all; ./DataTransformer_bin copy_input. This should allow you to test it. I think it copies the data to /tmp{{VERTICES_PATH}} and /tmp{{EDGES_PATH}}.
trans = { "{{VERTEX_DATA_TYPE}}" : "double" ,
"{{HDFS_MASTER}}" : "freestyle.private.srg.cl.cam.ac.uk" ,
"{{HDFS_PORT}}" : "8020" ,
"{{EDGES_PATH}}" : "/home/icg27/lj_edges" ,
"{{VERTICES_PATH}}" : "/home/icg27/lj_pr/",
"{{TMP_ROOT}}" : "/tmp",
}
out = open(sys.argv[2], "w")
for line in open(sys.argv[1]):
for key in trans.keys():
line = line.replace(key, trans[key])
out.write(line)
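# Hedged usage note (comments only), inferred from the sys.argv reads above:
#   python quicktrans.py <template_file> <output_file>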
|
{
"content_hash": "4b7e768bbedd40efec35b60359506900",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 173,
"avg_line_length": 43.95652173913044,
"alnum_prop": 0.5914935707220573,
"repo_name": "camsas/Musketeer",
"id": "491b0834320e1b398d76b56cf624d26dd9f0487a",
"size": "1030",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/translation/graphchi_templates/quicktrans.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "44126"
},
{
"name": "C#",
"bytes": "14423"
},
{
"name": "C++",
"bytes": "998856"
},
{
"name": "CMake",
"bytes": "222"
},
{
"name": "GAP",
"bytes": "11218"
},
{
"name": "Java",
"bytes": "116970"
},
{
"name": "Makefile",
"bytes": "13835"
},
{
"name": "Protocol Buffer",
"bytes": "277"
},
{
"name": "Python",
"bytes": "32066"
},
{
"name": "Scala",
"bytes": "17906"
},
{
"name": "Shell",
"bytes": "51084"
}
],
"symlink_target": ""
}
|
try:
import weechat, re #sys
except Exception:
print("This script must be run under WeeChat.")
print("Get WeeChat now at: http://www.weechat.org/")
quit()
SCRIPT_NAME = "spell_correction"
SCRIPT_AUTHOR = "nils_2 <weechatter@arcor.de>"
SCRIPT_VERSION = "1.0"
SCRIPT_LICENSE = "GPL"
SCRIPT_DESC = "a spell correction script to use with spell/aspell plugin"
OPTIONS = { 'auto_pop_up_item' : ('off','automatic pop-up suggestion item on a misspelled word'),
'auto_replace' : ('on','replaces misspelled word with selected suggestion, automatically. If you use "off" you will have to bind command "/%s replace" to a key' % SCRIPT_NAME),
'catch_input_completion' : ('on','will catch the input_complete commands [TAB-key]'),
'eat_input_char' : ('on','will eat the next char you type, after replacing a misspelled word'),
'suggest_item' : ('${white}%S${default}', 'item format (%S = suggestion, %D = dict). Colors are allowed with format "${color}". note: since WeeChat 0.4.2 content is evaluated, see /help eval.'),
'hide_single_dict' : ('on','will hide dict in item if you have a single dict for buffer only'),
'complete_near' : ('0','show suggestions item only if you are n-chars near the misspelled word (0 = off). Using \'replace_mode\' cursor has to be n-chars near misspelled word to cycle through suggestions.'),
'replace_mode' : ('off','misspelled word will be replaced directly by suggestions. Use option \'complete_near\' to specify range and item \'spell_suggestion\' to show possible suggestions.'),
}
Hooks = {'catch_input_completion': '', 'catch_input_return': ''}
regex_color=re.compile(r'\$\{([^\{\}]+)\}')
regex_optional_tags=re.compile(r'%\{[^\{\}]+\}')
multiline_input = 0
plugin_name = "spell" # WeeChat >= 2.5
old_plugin_name = "aspell" # WeeChat < 2.5
# ================================[ weechat options & description ]===============================
def init_options():
for option,value in OPTIONS.items():
if not weechat.config_is_set_plugin(option):
weechat.config_set_plugin(option, value[0])
OPTIONS[option] = value[0]
else:
OPTIONS[option] = weechat.config_get_plugin(option)
weechat.config_set_desc_plugin(option, '%s (default: "%s")' % (value[1], value[0]))
def toggle_refresh(pointer, name, value):
global OPTIONS
option = name[len('plugins.var.python.' + SCRIPT_NAME + '.'):] # get optionname
OPTIONS[option] = value # save new value
if OPTIONS['catch_input_completion'].lower() == "off":
if Hooks['catch_input_completion']:
weechat.unhook(Hooks['catch_input_completion'])
Hooks['catch_input_completion'] = ''
weechat.unhook(Hooks['catch_input_return'])
Hooks['catch_input_return'] = ''
elif OPTIONS['catch_input_completion'].lower() == "on":
if not Hooks['catch_input_completion']:
Hooks['catch_input_completion'] = weechat.hook_command_run('/input complete*', 'input_complete_cb', '')
Hooks['catch_input_return'] = weechat.hook_command_run('/input return', 'input_return_cb', '')
return weechat.WEECHAT_RC_OK
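# Hedged usage sketch (comments only): options live under the standard
# plugins.var.python.<script_name> namespace, so the completion hook can be
# toggled at runtime with, e.g.:
#   /set plugins.var.python.spell_correction.catch_input_completion off
# toggle_refresh() above then unhooks the input_complete/input_return hooks.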
# ================================[ hooks() ]===============================
# called from command and when TAB is pressed
def auto_suggest_cmd_cb(data, buffer, args):
arguments = args.split(' ')
input_line = weechat.buffer_get_string(buffer, 'input')
weechat.buffer_set(buffer, 'localvar_set_spell_correction_suggest_input_line', '%s' % input_line)
if args.lower() == 'replace':
replace_misspelled_word(buffer)
return weechat.WEECHAT_RC_OK
# if not weechat.buffer_get_string(buffer,'localvar_spell_correction_suggest_item'):
# return weechat.WEECHAT_RC_OK
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
if not position:
position = -1
if arguments[0].lower() == 'addword' and len(arguments) >= 2:
found_dicts = get_aspell_dict_for(buffer)
if len(found_dicts.split(",")) == 1 and len(arguments) == 2:
word = arguments[1]
weechat.command("","/%s addword %s" % (plugin_name,word) )
elif arguments[1] in found_dicts.split(",") and len(arguments) == 3:
word = arguments[2]
weechat.command("","/%s addword %s %s" % (plugin_name,arguments[1],word))
# get localvar for misspelled_word and suggestions from buffer or return
localvar_aspell_suggest = get_localvar_aspell_suggest(buffer)
if not localvar_aspell_suggest:
return weechat.WEECHAT_RC_OK
if arguments[0].lower() == 'addword' and len(arguments) == 1:
found_dicts = get_aspell_dict_for(buffer)
if not ":" in localvar_aspell_suggest and not "," in found_dicts:
weechat.command("","/%s addword %s" % (plugin_name,localvar_aspell_suggest))
else:
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
weechat.command("","/%s addword %s" % (plugin_name,misspelled_word))
return weechat.WEECHAT_RC_OK
if not ":" in localvar_aspell_suggest:
return weechat.WEECHAT_RC_OK
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
aspell_suggestions = aspell_suggestions.replace('/',',')
aspell_suggestion_list = aspell_suggestions.split(',')
if len(aspell_suggestion_list) == 0:
position = -1
weechat.bar_item_update('spell_correction')
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
# append an empty entry to suggestions to quit without changes.
if OPTIONS['auto_replace'].lower() == "on":
aspell_suggestion_list.append('')
position = int(position)
# cycle backwards through suggestions
if args == '/input complete_previous' or args == 'previous':
# position <= -1? go to last suggestion
if position <= -1:
position = len(aspell_suggestion_list)-1
position -= 1
# cycle forward through suggestions
else:
if position >= len(aspell_suggestion_list)-1:
position = 0
else:
position += 1
# 2 = TAB or command is called
weechat.buffer_set(buffer, 'localvar_set_spell_correction_suggest_item', '%s:%s:%s' % ('2',str(position),aspell_suggestion_list[position]))
weechat.bar_item_update('spell_correction')
return weechat.WEECHAT_RC_OK
# spell_correction_suggest_item:
def show_spell_correction_item_cb (data, item, window):
# check for root input bar!
if not window:
window = weechat.current_window()
# weechat.buffer_get_string(buffer,'localvar_spell_correction_suggest_item'):
buffer = weechat.window_get_pointer(window,"buffer")
if buffer == '':
return ''
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
if not position or not aspell_suggest_item:
return ''
config_spell_suggest_item = weechat.config_get_plugin('suggest_item')
dict_found = search_dict(buffer,position)
if dict_found:
if config_spell_suggest_item:
show_item = config_spell_suggest_item.replace('%S',aspell_suggest_item)
show_item = show_item.replace('%D',dict_found)
show_item = substitute_colors(show_item)
return '%s' % (show_item)
else:
return aspell_suggest_item
else:
if config_spell_suggest_item:
show_item = config_spell_suggest_item.replace('%S',aspell_suggest_item)
if weechat.config_get_plugin('hide_single_dict').lower() == 'off':
show_item = show_item.replace('%D',get_aspell_dict_for(buffer))
else:
show_item = show_item.replace('%D','').rstrip()
show_item = substitute_colors(show_item)
return '%s' % (show_item)
return aspell_suggest_item
# if a suggestion is selected and you edit input line, then replace misspelled word!
def input_text_changed_cb(data, signal, signal_data):
global multiline_input
if multiline_input == '1':
return weechat.WEECHAT_RC_OK
buffer = signal_data
if not buffer:
return weechat.WEECHAT_RC_OK
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
if not position or not aspell_suggest_item:
cursor_pos = weechat.buffer_get_integer(buffer,'input_pos')
if get_localvar_aspell_suggest(buffer) and cursor_pos >0: # save cursor position of misspelled word
weechat.buffer_set(buffer, 'localvar_set_current_cursor_pos', '%s' % cursor_pos)
else:
saved_cursor_pos = weechat.buffer_get_string(buffer, 'localvar_current_cursor_pos')
if saved_cursor_pos != '':
if int(cursor_pos) > int(saved_cursor_pos) + int(OPTIONS['complete_near']) + 3: # +3 to be sure!
delete_localvar_replace_mode(buffer)
return weechat.WEECHAT_RC_OK
# 1 = cursor etc., 2 = TAB, 3 = replace_mode
if tab_complete != '0':
if not aspell_suggest_item:
aspell_suggest_item = ''
weechat.buffer_set(buffer, 'localvar_set_spell_correction_suggest_item', '%s:%s:%s' % ('0',position,aspell_suggest_item))
return weechat.WEECHAT_RC_OK
if OPTIONS['auto_replace'].lower() == "on":
replace_misspelled_word(buffer)
return weechat.WEECHAT_RC_OK
# weechat.buffer_set(buffer, 'localvar_set_spell_correction_suggest_item', '%s:%s:' % ('0','-1'))
weechat.bar_item_update('spell_correction')
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
# also remove localvar_suggest_item
def replace_misspelled_word(buffer):
input_line = weechat.buffer_get_string(buffer, 'localvar_spell_correction_suggest_input_line')
if not input_line:
# remove spell_correction item
weechat.buffer_set(buffer, 'localvar_del_spell_correction_suggest_item', '')
weechat.bar_item_update('spell_correction')
weechat.bar_item_update('spell_suggestion')
return
if OPTIONS['eat_input_char'].lower() == 'off' or input_line == '':
input_pos = weechat.buffer_get_integer(buffer,'input_pos')
# check cursor position
if len(input_line) < int(input_pos) or input_line[int(input_pos)-1] == ' ' or input_line == '':
input_line = weechat.buffer_get_string(buffer, 'input')
weechat.buffer_set(buffer, 'localvar_del_spell_correction_suggest_input_line', '')
localvar_aspell_suggest = get_localvar_aspell_suggest(buffer)
# localvar_aspell_suggest = word,word2/wort,wort2
if localvar_aspell_suggest:
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
aspell_suggestions = aspell_suggestions.replace('/',',')
aspell_suggestion_list = aspell_suggestions.split(',')
else:
return
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
if not position or not aspell_suggest_item:
return
position = int(position)
input_line = input_line.replace(misspelled_word, aspell_suggestion_list[position])
if input_line[-2:] == ' ':
input_line = input_line.rstrip()
input_line = input_line + ' '
weechat.buffer_set(buffer,'input',input_line)
# set new cursor position. check if suggestion is longer or smaller than misspelled word
input_pos = weechat.buffer_get_integer(buffer,'input_pos') + 1
length_misspelled_word = len(misspelled_word)
length_suggestion_word = len(aspell_suggestion_list[position])
if length_misspelled_word < length_suggestion_word:
difference = length_suggestion_word - length_misspelled_word
new_position = input_pos + difference + 1
weechat.buffer_set(buffer,'input_pos',str(new_position))
weechat.buffer_set(buffer, 'localvar_del_spell_correction_suggest_item', '')
weechat.bar_item_update('spell_suggestion')
weechat.bar_item_update('spell_correction')
# ================================[ subroutines ]===============================
# get aspell dict for suggestion
def search_dict(buffer,position):
localvar_aspell_suggest = get_localvar_aspell_suggest(buffer)
dicts_found = localvar_aspell_suggest.count("/")
if not dicts_found:
return 0
# aspell.dict.full_name = en_GB,de_DE-neu
# localvar_dict = en_GB,de_DE-neu
dictionary = get_aspell_dict_for(buffer)
if not dictionary:
return 0
dictionary_list = dictionary.split(',')
    # more than one dict?
if len(dictionary_list) > 1:
undef,aspell_suggestions = localvar_aspell_suggest.split(':')
dictionary = aspell_suggestions.split('/')
words = 0
i = -1
for a in dictionary:
i += 1
words += a.count(',')+1
if words > int(position):
break
return dictionary_list[i]
# format of localvar aspell_suggest (using two dicts): diehs:die hs,die-hs,dies/dies,Diebs,Viehs
def get_localvar_aspell_suggest(buffer):
return weechat.buffer_get_string(buffer, 'localvar_%s_suggest' % plugin_name)
def get_aspell_dict_for(buffer):
    # this should never happen, but be safe; otherwise WeeChat will crash
if buffer == '':
return ''
if int(version) >= 0x00040100:
return weechat.info_get("%s_dict" % plugin_name, buffer)
# this is a "simple" work around and it only works for buffers with given dictionary
# no fallback for partial name like "aspell.dict.irc". Get your hands on WeeChat 0.4.1
    full_name = weechat.buffer_get_string(buffer,'full_name')
    return weechat.config_string(weechat.config_get('%s.dict.%s' % (plugin_name, full_name)))
def substitute_colors(text):
if int(version) >= 0x00040200:
return weechat.string_eval_expression(text,{},{},{})
# substitute colors in output
return re.sub(regex_color, lambda match: weechat.color(match.group(1)), text)
def get_position_and_suggest_item(buffer):
if weechat.buffer_get_string(buffer,'localvar_spell_correction_suggest_item'):
tab_complete,position,aspell_suggest_item = weechat.buffer_get_string(buffer,'localvar_spell_correction_suggest_item').split(':',2)
return (tab_complete,position,aspell_suggest_item)
else:
return ('', '', '')
def aspell_suggest_cb(data, signal, signal_data):
buffer = signal_data
if OPTIONS['replace_mode'].lower() == 'on':
localvar_aspell_suggest = get_localvar_aspell_suggest(buffer)
if localvar_aspell_suggest:
# aspell says, suggested word is also misspelled. check out if we already have a suggestion list and don't use the new misspelled word!
if weechat.buffer_get_string(buffer,'localvar_inline_suggestions'):
return weechat.WEECHAT_RC_OK
if not ":" in localvar_aspell_suggest:
return weechat.WEECHAT_RC_OK
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
aspell_suggestions = aspell_suggestions.replace('/',',')
weechat.buffer_set(buffer, 'localvar_set_inline_suggestions', '%s:%s:%s' % ('2','0',aspell_suggestions))
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
if OPTIONS['auto_pop_up_item'].lower() == 'on':
auto_suggest_cmd_cb('', buffer, '')
weechat.buffer_set(buffer, 'localvar_del_spell_correction_suggest_input_line', '')
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
def get_last_position_of_misspelled_word(misspelled_word, buffer):
input_pos = weechat.buffer_get_integer(buffer,'input_pos')
input_line = weechat.buffer_get_string(buffer, 'input')
x = input_line.rfind(misspelled_word, 0, int(input_pos))
y = x + len(misspelled_word)
return x, y, input_pos
# this is a work-around for multiline
def multiline_cb(data, signal, signal_data):
global multiline_input
multiline_input = signal_data
# if multiline_input == '1':
# buffer = weechat.window_get_pointer(weechat.current_window(),"buffer")
# input_line = weechat.buffer_get_string(buffer, 'input')
# else:
# buffer = weechat.window_get_pointer(weechat.current_window(),"buffer")
# input_line_bak = weechat.buffer_get_string(buffer, 'input')
# if input_line != input_line_bak:
# input_text_changed_cb('','',buffer)
return weechat.WEECHAT_RC_OK
# ================================[ hook_keys() ]===============================
# TAB key pressed?
def input_complete_cb(data, buffer, command):
# check if a misspelled word already exists!
localvar_aspell_suggest = get_localvar_aspell_suggest(buffer)
if not localvar_aspell_suggest and not weechat.buffer_get_string(buffer,'localvar_inline_replace_mode'):
return weechat.WEECHAT_RC_OK
# first [TAB] on a misspelled word in "replace mode"
if OPTIONS['replace_mode'].lower() == "on" and not weechat.buffer_get_string(buffer,'localvar_inline_replace_mode') and int(OPTIONS['complete_near']) >= 0:
weechat.buffer_set(buffer, 'localvar_set_inline_replace_mode', '1')
if not ":" in localvar_aspell_suggest:
return weechat.WEECHAT_RC_OK
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
begin_last_position, end_last_position, input_pos = get_last_position_of_misspelled_word(misspelled_word, buffer)
# maybe nick completion?
if begin_last_position == -1:
delete_localvar_replace_mode(buffer)
return weechat.WEECHAT_RC_OK
if input_pos - end_last_position > int(OPTIONS['complete_near']):
delete_localvar_replace_mode(buffer)
return weechat.WEECHAT_RC_OK
aspell_suggestions = aspell_suggestions.replace('/',',')
weechat.buffer_set(buffer, 'localvar_set_inline_suggestions', '%s:%s:%s' % ('2','0',aspell_suggestions))
weechat.buffer_set(buffer, 'localvar_set_save_position_of_word', '%s:%s' % (begin_last_position, end_last_position))
inline_suggestions = aspell_suggestions.split(',')
input_line = weechat.buffer_get_string(buffer, 'input')
input_line = input_line[:begin_last_position] + inline_suggestions[0] + input_line[end_last_position:]
# input_line = input_line.replace(misspelled_word, inline_suggestions[0])
word_differ = 0
if len(misspelled_word) > len(inline_suggestions[0]):
word_differ = len(misspelled_word) - len(inline_suggestions[0])
else:
word_differ = len(inline_suggestions[0]) - len(misspelled_word)
        if input_line[-2:] == '  ':  # collapse a trailing double space into one
            input_line = input_line.rstrip()
            input_line = input_line + ' '
weechat.buffer_set(buffer,'input',input_line)
input_pos = int(input_pos) + word_differ
weechat.buffer_set(buffer,'input_pos',str(input_pos))
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
# after first [TAB] on a misspelled word in "replace mode"
if OPTIONS['replace_mode'].lower() == "on" and weechat.buffer_get_string(buffer,'localvar_inline_replace_mode') == "1" and int(OPTIONS['complete_near']) >= 0:
if not ":" in weechat.buffer_get_string(buffer,'localvar_inline_suggestions'):
return weechat.WEECHAT_RC_OK
tab_complete,position,aspell_suggest_items = weechat.buffer_get_string(buffer,'localvar_inline_suggestions').split(':',2)
if not position or not aspell_suggest_items:
weechat.buffer_set(buffer, 'localvar_del_inline_replace_mode', '')
return weechat.WEECHAT_RC_OK
inline_suggestions = aspell_suggest_items.split(',')
position = int(position)
previous_position = position
# cycle backwards through suggestions
if command == '/input complete_previous':
# position <= -1? go to last suggestion
if position <= -1:
position = len(inline_suggestions)-1
else:
position -= 1
# cycle forward through suggestions
elif command == '/input complete_next':
if position >= len(inline_suggestions)-1:
position = 0
else:
position += 1
begin_last_position, end_last_position, input_pos = get_last_position_of_misspelled_word(inline_suggestions[previous_position], buffer)
if input_pos - end_last_position > int(OPTIONS['complete_near']):
delete_localvar_replace_mode(buffer)
return weechat.WEECHAT_RC_OK
input_line = weechat.buffer_get_string(buffer, 'input')
input_line = input_line[:begin_last_position] + inline_suggestions[position] + input_line[end_last_position:]
# input_line = input_line.replace(inline_suggestions[previous_position], inline_suggestions[position])
word_differ = 0
if len(inline_suggestions[previous_position]) > len(inline_suggestions[position]):
word_differ = len(inline_suggestions[previous_position]) - len(inline_suggestions[position])
else:
word_differ = len(inline_suggestions[position]) - len(inline_suggestions[previous_position])
        if input_line[-2:] == '  ':  # collapse a trailing double space into one
            input_line = input_line.rstrip()
            input_line = input_line + ' '
weechat.buffer_set(buffer,'input',input_line)
input_pos = int(input_pos) + word_differ
weechat.buffer_set(buffer,'input_pos',str(input_pos))
weechat.buffer_set(buffer, 'localvar_set_inline_suggestions', '%s:%s:%s' % ('2',str(position),aspell_suggest_items))
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
if int(OPTIONS['complete_near']) > 0:
if not ":" in localvar_aspell_suggest:
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
begin_last_position, end_last_position, input_pos = get_last_position_of_misspelled_word(misspelled_word, buffer)
if input_pos - end_last_position > int(OPTIONS['complete_near']):
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
weechat.buffer_set(buffer, 'localvar_set_spell_correction_suggest_item', '%s:%s:%s' % ('2',position,aspell_suggest_item))
auto_suggest_cmd_cb('', buffer, command)
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
def delete_localvar_replace_mode(buffer):
if OPTIONS['replace_mode'].lower() == "on":
weechat.buffer_set(buffer, 'localvar_del_inline_replace_mode', '')
weechat.buffer_set(buffer, 'localvar_del_inline_suggestions', '')
weechat.buffer_set(buffer, 'localvar_del_save_position_of_word', '')
weechat.buffer_set(buffer, 'localvar_del_current_cursor_pos', '')
weechat.bar_item_update('spell_suggestion')
# if a suggestion is selected and you press [RETURN], the misspelled word is replaced!
def input_return_cb(data, signal, signal_data):
buffer = signal
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
if not position or not aspell_suggest_item:
delete_localvar_replace_mode(buffer)
return weechat.WEECHAT_RC_OK
if OPTIONS['auto_replace'].lower() == "on" and aspell_suggest_item:
replace_misspelled_word(buffer)
delete_localvar_replace_mode(buffer)
return weechat.WEECHAT_RC_OK
# /input delete_*
def input_delete_cb(data, signal, signal_data):
buffer = signal
delete_localvar_replace_mode(buffer)
weechat.buffer_set(buffer, 'localvar_del_spell_correction_suggest_item', '')
weechat.buffer_set(buffer, 'localvar_del_spell_correction_suggest_input_line', '')
weechat.bar_item_update('spell_correction')
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
# /input move_* (cursor position)
def input_move_cb(data, signal, signal_data):
buffer = signal
if OPTIONS['replace_mode'].lower() == "on" and weechat.buffer_get_string(buffer,'localvar_inline_replace_mode') == "1":
delete_localvar_replace_mode(buffer)
weechat.buffer_set(buffer, 'localvar_del_spell_correction_suggest_item', '')
return weechat.WEECHAT_RC_OK
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
localvar_aspell_suggest = get_localvar_aspell_suggest(buffer)
    if not localvar_aspell_suggest or ":" not in localvar_aspell_suggest:
return weechat.WEECHAT_RC_OK
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
    if aspell_suggest_item not in aspell_suggestions:
aspell_suggestion_list = aspell_suggestions.split(',',1)
weechat.buffer_set(buffer, 'localvar_set_spell_correction_suggest_item', '%s:%s:%s' % ('1',0,aspell_suggestion_list[0]))
weechat.bar_item_update('spell_correction')
return weechat.WEECHAT_RC_OK
weechat.buffer_set(buffer, 'localvar_set_spell_correction_suggest_item', '%s:%s:%s' % ('1',position,aspell_suggest_item))
return weechat.WEECHAT_RC_OK
def show_spell_suggestion_item_cb (data, item, window):
buffer = weechat.window_get_pointer(window,"buffer")
if buffer == '':
return ''
if OPTIONS['replace_mode'].lower() == "on":
if not weechat.buffer_get_string(buffer,'localvar_inline_suggestions'):
return ''
tab_complete,position,aspell_suggest_items = weechat.buffer_get_string(buffer,'localvar_inline_suggestions').split(':',2)
localvar_aspell_suggest = "dummy:%s" % aspell_suggest_items
# return aspell_suggest_items
else:
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
localvar_aspell_suggest = get_localvar_aspell_suggest(buffer)
# localvar_aspell_suggest = word,word2/wort,wort2
if localvar_aspell_suggest:
try:
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
except ValueError: # maybe no suggestion for misspelled word. then go back
return ''
aspell_suggestions_orig = aspell_suggestions
aspell_suggestions = aspell_suggestions.replace('/',',')
aspell_suggestion_list = aspell_suggestions.split(',')
if not position:
return ''
if position == "-1":
return aspell_suggestions_orig
if int(position) < len(aspell_suggestion_list):
reset_color = weechat.color('reset')
color = weechat.color(weechat.config_color(weechat.config_get("%s.color.misspelled" % plugin_name)))
new_word = aspell_suggestion_list[int(position)].replace(aspell_suggestion_list[int(position)],'%s%s%s' % (color, aspell_suggestion_list[int(position)], reset_color))
aspell_suggestion_list[int(position)] = new_word # replace word with colored word
aspell_suggestions_orig = ','.join(map(str, aspell_suggestion_list))
else:
return ''
return aspell_suggestions_orig
def window_switch_cb(data, signal, signal_data):
weechat.bar_item_update('spell_correction')
return weechat.WEECHAT_RC_OK
def buffer_switch_cb(data, signal, signal_data):
weechat.bar_item_update('spell_correction')
return weechat.WEECHAT_RC_OK
# ================================[ check for nick ]===============================
def weechat_nicklist_search_nick(buffer, nick):
return weechat.nicklist_search_nick(buffer, "", nick)
# ================================[ main ]===============================
if __name__ == "__main__":
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, '', ''):
version = weechat.info_get("version_number", "") or 0
if int(version) < 0x00040000:
weechat.prnt('','%s%s %s' % (weechat.prefix('error'),SCRIPT_NAME,': needs version 0.4.0 or higher'))
weechat.command('','/wait 1ms /python unload %s' % SCRIPT_NAME)
if int(version) < 0x02050000:
plugin_name = old_plugin_name
SCRIPT_HELP = """addword : add a word to personal aspell dictionary (does not work with multiple dicts)
previous : to cycle though previous suggestion
replace : to replace misspelled word
Quick start:
You should add script item "spell_correction" to a bar (i suggest using the input_bar).
On an misspelled word, press TAB to cycle through suggestions. Press any key on suggestion
to replace misspelled word with current displayed suggestion.
Also check script options: /fset %(s)s
IMPORTANT:
"%(p)s.check.suggestions" option has to be set to a value >= 0 (default: -1 (off)).
"%(p)s.color.misspelled" option is used to highlight current suggestion in "%(p)s_suggestion" item
Using "%(p)s.check.real_time" the nick-completion will not work. All misspelled words
in input_line have to be replaced first.
""" %dict(p=plugin_name, s=SCRIPT_NAME)
weechat.hook_command(SCRIPT_NAME, SCRIPT_DESC, 'addword <word>||previous||replace',
SCRIPT_HELP+
'',
'previous|replace|addword',
'auto_suggest_cmd_cb', '')
init_options()
weechat.hook_command_run('/input delete_*', 'input_delete_cb', '')
weechat.hook_command_run('/input move*', 'input_move_cb', '')
weechat.hook_signal ('input_text_changed', 'input_text_changed_cb', '')
# multiline workaround
weechat.hook_signal('input_flow_free', 'multiline_cb', '')
weechat.hook_signal ('%s_suggest' % plugin_name, 'aspell_suggest_cb', '')
weechat.hook_signal ('buffer_switch', 'buffer_switch_cb','')
weechat.hook_signal ('window_switch', 'window_switch_cb','')
if OPTIONS['catch_input_completion'].lower() == "on":
Hooks['catch_input_completion'] = weechat.hook_command_run('/input complete*', 'input_complete_cb', '')
Hooks['catch_input_return'] = weechat.hook_command_run('/input return', 'input_return_cb', '')
weechat.hook_config('plugins.var.python.' + SCRIPT_NAME + '.*', 'toggle_refresh', '')
weechat.bar_item_new('spell_correction', 'show_spell_correction_item_cb', '')
weechat.bar_item_new('spell_suggestion', 'show_spell_suggestion_item_cb', '')
|
{
"content_hash": "d7ff217f20e4c959b32624bef31f9b29",
"timestamp": "",
"source": "github",
"line_count": 658,
"max_line_length": 236,
"avg_line_length": 47.11854103343465,
"alnum_prop": 0.6388853051219198,
"repo_name": "MatthewCox/dotfiles",
"id": "155c39ab7d75fd1af22d880ee4e98bdee9752871",
"size": "33716",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "weechat/python/spell_correction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "42448"
},
{
"name": "Perl",
"bytes": "174542"
},
{
"name": "Python",
"bytes": "568864"
},
{
"name": "Ruby",
"bytes": "1101"
},
{
"name": "Shell",
"bytes": "46239"
},
{
"name": "Vim script",
"bytes": "9381"
}
],
"symlink_target": ""
}
|
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
import os
from django.core.management.base import BaseCommand
from optparse import make_option
from py3compat import PY2
from snisi_core.models.Entities import (
Entity, HealthEntity, AdministrativeEntity, EntityType)
if PY2:
import unicodecsv as csv
else:
import csv
logger = logging.getLogger(__name__)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-f',
help='CSV file',
action='store',
dest='filename'),
)
def handle(self, *args, **options):
if not os.path.exists(options.get('filename') or ""):
logger.error("CSV file `{}` does not exist."
.format(options.get('filename')))
return
headers = ['action', 'slug', 'parent', 'name', 'type',
'has_urenam', 'has_urenas', 'has_ureni', 'main_entity']
input_csv_file = open(options.get('filename'), 'r')
csv_reader = csv.DictReader(input_csv_file, fieldnames=headers)
has_matrix = {
'y': True,
'n': False
}
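        # Hypothetical input rows (column order matches `headers` above):
        #   create,HC0001,ML0001,SOME HEALTH CENTER,health_center,y,n,n,
        #   update,HC0001,,NEW NAME,,,,,ML0002
        #   disable,HC0002,,,,,,,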
for entry in csv_reader:
if csv_reader.line_num == 1:
continue
logger.debug(entry)
if entry.get('action') == 'create':
logger.info("Creating {}".format(entry.get('name')))
parent = Entity.get_or_none(entry.get('parent'))
etype = EntityType.get_or_none(entry.get('type'))
hcls = HealthEntity if etype.slug.startswith('health_') \
else AdministrativeEntity
entity = hcls.objects.create(
slug=entry.get('slug').upper(),
name=entry.get('name').upper(),
type=etype,
parent=parent)
if entry.get('has_urenam') == 'y':
entity.has_urenam = True
if entry.get('has_urenas') == 'y':
entity.has_urenas = True
if entry.get('has_ureni') == 'y':
entity.has_ureni = True
entity.save()
elif entry.get('action') == 'disable':
entity = Entity.get_or_none(entry.get('slug'))
logger.info("Disabling {}".format(entity))
entity.active = False
entity.save()
elif entry.get('action') == 'enable':
entity = Entity.get_or_none(entry.get('slug'))
logger.info("Enabling {}".format(entity))
entity.active = True
entity.save()
elif entry.get('action') == 'update':
entity = Entity.get_or_none(entry.get('slug'))
logger.info("Updating {}".format(entity))
if entry.get('name'):
entity.name = entry.get('name').upper()
entity.save()
if entry.get('type'):
etype = EntityType.get_or_none(entry.get('type'))
entity.type = etype
entity.save()
if entry.get('parent'):
parent = Entity.get_or_none(entry.get('parent'))
entity.parent = parent
entity.save()
# update main_entity ?
if entry.get('main_entity'):
main_entity = Entity.get_or_none(entry.get('main_entity'))
entity.main_entity = main_entity
entity.save()
if entry.get('has_urenam'):
entity.has_urenam = has_matrix.get(
entry.get('has_urenam').lower(), False)
entity.save()
if entry.get('has_urenas'):
entity.has_urenas = has_matrix.get(
entry.get('has_urenas').lower(), False)
entity.save()
if entry.get('has_ureni'):
entity.has_ureni = has_matrix.get(
entry.get('has_ureni').lower(), False)
entity.save()
logger.info("Done")
|
{
"content_hash": "c8115549100a8da906dd1309a0faf0b6",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 78,
"avg_line_length": 37.495652173913044,
"alnum_prop": 0.4837662337662338,
"repo_name": "yeleman/snisi",
"id": "c5dc3192fc0cbdc5a86db50020425f5df9fb08b4",
"size": "4391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snisi_maint/management/commands/update-entities-from-std-csv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "410022"
},
{
"name": "HTML",
"bytes": "1007275"
},
{
"name": "Java",
"bytes": "7211"
},
{
"name": "JavaScript",
"bytes": "292583"
},
{
"name": "Python",
"bytes": "2237855"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
}
|
from geocode import getGeocodeLocation
import json
import httplib2
import sys
import codecs
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
foursquare_client_id = "PASTE_CLIENT_ID_HERE"
foursquare_client_secret = "PASTE_CLIENT_SECRET_HERE"
def findARestaurant(mealType,location):
#1. Use getGeocodeLocation to get the latitude and longitude coordinates of the location string.
latitude, longitude = getGeocodeLocation(location)
#2. Use foursquare API to find a nearby restaurant with the latitude, longitude, and mealType strings.
#HINT: format for url will be something like https://api.foursquare.com/v2/venues/search?client_id=CLIENT_ID&client_secret=CLIENT_SECRET&v=20130815&ll=40.7,-74&query=sushi
url = ('https://api.foursquare.com/v2/venues/search?client_id=%s&client_secret=%s&v=20130815&ll=%s,%s&query=%s' % (foursquare_client_id, foursquare_client_secret,latitude,longitude,mealType))
h = httplib2.Http()
result = json.loads(h.request(url,'GET')[1])
if result['response']['venues']:
#3. Grab the first restaurant
restaurant = result['response']['venues'][0]
venue_id = restaurant['id']
restaurant_name = restaurant['name']
restaurant_address = restaurant['location']['formattedAddress']
address = ""
for i in restaurant_address:
address += i + " "
restaurant_address = address
        #4. Get a 300x300 picture of the restaurant using the venue_id (you can change this by altering the 300x300 value in the URL or replacing it with 'original' to get the original picture)
        url = ('https://api.foursquare.com/v2/venues/%s/photos?client_id=%s&v=20150603&client_secret=%s' % (venue_id,foursquare_client_id,foursquare_client_secret))
result = json.loads(h.request(url, 'GET')[1])
#5. Grab the first image
if result['response']['photos']['items']:
firstpic = result['response']['photos']['items'][0]
prefix = firstpic['prefix']
suffix = firstpic['suffix']
imageURL = prefix + "300x300" + suffix
else:
#6. if no image available, insert default image url
imageURL = "http://pixabay.com/get/8926af5eb597ca51ca4c/1433440765/cheeseburger-34314_1280.png?direct"
#7. return a dictionary containing the restaurant name, address, and image url
restaurantInfo = {'name':restaurant_name, 'address':restaurant_address, 'image':imageURL}
print "Restaurant Name: %s" % restaurantInfo['name']
print "Restaurant Address: %s" % restaurantInfo['address']
print "Image: %s \n" % restaurantInfo['image']
return restaurantInfo
else:
print "No Restaurants Found for %s" % location
return "No Restaurants Found"
if __name__ == '__main__':
findARestaurant("Pizza", "Tokyo, Japan")
findARestaurant("Tacos", "Jakarta, Indonesia")
findARestaurant("Tapas", "Maputo, Mozambique")
findARestaurant("Falafel", "Cairo, Egypt")
findARestaurant("Spaghetti", "New Delhi, India")
findARestaurant("Cappuccino", "Geneva, Switzerland")
findARestaurant("Sushi", "Los Angeles, California")
findARestaurant("Steak", "La Paz, Bolivia")
findARestaurant("Gyros", "Sydney Australia")
|
{
"content_hash": "8533d23643d39815e9085d622250ea16",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 192,
"avg_line_length": 47.43076923076923,
"alnum_prop": 0.7314304249108011,
"repo_name": "tuanvu216/udacity-course",
"id": "12eb11dc8a7b4c6472aa1980482795e05fb406f9",
"size": "3083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "designing-restful-apis/Lesson_2/12_Make_Your_Own_Mashup/solution_code/findARestaurant.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3736"
},
{
"name": "HTML",
"bytes": "143388"
},
{
"name": "JavaScript",
"bytes": "169689"
},
{
"name": "Jupyter Notebook",
"bytes": "3237655"
},
{
"name": "Python",
"bytes": "400129"
},
{
"name": "Ruby",
"bytes": "448"
},
{
"name": "Shell",
"bytes": "538"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import math
from st2common.runners.base_action import Action
from six.moves import range
class PascalRowAction(Action):
def run(self, **kwargs):
# We call list values to verify that log messages are not duplicated when
# datastore service is used
try:
self.action_service.list_values()
except Exception:
pass
self.logger.info("test info log message")
self.logger.debug("test debug log message")
self.logger.error("test error log message")
return PascalRowAction._compute_pascal_row(**kwargs)
@staticmethod
def _compute_pascal_row(row_index=0):
print("Pascal row action")
if row_index == "a":
return False, "This is suppose to fail don't worry!!"
elif row_index == "b":
return None
elif row_index == "complex_type":
result = PascalRowAction()
return (False, result)
elif row_index == "c":
return False, None
elif row_index == "d":
return "succeeded", [1, 2, 3, 4]
elif row_index == "e":
return [1, 2]
elif row_index == 5:
return [
math.factorial(row_index)
/ (math.factorial(i) * math.factorial(row_index - i))
for i in range(row_index + 1)
]
elif row_index == "f":
raise ValueError("Duplicate traceback test")
else:
return True, [
math.factorial(row_index)
/ (math.factorial(i) * math.factorial(row_index - i))
for i in range(row_index + 1)
]
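    # Worked example (hypothetical): row_index=4 falls through to the final branch and
    # returns (True, [1.0, 4.0, 6.0, 4.0, 1.0]), since "/" here is true division.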
|
{
"content_hash": "1664e05e88afd9217c3471ff14c5a932",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 81,
"avg_line_length": 32.24528301886792,
"alnum_prop": 0.545348156816852,
"repo_name": "StackStorm/st2",
"id": "cacb89d00532bd09f9b22a260b0090ab6bdf4da2",
"size": "2337",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "st2tests/st2tests/resources/packs/pythonactions/actions/pascal_row.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
}
|
import sys
from stix.core import STIXPackage
def parse_stix(pkg):
print("== Campaign ==")
for camp in pkg.campaigns:
print("---")
print("Campaign: " + str(camp.title))
for tactic in camp.related_ttps:
ttp = pkg.find(tactic.item.idref)
print("RelatedTTP: " + str(ttp.title))
print("Relationship: " + str(tactic.relationship))
for target in ttp.victim_targeting.targeted_information:
print("Target: " + str(target))
return 0
if __name__ == '__main__':
    try:
        fname = sys.argv[1]
    except IndexError:
        exit(1)
fd = open(fname)
stix_pkg = STIXPackage.from_xml(fd)
parse_stix(stix_pkg)
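# Hypothetical usage sketch (file name assumed):
#   python victim-targeting_consumer.py stix_campaign_package.xml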
|
{
"content_hash": "83b5446c3c2e138f8b25fb306affcb57",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 68,
"avg_line_length": 25.25,
"alnum_prop": 0.562942008486563,
"repo_name": "jburns12/stixproject.github.io",
"id": "058114bc6fdc401f46a5b40265c52677ee863b54",
"size": "834",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "documentation/idioms/victim-targeting/victim-targeting_consumer.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10059"
},
{
"name": "HTML",
"bytes": "18146823"
},
{
"name": "JavaScript",
"bytes": "3731"
},
{
"name": "Ruby",
"bytes": "24306"
},
{
"name": "Shell",
"bytes": "3599"
}
],
"symlink_target": ""
}
|
from exp_tools import Trial
from psychopy import visual, event
class FlashInstructions(Trial):
"""
Class that runs an Instructions trial. This is a parent for FlashInstructionsPractice.
    A FlashInstructions trial draws a *single* text screen and records keyboard responses
    (response_keys), scanner pulses, and quit keys (escape, space). Each text screen should be initialized separately.
Assumes that the actual visual objects (visual.TextStim) are initiated in the Session. This greatly improves
speed, because rendering is done at the start of the experiment rather than at the start of the trial.
Parameters
----------
ID: int
ID number of trial
block_trial_ID: int
Number of trial within the current block
parameters: dict
Dictionary containing parameters that specify what is drawn. Currently, only supports "draw_crosses" as a
key, with boolean value.
phase_durations : list
        List specifying the duration of each phase of the trial
session: exp_tools.Session instance
screen: psychopy.visual.Window instance
tracker: pygaze.EyeTracker object
Passed on to parent class
"""
def __init__(self, ID, parameters={}, phase_durations=[], session=None, screen=None, tracker=None):
super(FlashInstructions, self).__init__(parameters=parameters, phase_durations=phase_durations,
session=session, screen=screen, tracker=tracker)
self.ID = ID
self.phase_durations = phase_durations
self.start_time = self.end_time = 0
self.response_time = 0
def draw(self):
self.session.current_instruction.draw()
super(FlashInstructions, self).draw()
def event(self):
"""
Only listen for space (skip instructions), escape (kill session), and scanner pulses
"""
for i, (ev, ev_time) in enumerate(event.getKeys(timeStamped=self.session.clock)):
# ev_time is the event timestamp relative to the Session Clock
if len(ev) > 0:
if ev in ['esc', 'escape']:
self.events.append([-99, ev_time, 'escape: user killed session'])
self.stopped = True
self.session.stopped = True
print('Session stopped!')
elif ev == 'space' or ev in self.session.response_keys:
self.response_time = ev_time
self.events.append([ev, ev_time - self.start_time])
self.stopped = True
elif ev == 't': # Scanner pulse
self.events.append([99, ev_time, 'pulse'])
def run(self):
super(FlashInstructions, self).run()
self.start_time = self.session.clock.getTime()
while not self.stopped:
self.end_time = self.session.clock.getTime()
if self.end_time - self.start_time >= self.phase_durations[0]:
self.stopped = True
# check for keyboard (events) and draw
if not self.stopped:
self.event()
self.draw()
self.stop()
class FlashInstructionsPractice(FlashInstructions):
"""
Same as FlashInstructions, but allows moving between blocks
"""
def __init__(self, ID, parameters={}, phase_durations=[], session=None, screen=None, tracker=None):
super(FlashInstructionsPractice, self).__init__(ID=ID, parameters=parameters,
phase_durations=phase_durations, session=session,
screen=screen, tracker=tracker)
def event(self):
"""
Only listen for space (skip instructions), escape (kill session), and scanner pulses
"""
for i, (ev, ev_time) in enumerate(event.getKeys(timeStamped=self.session.clock)):
# ev_time is the event timestamp relative to the Session Clock
if len(ev) > 0:
if ev in ['esc', 'escape']:
self.events.append([-99, ev_time, 'escape: user killed session'])
self.stopped = True
self.session.stopped = True
print('Session stopped!')
elif ev == 'space' or ev in self.session.response_keys:
self.events.append([ev, ev_time - self.start_time])
self.session.stop_instructions = False
self.stopped = True
elif ev == 't': # Scanner pulse
self.events.append([99, ev_time, 'pulse'])
elif ev == 'left':
if not self.session.current_block == 0:
self.events.append([-1, ev_time, 'user restarts previous block'])
self.session.current_block -= 1
self.stopped = True
self.session.stop_instructions = True
elif ev == 'right':
if not self.session.current_block == 7:
self.events.append([-2, ev_time, 'user fast forwards to next block'])
self.session.current_block += 1
self.stopped = True
self.session.stop_instructions = True
class FlashEndBlockInstructions(FlashInstructions):
def __init__(self, ID, parameters={}, phase_durations=[], session=None, screen=None, tracker=None):
super(FlashEndBlockInstructions, self).__init__(ID=ID, parameters=parameters,
phase_durations=phase_durations, session=session,
screen=screen, tracker=tracker)
self.stop_key = None
# def draw(self):
#
# self.session.block_end_instructions[0].draw()
#
# super(FlashInstructions, self).draw()
def event(self):
for i, (ev, ev_time) in enumerate(event.getKeys(timeStamped=self.session.clock)):
# ev_time is the event timestamp relative to the Session Clock
if len(ev) > 0:
if ev in ['esc', 'escape']:
self.events.append([-99, ev_time, 'escape: user killed session'])
self.stopped = True
self.session.stopped = True
print('Session stopped!')
elif ev == 'space' or ev == 'r':
self.stop_key = ev
self.response_time = ev_time
self.events.append([ev, ev_time - self.start_time])
self.stopped = True
elif ev == 't': # Scanner pulse
self.events.append([99, ev_time, 'pulse'])
class FlashInstructionsNoResp(FlashInstructions):
"""
Same as FlashInstructions but participant cannot respond / skip
"""
def __init__(self, ID, parameters={}, phase_durations=[], session=None, screen=None, tracker=None):
super(FlashInstructionsNoResp, self).__init__(ID=ID, parameters=parameters,
phase_durations=phase_durations, session=session,
screen=screen, tracker=tracker)
def event(self):
"""
Only listen for space (skip instructions), escape (kill session), and scanner pulses
"""
for i, (ev, ev_time) in enumerate(event.getKeys(timeStamped=self.session.clock)):
# ev_time is the event timestamp relative to the Session Clock
if len(ev) > 0:
if ev in ['esc', 'escape']:
self.events.append([-99, ev_time, 'escape: user killed session'])
self.stopped = True
self.session.stopped = True
print('Session stopped!')
elif ev == 'equal':
self.events.append([ev, ev_time - self.start_time])
self.session.stop_instructions = False
self.stopped = True
elif ev == 't': # Scanner pulse
self.events.append([99, ev_time, 'pulse'])
|
{
"content_hash": "6f42ae228ae37871cbb329222414bc19",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 113,
"avg_line_length": 41.1044776119403,
"alnum_prop": 0.5498668603243767,
"repo_name": "StevenM1/flashtask",
"id": "f7a446188d1643b189372753a2a7b2e2b0d8d757",
"size": "8302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FlashInstructions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "22917045"
},
{
"name": "Mathematica",
"bytes": "879590"
},
{
"name": "Python",
"bytes": "335678"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('server', '0010_team_courseorder'),
]
operations = [
migrations.AddField(
model_name='game',
name='initialVenueId',
field=models.CharField(default=b'', max_length=30, verbose_name=b'inital venue id'),
),
]
|
{
"content_hash": "bc5bb28947aaaf873277b0e03c5c89c3",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 96,
"avg_line_length": 23.61111111111111,
"alnum_prop": 0.6070588235294118,
"repo_name": "zackzachariah/scavenger",
"id": "8981d6d31c3d2cd5322ddd27ea42f1d2c1dfd136",
"size": "449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/migrations/0011_game_initialvenueid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1692"
},
{
"name": "HTML",
"bytes": "10236"
},
{
"name": "Python",
"bytes": "29109"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Source',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_active', models.BooleanField(default=True, verbose_name='is active')),
('name', models.CharField(max_length=100, verbose_name='name')),
('slug', models.SlugField(unique=True, verbose_name='slug')),
('priority', models.SmallIntegerField(default=0, help_text='Set the priority in case of cross posts. Higher is better.', verbose_name='priority')),
('show_min', models.PositiveSmallIntegerField(default=0, help_text='Show at least this many entries', verbose_name='Show at least')),
('source', models.CharField(blank=True, max_length=10, verbose_name='source', choices=[(b'facebook', 'Facebook'), (b'twitter', 'Twitter'), (b'blog', 'Elephantblog'), (b'rss', 'RSS')])),
('data', models.TextField(verbose_name='configuration data', blank=True)),
],
options={
'ordering': ['priority', 'name'],
'verbose_name': 'source',
'verbose_name_plural': 'sources',
},
),
migrations.CreateModel(
name='Story',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_active', models.BooleanField(default=True, verbose_name='is active')),
('timestamp', models.DateTimeField(default=django.utils.timezone.now, verbose_name='timestamp')),
('object_url', models.URLField(unique=True, verbose_name='object URL')),
('title', models.CharField(max_length=1000, verbose_name='title')),
('author', models.CharField(max_length=100, verbose_name='author', blank=True)),
('body', models.TextField(help_text='Content of the story. May contain HTML.', verbose_name='body', blank=True)),
('image_url', models.CharField(max_length=1000, verbose_name='image URL', blank=True)),
('source', models.ForeignKey(related_name='stories', verbose_name='source', to='newswall.Source')),
],
options={
'ordering': ['-timestamp'],
'verbose_name': 'story',
'verbose_name_plural': 'stories',
},
),
]
|
{
"content_hash": "f41d1248359311e2fde6d2b0ef106de7",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 201,
"avg_line_length": 53.64,
"alnum_prop": 0.5764354958985831,
"repo_name": "michaelkuty/django-newswall",
"id": "2d2434609ee2fb3bb9dabf8f16984376ca1bdf3e",
"size": "2706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "newswall/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3397"
},
{
"name": "Python",
"bytes": "53508"
}
],
"symlink_target": ""
}
|
import os
from binascii import hexlify
from torba.coin.bitcoinsegwit import MainNetLedger
from torba.client.wallet import Wallet
from client_tests.unit.test_transaction import get_transaction, get_output
from client_tests.unit.test_headers import BitcoinHeadersTestCase, block_bytes
class MockNetwork:
def __init__(self, history, transaction):
self.history = history
self.transaction = transaction
self.address = None
self.get_history_called = []
self.get_transaction_called = []
self.is_connected = False
async def get_history(self, address):
self.get_history_called.append(address)
self.address = address
return self.history
async def get_merkle(self, txid, height):
return {'merkle': ['abcd01'], 'pos': 1}
async def get_transaction(self, tx_hash):
self.get_transaction_called.append(tx_hash)
return self.transaction[tx_hash]
class LedgerTestCase(BitcoinHeadersTestCase):
async def asyncSetUp(self):
self.ledger = MainNetLedger({
'db': MainNetLedger.database_class(':memory:'),
'headers': MainNetLedger.headers_class(':memory:')
})
await self.ledger.db.open()
async def asyncTearDown(self):
await self.ledger.db.close()
def make_header(self, **kwargs):
header = {
'bits': 486604799,
'block_height': 0,
'merkle_root': b'4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b',
'nonce': 2083236893,
'prev_block_hash': b'0000000000000000000000000000000000000000000000000000000000000000',
'timestamp': 1231006505,
'version': 1
}
header.update(kwargs)
header['merkle_root'] = header['merkle_root'].ljust(64, b'a')
header['prev_block_hash'] = header['prev_block_hash'].ljust(64, b'0')
return self.ledger.headers.serialize(header)
def add_header(self, **kwargs):
serialized = self.make_header(**kwargs)
self.ledger.headers.io.seek(0, os.SEEK_END)
self.ledger.headers.io.write(serialized)
self.ledger.headers._size = None
class TestSynchronization(LedgerTestCase):
async def test_update_history(self):
account = self.ledger.account_class.generate(self.ledger, Wallet(), "torba")
address = await account.receiving.get_or_create_usable_address()
address_details = await self.ledger.db.get_address(address=address)
self.assertEqual(address_details['history'], None)
self.add_header(block_height=0, merkle_root=b'abcd04')
self.add_header(block_height=1, merkle_root=b'abcd04')
self.add_header(block_height=2, merkle_root=b'abcd04')
self.add_header(block_height=3, merkle_root=b'abcd04')
self.ledger.network = MockNetwork([
{'tx_hash': 'abcd01', 'height': 0},
{'tx_hash': 'abcd02', 'height': 1},
{'tx_hash': 'abcd03', 'height': 2},
], {
'abcd01': hexlify(get_transaction(get_output(1)).raw),
'abcd02': hexlify(get_transaction(get_output(2)).raw),
'abcd03': hexlify(get_transaction(get_output(3)).raw),
})
await self.ledger.update_history(address, '')
self.assertEqual(self.ledger.network.get_history_called, [address])
self.assertEqual(self.ledger.network.get_transaction_called, ['abcd01', 'abcd02', 'abcd03'])
address_details = await self.ledger.db.get_address(address=address)
self.assertEqual(
address_details['history'],
'252bda9b22cc902ca2aa2de3548ee8baf06b8501ff7bfb3b0b7d980dbd1bf792:0:'
'ab9c0654dd484ac20437030f2034e25dcb29fc507e84b91138f80adc3af738f9:1:'
'a2ae3d1db3c727e7d696122cab39ee20a7f81856dab7019056dd539f38c548a0:2:'
)
self.ledger.network.get_history_called = []
self.ledger.network.get_transaction_called = []
await self.ledger.update_history(address, '')
self.assertEqual(self.ledger.network.get_history_called, [address])
self.assertEqual(self.ledger.network.get_transaction_called, [])
self.ledger.network.history.append({'tx_hash': 'abcd04', 'height': 3})
self.ledger.network.transaction['abcd04'] = hexlify(get_transaction(get_output(4)).raw)
self.ledger.network.get_history_called = []
self.ledger.network.get_transaction_called = []
await self.ledger.update_history(address, '')
self.assertEqual(self.ledger.network.get_history_called, [address])
self.assertEqual(self.ledger.network.get_transaction_called, ['abcd04'])
address_details = await self.ledger.db.get_address(address=address)
self.assertEqual(
address_details['history'],
'252bda9b22cc902ca2aa2de3548ee8baf06b8501ff7bfb3b0b7d980dbd1bf792:0:'
'ab9c0654dd484ac20437030f2034e25dcb29fc507e84b91138f80adc3af738f9:1:'
'a2ae3d1db3c727e7d696122cab39ee20a7f81856dab7019056dd539f38c548a0:2:'
'047cf1d53ef68f0fd586d46f90c09ff8e57a4180f67e7f4b8dd0135c3741e828:3:'
)
class MocHeaderNetwork:
def __init__(self, responses):
self.responses = responses
async def get_headers(self, height, blocks):
return self.responses[height]
class BlockchainReorganizationTests(LedgerTestCase):
async def test_1_block_reorganization(self):
self.ledger.network = MocHeaderNetwork({
20: {'height': 20, 'count': 5, 'hex': hexlify(
self.get_bytes(after=block_bytes(20), upto=block_bytes(5))
)},
25: {'height': 25, 'count': 0, 'hex': b''}
})
headers = self.ledger.headers
await headers.connect(0, self.get_bytes(upto=block_bytes(20)))
self.add_header(block_height=len(headers))
self.assertEqual(headers.height, 20)
await self.ledger.receive_header([{
'height': 21, 'hex': hexlify(self.make_header(block_height=21))
}])
async def test_3_block_reorganization(self):
self.ledger.network = MocHeaderNetwork({
20: {'height': 20, 'count': 5, 'hex': hexlify(
self.get_bytes(after=block_bytes(20), upto=block_bytes(5))
)},
21: {'height': 21, 'count': 1, 'hex': hexlify(self.make_header(block_height=21))},
22: {'height': 22, 'count': 1, 'hex': hexlify(self.make_header(block_height=22))},
25: {'height': 25, 'count': 0, 'hex': b''}
})
headers = self.ledger.headers
await headers.connect(0, self.get_bytes(upto=block_bytes(20)))
self.add_header(block_height=len(headers))
self.add_header(block_height=len(headers))
self.add_header(block_height=len(headers))
self.assertEqual(headers.height, 22)
await self.ledger.receive_header(({
'height': 23, 'hex': hexlify(self.make_header(block_height=23))
},))
|
{
"content_hash": "06e1da6968a37bac1a01ed5a35aa9378",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 100,
"avg_line_length": 42.09036144578313,
"alnum_prop": 0.64219264348075,
"repo_name": "lbryio/lbry",
"id": "0e077b441e8ce6e9a1cce91898819c4767279434",
"size": "6987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "torba/tests/client_tests/unit/test_ledger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3550"
},
{
"name": "HTML",
"bytes": "165150"
},
{
"name": "Makefile",
"bytes": "656"
},
{
"name": "Python",
"bytes": "2099913"
},
{
"name": "Shell",
"bytes": "1730"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.urls import reverse
# Create your tests here.
import datetime
from django.utils import timezone
from .models import Question
class QuestionMethodTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() should return False for questions whose
pub_date is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() should return False for questions whose
pub_date is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=30)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() should return True for questions whose
pub_date is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=1)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
def create_question(question_text, days):
"""
Creates a question with the given `question_text` and published the
given number of `days` offset to now (negative for questions published
in the past, positive for questions that have yet to be published).
"""
time = timezone.now() + datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text, pub_date=time)
class QuestionViewTests(TestCase):
def test_index_view_with_no_questions(self):
"""
If no questions exist, an appropriate message should be displayed.
"""
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_index_view_with_a_past_question(self):
"""
Questions with a pub_date in the past should be displayed on the
index page.
"""
create_question(question_text="Past question.", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_index_view_with_a_future_question(self):
"""
Questions with a pub_date in the future should not be displayed on
the index page.
"""
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_index_view_with_future_question_and_past_question(self):
"""
Even if both past and future questions exist, only past questions
should be displayed.
"""
create_question(question_text="Past question.", days=-30)
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_index_view_with_two_past_questions(self):
"""
The questions index page may display multiple questions.
"""
create_question(question_text="Past question 1.", days=-30)
create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question 2.>', '<Question: Past question 1.>']
)
class QuestionIndexDetailTests(TestCase):
def test_detail_view_with_a_future_question(self):
"""
The detail view of a question with a pub_date in the future should
return a 404 not found.
"""
future_question = create_question(question_text='Future question.', days=5)
url = reverse('polls:detail', args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_detail_view_with_a_past_question(self):
"""
The detail view of a question with a pub_date in the past should
display the question's text.
"""
past_question = create_question(question_text='Past Question.', days=-5)
url = reverse('polls:detail', args=(past_question.id,))
response = self.client.get(url)
self.assertContains(response, past_question.question_text)
|
{
"content_hash": "8dd755fa3b967a0607b2991c50a1fdfc",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 83,
"avg_line_length": 39.625,
"alnum_prop": 0.6510252365930599,
"repo_name": "kingartjr/example_django",
"id": "f7c596a90a013e13467712641980f6798ad56a78",
"size": "5072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polls/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "111"
},
{
"name": "HTML",
"bytes": "1110"
},
{
"name": "Python",
"bytes": "17302"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.conf import settings
from django.apps import apps
@login_required
def index(request):
cd = {'apps': []}
for app in settings.JSONBENCH_APPS:
_app = apps.get_app_config(app)
cd['apps'].append(('{}:index'.format(_app.label), _app.label))
cd['prof'] = 'prof' if 'prof' in request.GET else ''
    cd['request_dict'] = dict(request.GET)  # assumed intent; dict(request) raises TypeError on an HttpRequest
return render(request, 'benchmark/index.html', cd)
|
{
"content_hash": "58b97ab3c2ace3aeebc1bf4500f91d95",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 70,
"avg_line_length": 37.92857142857143,
"alnum_prop": 0.6629001883239172,
"repo_name": "jonmsawyer/jsonbench",
"id": "cf4b97e1248c759d4de6b998a7931d620b71f0fe",
"size": "531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/benchmark/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17091"
},
{
"name": "HTML",
"bytes": "17354"
},
{
"name": "Python",
"bytes": "81012"
}
],
"symlink_target": ""
}
|
from boto.ec2.elb.listelement import ListElement
# Namespacing issue with deprecated local class
from boto.ec2.blockdevicemapping import BlockDeviceMapping as BDM
from boto.resultset import ResultSet
import boto.utils
import base64
# this should use the corresponding object from boto.ec2
# Currently in use by deprecated local BlockDeviceMapping class
class Ebs(object):
def __init__(self, connection=None, snapshot_id=None, volume_size=None):
self.connection = connection
self.snapshot_id = snapshot_id
self.volume_size = volume_size
def __repr__(self):
return 'Ebs(%s, %s)' % (self.snapshot_id, self.volume_size)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'SnapshotId':
self.snapshot_id = value
elif name == 'VolumeSize':
self.volume_size = value
class InstanceMonitoring(object):
def __init__(self, connection=None, enabled='false'):
self.connection = connection
self.enabled = enabled
def __repr__(self):
return 'InstanceMonitoring(%s)' % self.enabled
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Enabled':
self.enabled = value
# this should use the BlockDeviceMapping from boto.ec2.blockdevicemapping
# Currently in use by deprecated code for backward compatibility
# Removing this class can also remove the Ebs class in this same file
class BlockDeviceMapping(object):
def __init__(self, connection=None, device_name=None, virtual_name=None,
ebs=None, no_device=None):
self.connection = connection
self.device_name = device_name
self.virtual_name = virtual_name
self.ebs = ebs
self.no_device = no_device
def __repr__(self):
return 'BlockDeviceMapping(%s, %s)' % (self.device_name,
self.virtual_name)
def startElement(self, name, attrs, connection):
if name == 'Ebs':
self.ebs = Ebs(self)
return self.ebs
def endElement(self, name, value, connection):
if name == 'DeviceName':
self.device_name = value
elif name == 'VirtualName':
self.virtual_name = value
elif name == 'NoDevice':
self.no_device = bool(value)
class LaunchConfiguration(object):
def __init__(self, connection=None, name=None, image_id=None,
key_name=None, security_groups=None, user_data=None,
instance_type='m1.small', kernel_id=None,
ramdisk_id=None, block_device_mappings=None,
instance_monitoring=False, spot_price=None,
instance_profile_name=None, ebs_optimized=False,
associate_public_ip_address=None, volume_type=None,
delete_on_termination=True, iops=None, use_block_device_types=False):
"""
A launch configuration.
:type name: str
:param name: Name of the launch configuration to create.
:type image_id: str
:param image_id: Unique ID of the Amazon Machine Image (AMI) which was
assigned during registration.
:type key_name: str
:param key_name: The name of the EC2 key pair.
:type security_groups: list
:param security_groups: Names or security group id's of the security
groups with which to associate the EC2 instances or VPC instances,
respectively.
:type user_data: str
:param user_data: The user data available to launched EC2 instances.
:type instance_type: str
:param instance_type: The instance type
        :type kernel_id: str
        :param kernel_id: Kernel id for instance
:type ramdisk_id: str
:param ramdisk_id: RAM disk id for instance
:type block_device_mappings: list
:param block_device_mappings: Specifies how block devices are exposed
for instances
:type instance_monitoring: bool
:param instance_monitoring: Whether instances in group are launched
with detailed monitoring.
:type spot_price: float
:param spot_price: The spot price you are bidding. Only applies
if you are building an autoscaling group with spot instances.
:type instance_profile_name: string
:param instance_profile_name: The name or the Amazon Resource
Name (ARN) of the instance profile associated with the IAM
role for the instance.
:type ebs_optimized: bool
:param ebs_optimized: Specifies whether the instance is optimized
for EBS I/O (true) or not (false).
:type associate_public_ip_address: bool
        :param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud.
            Specifies whether to assign a public IP address to each instance launched in an Amazon VPC.
"""
self.connection = connection
self.name = name
self.instance_type = instance_type
self.block_device_mappings = block_device_mappings
self.key_name = key_name
sec_groups = security_groups or []
self.security_groups = ListElement(sec_groups)
self.image_id = image_id
self.ramdisk_id = ramdisk_id
self.created_time = None
self.kernel_id = kernel_id
self.user_data = user_data
self.created_time = None
self.instance_monitoring = instance_monitoring
self.spot_price = spot_price
self.instance_profile_name = instance_profile_name
self.launch_configuration_arn = None
self.ebs_optimized = ebs_optimized
self.associate_public_ip_address = associate_public_ip_address
self.volume_type = volume_type
self.delete_on_termination = delete_on_termination
self.iops = iops
self.use_block_device_types = use_block_device_types
if connection is not None:
self.use_block_device_types = connection.use_block_device_types
def __repr__(self):
return 'LaunchConfiguration:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'SecurityGroups':
return self.security_groups
elif name == 'BlockDeviceMappings':
if self.use_block_device_types:
self.block_device_mappings = BDM()
else:
self.block_device_mappings = ResultSet([('member', BlockDeviceMapping)])
return self.block_device_mappings
elif name == 'InstanceMonitoring':
self.instance_monitoring = InstanceMonitoring(self)
return self.instance_monitoring
def endElement(self, name, value, connection):
if name == 'InstanceType':
self.instance_type = value
elif name == 'LaunchConfigurationName':
self.name = value
elif name == 'KeyName':
self.key_name = value
elif name == 'ImageId':
self.image_id = value
elif name == 'CreatedTime':
self.created_time = boto.utils.parse_ts(value)
elif name == 'KernelId':
self.kernel_id = value
elif name == 'RamdiskId':
self.ramdisk_id = value
elif name == 'UserData':
try:
self.user_data = base64.b64decode(value)
except TypeError:
self.user_data = value
elif name == 'LaunchConfigurationARN':
self.launch_configuration_arn = value
elif name == 'InstanceMonitoring':
self.instance_monitoring = value
elif name == 'SpotPrice':
self.spot_price = float(value)
elif name == 'IamInstanceProfile':
self.instance_profile_name = value
elif name == 'EbsOptimized':
            self.ebs_optimized = value.lower() == 'true'
elif name == 'AssociatePublicIpAddress':
            self.associate_public_ip_address = value.lower() == 'true'
elif name == 'VolumeType':
self.volume_type = value
elif name == 'DeleteOnTermination':
            self.delete_on_termination = value.lower() == 'true'
elif name == 'Iops':
self.iops = int(value)
else:
setattr(self, name, value)
def delete(self):
""" Delete this launch configuration. """
return self.connection.delete_launch_configuration(self.name)
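# Hypothetical usage sketch (region, AMI id and key name assumed):
#   import boto.ec2.autoscale
#   conn = boto.ec2.autoscale.connect_to_region('us-east-1')
#   lc = LaunchConfiguration(name='web-lc', image_id='ami-12345678',
#                            key_name='my-key', instance_type='m1.small')
#   conn.create_launch_configuration(lc)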
|
{
"content_hash": "83c2ddccd065933a7cc39ea2185cc095",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 132,
"avg_line_length": 38.1,
"alnum_prop": 0.6170261326029899,
"repo_name": "kyleknap/boto",
"id": "6d897fd2e31144fb45fa97e01e7158ada9b184f3",
"size": "9941",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "boto/ec2/autoscale/launchconfig.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""The synologydsm component."""
|
{
"content_hash": "7a34496aa5a04aaf360d65eb50dfe841",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 32,
"avg_line_length": 33,
"alnum_prop": 0.696969696969697,
"repo_name": "jnewland/home-assistant",
"id": "137a3975b99e343dd92232000fb9c5600abcd515",
"size": "33",
"binary": false,
"copies": "15",
"ref": "refs/heads/ci",
"path": "homeassistant/components/synologydsm/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15240512"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17862"
}
],
"symlink_target": ""
}
|
import datetime
import time
try:
from pysqlcipher.libsqlite import *
except ImportError:
from pysqlcipher._sqlite import *
paramstyle = "qmark"
threadsafety = 1
apilevel = "2.0"
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def DateFromTicks(ticks):
return Date(*time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
return Time(*time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return Timestamp(*time.localtime(ticks)[:6])
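# Worked example (hypothetical, on a UTC-configured host):
#   DateFromTicks(0) -> datetime.date(1970, 1, 1)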
version_info = tuple([int(x) for x in version.split(".")])
sqlite_version_info = tuple([int(x) for x in sqlite_version.split(".")])
Binary = buffer
def register_adapters_and_converters():
def adapt_date(val):
return val.isoformat()
def adapt_datetime(val):
return val.isoformat(" ")
def convert_date(val):
return datetime.date(*map(int, val.split("-")))
def convert_timestamp(val):
datepart, timepart = val.split(" ")
year, month, day = map(int, datepart.split("-"))
timepart_full = timepart.split(".")
hours, minutes, seconds = map(int, timepart_full[0].split(":"))
if len(timepart_full) == 2:
microseconds = int(timepart_full[1])
else:
microseconds = 0
val = datetime.datetime(year, month, day, hours, minutes, seconds, microseconds)
return val
register_adapter(datetime.date, adapt_date)
register_adapter(datetime.datetime, adapt_datetime)
register_converter("date", convert_date)
register_converter("timestamp", convert_timestamp)
register_adapters_and_converters()
# Clean up namespace
del register_adapters_and_converters
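# Example (a minimal sketch, assuming a working pysqlcipher build): the
# adapters/converters registered above round-trip datetime values when the
# connection is opened with detect_types=PARSE_DECLTYPES (both `connect` and
# `PARSE_DECLTYPES` come from the star import at the top of this module).
#
#     conn = connect(":memory:", detect_types=PARSE_DECLTYPES)
#     conn.execute("CREATE TABLE t (ts timestamp)")
#     conn.execute("INSERT INTO t VALUES (?)", (datetime.datetime.now(),))
#     print(conn.execute("SELECT ts FROM t").fetchone()[0])  # datetime object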
|
{
"content_hash": "6f843495994aac5e1528b3fc9f47bf53",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 88,
"avg_line_length": 24.61764705882353,
"alnum_prop": 0.6678614097968937,
"repo_name": "hoffmabc/OpenBazaar",
"id": "78f340eeb6503e4d2825b674a8d6549f0f692254",
"size": "2766",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pysqlcipher/lib/dbapi2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "172971"
},
{
"name": "C++",
"bytes": "4357"
},
{
"name": "CSS",
"bytes": "8930"
},
{
"name": "JavaScript",
"bytes": "106795"
},
{
"name": "Perl",
"bytes": "653"
},
{
"name": "Python",
"bytes": "486242"
},
{
"name": "Shell",
"bytes": "29516"
}
],
"symlink_target": ""
}
|
import os
from os.path import normpath
from unittest import mock
from mitmproxy.tools.console import pathedit
from mitmproxy.test import tutils
class TestPathCompleter:
def test_lookup_construction(self):
c = pathedit._PathCompleter()
cd = os.path.normpath(tutils.test_data.path("mitmproxy/completion"))
ca = os.path.join(cd, "a")
assert c.complete(ca).endswith(normpath("/completion/aaa"))
assert c.complete(ca).endswith(normpath("/completion/aab"))
c.reset()
ca = os.path.join(cd, "aaa")
assert c.complete(ca).endswith(normpath("/completion/aaa"))
assert c.complete(ca).endswith(normpath("/completion/aaa"))
c.reset()
assert c.complete(cd).endswith(normpath("/completion/aaa"))
def test_completion(self):
c = pathedit._PathCompleter(True)
c.reset()
c.lookup = [
("a", "x/a"),
("aa", "x/aa"),
]
assert c.complete("a") == "a"
assert c.final == "x/a"
assert c.complete("a") == "aa"
assert c.complete("a") == "a"
c = pathedit._PathCompleter(True)
r = c.complete("l")
assert c.final.endswith(r)
c.reset()
assert c.complete("/nonexistent") == "/nonexistent"
assert c.final == "/nonexistent"
c.reset()
assert c.complete("~") != "~"
c.reset()
s = "thisisatotallynonexistantpathforsure"
assert c.complete(s) == s
assert c.final == s
class TestPathEdit:
def test_keypress(self):
pe = pathedit.PathEdit("", "")
with mock.patch('urwid.widget.Edit.get_edit_text') as get_text, \
mock.patch('urwid.widget.Edit.set_edit_text') as set_text:
cd = os.path.normpath(tutils.test_data.path("mitmproxy/completion"))
get_text.return_value = os.path.join(cd, "a")
# Pressing tab should set completed path
pe.keypress((1,), "tab")
set_text_called_with = set_text.call_args[0][0]
assert set_text_called_with.endswith(normpath("/completion/aaa"))
# Pressing any other key should reset
pe.keypress((1,), "a")
assert pe.lookup is None
|
{
"content_hash": "91b3b69096b24761200436fc6e6d9230",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 80,
"avg_line_length": 31.23611111111111,
"alnum_prop": 0.5775900400177857,
"repo_name": "StevenVanAcker/mitmproxy",
"id": "b9f51f5aa510f9c75303cb79b7a2d02578b59aaf",
"size": "2249",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/mitmproxy/tools/console/test_pathedit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20922"
},
{
"name": "HTML",
"bytes": "8617"
},
{
"name": "JavaScript",
"bytes": "276302"
},
{
"name": "PowerShell",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1657512"
},
{
"name": "Shell",
"bytes": "4644"
}
],
"symlink_target": ""
}
|
from qds_sdk.qubole import Qubole
from qds_sdk.sensors import FileSensor, PartitionSensor
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class QuboleSensor(BaseSensorOperator):
"""
Base class for all Qubole Sensors
:param data: a JSON object containing the payload whose presence is to be checked
:type data: a JSON object
:param qubole_conn_id: the Qubole connection to run the sensor against
:type qubole_conn_id: string
.. note:: Both ``data`` and ``qubole_conn_id`` fields support templating. You can
also use ``.txt`` files for template-driven use cases.
"""
template_fields = ('data', 'qubole_conn_id')
template_ext = ('.txt',)
@apply_defaults
def __init__(self, data, qubole_conn_id="qubole_default", *args, **kwargs):
self.data = data
self.qubole_conn_id = qubole_conn_id
if 'poke_interval' in kwargs and kwargs['poke_interval'] < 5:
raise AirflowException("Sorry, poke_interval can't be less than 5 sec for "
"task '{0}' in dag '{1}'."
.format(kwargs['task_id'], kwargs['dag'].dag_id))
super(QuboleSensor, self).__init__(*args, **kwargs)
def poke(self, context):
conn = BaseHook.get_connection(self.qubole_conn_id)
Qubole.configure(api_token=conn.password, api_url=conn.host)
self.log.info('Poking: %s', self.data)
status = False
try:
status = self.sensor_class.check(self.data)
except Exception as e:
self.log.exception(e)
status = False
self.log.info('Status of this Poke: %s', status)
return status
class QuboleFileSensor(QuboleSensor):
@apply_defaults
def __init__(self, *args, **kwargs):
self.sensor_class = FileSensor
super(QuboleFileSensor, self).__init__(*args, **kwargs)
class QubolePartitionSensor(QuboleSensor):
@apply_defaults
def __init__(self, *args, **kwargs):
self.sensor_class = PartitionSensor
super(QubolePartitionSensor, self).__init__(*args, **kwargs)
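# Example (a minimal sketch, hypothetical DAG snippet): waiting for a file on
# S3 with QuboleFileSensor. The task_id, S3 path, and `dag` object are
# placeholders, not part of this module.
#
#     wait_for_file = QuboleFileSensor(
#         task_id='wait_for_input_file',
#         qubole_conn_id='qubole_default',
#         data={'files': ['s3://my-bucket/data/input.csv']},
#         poke_interval=60,
#         dag=dag,
#     )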
|
{
"content_hash": "e921859b9a15d640125db3f2ab06fec0",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 89,
"avg_line_length": 33.18840579710145,
"alnum_prop": 0.6406113537117903,
"repo_name": "yk5/incubator-airflow",
"id": "d67fa8557e7ad3fc0db99208f6e72a2b26cb8865",
"size": "3102",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "airflow/contrib/sensors/qubole_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "274912"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "3877246"
},
{
"name": "Shell",
"bytes": "47007"
}
],
"symlink_target": ""
}
|
from PIL import Image
from PIL import ImageFile
from io import BytesIO
from PIL import _webp
_VALID_WEBP_MODES = {
"RGB": True,
"RGBA": True,
}
_VP8_MODES_BY_IDENTIFIER = {
b"VP8 ": "RGB",
b"VP8X": "RGBA",
b"VP8L": "RGBA", # lossless
}
def _accept(prefix):
is_riff_file_format = prefix[:4] == b"RIFF"
is_webp_file = prefix[8:12] == b"WEBP"
is_valid_vp8_mode = prefix[12:16] in _VP8_MODES_BY_IDENTIFIER
return is_riff_file_format and is_webp_file and is_valid_vp8_mode
class WebPImageFile(ImageFile.ImageFile):
format = "WEBP"
format_description = "WebP image"
def _open(self):
data, width, height, self.mode, icc_profile, exif = _webp.WebPDecode(self.fp.read())
self.info["icc_profile"] = icc_profile
self.info["exif"] = exif
self.size = width, height
self.fp = BytesIO(data)
self.tile = [("raw", (0, 0) + self.size, 0, self.mode)]
def _getexif(self):
from PIL.JpegImagePlugin import _getexif
return _getexif(self)
def _save(im, fp, filename):
image_mode = im.mode
if im.mode not in _VALID_WEBP_MODES:
raise IOError("cannot write mode %s as WEBP" % image_mode)
lossless = im.encoderinfo.get("lossless", False)
quality = im.encoderinfo.get("quality", 80)
icc_profile = im.encoderinfo.get("icc_profile", "")
exif = im.encoderinfo.get("exif", "")
data = _webp.WebPEncode(
im.tobytes(),
im.size[0],
im.size[1],
lossless,
float(quality),
im.mode,
icc_profile,
exif
)
if data is None:
raise IOError("cannot write file as WEBP (encoder returned None)")
fp.write(data)
Image.register_open("WEBP", WebPImageFile, _accept)
Image.register_save("WEBP", _save)
Image.register_extension("WEBP", ".webp")
Image.register_mime("WEBP", "image/webp")
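# Example (a minimal sketch, hypothetical file names): once this plugin is
# registered, WebP files round-trip through the normal PIL API.
#
#     im = Image.open("input.png").convert("RGB")
#     im.save("output.webp", quality=90)   # dispatches to _save() above
#     Image.open("output.webp")            # dispatches to WebPImageFile._open()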
|
{
"content_hash": "0820a1178e10504d9c1d40a03b6e01bd",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 92,
"avg_line_length": 24.636363636363637,
"alnum_prop": 0.6099103848181339,
"repo_name": "poojavade/Genomics_Docker",
"id": "90e2b540e64d7a4902ebc481d1ae886efd1fa7fa",
"size": "1897",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Pillow-2.3.0-py2.7-linux-x86_64.egg/PIL/WebPImagePlugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "1265138"
},
{
"name": "C++",
"bytes": "4734960"
},
{
"name": "CSS",
"bytes": "17332"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "GLSL",
"bytes": "493"
},
{
"name": "Groff",
"bytes": "77173"
},
{
"name": "HTML",
"bytes": "395483"
},
{
"name": "Java",
"bytes": "9223"
},
{
"name": "JavaScript",
"bytes": "783663"
},
{
"name": "Jupyter Notebook",
"bytes": "189877"
},
{
"name": "Lua",
"bytes": "28217"
},
{
"name": "Makefile",
"bytes": "77825"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Perl",
"bytes": "244796"
},
{
"name": "Python",
"bytes": "54562861"
},
{
"name": "R",
"bytes": "2568"
},
{
"name": "Shell",
"bytes": "40620"
},
{
"name": "Smarty",
"bytes": "21035"
},
{
"name": "TeX",
"bytes": "55310"
}
],
"symlink_target": ""
}
|
import math
import os
import tensorflow as tf
from im2txt import configuration
from im2txt import inference_wrapper
from im2txt.inference_utils import caption_generator
from im2txt.inference_utils import vocabulary
# Directory containing model checkpoints.
CHECKPOINT_DIR="/home/oleg/Desktop/ImageCaptioning/im2txt/model"
# Vocabulary file generated by the preprocessing script.
VOCAB_FILE="/home/oleg/Desktop/ImageCaptioning/im2txt/im2txt/data/word_counts.txt"
# JPEG image file to caption. NOT USED
IMAGE_FILE="/home/oleg/Desktop/ImageCaptioning/im2txt/im2txt/data/images1.jpeg"
class imgCap(object):
def __init__(self):
g = tf.Graph()
with g.as_default():
model = inference_wrapper.InferenceWrapper()
restore_fn = model.build_graph_from_config(configuration.ModelConfig(), CHECKPOINT_DIR)
g.finalize()
# Create the vocabulary.
self.vocab = vocabulary.Vocabulary(VOCAB_FILE)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25, visible_device_list="0")
config=tf.ConfigProto(gpu_options=gpu_options)
self.sess = tf.Session(graph=g, config = config)
restore_fn(self.sess)
self.generator = caption_generator.CaptionGenerator(model, self.vocab)
def feed_image(self, path):
fileGlob = tf.gfile.Glob(path)
# Open in binary mode so the raw JPEG bytes are not decoded as text.
f = tf.gfile.GFile(fileGlob[0], "rb")
image = f.read()
f.close()
captions = self.generator.beam_search(self.sess, image)
answers = []
for i, caption in enumerate(captions):
# Ignore begin and end words.
sentence = [self.vocab.id_to_word(w) for w in caption.sentence[1:-1]]
sentence = " ".join(sentence)
answers.append(sentence)
print(" %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
return answers[0]
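# Example (a minimal sketch): captioning a single image with the wrapper
# above, reusing the IMAGE_FILE path defined at the top of this module.
#
#     cap = imgCap()
#     print(cap.feed_image(IMAGE_FILE))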
|
{
"content_hash": "480110d75c5adbfda11cc907468c5f31",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 92,
"avg_line_length": 30.821428571428573,
"alnum_prop": 0.727694090382387,
"repo_name": "BAILOOL/Assistant-for-People-with-Low-Vision",
"id": "25ba8afd9809415683cf2a244acb513756ef6a6f",
"size": "1726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Im2txt/imgCaptioning.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "44218"
},
{
"name": "C++",
"bytes": "1090404"
},
{
"name": "CMake",
"bytes": "4623"
},
{
"name": "Java",
"bytes": "1029149"
},
{
"name": "Lua",
"bytes": "29799"
},
{
"name": "Makefile",
"bytes": "11909"
},
{
"name": "Python",
"bytes": "231675"
},
{
"name": "Shell",
"bytes": "5893"
}
],
"symlink_target": ""
}
|
log_new = True
log_updated = True
log_unchanged = True
import os
import filecmp
jpeg_files = []
# Walk the hd directory to get a list of files in hd-textures.
for root, _, files in os.walk('hd'):
for f in files:
if f.endswith('.jpg'):
# Since this is a jpeg file, we want to consider it.
jpeg_files.append(os.path.join(root, f))
# Initiate and set our counters to 0.
new = 0
updated = 0
unchanged = 0
# Open output.txt for logging.
with open('output.txt', 'w+') as file:
for image_hd in jpeg_files:
# Replace only the leading 'hd' directory, so an 'hd' substring elsewhere
# in the path is left untouched.
image_master = image_hd.replace('hd', 'master', 1)
if not os.path.isfile(image_master):
# This image doesn't exist in master. It must be new for hd-textures.
if log_new:
file.write("New file %s detected!\n" % image_hd)
new += 1
elif not filecmp.cmp(image_hd, image_master):
# This image exists, but has been modified. This indicates that this
# might possibly be a HD-ified texture!
if log_updated:
file.write("Updated file %s detected!\n" % image_hd)
updated += 1
else:
# The version in master is the exact same as in hd-textures. This means
# that the HD texture is already in master, or this texture hasn't been
# HD-ified yet.
if log_unchanged:
file.write("Unchanged file %s detected!\n" % image_hd)
unchanged += 1
print("A total of %d new files, %d updated files, and %d unchanged files." % (new, updated, unchanged))
percent_changed = ((updated+new)*1.0/len(jpeg_files))*100
print("%0.2f%% of all textures have been updated." % percent_changed)
|
{
"content_hash": "8515f9c191aec42d98cd0fd4087d490e",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 103,
"avg_line_length": 36.53191489361702,
"alnum_prop": 0.6022131624927198,
"repo_name": "ttws4/RESOURCESFORNIGGERS",
"id": "d7454dd681db98993cb2f9bbefe59db7988af8fd",
"size": "2575",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/compare.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "760"
}
],
"symlink_target": ""
}
|
"""empty message
Revision ID: 822389978719
Revises: None
Create Date: 2017-10-17 15:49:01.970182
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = '822389978719'
down_revision = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('keyvalue',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('value', sa.Text(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('access_request',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('datasource_id', sa.Integer(), nullable=True),
sa.Column('datasource_type', sa.String(length=200), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('clusters',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('verbose_name', sa.String(length=250), nullable=True),
sa.Column('cluster_name', sa.String(length=250), nullable=True),
sa.Column('coordinator_host', sa.String(length=255), nullable=True),
sa.Column('coordinator_port', sa.Integer(), nullable=True),
sa.Column('coordinator_endpoint', sa.String(length=255), nullable=True),
sa.Column('broker_host', sa.String(length=255), nullable=True),
sa.Column('broker_port', sa.Integer(), nullable=True),
sa.Column('broker_endpoint', sa.String(length=255), nullable=True),
sa.Column('metadata_last_refreshed', sa.DateTime(), nullable=True),
sa.Column('cache_timeout', sa.Integer(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('cluster_name'),
sa.UniqueConstraint('verbose_name')
)
op.create_table('css_templates',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('template_name', sa.String(length=250), nullable=True),
sa.Column('css', sa.Text(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('dashboards',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('dashboard_title', sa.String(length=500), nullable=True),
sa.Column('position_json', sa.Text(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('css', sa.Text(), nullable=True),
sa.Column('json_metadata', sa.Text(), nullable=True),
sa.Column('slug', sa.String(length=255), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('slug')
)
op.create_table('dbs',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('verbose_name', sa.String(length=250), nullable=True),
sa.Column('database_name', sa.String(length=250), nullable=True),
sa.Column('sqlalchemy_uri', sa.String(length=1024), nullable=True),
sa.Column('password', sqlalchemy_utils.types.encrypted.EncryptedType(), nullable=True),
sa.Column('cache_timeout', sa.Integer(), nullable=True),
sa.Column('select_as_create_table_as', sa.Boolean(), nullable=True),
sa.Column('expose_in_sqllab', sa.Boolean(), nullable=True),
sa.Column('allow_run_sync', sa.Boolean(), nullable=True),
sa.Column('allow_run_async', sa.Boolean(), nullable=True),
sa.Column('allow_ctas', sa.Boolean(), nullable=True),
sa.Column('allow_dml', sa.Boolean(), nullable=True),
sa.Column('force_ctas_schema', sa.String(length=250), nullable=True),
sa.Column('extra', sa.Text(), nullable=True),
sa.Column('perm', sa.String(length=1000), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('database_name'),
sa.UniqueConstraint('verbose_name')
)
op.create_table('favstar',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('class_name', sa.String(length=50), nullable=True),
sa.Column('obj_id', sa.Integer(), nullable=True),
sa.Column('dttm', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('logs',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('action', sa.String(length=512), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('dashboard_id', sa.Integer(), nullable=True),
sa.Column('slice_id', sa.Integer(), nullable=True),
sa.Column('json', sa.Text(), nullable=True),
sa.Column('dttm', sa.DateTime(), nullable=True),
sa.Column('dt', sa.Date(), nullable=True),
sa.Column('duration_ms', sa.Integer(), nullable=True),
sa.Column('referrer', sa.String(length=1024), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('slices',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('slice_name', sa.String(length=250), nullable=True),
sa.Column('datasource_id', sa.Integer(), nullable=True),
sa.Column('datasource_type', sa.String(length=200), nullable=True),
sa.Column('datasource_name', sa.String(length=2000), nullable=True),
sa.Column('viz_type', sa.String(length=250), nullable=True),
sa.Column('params', sa.Text(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('cache_timeout', sa.Integer(), nullable=True),
sa.Column('perm', sa.String(length=1000), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('url',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('url', sa.Text(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('dashboard_slices',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('dashboard_id', sa.Integer(), nullable=True),
sa.Column('slice_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['dashboard_id'], ['dashboards.id'], ),
sa.ForeignKeyConstraint(['slice_id'], ['slices.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('dashboard_user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('dashboard_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['dashboard_id'], ['dashboards.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('datasources',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('default_endpoint', sa.Text(), nullable=True),
sa.Column('is_featured', sa.Boolean(), nullable=True),
sa.Column('filter_select_enabled', sa.Boolean(), nullable=True),
sa.Column('offset', sa.Integer(), nullable=True),
sa.Column('cache_timeout', sa.Integer(), nullable=True),
sa.Column('params', sa.String(length=1000), nullable=True),
sa.Column('perm', sa.String(length=1000), nullable=True),
sa.Column('datasource_name', sa.String(length=255), nullable=True),
sa.Column('is_hidden', sa.Boolean(), nullable=True),
sa.Column('fetch_values_from', sa.String(length=100), nullable=True),
sa.Column('cluster_name', sa.String(length=250), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['cluster_name'], ['clusters.cluster_name'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('datasource_name')
)
op.create_table('query',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('client_id', sa.String(length=11), nullable=False),
sa.Column('database_id', sa.Integer(), nullable=False),
sa.Column('tmp_table_name', sa.String(length=256), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('status', sa.String(length=16), nullable=True),
sa.Column('tab_name', sa.String(length=256), nullable=True),
sa.Column('sql_editor_id', sa.String(length=256), nullable=True),
sa.Column('schema', sa.String(length=256), nullable=True),
sa.Column('sql', sa.Text(), nullable=True),
sa.Column('select_sql', sa.Text(), nullable=True),
sa.Column('executed_sql', sa.Text(), nullable=True),
sa.Column('limit', sa.Integer(), nullable=True),
sa.Column('limit_used', sa.Boolean(), nullable=True),
sa.Column('select_as_cta', sa.Boolean(), nullable=True),
sa.Column('select_as_cta_used', sa.Boolean(), nullable=True),
sa.Column('progress', sa.Integer(), nullable=True),
sa.Column('rows', sa.Integer(), nullable=True),
sa.Column('error_message', sa.Text(), nullable=True),
sa.Column('results_key', sa.String(length=64), nullable=True),
sa.Column('start_time', sa.Numeric(precision=20, scale=6), nullable=True),
sa.Column('start_running_time', sa.Numeric(precision=20, scale=6), nullable=True),
sa.Column('end_time', sa.Numeric(precision=20, scale=6), nullable=True),
sa.Column('end_result_backend_time', sa.Numeric(precision=20, scale=6), nullable=True),
sa.Column('tracking_url', sa.Text(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['database_id'], ['dbs.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('client_id')
)
op.create_index(op.f('ix_query_results_key'), 'query', ['results_key'], unique=False)
op.create_index('ti_user_id_changed_on', 'query', ['user_id', 'changed_on'], unique=False)
op.create_table('saved_query',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('db_id', sa.Integer(), nullable=True),
sa.Column('schema', sa.String(length=128), nullable=True),
sa.Column('label', sa.String(length=256), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('sql', sa.Text(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['db_id'], ['dbs.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('slice_user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('slice_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['slice_id'], ['slices.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('tables',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('default_endpoint', sa.Text(), nullable=True),
sa.Column('is_featured', sa.Boolean(), nullable=True),
sa.Column('filter_select_enabled', sa.Boolean(), nullable=True),
sa.Column('offset', sa.Integer(), nullable=True),
sa.Column('cache_timeout', sa.Integer(), nullable=True),
sa.Column('params', sa.String(length=1000), nullable=True),
sa.Column('perm', sa.String(length=1000), nullable=True),
sa.Column('table_name', sa.String(length=250), nullable=True),
sa.Column('main_dttm_col', sa.String(length=250), nullable=True),
sa.Column('database_id', sa.Integer(), nullable=False),
sa.Column('fetch_values_predicate', sa.String(length=1000), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('schema', sa.String(length=255), nullable=True),
sa.Column('sql', sa.Text(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['database_id'], ['dbs.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('database_id', 'schema', 'table_name', name='_customer_location_uc')
)
op.create_table('columns',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('column_name', sa.String(length=255), nullable=True),
sa.Column('verbose_name', sa.String(length=1024), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=True),
sa.Column('type', sa.String(length=32), nullable=True),
sa.Column('groupby', sa.Boolean(), nullable=True),
sa.Column('count_distinct', sa.Boolean(), nullable=True),
sa.Column('sum', sa.Boolean(), nullable=True),
sa.Column('avg', sa.Boolean(), nullable=True),
sa.Column('max', sa.Boolean(), nullable=True),
sa.Column('min', sa.Boolean(), nullable=True),
sa.Column('filterable', sa.Boolean(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('datasource_name', sa.String(length=255), nullable=True),
sa.Column('dimension_spec_json', sa.Text(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['datasource_name'], ['datasources.datasource_name'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('metrics',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('metric_name', sa.String(length=512), nullable=True),
sa.Column('verbose_name', sa.String(length=1024), nullable=True),
sa.Column('metric_type', sa.String(length=32), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('is_restricted', sa.Boolean(), nullable=True),
sa.Column('d3format', sa.String(length=128), nullable=True),
sa.Column('datasource_name', sa.String(length=255), nullable=True),
sa.Column('json', sa.Text(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['datasource_name'], ['datasources.datasource_name'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('sql_metrics',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('metric_name', sa.String(length=512), nullable=True),
sa.Column('verbose_name', sa.String(length=1024), nullable=True),
sa.Column('metric_type', sa.String(length=32), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('is_restricted', sa.Boolean(), nullable=True),
sa.Column('d3format', sa.String(length=128), nullable=True),
sa.Column('table_id', sa.Integer(), nullable=True),
sa.Column('expression', sa.Text(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['table_id'], ['tables.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('table_columns',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('column_name', sa.String(length=255), nullable=True),
sa.Column('verbose_name', sa.String(length=1024), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=True),
sa.Column('type', sa.String(length=32), nullable=True),
sa.Column('groupby', sa.Boolean(), nullable=True),
sa.Column('count_distinct', sa.Boolean(), nullable=True),
sa.Column('sum', sa.Boolean(), nullable=True),
sa.Column('avg', sa.Boolean(), nullable=True),
sa.Column('max', sa.Boolean(), nullable=True),
sa.Column('min', sa.Boolean(), nullable=True),
sa.Column('filterable', sa.Boolean(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('table_id', sa.Integer(), nullable=True),
sa.Column('is_dttm', sa.Boolean(), nullable=True),
sa.Column('expression', sa.Text(), nullable=True),
sa.Column('python_date_format', sa.String(length=255), nullable=True),
sa.Column('database_expression', sa.String(length=255), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['table_id'], ['tables.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('table_columns')
op.drop_table('sql_metrics')
op.drop_table('metrics')
op.drop_table('columns')
op.drop_table('tables')
op.drop_table('slice_user')
op.drop_table('saved_query')
op.drop_index('ti_user_id_changed_on', table_name='query')
op.drop_index(op.f('ix_query_results_key'), table_name='query')
op.drop_table('query')
op.drop_table('datasources')
op.drop_table('dashboard_user')
op.drop_table('dashboard_slices')
op.drop_table('url')
op.drop_table('slices')
op.drop_table('logs')
op.drop_table('favstar')
op.drop_table('dbs')
op.drop_table('dashboards')
op.drop_table('css_templates')
op.drop_table('clusters')
op.drop_table('access_request')
op.drop_table('keyvalue')
# ### end Alembic commands ###
|
{
"content_hash": "136a462409c7b0b447dad26428d00aca",
"timestamp": "",
"source": "github",
"line_count": 417,
"max_line_length": 94,
"avg_line_length": 51.3189448441247,
"alnum_prop": 0.6564018691588785,
"repo_name": "lina9527/easybi",
"id": "d18a3920b4a38fe3503028b5c7567169f4a55886",
"size": "21400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/822389978719_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "76785"
},
{
"name": "HTML",
"bytes": "3048686"
},
{
"name": "JavaScript",
"bytes": "635778"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "566451"
},
{
"name": "Shell",
"bytes": "326"
}
],
"symlink_target": ""
}
|
"""
raven.contrib.django.handlers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from raven.handlers.logging import SentryHandler as BaseSentryHandler
class SentryHandler(BaseSentryHandler):
def __init__(self):
logging.Handler.__init__(self)
def _get_client(self):
from raven.contrib.django.models import client
return client
client = property(_get_client)
def _emit(self, record):
from raven.contrib.django.middleware import SentryLogMiddleware
# Fetch the request from a threadlocal variable, if available
request = getattr(record, 'request', getattr(SentryLogMiddleware.thread, 'request', None))
return super(SentryHandler, self)._emit(record, request=request)
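# Example (a minimal sketch, not part of this module): attaching the handler
# so that log records at ERROR level and above are forwarded to Sentry.
#
#     import logging
#     handler = SentryHandler()
#     handler.setLevel(logging.ERROR)
#     logging.getLogger().addHandler(handler)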
|
{
"content_hash": "419af02943a9850ba3ac01aa19fcdddc",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 98,
"avg_line_length": 28.25,
"alnum_prop": 0.6891592920353983,
"repo_name": "alex/raven",
"id": "275aa2769d16647d3716eb91043949de447e937c",
"size": "904",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "raven/contrib/django/handlers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "216588"
}
],
"symlink_target": ""
}
|
"""
Created on 02/02/2014
@author: Miguel Otero
"""
class Value(object):
"""
Represents an observation value together with its value type and observation status.
"""
MISSING = "obsStatus-M"
AVAILABLE = "obsStatus-A"
FLOAT = "float"
INTEGER = "int"
STRING = "str"
def __init__(self, value=None, value_type=None, obs_status=None):
"""
Constructor
"""
self.value = value
self._value_type = value_type
self._obs_status = obs_status
def __str__(self):
return "%s (type=%s)" % (self.value, self._value_type)
def __get_value_type(self):
return self._value_type
def __set_value_type(self, value_type):
if value_type in (self.INTEGER, self.FLOAT, self.STRING):
self._value_type = value_type
else:
raise ValueError("Value type not in the given ones")
value_type = property(fget=__get_value_type,
fset=__set_value_type,
doc="The value type of the value")
def __get_obs_status(self):
return self._obs_status
def __set_obs_status(self, obs_status):
if obs_status in (self.MISSING, self.AVAILABLE):
self._obs_status = obs_status
else:
raise ValueError("Observation status not in the given ones")
obs_status = property(fget=__get_obs_status,
fset=__set_obs_status,
doc="The Status of the value")
|
{
"content_hash": "31288d121e602f3b3152b88e3b41f924",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 120,
"avg_line_length": 27.945454545454545,
"alnum_prop": 0.5406636304489265,
"repo_name": "landportal/landbook-importers",
"id": "588fc90b77f51e17070ed3789ca2cde56611867c",
"size": "1537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LandPortalEntities/lpentities/value.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "518503"
},
{
"name": "Shell",
"bytes": "15185"
}
],
"symlink_target": ""
}
|
"""Starter script for Cinder Scheduler."""
import eventlet
eventlet.monkey_patch()
import sys
import warnings
warnings.simplefilter('once', DeprecationWarning)
from oslo_config import cfg
from cinder import i18n
i18n.enable_lazy()
# Need to register global_opts
from cinder.common import config # noqa
from cinder.openstack.common import log as logging
from cinder import service
from cinder import utils
from cinder import version
CONF = cfg.CONF
def main():
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup("cinder")
utils.monkey_patch()
server = service.Service.create(binary='cinder-scheduler')
service.serve(server)
service.wait()
|
{
"content_hash": "c82b57ee83f3e771c1bacd7468dd38cb",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 62,
"avg_line_length": 21.058823529411764,
"alnum_prop": 0.7430167597765364,
"repo_name": "Accelerite/cinder",
"id": "8ef5b37b3ed5c5edc40048a43fb25182adf0d93e",
"size": "1470",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cinder/cmd/scheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3322"
},
{
"name": "Python",
"bytes": "10152545"
},
{
"name": "Shell",
"bytes": "9905"
}
],
"symlink_target": ""
}
|
"""
Mixins that provide inheriting classes with the basic columns and
relationships for non-DB storage of files.
``StoredFileMixin`` defines a pointer to a "physical" file that exists in some
kind of storage backend. ``VersionedResourceMixin`` allows one to define
an abstract resource with multiple versions (files), which provides (very)
rudimentary version control. Inheriting classes are responsible for
specifying, among other things, which storage backend to use.
"""
from sqlalchemy.event import listen
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import column_property
from indico.core.db import db
from indico.core.db.sqlalchemy import UTCDateTime
from indico.core.storage.backend import get_storage
from indico.util.date_time import now_utc
class VersionedResourceMixin:
# Class that will inherit from StoredFileMixin
stored_file_class = None
# Foreign key that, from such a class, will point here
stored_file_fkey = None
@staticmethod
def _add_file_to_relationship(target, value, *unused):
if value is None:
# we don't allow file<->link conversions so setting it to None is pointless
# and would just break integrity
raise ValueError('file cannot be set to None')
with db.session.no_autoflush:
target.all_files.append(value)
@classmethod
def register_versioned_resource_events(cls):
"""Register SQLAlchemy events.
Should be called right after class definition.
"""
listen(cls.file, 'set', cls._add_file_to_relationship)
#: The ID of the latest file for a file resource
@declared_attr
def file_id(cls):
return db.Column(
db.Integer,
db.ForeignKey(cls.stored_file_class.__table__.fullname + '.id', use_alter=True),
nullable=True
)
#: The currently active file for the resource
@declared_attr
def file(cls):
return db.relationship(
cls.stored_file_class,
primaryjoin=lambda: cls.file_id == cls.stored_file_class.id,
foreign_keys=lambda: cls.file_id,
lazy=False,
post_update=True
)
#: The list of all files for the resource
@declared_attr
def all_files(cls):
return db.relationship(
cls.stored_file_class,
primaryjoin=lambda: cls.id == getattr(cls.stored_file_class, cls.stored_file_fkey),
foreign_keys=lambda: getattr(cls.stored_file_class, cls.stored_file_fkey),
lazy=True,
cascade='all, delete, delete-orphan',
order_by=lambda: cls.stored_file_class.created_dt.desc(),
backref=db.backref(
getattr(cls.stored_file_class, 'version_of'),
lazy=False
)
)
class StoredFileMixin:
#: Name of attribute (backref) that will be made to point
#: to the versioned resource (leave as ``None`` if you
#: don't want versioning)
version_of = None
#: Whether to track the creation time. This is required when
#: using versioning!
add_file_date_column = True
#: Whether a row must always contain a file
file_required = True
@declared_attr
def filename(cls):
"""The name of the file."""
return db.Column(
db.String,
nullable=not cls.file_required
)
@declared_attr
def extension(cls):
"""The extension of the file."""
return column_property(db.func.regexp_replace(cls.filename, r'^.*\.', ''), deferred=True)
@declared_attr
def content_type(cls):
"""The MIME type of the file."""
return db.Column(
db.String,
nullable=not cls.file_required
)
@declared_attr
def size(cls):
"""The size of the file (in bytes).
Automatically assigned when `save()` is called.
"""
return db.Column(
db.BigInteger,
nullable=not cls.file_required
)
@declared_attr
def md5(cls):
"""An MD5 hash of the file.
Automatically assigned when `save()` is called.
"""
return db.Column(
db.String,
nullable=not cls.file_required
)
@declared_attr
def storage_backend(cls):
return db.Column(
db.String,
nullable=not cls.file_required
)
@declared_attr
def storage_file_id(cls):
return db.Column(
db.String,
nullable=not cls.file_required
)
@declared_attr
def created_dt(cls):
"""The date/time when the file was uploaded."""
if not cls.add_file_date_column:
return None
return db.Column(
UTCDateTime,
nullable=not cls.file_required,
default=now_utc
)
@property
def storage(self):
"""The Storage object used to store the file."""
if self.storage_backend is None:
raise RuntimeError('No storage backend set')
return get_storage(self.storage_backend)
def get_local_path(self):
"""Return context manager that will yield physical path.
This should be avoided in favour of using the actual file contents.
"""
return self.storage.get_local_path(self.storage_file_id)
def _build_storage_path(self):
"""
Should return a tuple containing the name of the storage backend
to use and the actual path of that will be used to store the resource
using the former.
"""
raise NotImplementedError
def save(self, data):
"""Save a file in the file storage.
This requires the AttachmentFile to be associated with
an Attachment which needs to be associated with a Folder since
the data from these objects is needed to generate the path
used to store the file.
:param data: bytes or a file-like object
"""
assert self.storage_backend is None and self.storage_file_id is None and self.size is None
if self.version_of:
assert getattr(self, self.version_of) is not None
self.storage_backend, path = self._build_storage_path()
self.storage_file_id, self.md5 = self.storage.save(path, self.content_type, self.filename, data)
self.size = self.storage.getsize(self.storage_file_id)
def open(self):
"""Return the stored file as a file-like object."""
if self.storage_file_id is None:
raise Exception('There is no file to open')
return self.storage.open(self.storage_file_id)
def send(self, inline=True):
"""Send the file to the user."""
if self.storage_file_id is None:
raise Exception('There is no file to send')
return self.storage.send_file(self.storage_file_id, self.content_type, self.filename, inline=inline)
def delete(self, delete_from_db=False):
"""Delete the file from storage."""
if self.storage_file_id is None:
raise Exception('There is no file to delete')
storage = self.storage
storage_file_id = self.storage_file_id
if delete_from_db:
db.session.delete(self)
db.session.flush()
else:
self.storage_backend = None
self.storage_file_id = None
self.size = None
self.content_type = None
self.filename = None
storage.delete(storage_file_id)
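# Example (a minimal sketch, hypothetical model): a concrete class only needs
# to inherit StoredFileMixin and decide where its data lives. The table name
# and the 'fs' backend key below are illustrative assumptions.
#
#     class ReportFile(StoredFileMixin, db.Model):
#         __tablename__ = 'report_files'
#         id = db.Column(db.Integer, primary_key=True)
#
#         def _build_storage_path(self):
#             return 'fs', 'reports/{}/{}'.format(self.id, self.filename)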
|
{
"content_hash": "1ebf5d3f0a283655c54abff358758002",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 108,
"avg_line_length": 33.1359649122807,
"alnum_prop": 0.6181336863004633,
"repo_name": "pferreir/indico",
"id": "8dffde656cbc59c9fbcae7a6840677ec7cb915da",
"size": "7769",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/core/storage/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1394116"
},
{
"name": "JavaScript",
"bytes": "2078347"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "4993798"
},
{
"name": "SCSS",
"bytes": "475126"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
"""
Support for ZoneMinder.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zoneminder/
"""
import logging
from urllib.parse import urljoin
import requests
import voluptuous as vol
from homeassistant.const import (
CONF_PATH, CONF_HOST, CONF_SSL, CONF_PASSWORD, CONF_USERNAME)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_PATH_ZMS = 'path_zms'
DEFAULT_PATH = '/zm/'
DEFAULT_PATH_ZMS = '/zm/cgi-bin/nph-zms'
DEFAULT_SSL = False
DEFAULT_TIMEOUT = 10
DOMAIN = 'zoneminder'
LOGIN_RETRIES = 2
ZM = {}
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
# This should match PATH_ZMS in ZoneMinder settings.
vol.Optional(CONF_PATH_ZMS, default=DEFAULT_PATH_ZMS): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string
})
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up the ZoneMinder component."""
global ZM
ZM = {}
conf = config[DOMAIN]
if conf[CONF_SSL]:
schema = 'https'
else:
schema = 'http'
server_origin = '{}://{}'.format(schema, conf[CONF_HOST])
url = urljoin(server_origin, conf[CONF_PATH])
username = conf.get(CONF_USERNAME, None)
password = conf.get(CONF_PASSWORD, None)
ZM['server_origin'] = server_origin
ZM['url'] = url
ZM['username'] = username
ZM['password'] = password
ZM['path_zms'] = conf.get(CONF_PATH_ZMS)
hass.data[DOMAIN] = ZM
return login()
# pylint: disable=no-member
def login():
"""Login to the ZoneMinder API."""
_LOGGER.debug("Attempting to login to ZoneMinder")
login_post = {'view': 'console', 'action': 'login'}
if ZM['username']:
login_post['username'] = ZM['username']
if ZM['password']:
login_post['password'] = ZM['password']
req = requests.post(ZM['url'] + '/index.php', data=login_post)
ZM['cookies'] = req.cookies
# A login call returns a 200 response on both failure and success.
# The only way to tell whether the login succeeded is to issue an API call.
req = requests.get(
ZM['url'] + 'api/host/getVersion.json', cookies=ZM['cookies'],
timeout=DEFAULT_TIMEOUT)
if not req.ok:
_LOGGER.error("Connection error logging into ZoneMinder")
return False
return True
def _zm_request(method, api_url, data=None):
"""Perform a Zoneminder request."""
# Since the API uses sessions that expire, sometimes we need to re-auth
# if the call fails.
for _ in range(LOGIN_RETRIES):
req = requests.request(
method, urljoin(ZM['url'], api_url), data=data,
cookies=ZM['cookies'], timeout=DEFAULT_TIMEOUT)
if not req.ok:
login()
else:
break
else:
_LOGGER.exception("Unable to get API response from ZoneMinder")
try:
return req.json()
except ValueError:
_LOGGER.exception('JSON decode exception caught while attempting to '
'decode "%s"', req.text)
# pylint: disable=no-member
def get_state(api_url):
"""Get a state from the ZoneMinder API service."""
return _zm_request('get', api_url)
# pylint: disable=no-member
def change_state(api_url, post_data):
"""Update a state using the Zoneminder API."""
return _zm_request('post', api_url, data=post_data)
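# Example (a minimal sketch, assuming setup() has already succeeded): reading
# monitor data through the helper above; 'api/monitors.json' is ZoneMinder's
# standard monitors endpoint.
#
#     monitors = get_state('api/monitors.json')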
|
{
"content_hash": "d5267e1edcc18e4de2aa6d4ddcff39a1",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 78,
"avg_line_length": 27.853846153846153,
"alnum_prop": 0.6396023198011599,
"repo_name": "JshWright/home-assistant",
"id": "8870b4713e0312523efbda0a6292ec89ec56bc46",
"size": "3621",
"binary": false,
"copies": "10",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zoneminder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1808411"
},
{
"name": "Python",
"bytes": "6070409"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15525"
}
],
"symlink_target": ""
}
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent6000 import *
class agilentMSO6104A(agilent6000):
"Agilent InfiniiVision MSO6104A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MSO6104A')
super(agilentMSO6104A, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 1e9
self._init_channels()
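# Example (a minimal sketch, hypothetical VISA resource string): python-ivi
# drivers are constructed directly from an instrument resource.
#
#     mso = agilentMSO6104A("TCPIP0::192.168.1.100::INSTR")
#     print(mso.identity.instrument_model)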
|
{
"content_hash": "afd0c9cbab8062486f9ed8ec8c887615",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 86,
"avg_line_length": 38.29545454545455,
"alnum_prop": 0.7364985163204748,
"repo_name": "python-ivi/python-ivi",
"id": "cd3c8d6a3bd8cc887bde32c5f7be9b3f649d63cc",
"size": "1685",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ivi/agilent/agilentMSO6104A.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1961300"
}
],
"symlink_target": ""
}
|
"""Classification Model Class for Estimating Propensity Score and Importance Weight."""
from dataclasses import dataclass
from typing import Optional
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.base import clone
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import KFold
from sklearn.utils import check_random_state
from sklearn.utils import check_scalar
from ..utils import check_array
from ..utils import check_bandit_feedback_inputs
from ..utils import sample_action_fast
@dataclass
class ImportanceWeightEstimator(BaseEstimator):
"""Machine learning model to estimate the importance weights induced by the behavior and evaluation policies leveraging classifier-based density ratio estimation.
Parameters
------------
base_model: BaseEstimator
A machine learning model used to estimate the importance weights.
n_actions: int
Number of actions.
len_list: int, default=1
Length of a list of actions in a recommendation/ranking interface, slate size.
When Open Bandit Dataset is used, 3 should be set.
action_context: array-like, shape (n_actions, dim_action_context), default=None
Context vectors characterizing actions (i.e., a vector representation or an embedding of each action).
If None, one-hot encoding of the action variable is used as default.
If fitting_method is 'raw', one-hot encoding will be used as action_context.
fitting_method: str, default='sample'
Method to fit the classification model.
Must be one of ['sample', 'raw']. Each method is defined as follows:
- sample: actions are sampled from behavior and evaluation policies, respectively.
- raw: action_dist_at_pos are directly encoded as action features.
If fitting_method is 'raw', one-hot encoding will be used as action_context.
calibration_cv: int, default=2
Number of folds in the calibration procedure.
If calibration_cv <= 1, classification model is not calibrated.
References
-----------
Arjun Sondhi, David Arbour, and Drew Dimmery
"Balanced Off-Policy Evaluation in General Action Spaces.", 2020.
"""
base_model: BaseEstimator
n_actions: int
len_list: int = 1
action_context: Optional[np.ndarray] = None
fitting_method: str = "sample"
calibration_cv: int = 2
def __post_init__(self) -> None:
"""Initialize Class."""
check_scalar(self.n_actions, "n_actions", int, min_val=2)
check_scalar(self.len_list, "len_list", int, min_val=1)
check_scalar(self.calibration_cv, "calibration_cv", int)
if not (
isinstance(self.fitting_method, str)
and self.fitting_method in ["sample", "raw"]
):
raise ValueError(
f"`fitting_method` must be either 'sample' or 'raw', but {self.fitting_method} is given"
)
if not isinstance(self.base_model, BaseEstimator):
raise ValueError(
"`base_model` must be BaseEstimator or a child class of BaseEstimator"
)
if self.calibration_cv > 1:
self.base_model_list = [
clone(
CalibratedClassifierCV(
base_estimator=self.base_model, cv=self.calibration_cv
),
)
for _ in np.arange(self.len_list)
]
else:
self.base_model_list = [
clone(self.base_model) for _ in np.arange(self.len_list)
]
if self.action_context is None or self.fitting_method == "raw":
self.action_context = np.eye(self.n_actions, dtype=int)
def fit(
self,
context: np.ndarray,
action: np.ndarray,
action_dist: np.ndarray,
position: Optional[np.ndarray] = None,
random_state: Optional[int] = None,
) -> None:
"""Fit the classification model on given logged bandit data.
Parameters
----------
context: array-like, shape (n_rounds, dim_context)
Context vectors observed for each data in logged bandit data, i.e., :math:`x_i`.
action: array-like, shape (n_rounds,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
action_dist: array-like, shape (n_rounds, n_actions, len_list)
Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.
position: array-like, shape (n_rounds,), default=None
Indices to differentiate positions in a recommendation interface where the actions are presented.
If None, a classification model assumes that there is only a single position in a recommendation interface.
When `len_list` > 1, an array must be given as `position`.
random_state: int, default=None
`random_state` affects the sampling of actions from the evaluation policy.
"""
check_bandit_feedback_inputs(
context=context,
action=action,
reward=np.zeros_like(action), # use dummy reward
position=position,
action_context=self.action_context,
)
check_array(array=action_dist, name="action_dist", expected_dim=3)
n = context.shape[0]
if position is None or self.len_list == 1:
position = np.zeros_like(action)
else:
check_array(array=position, name="position", expected_dim=1)
if position.max() >= self.len_list:
raise ValueError(
f"`position` elements must be smaller than `len_list`, but the maximum value is {position.max()} (>= {self.len_list})"
)
if action_dist.shape != (n, self.n_actions, self.len_list):
raise ValueError(
f"shape of `action_dist` must be (n_rounds, n_actions, len_list)=({n, self.n_actions, self.len_list}), but is {action_dist.shape}"
)
if not np.allclose(action_dist.sum(axis=1), 1):
raise ValueError("`action_dist` must be a probability distribution")
# If self.fitting_method != "sample", `sampled_action` has no information
sampled_action = np.zeros(n, dtype=int)
if self.fitting_method == "sample":
for pos_ in np.arange(self.len_list):
idx = position == pos_
sampled_action_at_position = sample_action_fast(
action_dist=action_dist[idx][:, :, pos_],
random_state=random_state,
)
sampled_action[idx] = sampled_action_at_position
for pos_ in np.arange(self.len_list):
idx = position == pos_
action_dist_at_pos = action_dist[idx][:, :, pos_]
X, y = self._pre_process_for_clf_model(
context=context[idx],
action=action[idx],
action_dist_at_pos=action_dist_at_pos,
sampled_action_at_position=sampled_action[idx],
)
if X.shape[0] == 0:
raise ValueError(f"No training data at position {pos_}")
self.base_model_list[pos_].fit(X, y)
def predict(
self,
context: np.ndarray,
action: np.ndarray,
position: Optional[np.ndarray] = None,
) -> np.ndarray:
"""Predict the importance weights.
Parameters
----------
context: array-like, shape (n_rounds_of_new_data, dim_context)
Context vectors observed for each data in logged bandit data, i.e., :math:`x_i`.
action: array-like, shape (n_rounds_of_new_data,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
position: array-like, shape (n_rounds_of_new_data,), default=None
Indices to differentiate positions in a recommendation interface where the actions are presented.
If None, a classification model assumes that there is only a single position in a recommendation interface.
When `len_list` > 1, an array must be given as `position`.
Returns
----------
estimated_importance_weights: array-like, shape (n_rounds_of_new_data, )
Importance weights estimated via supervised classification, i.e., :math:`\\hat{w}(x_t, a_t)`.
"""
        proba_eval_policy = np.zeros(action.shape[0])
        # fall back to a single position when `position` is not given
        if position is None or self.len_list == 1:
            position = np.zeros_like(action)
        for pos_ in np.arange(self.len_list):
            idx = position == pos_
            X, _ = self._pre_process_for_clf_model(
context=context[idx],
action=action[idx],
is_prediction=True,
)
proba_eval_policy[idx] = self.base_model_list[pos_].predict_proba(X)[:, 1]
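        # the classifier estimates p = P(evaluation-policy sample | x, a); since
        # training pairs one behavior sample with one (sampled) evaluation
        # sample, the odds p / (1 - p) estimate the importance weight w(x, a)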
return proba_eval_policy / (1 - proba_eval_policy)
def fit_predict(
self,
context: np.ndarray,
action: np.ndarray,
action_dist: np.ndarray,
position: Optional[np.ndarray] = None,
n_folds: int = 1,
random_state: Optional[int] = None,
evaluate_model_performance: bool = False,
) -> np.ndarray:
"""Fit the classification model on given logged bandit data and predict the importance weights on the same data, possibly using cross-fitting to avoid over-fitting.
Note
------
When `n_folds` is larger than 1, the cross-fitting procedure is applied.
Parameters
----------
context: array-like, shape (n_rounds, dim_context)
Context vectors observed for each data in logged bandit data, i.e., :math:`x_i`.
action: array-like, shape (n_rounds,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
action_dist: array-like, shape (n_rounds, n_actions, len_list)
Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.
position: array-like, shape (n_rounds,), default=None
Indices to differentiate positions in a recommendation interface where the actions are presented.
If None, a classification model assumes that there is only a single position in a recommendation interface.
When `len_list` > 1, an array must be given as `position`.
n_folds: int, default=1
Number of folds in the cross-fitting procedure.
When 1 is given, the classification model is trained on the whole logged bandit data.
Please refer to https://arxiv.org/abs/2002.08536 about the details of the cross-fitting procedure.
random_state: int, default=None
`random_state` affects the ordering of the indices, which controls the randomness of each fold.
See https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html for the details.
evaluate_model_performance: bool, default=False
Whether the performance of the classification model is evaluated or not.
            If True, the predicted probability of the classification model and the true label of each fold are saved in `self.eval_result`.
Returns
-----------
        estimated_importance_weights: array-like, shape (n_rounds,)
Importance weights estimated via supervised classification, i.e., :math:`\\hat{w}(x_t, a_t)`.
"""
check_bandit_feedback_inputs(
context=context,
action=action,
reward=np.zeros_like(action), # use dummy reward
position=position,
action_context=self.action_context,
)
n = context.shape[0]
if position is None or self.len_list == 1:
position = np.zeros_like(action)
        else:
            check_array(array=position, name="position", expected_dim=1)
            if position.max() >= self.len_list:
raise ValueError(
f"`position` elements must be smaller than `len_list`, but the maximum value is {position.max()} (>= {self.len_list})"
)
check_array(array=action_dist, name="action_dist", expected_dim=3)
check_scalar(n_folds, "n_folds", int, min_val=1)
check_random_state(random_state)
if action_dist.shape != (n, self.n_actions, self.len_list):
raise ValueError(
f"shape of `action_dist` must be (n_rounds, n_actions, len_list)=({n, self.n_actions, self.len_list}), but is {action_dist.shape}"
)
if not np.allclose(action_dist.sum(axis=1), 1):
raise ValueError("`action_dist` must be a probability distribution")
if n_folds == 1:
self.fit(
context=context,
action=action,
position=position,
action_dist=action_dist,
random_state=random_state,
)
return self.predict(context=context, action=action, position=position)
else:
estimated_importance_weights = np.zeros(n)
kf = KFold(n_splits=n_folds, shuffle=True, random_state=random_state)
kf.get_n_splits(context)
if evaluate_model_performance:
self.eval_result = {"y": [], "proba": []}
for train_idx, test_idx in kf.split(context):
self.fit(
context=context[train_idx],
action=action[train_idx],
position=position[train_idx],
action_dist=action_dist[train_idx],
random_state=random_state,
)
estimated_importance_weights[test_idx] = self.predict(
context=context[test_idx],
action=action[test_idx],
position=position[test_idx],
)
if evaluate_model_performance:
sampled_action = np.zeros(test_idx.shape[0], dtype=int)
if self.fitting_method == "sample":
for pos_ in np.arange(self.len_list):
idx = position[test_idx] == pos_
sampled_action_at_position = sample_action_fast(
action_dist=action_dist[test_idx][idx][:, :, pos_],
random_state=random_state,
)
sampled_action[idx] = sampled_action_at_position
for pos_ in np.arange(self.len_list):
idx = position[test_idx] == pos_
action_dist_at_pos = action_dist[test_idx][idx][:, :, pos_]
X, y = self._pre_process_for_clf_model(
context=context[test_idx][idx],
action=action[test_idx][idx],
action_dist_at_pos=action_dist_at_pos,
sampled_action_at_position=sampled_action[idx],
)
proba_eval_policy = self.base_model_list[pos_].predict_proba(X)[
:, 1
]
self.eval_result["proba"].append(proba_eval_policy)
self.eval_result["y"].append(y)
return estimated_importance_weights
def _pre_process_for_clf_model(
self,
context: np.ndarray,
action: np.ndarray,
action_dist_at_pos: Optional[np.ndarray] = None,
sampled_action_at_position: Optional[np.ndarray] = None,
is_prediction: bool = False,
) -> np.ndarray:
"""Preprocess feature vectors and output labels to train a classification model.
Note
-----
        Please override this method if you want to use a different feature
        engineering procedure for training the classification model.
Parameters
-----------
context: array-like, shape (n_rounds,)
Context vectors observed for each data in logged bandit data, i.e., :math:`x_i`.
action: array-like, shape (n_rounds,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
action_dist_at_pos: array-like, shape (n_rounds, n_actions,)
Action choice probabilities of the evaluation policy of each position (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.
        sampled_action_at_position: array-like, shape (n_rounds,)
Actions sampled by evaluation policy for each data at each position.
"""
behavior_policy_feature = np.c_[context, self.action_context[action]]
if is_prediction:
return behavior_policy_feature, None
if self.fitting_method == "raw":
evaluation_policy_feature = np.c_[context, action_dist_at_pos]
elif self.fitting_method == "sample":
evaluation_policy_feature = np.c_[
context, self.action_context[sampled_action_at_position]
]
X = np.copy(behavior_policy_feature)
y = np.zeros(X.shape[0], dtype=int)
X = np.r_[X, evaluation_policy_feature]
y = np.r_[y, np.ones(evaluation_policy_feature.shape[0], dtype=int)]
return X, y
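# A minimal usage sketch, assuming the enclosing class is obp's
# `ImportanceWeightEstimator` and using sklearn's LogisticRegression as one
# possible `base_model`; the synthetic data below is illustrative only.
#
#     from sklearn.linear_model import LogisticRegression
#
#     n_rounds, n_actions, dim_context = 1000, 3, 5
#     rng = np.random.default_rng(12345)
#     context = rng.normal(size=(n_rounds, dim_context))
#     action = rng.integers(n_actions, size=n_rounds)
#     # uniform evaluation policy; probabilities along axis 1 sum to one
#     action_dist = np.full((n_rounds, n_actions, 1), 1.0 / n_actions)
#
#     iw_estimator = ImportanceWeightEstimator(
#         base_model=LogisticRegression(), n_actions=n_actions
#     )
#     importance_weights = iw_estimator.fit_predict(
#         context=context, action=action, action_dist=action_dist,
#         n_folds=2, random_state=12345,
#     )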
@dataclass
class PropensityScoreEstimator(BaseEstimator):
"""Machine learning model to estimate propensity scores (:math:`\\pi_{b}(a|x)`).
Parameters
------------
base_model: BaseEstimator
        A machine learning model used to estimate the propensity scores.
n_actions: int
Number of actions.
len_list: int, default=1
        Length of a list of actions in a recommendation/ranking interface, slate size.
When Open Bandit Dataset is used, 3 should be set.
calibration_cv: int, default=2
Number of folds in the calibration procedure.
If calibration_cv <= 1, calibration will not be applied.
References
-----------
Arjun Sondhi, David Arbour, and Drew Dimmery
"Balanced Off-Policy Evaluation in General Action Spaces.", 2020.
"""
base_model: BaseEstimator
n_actions: int
len_list: int = 1
calibration_cv: int = 2
def __post_init__(self) -> None:
"""Initialize Class."""
check_scalar(self.n_actions, "n_actions", int, min_val=2)
check_scalar(self.len_list, "len_list", int, min_val=1)
check_scalar(self.calibration_cv, "calibration_cv", int)
if not isinstance(self.base_model, BaseEstimator):
raise ValueError(
"`base_model` must be BaseEstimator or a child class of BaseEstimator"
)
if self.calibration_cv > 1:
self.base_model_list = [
clone(
CalibratedClassifierCV(
base_estimator=self.base_model, cv=self.calibration_cv
),
)
for _ in np.arange(self.len_list)
]
else:
self.base_model_list = [
clone(self.base_model) for _ in np.arange(self.len_list)
]
def fit(
self,
context: np.ndarray,
action: np.ndarray,
position: Optional[np.ndarray] = None,
) -> None:
"""Fit the classification model on given logged bandit data.
Parameters
----------
context: array-like, shape (n_rounds, dim_context)
Context vectors observed for each data in logged bandit data, i.e., :math:`x_i`.
action: array-like, shape (n_rounds,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
position: array-like, shape (n_rounds,), default=None
Indices to differentiate positions in a recommendation interface where the actions are presented.
If None, a classification model assumes that there is only a single position in a recommendation interface.
When `len_list` > 1, an array must be given as `position`.
"""
check_bandit_feedback_inputs(
context=context,
action=action,
reward=np.zeros_like(action), # use dummy reward
position=position,
action_context=np.eye(self.n_actions, dtype=int),
)
if position is None or self.len_list == 1:
position = np.zeros_like(action)
        else:
            check_array(array=position, name="position", expected_dim=1)
            if position.max() >= self.len_list:
raise ValueError(
f"`position` elements must be smaller than `len_list`, but the maximum value is {position.max()} (>= {self.len_list})"
)
for pos_ in np.arange(self.len_list):
idx = position == pos_
if context[idx].shape[0] == 0:
raise ValueError(f"No training data at position {pos_}")
self.base_model_list[pos_].fit(context[idx], action[idx])
def predict(
self,
context: np.ndarray,
action: np.ndarray,
position: Optional[np.ndarray] = None,
) -> np.ndarray:
"""Predict the propensity scores.
Parameters
----------
context: array-like, shape (n_rounds_of_new_data, dim_context)
Context vectors observed for each data in logged bandit data, i.e., :math:`x_i`.
action: array-like, shape (n_rounds_of_new_data,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
position: array-like, shape (n_rounds_of_new_data,), default=None
Indices to differentiate positions in a recommendation interface where the actions are presented.
If None, a classification model assumes that there is only a single position in a recommendation interface.
When `len_list` > 1, an array must be given as `position`.
Returns
----------
estimated_pscore: array-like, shape (n_rounds_of_new_data, )
Estimated propensity scores, i.e., :math:`\\hat{\\pi}_b (a \\mid x)`.
"""
        estimated_pscore = np.zeros(action.shape[0])
        # fall back to a single position when `position` is not given
        if position is None or self.len_list == 1:
            position = np.zeros_like(action)
        for pos_ in np.arange(self.len_list):
            idx = position == pos_
if context[idx].shape[0] == 0:
continue
estimated_pscore[idx] = self.base_model_list[pos_].predict_proba(
context[idx]
)[np.arange(action[idx].shape[0]), action[idx]]
return estimated_pscore
def fit_predict(
self,
context: np.ndarray,
action: np.ndarray,
position: Optional[np.ndarray] = None,
n_folds: int = 1,
random_state: Optional[int] = None,
evaluate_model_performance: bool = False,
) -> np.ndarray:
"""Fit the classification model on given logged bandit data and predict the propensity score on the same data, possibly using the cross-fitting procedure to avoid over-fitting.
Note
------
When `n_folds` is larger than 1, the cross-fitting procedure is applied.
Parameters
----------
context: array-like, shape (n_rounds, dim_context)
Context vectors observed for each data in logged bandit data, i.e., :math:`x_i`.
action: array-like, shape (n_rounds,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
position: array-like, shape (n_rounds,), default=None
Indices to differentiate positions in a recommendation interface where the actions are presented.
If None, a classification model assumes that there is only a single position.
When `len_list` > 1, an array must be given as `position`.
n_folds: int, default=1
Number of folds in the cross-fitting procedure.
When 1 is given, the classification model is trained on the whole logged bandit data.
Please refer to https://arxiv.org/abs/2002.08536 about the details of the cross-fitting procedure.
random_state: int, default=None
`random_state` affects the ordering of the indices, which controls the randomness of each fold.
See https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html for the details.
evaluate_model_performance: bool, default=False
Whether the performance of the classification model is evaluated or not.
            If True, the predicted probability of the classification model and the true label of each fold are saved in `self.eval_result`.
Returns
-----------
        estimated_pscore: array-like, shape (n_rounds,)
Estimated propensity score, i.e., :math:`\\hat{\\pi}_b (a \\mid x)`.
"""
check_bandit_feedback_inputs(
context=context,
action=action,
reward=np.zeros_like(action), # use dummy reward
position=position,
action_context=np.eye(self.n_actions, dtype=int),
)
if position is None or self.len_list == 1:
position = np.zeros_like(action)
        else:
            check_array(array=position, name="position", expected_dim=1)
            if position.max() >= self.len_list:
raise ValueError(
f"`position` elements must be smaller than `len_list`, but the maximum value is {position.max()} (>= {self.len_list})"
)
check_scalar(n_folds, "n_folds", int, min_val=1)
check_random_state(random_state)
if n_folds == 1:
self.fit(
context=context,
action=action,
position=position,
)
return self.predict(context=context, action=action, position=position)
else:
estimated_pscore = np.zeros(context.shape[0])
kf = KFold(n_splits=n_folds, shuffle=True, random_state=random_state)
kf.get_n_splits(context)
if evaluate_model_performance:
self.eval_result = {"y": [], "proba": []}
for train_idx, test_idx in kf.split(context):
self.fit(
context=context[train_idx],
action=action[train_idx],
position=position[train_idx],
)
estimated_pscore[test_idx] = self.predict(
context=context[test_idx],
action=action[test_idx],
position=position[test_idx],
)
if evaluate_model_performance:
for pos_ in np.arange(self.len_list):
idx = position[test_idx] == pos_
if context[test_idx][idx].shape[0] == 0:
continue
proba_eval = self.base_model_list[pos_].predict_proba(
context[test_idx][idx]
)
self.eval_result["proba"].append(proba_eval)
self.eval_result["y"].append(action[test_idx][idx])
return estimated_pscore
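# A minimal usage sketch, assuming sklearn's LogisticRegression as one
# possible `base_model` (`context` and `action` as passed to `fit_predict`
# above):
#
#     from sklearn.linear_model import LogisticRegression
#
#     pscore_estimator = PropensityScoreEstimator(
#         base_model=LogisticRegression(), n_actions=3
#     )
#     estimated_pscore = pscore_estimator.fit_predict(
#         context=context, action=action, n_folds=2, random_state=12345
#     )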
|
{
"content_hash": "8964cbd733286cdca7c84f61a8f6347f",
"timestamp": "",
"source": "github",
"line_count": 630,
"max_line_length": 184,
"avg_line_length": 42.86349206349206,
"alnum_prop": 0.5879869648940897,
"repo_name": "st-tech/zr-obp",
"id": "b2c307c933ed65a1bbda66280f8c2170011dda78",
"size": "27138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "obp/ope/classification_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1767845"
}
],
"symlink_target": ""
}
|
from nova import exception
from nova.network import model
from nova import test
from nova.tests import fake_network_cache_model
from nova.virt import netutils
class RouteTests(test.TestCase):
def test_create_route_with_attrs(self):
route = fake_network_cache_model.new_route()
ip = fake_network_cache_model.new_ip(dict(address='192.168.1.1'))
self.assertEqual(route['cidr'], '0.0.0.0/24')
self.assertEqual(route['gateway']['address'], '192.168.1.1')
self.assertEqual(route['interface'], 'eth0')
def test_routes_equal(self):
route1 = fake_network_cache_model.new_route()
route2 = fake_network_cache_model.new_route()
self.assertEqual(route1, route2)
def test_routes_not_equal(self):
route1 = fake_network_cache_model.new_route()
route2 = fake_network_cache_model.new_route(dict(cidr='1.1.1.1/24'))
self.assertNotEqual(route1, route2)
def test_hydrate(self):
route = model.Route.hydrate(
{'gateway': fake_network_cache_model.new_ip(
dict(address='192.168.1.1'))})
self.assertEqual(route['cidr'], None)
self.assertEqual(route['gateway']['address'], '192.168.1.1')
self.assertEqual(route['interface'], None)
class FixedIPTests(test.TestCase):
    def test_create_new_fixed_ip_with_attrs(self):
fixed_ip = model.FixedIP(address='192.168.1.100')
self.assertEqual(fixed_ip['address'], '192.168.1.100')
self.assertEqual(fixed_ip['floating_ips'], [])
self.assertEqual(fixed_ip['type'], 'fixed')
self.assertEqual(fixed_ip['version'], 4)
def test_create_fixed_ipv6(self):
fixed_ip = model.FixedIP(address='::1')
self.assertEqual(fixed_ip['address'], '::1')
self.assertEqual(fixed_ip['floating_ips'], [])
self.assertEqual(fixed_ip['type'], 'fixed')
self.assertEqual(fixed_ip['version'], 6)
def test_create_fixed_bad_ip_fails(self):
self.assertRaises(exception.InvalidIpAddressError,
model.FixedIP,
address='picklespicklespickles')
def test_equate_two_fixed_ips(self):
fixed_ip = model.FixedIP(address='::1')
fixed_ip2 = model.FixedIP(address='::1')
self.assertEqual(fixed_ip, fixed_ip2)
def test_equate_two_dissimilar_fixed_ips_fails(self):
fixed_ip = model.FixedIP(address='::1')
fixed_ip2 = model.FixedIP(address='::2')
self.assertNotEqual(fixed_ip, fixed_ip2)
def test_hydrate(self):
fixed_ip = model.FixedIP.hydrate({})
self.assertEqual(fixed_ip['floating_ips'], [])
self.assertEqual(fixed_ip['address'], None)
self.assertEqual(fixed_ip['type'], 'fixed')
self.assertEqual(fixed_ip['version'], None)
def test_add_floating_ip(self):
fixed_ip = model.FixedIP(address='192.168.1.100')
fixed_ip.add_floating_ip('192.168.1.101')
self.assertEqual(fixed_ip['floating_ips'], ['192.168.1.101'])
def test_add_floating_ip_repeatedly_only_one_instance(self):
fixed_ip = model.FixedIP(address='192.168.1.100')
for i in xrange(10):
fixed_ip.add_floating_ip('192.168.1.101')
self.assertEqual(fixed_ip['floating_ips'], ['192.168.1.101'])
class SubnetTests(test.TestCase):
def test_create_subnet_with_attrs(self):
subnet = fake_network_cache_model.new_subnet()
route1 = fake_network_cache_model.new_route()
self.assertEqual(subnet['cidr'], '10.10.0.0/24')
self.assertEqual(subnet['dns'],
[fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
fake_network_cache_model.new_ip(dict(address='2.3.4.5'))])
self.assertEqual(subnet['gateway']['address'], '10.10.0.1')
self.assertEqual(subnet['ips'],
[fake_network_cache_model.new_ip(
dict(address='10.10.0.2')),
fake_network_cache_model.new_ip(
dict(address='10.10.0.3'))])
self.assertEqual(subnet['routes'], [route1])
self.assertEqual(subnet['version'], 4)
def test_add_route(self):
subnet = fake_network_cache_model.new_subnet()
route1 = fake_network_cache_model.new_route()
route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'})
subnet.add_route(route2)
self.assertEqual(subnet['routes'], [route1, route2])
def test_add_route_a_lot(self):
subnet = fake_network_cache_model.new_subnet()
route1 = fake_network_cache_model.new_route()
route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'})
for i in xrange(10):
subnet.add_route(route2)
self.assertEqual(subnet['routes'], [route1, route2])
def test_add_dns(self):
subnet = fake_network_cache_model.new_subnet()
dns = fake_network_cache_model.new_ip(dict(address='9.9.9.9'))
subnet.add_dns(dns)
self.assertEqual(subnet['dns'],
[fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
fake_network_cache_model.new_ip(dict(address='9.9.9.9'))])
def test_add_dns_a_lot(self):
subnet = fake_network_cache_model.new_subnet()
for i in xrange(10):
subnet.add_dns(fake_network_cache_model.new_ip(
dict(address='9.9.9.9')))
self.assertEqual(subnet['dns'],
[fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
fake_network_cache_model.new_ip(dict(address='9.9.9.9'))])
def test_add_ip(self):
subnet = fake_network_cache_model.new_subnet()
subnet.add_ip(fake_network_cache_model.new_ip(
dict(address='192.168.1.102')))
self.assertEqual(subnet['ips'],
[fake_network_cache_model.new_ip(
dict(address='10.10.0.2')),
fake_network_cache_model.new_ip(
dict(address='10.10.0.3')),
fake_network_cache_model.new_ip(
dict(address='192.168.1.102'))])
def test_add_ip_a_lot(self):
subnet = fake_network_cache_model.new_subnet()
for i in xrange(10):
subnet.add_ip(fake_network_cache_model.new_ip(
dict(address='192.168.1.102')))
self.assertEqual(subnet['ips'],
[fake_network_cache_model.new_ip(
dict(address='10.10.0.2')),
fake_network_cache_model.new_ip(
dict(address='10.10.0.3')),
fake_network_cache_model.new_ip(
dict(address='192.168.1.102'))])
def test_hydrate(self):
subnet_dict = {
'cidr': '255.255.255.0',
'dns': [fake_network_cache_model.new_ip(dict(address='1.1.1.1'))],
'ips': [fake_network_cache_model.new_ip(dict(address='2.2.2.2'))],
'routes': [fake_network_cache_model.new_route()],
'version': 4,
'gateway': fake_network_cache_model.new_ip(
dict(address='3.3.3.3'))}
subnet = model.Subnet.hydrate(subnet_dict)
self.assertEqual(subnet['cidr'], '255.255.255.0')
self.assertEqual(subnet['dns'], [fake_network_cache_model.new_ip(
dict(address='1.1.1.1'))])
self.assertEqual(subnet['gateway']['address'], '3.3.3.3')
self.assertEqual(subnet['ips'], [fake_network_cache_model.new_ip(
dict(address='2.2.2.2'))])
self.assertEqual(subnet['routes'], [
fake_network_cache_model.new_route()])
self.assertEqual(subnet['version'], 4)
class NetworkTests(test.TestCase):
def test_create_network(self):
network = fake_network_cache_model.new_network()
self.assertEqual(network['id'], 1)
self.assertEqual(network['bridge'], 'br0')
self.assertEqual(network['label'], 'public')
self.assertEqual(network['subnets'],
[fake_network_cache_model.new_subnet(),
fake_network_cache_model.new_subnet(
dict(cidr='255.255.255.255'))])
def test_add_subnet(self):
network = fake_network_cache_model.new_network()
network.add_subnet(fake_network_cache_model.new_subnet(
dict(cidr='0.0.0.0')))
self.assertEqual(network['subnets'],
[fake_network_cache_model.new_subnet(),
fake_network_cache_model.new_subnet(
dict(cidr='255.255.255.255')),
fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))])
def test_add_subnet_a_lot(self):
network = fake_network_cache_model.new_network()
for i in xrange(10):
network.add_subnet(fake_network_cache_model.new_subnet(
dict(cidr='0.0.0.0')))
self.assertEqual(network['subnets'],
[fake_network_cache_model.new_subnet(),
fake_network_cache_model.new_subnet(
dict(cidr='255.255.255.255')),
fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))])
def test_hydrate(self):
new_network = dict(
id=1,
bridge='br0',
label='public',
subnets=[fake_network_cache_model.new_subnet(),
fake_network_cache_model.new_subnet(
dict(cidr='255.255.255.255'))])
network = model.Network.hydrate(fake_network_cache_model.new_network())
self.assertEqual(network['id'], 1)
self.assertEqual(network['bridge'], 'br0')
self.assertEqual(network['label'], 'public')
self.assertEqual(network['subnets'],
[fake_network_cache_model.new_subnet(),
fake_network_cache_model.new_subnet(
dict(cidr='255.255.255.255'))])
class VIFTests(test.TestCase):
def test_create_vif(self):
vif = fake_network_cache_model.new_vif()
self.assertEqual(vif['id'], 1)
self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
self.assertEqual(vif['network'],
fake_network_cache_model.new_network())
def test_create_vif_with_type(self):
vif_dict = dict(
id=1,
address='aa:aa:aa:aa:aa:aa',
network=fake_network_cache_model.new_network(),
type='bridge')
vif = fake_network_cache_model.new_vif(vif_dict)
self.assertEqual(vif['id'], 1)
self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
self.assertEqual(vif['type'], 'bridge')
self.assertEqual(vif['network'],
fake_network_cache_model.new_network())
def test_vif_get_fixed_ips(self):
vif = fake_network_cache_model.new_vif()
fixed_ips = vif.fixed_ips()
ips = [fake_network_cache_model.new_ip(dict(address='10.10.0.2')),
fake_network_cache_model.new_ip(
dict(address='10.10.0.3'))] * 2
self.assertEqual(fixed_ips, ips)
def test_vif_get_floating_ips(self):
vif = fake_network_cache_model.new_vif()
vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
floating_ips = vif.floating_ips()
self.assertEqual(floating_ips, ['192.168.1.1'])
def test_vif_get_labeled_ips(self):
vif = fake_network_cache_model.new_vif()
labeled_ips = vif.labeled_ips()
ip_dict = {
'network_id': 1,
'ips': [fake_network_cache_model.new_ip(
{'address': '10.10.0.2'}),
fake_network_cache_model.new_ip(
{'address': '10.10.0.3'})] * 2,
'network_label': 'public'}
self.assertEqual(labeled_ips, ip_dict)
def test_hydrate(self):
new_vif = dict(
id=1,
address='127.0.0.1',
network=fake_network_cache_model.new_network())
vif = model.VIF.hydrate(fake_network_cache_model.new_vif())
self.assertEqual(vif['id'], 1)
self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
self.assertEqual(vif['network'],
fake_network_cache_model.new_network())
def test_hydrate_vif_with_type(self):
vif_dict = dict(
id=1,
address='aa:aa:aa:aa:aa:aa',
network=fake_network_cache_model.new_network(),
type='bridge')
vif = model.VIF.hydrate(fake_network_cache_model.new_vif(vif_dict))
self.assertEqual(vif['id'], 1)
self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
self.assertEqual(vif['type'], 'bridge')
self.assertEqual(vif['network'],
fake_network_cache_model.new_network())
class NetworkInfoTests(test.TestCase):
def test_create_model(self):
ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
fake_network_cache_model.new_vif(
{'address': 'bb:bb:bb:bb:bb:bb'})])
self.assertEqual(ninfo.fixed_ips(),
[fake_network_cache_model.new_ip({'address': '10.10.0.2'}),
fake_network_cache_model.new_ip(
{'address': '10.10.0.3'})] * 4)
def test_create_async_model(self):
def async_wrapper():
return model.NetworkInfo(
[fake_network_cache_model.new_vif(),
fake_network_cache_model.new_vif(
{'address': 'bb:bb:bb:bb:bb:bb'})])
ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
self.assertEqual(ninfo.fixed_ips(),
[fake_network_cache_model.new_ip({'address': '10.10.0.2'}),
fake_network_cache_model.new_ip(
{'address': '10.10.0.3'})] * 4)
def test_create_async_model_exceptions(self):
def async_wrapper():
raise test.TestingException()
ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
self.assertRaises(test.TestingException, ninfo.wait)
# 2nd one doesn't raise
self.assertEqual(None, ninfo.wait())
# Test that do_raise=False works on .wait()
ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
self.assertEqual(None, ninfo.wait(do_raise=False))
# Test we also raise calling a method
ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
self.assertRaises(test.TestingException, ninfo.fixed_ips)
def test_get_floating_ips(self):
vif = fake_network_cache_model.new_vif()
vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
ninfo = model.NetworkInfo([vif,
fake_network_cache_model.new_vif(
{'address': 'bb:bb:bb:bb:bb:bb'})])
self.assertEqual(ninfo.floating_ips(), ['192.168.1.1'])
def test_hydrate(self):
ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
fake_network_cache_model.new_vif(
{'address': 'bb:bb:bb:bb:bb:bb'})])
model.NetworkInfo.hydrate(ninfo)
self.assertEqual(ninfo.fixed_ips(),
[fake_network_cache_model.new_ip({'address': '10.10.0.2'}),
fake_network_cache_model.new_ip(
{'address': '10.10.0.3'})] * 4)
def _test_injected_network_template(self, should_inject, use_ipv6=False,
legacy=False):
"""Check that netutils properly decides whether to inject based on
whether the supplied subnet is static or dynamic.
"""
network = fake_network_cache_model.new_network({'subnets': []})
if should_inject:
network.add_subnet(fake_network_cache_model.new_subnet())
if use_ipv6:
gateway_ip = fake_network_cache_model.new_ip(dict(
address='1234:567::1'))
ip = fake_network_cache_model.new_ip(dict(
address='1234:567::2'))
subnet_dict = dict(
cidr='1234:567::/48',
gateway=gateway_ip,
ips=[ip])
network.add_subnet(fake_network_cache_model.new_subnet(
subnet_dict))
else:
subnet_dict = dict(dhcp_server='10.10.0.1')
network.add_subnet(fake_network_cache_model.new_subnet(
subnet_dict))
# Behave as though CONF.flat_injected is True
network['meta']['injected'] = True
vif = fake_network_cache_model.new_vif({'network': network})
ninfo = model.NetworkInfo([vif])
if legacy:
ninfo = ninfo.legacy()
template = netutils.get_injected_network_template(ninfo,
use_ipv6=use_ipv6)
# NOTE(bnemec): There is a bug with legacy network info that causes
# it to inject regardless of whether the network is static or dynamic.
# This can't be fixed without changes that would potentially break
# existing code, so until legacy network info goes away this test
# will just ignore the improper behavior.
if not should_inject and not legacy:
self.assertTrue(template is None)
else:
self.assertTrue('auto eth0' in template)
self.assertTrue('iface eth0 inet static' in template)
self.assertTrue('address 10.10.0.2' in template)
self.assertTrue('netmask 255.255.255.0' in template)
self.assertTrue('broadcast 10.10.0.255' in template)
self.assertTrue('gateway 10.10.0.1' in template)
self.assertTrue('dns-nameservers 1.2.3.4 2.3.4.5' in template)
if use_ipv6:
self.assertTrue('iface eth0 inet6 static' in template)
self.assertTrue('address 1234:567::2' in template)
self.assertTrue('netmask 48' in template)
self.assertTrue('gateway 1234:567::1' in template)
def test_injection_static(self):
self._test_injected_network_template(should_inject=True)
def test_injection_static_ipv6(self):
self._test_injected_network_template(should_inject=True, use_ipv6=True)
def test_injection_dynamic(self):
self._test_injected_network_template(should_inject=False)
def test_injection_static_legacy(self):
self._test_injected_network_template(should_inject=True, legacy=True)
def test_injection_static_ipv6_legacy(self):
self._test_injected_network_template(should_inject=True,
use_ipv6=True,
legacy=True)
def test_injection_dynamic_legacy(self):
self._test_injected_network_template(should_inject=False, legacy=True)
|
{
"content_hash": "522b3391ad822ff2fee6d94eaf12827c",
"timestamp": "",
"source": "github",
"line_count": 435,
"max_line_length": 79,
"avg_line_length": 44.236781609195404,
"alnum_prop": 0.5684144883853869,
"repo_name": "plumgrid/plumgrid-nova",
"id": "38a27b51c7a8a370d2aa1d89e2c0c99da85fcadc",
"size": "19951",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/tests/network/test_network_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11944269"
},
{
"name": "Shell",
"bytes": "17148"
}
],
"symlink_target": ""
}
|
import os
import subprocess
from .base import CorePlatform
class LinuxPlatform(CorePlatform):
def create_shortcut(self, path, target):
os.symlink(target, path)
def add_to_path(self, path):
if path not in os.environ['PATH']:
line = 'export PATH=$PATH:{}'.format(path)
cmd = 'echo "{}" >> ~/.bashrc'.format(line)
subprocess.call(cmd, shell=True)
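# A minimal usage sketch with illustrative paths (the paths are assumptions,
# not part of the package). Note that add_to_path() only appends to
# ~/.bashrc, so the change takes effect in new shells, not in the current
# process.
#
#     platform = LinuxPlatform()
#     platform.create_shortcut('/home/user/bin/tool', '/opt/tool/bin/tool')
#     platform.add_to_path('/opt/tool/bin')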
|
{
"content_hash": "82c23d49bb5c46ea341472a291fa4728",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 55,
"avg_line_length": 29.357142857142858,
"alnum_prop": 0.610705596107056,
"repo_name": "kalail/packer",
"id": "0bf9bee4df28c58ce254b7aa732d0a6164c6059a",
"size": "411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packer/platforms/linux.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19756"
},
{
"name": "Ruby",
"bytes": "194"
},
{
"name": "Shell",
"bytes": "6530"
}
],
"symlink_target": ""
}
|
import logging
import unittest
from config_test import build_client_from_configuration
_logger = logging.getLogger(__name__)
class TestSpaces(unittest.TestCase):
def test_list(self):
cpt = 0
client = build_client_from_configuration()
for space in client.v2.spaces.list(organization_guid=client.org_guid):
_logger.debug(" - %s" % space["entity"]["name"])
if cpt == 0:
space = client.v2.spaces.get(space["metadata"]["guid"])
self.assertIsNotNone(space)
space = client.v2.spaces.get_first(organization_guid=client.org_guid, name=space["entity"]["name"])
self.assertIsNotNone(space)
cpt += 1
_logger.debug("test spaces list - %d found", cpt)
|
{
"content_hash": "3312d22a15a1c7069b5fe4ccdef6c6f2",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 115,
"avg_line_length": 37.04761904761905,
"alnum_prop": 0.6105398457583547,
"repo_name": "antechrestos/cf-python-client",
"id": "5d791c0a731432494d793702af055c4b40058cce",
"size": "778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integration/v2/test_spaces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Protocol Buffer",
"bytes": "595"
},
{
"name": "Python",
"bytes": "122987"
}
],
"symlink_target": ""
}
|
__author__ = 'Deniz'
from RiotWatcher.riotwatcher import RiotWatcher
from RiotWatcher.riotwatcher import LoLException
import re, argparse, time
from operator import itemgetter
# Setup RiotWatcher object with api key
f = open('apikey.txt', 'r')
api = RiotWatcher(f.read())
allChampionsUsed = []
f = open('loChampionPairs', 'r')
champions = f.read()
champions = champions.splitlines()
summoner_name_id_dict = {}
summoner_most_used_champ_dict = {}
def main():
global summoner_name_id_dict
global summoner_most_used_champ_dict
global allChampionsUsed
# Command line parsing
global inputLocation
global outputLocation
parser = argparse.ArgumentParser(description='Attempt to find every given summoners most used champion.')
parser.add_argument('-in', metavar='i', type=str)
parser.add_argument('-out', metavar='o', type=str)
args = parser.parse_args()
    # '-in' collides with the `in` keyword, so read both options from the
    # vars() dict by name instead of relying on dict value ordering
    arg_values = vars(args)
    print arg_values
    outputLocation = arg_values['out']
    inputLocation = arg_values['in']
# Check if we have API calls remaining
createDictOfSummoners()
print len(summoner_name_id_dict)
# For every key in the dict, get the summoner stats
for k, v in summoner_name_id_dict.iteritems():
getSummonerStats(k, v)
print "MOST USED CHAMPIONS FOUND"
# Read the input txt file and create a dictionary of summoners.
# Dict format is {summonerId: summonerName} since summoners cannot have the same ID
# but could possibly have the same username across different region servers.
def createDictOfSummoners():
f = open(inputLocation, 'r')
read_input = f.readlines()
for i, line in enumerate(read_input):
        # Append the summoner ID (minus the trailing newline) and summoner name to the dict
summoner_name_id_dict.update({line.split(":")[1][:-1]: line.split(":")[0]})
# Get the ranked stats of the given summoner ID
def getSummonerStats(summoner_id, summoner_name):
if api.can_make_request():
try:
summoner_stats = api.get_ranked_stats(summoner_id, region=None, season=None)
except LoLException:
print "GAME DATA NOT FOUND FOR SUMMONER: " + str(summoner_id)
summoner_stats = "{u'modifyDate': 1406927571000L, u'summonerId': 0000, u'champions': [{u'stats': {u'totalPhysicalDamageDealt': 152101, u'totalTurretsKilled': 1, u'totalSessionsPlayed': 1000, u'totalAssists': 10, u'totalDamageDealt': 158764, u'mostChampionKillsPerSession': 2, u'totalPentaKills': 0, u'mostSpellsCast': 0, u'totalDoubleKills': 0, u'maxChampionsKilled': 2, u'totalDeathsPerSession': 8, u'totalSessionsWon': 0, u'totalGoldEarned': 12405, u'totalTripleKills': 0, u'totalChampionKills': 2, u'maxNumDeaths': 8, u'totalMinionKills': 199, u'totalMagicDamageDealt': 5315, u'totalQuadraKills': 0, u'totalUnrealKills': 0, u'totalDamageTaken': 17519, u'totalSessionsLost': 1, u'totalFirstBlood': 0}, u'id': XX}, 2]}"
summoner_id += "XX"
parseSummonerStats(summoner_stats, summoner_id, summoner_name)
else:
print "Not enough API calls for ID: " + summoner_id + " waiting 4 seconds"
time.sleep(4)
getSummonerStats(summoner_id, summoner_name)
# Given the ranked stats, parse it to get the totalSessionsPlayed and
# corresponding champion id value
def parseSummonerStats(summoner_stats, summoner_id, summoner_name):
championsUsed = []
# Break up the stats by summoner
summoner_stats = str(summoner_stats).split(', {')
# Throw away the last summoner (id = 0 this is the combined stats)
summoner_stats = summoner_stats[:-1]
start = "'totalSessionsPlayed': "
end = ", u'totalAssists"
start1 = "u'id': "
end1 = "}"
for s in summoner_stats:
# Get the number of totalSessionsPlayed
result = re.search("%s(.*)%s" % (start, end), str(s)).group(1)
# And the corresponding champion
result1 = re.search("%s(.*)%s" % (start1, end1), str(s)).group(1)
# And create a pair [totalSessionsPlayed, id]
championsUsed.append([result, result1])
sortChampions(summoner_id, summoner_name, championsUsed)
# Sort the list of all champions used by this summoner based on the number
# of totalSessionsPlayed (which is the first value in the pair)
def sortChampions(summoner_id, summoner_name, championsUsed):
if len(championsUsed) > 0:
allChampionsUsed_sorted = sorted(championsUsed, key=itemgetter(0))
# Pass only the LAST pair to getChampionTitle (most used champion!)
getChampionTitle(allChampionsUsed_sorted[-1], summoner_id, summoner_name)
else:
getChampionTitle([0, 1], summoner_id, summoner_name)
# Given a champion ID, look at the loChampionPairs file to get it's
# corresponding champion title
def getChampionTitle(mostUsedChampionPair, summoner_id, summoner_name):
# For every champion in loChampionPairs, get the most used champ's title
for line in champions:
line = line.split(' | ')
if line[0] == mostUsedChampionPair[1]:
            if "XX" in summoner_id:
mostUsedChampion = "GAME DATA NOT FOUND"
else:
mostUsedChampion = line[1]
writeMostUsedChampion(summoner_id, str(summoner_name)+":"+mostUsedChampion)
print "MOST USED CHAMPION FOR ID #" + str(summoner_id).strip("XX") +\
" IS: " + str(mostUsedChampion)
# Write the most used champion to output file
def writeMostUsedChampion(summoner_id, summoner_name_and_champ):
    with open(outputLocation, 'a') as out_file:
        out_file.write(summoner_id + ":" + summoner_name_and_champ + "\n")
if __name__ == "__main__":
main()
|
{
"content_hash": "fe8857cc3db606485414f2d5da277444",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 732,
"avg_line_length": 41.35036496350365,
"alnum_prop": 0.6833186231244484,
"repo_name": "Murkantilism/LoL_API_Research",
"id": "04575834ede00735e3f9bba4d7906600a4c24218",
"size": "5665",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "Summoner_Data_Retrieval/Get_Most_Used_Champion_Modified.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "92589"
}
],
"symlink_target": ""
}
|
import socket
class Connection(object):
"""
Class responsible for managing an API connection to the mod-host process via socket
"""
client = None
def __init__(self, socket_port=5555, address='localhost'):
self.client = socket.socket()
self.client.connect((address, socket_port))
self.client.settimeout(5)
def send(self, message):
"""
Sends message to *mod-host*.
.. note::
Uses :class:`.ProtocolParser` for a high-level management.
As example, view :class:`.Host`
:param string message: Message that will be sent for *mod-host*
"""
print(message.encode('utf-8'))
self.client.send(message.encode('utf-8'))
received = self.client.recv(1024)
return received
def close(self):
"""
Closes socket connection
"""
self.client.close()
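# A minimal usage sketch, assuming a mod-host process is already listening
# on the default port; the LV2 plugin URI below is illustrative.
#
#     connection = Connection(socket_port=5555)
#     # mod-host's text protocol: "add <lv2_uri> <instance_number>"
#     response = connection.send('add http://lv2plug.in/plugins/eg-amp 0')
#     connection.close()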
|
{
"content_hash": "1de94f7a2c16655c80be217b5a76fe49",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 87,
"avg_line_length": 25.38888888888889,
"alnum_prop": 0.5831509846827133,
"repo_name": "PedalPi/PluginsManager",
"id": "5b8a6167423e081c136090348c421fa8b0e0fd67",
"size": "1491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pluginsmanager/observer/mod_host/connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2251"
},
{
"name": "Python",
"bytes": "352748"
}
],
"symlink_target": ""
}
|
import sys
import serial
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **kwargs):
print 'test command'
print 'serial: {}'.format(serial)
with serial.Serial('/dev/tty.usbserial-FTH9JD2V', 4800) as ser:
n_reads = 0
while ser.isOpen():
print 'ser: {}'.format(ser)
# x = ser.read()
# print 'x: {}'.format(x)
# s = ser.read(10)
# print 's: {}'.format(s)
line = ser.readline()
print 'line: {}'.format(line)
n_reads += 1
print 'n_reads: {}'.format(n_reads)
sys.stdout.flush()
# ser.close()
# print ser
# print ser.open()
|
{
"content_hash": "7e32c31071b0cd8b66437276b051fc9a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 71,
"avg_line_length": 33.36,
"alnum_prop": 0.46402877697841727,
"repo_name": "opentrv/ors",
"id": "b1a598591778798e63e263d97cb6ac530aca4bd6",
"size": "834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rev2/management/commands/test_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5146"
},
{
"name": "HTML",
"bytes": "8275"
},
{
"name": "JavaScript",
"bytes": "126094"
},
{
"name": "Jupyter Notebook",
"bytes": "23177"
},
{
"name": "Python",
"bytes": "167279"
},
{
"name": "Shell",
"bytes": "690"
}
],
"symlink_target": ""
}
|
"""
RpcTest class and help functions which create Disabled/Skipped tests
>>> sometest=RpcTest("name")
>>> repr(sometest)
'name.py'
>>> sometest.is_disabled()
False
>>> sometest.reason is None
True
>>> sometest.skip_platforms
[]
>>> disabled_test=Disabled("foo", "foo does not work right now")
>>> disabled_test.is_disabled()
True
>>> disabled_test.reason
'foo does not work right now'
>>> disabled_test.skip_platforms
[]
>>> skipped_test=Skip("bar", "", "bar is skipped on all platforms because all matches empty string")
>>> skipped_test.is_skipped()
True
>>> unskipped_test=Skip("baz", "NoSuchPlatform", "baz is not skipped since there is no such platform")
>>> unskipped_test.is_skipped()
False
>>> testwithargs=RpcTest("name --somearg --anotherarg")
>>> repr(testwithargs)
'name.py --somearg --anotherarg'
"""
import platform
# collect as much platform info as we want people to be able to filter
# against for skipped tests
# platform.node() is not included because too easy to get an accidental match
# against a computer's name.
_PLATFORM_INFO = [ platform.machine(),
platform.platform(),
platform.platform(aliased=1),
platform.platform(terse=1),
platform.system(),
','.join(platform.architecture()),
','.join(platform.uname()),
]
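# For illustration only (exact values vary by machine): on a 64-bit Linux
# host the entries above might include 'x86_64', 'Linux', '64bit,ELF', a
# platform.platform() string, and a comma-joined uname() tuple, so
# Skip("sometest", "Linux", "reason") would match via platform.system().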
class RpcTest(object):
''' Convenience class for RCP tests and disabled/skipped tests '''
def __init__(self, obj):
if isinstance(obj, RpcTest):
self.name = obj.name
self.args = obj.args
self.disabled = obj.disabled
self.reason = obj.reason
self.skip_platforms = obj.skip_platforms
else:
words = str(obj).split(" ") # need to split args
self.name = words[0]
if len(words) > 1:
self.args = words[1:]
else:
self.args = []
self.disabled = False
self.reason = None
self.skip_platforms = []
def is_disabled(self):
''' returns True if test is explicitly disabled (completely) '''
return self.disabled
def is_skipped(self):
''' returns True if test would skip on this platform '''
skip = False
for platform_to_skip in self.skip_platforms:
for pi in _PLATFORM_INFO:
if platform_to_skip.lower() in pi.lower().split(','):
skip = True
                    break
return skip
def disable(self, reason):
''' set test to explicitly disabled (completely) '''
self.disabled = True
self.reason = reason
def skip(self, platforms, reason):
''' set test to skip on certain platforms '''
self.reason = reason
        if isinstance(platforms, (tuple, list)):
for i in platforms:
self.skip_platforms.append(i)
else:
self.skip_platforms.append(platforms)
def __repr__(self):
retval = self.name + '.py'
if self.args:
retval = " ".join([retval, ] + self.args)
return retval
def __str__(self):
return repr(self)
def Disabled(test_name, reason):
''' create a disabled test '''
assert reason, "disabling a test requires a reason!"
rpctest = RpcTest(test_name)
rpctest.disable(reason)
return rpctest
def Skip(test_name, platforms, reason):
''' create a test which is skipped on certain platforms.
The 'platforms' parameter can be a string or tuple/list of strings
which are matched against platform identifiers obtained when
the test is executed.
'''
assert reason, "skipping a test on some platforms requires a reason!"
rpctest = RpcTest(test_name)
rpctest.skip(platforms, reason)
return rpctest
if __name__ == "__main__":
import doctest
doctest.testmod()
|
{
"content_hash": "8aa9ed9e4c8f5e2b80aa8a25a605fe1d",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 102,
"avg_line_length": 31.0625,
"alnum_prop": 0.596327967806841,
"repo_name": "arruah/ensocoin",
"id": "16b00ea9184dd8df420e5bf196865da9f6127b10",
"size": "4163",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "qa/pull-tester/test_classes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "644011"
},
{
"name": "C++",
"bytes": "4755965"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50622"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "157148"
},
{
"name": "Makefile",
"bytes": "106044"
},
{
"name": "NSIS",
"bytes": "13096"
},
{
"name": "Objective-C",
"bytes": "5785"
},
{
"name": "Objective-C++",
"bytes": "7360"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "755824"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Roff",
"bytes": "3819"
},
{
"name": "Shell",
"bytes": "37748"
}
],
"symlink_target": ""
}
|
job_block = 5
# If it takes ~30 minutes per docking, then 5 docking should take 2.5 hours
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--ligand', \
default='../ligand/dock_in', \
help='The root directory to look for ligand mol2 files or a single file (SYBYL atom types)')
parser.add_argument('--library_requirement', default=None, \
help='The ligand file name must contain the string LIBRARY_REQUIREMENT')
parser.add_argument('--receptor', \
default='../receptor/dock_in', \
help='The directory to look for receptors or a single receptor site file (spheres). Grids should have the same prefix.')
parser.add_argument('--output', default='.', \
help='The root directory to put the dock 6 anchor and grow output')
parser.add_argument('--job_block', default=job_block, type=int, \
help='Number of dockings per job')
parser.add_argument('--run_number', default=None, type=int,
help='Up to RUN_NUMBER anchor and grow jobs will run' + \
' (if an argument is passed)')
parser.add_argument('--max_jobs', default=None, type=int)
parser.add_argument('--dry', action='store_true', default=False, \
help='Does not actually submit the job to the queue')
parser.add_argument('--email', default='', type=str, help="Preferred email for notification")
args = parser.parse_args()
# Check for the existence of input files
import os, glob
if os.path.isfile(args.ligand):
ligand_FNs = [args.ligand]
elif os.path.isdir(args.ligand):
args.ligand = os.path.abspath(args.ligand)
ligand_FNs = glob.glob(os.path.join(args.ligand,'*/*.mol2'))
if len(ligand_FNs)==0:
ligand_FNs = glob.glob(os.path.join(args.ligand,'*.mol2'))
else:
raise Exception('Ligand input %s is not a file or directory!'%args.ligand)
ligand_FNs = sorted([os.path.abspath(FN) for FN in ligand_FNs if os.path.getsize(FN)>0])
if os.path.isfile(args.receptor):
receptor_FNs = [args.receptor]
elif os.path.isdir(args.receptor):
args.receptor = os.path.abspath(args.receptor)
receptor_FNs = glob.glob(os.path.join(args.receptor,'*.sph'))
else:
raise Exception('Receptor input %s is not a file or directory!'%args.receptor)
# Require nrg and bmp as well as sph files
receptor_FNs = [os.path.abspath(FN) for FN in receptor_FNs \
if os.path.isfile(FN[:-4]+'.nrg') and os.path.isfile(FN[:-4]+'.bmp')]
if os.path.isdir(args.output):
args.output = os.path.abspath(args.output)
else:
    raise Exception('Docking output %s is not a directory!'%args.output)
# Find anchor_and_grow.py
import os, inspect
dirs = {}
dirs['script'] = os.path.dirname(os.path.abspath(\
inspect.getfile(inspect.currentframe())))
execfile(os.path.join(dirs['script'],'_external_paths.py'))
ancg_script = os.path.join(dirs['script'], 'anchor_and_grow.py')
command_paths = findPaths(['qsub_command'])
print '%d ligands and %d receptors found'%(len(ligand_FNs),len(receptor_FNs))
if (args.library_requirement is not None):
ligand_FNs = [FN for FN in ligand_FNs \
if FN[len(args.ligand)+1:].find(args.library_requirement)>-1]
print '%d ligand(s) meet the library requirement'%(len(ligand_FNs))
if (args.run_number is not None) and (args.run_number < len(ligand_FNs)):
import numpy as np
inds_o = set([int(np.floor(ind)) \
for ind in np.linspace(0,len(ligand_FNs)-1,args.run_number)])
ligand_FNs = [ligand_FNs[ind] for ind in inds_o]
print '%d ligand(s) will be prepared'%(len(ligand_FNs))
import subprocess
command_list = []
out_FNs = []
out_remaps = []
code_list = []
job_count = 0
for receptor_FN in receptor_FNs:
labels = {'receptor':os.path.basename(receptor_FN[:-4])}
for ligand_FN in ligand_FNs:
labels['key'] = os.path.basename(ligand_FN[:-5])
labels['library'] = '.'.join(os.path.dirname(\
ligand_FN[len(args.ligand)+1:]).split('.')[:-1])
out_prefix = os.path.join(args.output, \
labels['library']+'.'+labels['key'][:-2]+'__', \
labels['key'], labels['receptor'])
if not (os.path.exists(out_prefix+'.mol2.gz') or os.path.exists(out_prefix+'.nc')):
out_FN = out_prefix + '.mol2.gz'
command = 'python {0} {1} {2} {3}'.format(
ancg_script, ligand_FN, receptor_FN, out_FN)
command_list.append(command)
out_FNs.append(out_FN)
if os.path.basename(out_FN)!=out_FN:
out_remaps.append(os.path.basename(out_FN))
out_remaps.append(out_FN)
code_list.append(labels['key'])
ncommands = len(command_list)
if ncommands==args.job_block or \
((ncommands>0) and (ligand_FN==ligand_FNs[-1])):
command = '; '.join(command_list)
name = labels['receptor'] + '-' + '.'.join(code_list)
print command
system_command = ' '.join(['python',command_paths['qsub_command'],\
name, "'"+command+"'", \
'--input_files', ancg_script, \
receptor_FN[:-4]+'.nrg', receptor_FN[:-4]+'.bmp', \
'--output_files', ' '.join(out_FNs), \
{True:'--output_remaps ' + ' '.join(out_remaps), \
False:''}[len(out_remaps)>0], \
{True:'--dry',False:''}[args.dry], '--email', args.email])
print system_command
os.system(system_command)
command_list = []
out_FNs = []
out_remaps = []
code_list = []
job_count += 1
print 'Submitted %d jobs'%(job_count)
if (args.max_jobs is not None) and (job_count>=args.max_jobs):
break
if (args.max_jobs is not None) and (job_count>=args.max_jobs):
break
|
{
"content_hash": "07d20645c52c3baa2e9cac02f274c33b",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 122,
"avg_line_length": 40.57142857142857,
"alnum_prop": 0.6558561897702001,
"repo_name": "gkumar7/AlGDock",
"id": "79fd40f701263b0a2b7e84aed3938c3b90ca4814",
"size": "5517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pipeline/run_anchor_and_grow.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "127550"
},
{
"name": "CSS",
"bytes": "2136"
},
{
"name": "CoffeeScript",
"bytes": "13826"
},
{
"name": "JavaScript",
"bytes": "240611"
},
{
"name": "Mathematica",
"bytes": "9061"
},
{
"name": "Python",
"bytes": "834939"
},
{
"name": "Shell",
"bytes": "10278"
}
],
"symlink_target": ""
}
|
import datetime
import os
import smtplib
import socket
from email.MIMEText import MIMEText
from logging.handlers import SMTPHandler
from logging import Formatter, LogRecord, CRITICAL
now = datetime.datetime.now
class SubjectFormatter(Formatter):
def format(self,record):
record.message = record.getMessage()
if self._fmt.find('%(line)') >= 0:
record.line = record.message.split('\n')[0]
if self._fmt.find("%(asctime)") >= 0:
record.asctime = self.formatTime(record, self.datefmt)
if self._fmt.find("%(hostname)") >= 0:
record.hostname = socket.gethostname()
return self._fmt % record.__dict__
class MailingLogger(SMTPHandler):
def __init__(self, mailhost, fromaddr, toaddrs, subject, credentials=None, secure=None, send_empty_entries=False, flood_level=None):
SMTPHandler.__init__(self, mailhost, fromaddr, toaddrs, subject, credentials=credentials, secure=secure)
self.subject_formatter = SubjectFormatter(subject)
self.send_empty_entries = send_empty_entries
self.flood_level = flood_level
self.hour = now().hour
self.sent = 0
def getSubject(self,record):
return self.subject_formatter.format(record)
    def emit(self,record):
        # skip empty entries before any flood accounting
        if not self.send_empty_entries and not record.msg.strip():
            return
        current_time = now()
        current_hour = current_time.hour
        if current_hour != self.hour:
            # a new hour (possibly on a new day) starts a fresh flood window
            self.hour = current_hour
            self.sent = 0
        if self.sent == self.flood_level:
            # send a final critical entry in place of the supplied record
            record = LogRecord(
                name = 'flood',
                level = CRITICAL,
                pathname = '',
                lineno = 0,
                msg = """Too Many Log Entries
More than %s entries have been logged that would have resulted in
emails being sent.
No further emails will be sent for log entries generated between
%s and %i:00:00
Please consult any other configured logs, such as a File Logger,
that may contain important entries that have not been emailed.
""" % (self.sent,current_time.strftime('%H:%M:%S'),current_hour+1),
                args = (),
                exc_info = None)
        elif self.sent > self.flood_level:
            # do nothing, we've sent too many emails already
            return
        self.sent += 1
# actually send the mail
try:
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port)
msg = self.format(record)
email = MIMEText(msg)
email['Subject']=self.getSubject(record)
email['From']=self.fromaddr
email['To']=', '.join(self.toaddrs)
email['X-Mailer']='MailingLogger'
if self.username:
if self.secure is not None:
smtp.starttls(*self.secure)
smtp.login(self.username, self.password)
smtp.sendmail(self.fromaddr, self.toaddrs, email.as_string())
smtp.quit()
except:
self.handleError(record)
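# A minimal configuration sketch (host and addresses are illustrative):
# attach a MailingLogger to the root logger so ERROR entries are emailed,
# capped at 10 mails per hour by flood_level.
#
#     import logging
#     handler = MailingLogger(
#         mailhost='localhost',
#         fromaddr='pypi@example.org',
#         toaddrs=['admin@example.org'],
#         subject='[%(hostname)s] %(line)s',
#         flood_level=10,
#     )
#     handler.setLevel(logging.ERROR)
#     logging.getLogger().addHandler(handler)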
|
{
"content_hash": "a751a53c8463de1264f5194a41fb815d",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 136,
"avg_line_length": 36.604395604395606,
"alnum_prop": 0.5719003302311618,
"repo_name": "pydotorg/pypi",
"id": "63d16c83864d59a7e8f3c4c08ffdc1739e7359c2",
"size": "3577",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "MailingLogger.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "195"
},
{
"name": "CSS",
"bytes": "75520"
},
{
"name": "HTML",
"bytes": "84390"
},
{
"name": "Python",
"bytes": "469430"
},
{
"name": "Shell",
"bytes": "208"
}
],
"symlink_target": ""
}
|
"""Using Acton to test uncertainty sampling on the iris libsvm dataset."""
import logging
import os.path
import tempfile
import acton.acton
import acton.plot
import h5py
import requests
import sklearn.datasets
import sklearn.preprocessing
with tempfile.TemporaryDirectory() as tempdir:
# Download the dataset.
# We'll store the dataset in this file:
raw_filename = os.path.join(tempdir, 'iris.dat')
dataset_response = requests.get(
'https://www.csie.ntu.edu.tw/'
'~cjlin/libsvmtools/datasets/multiclass/iris.scale')
with open(raw_filename, 'w') as raw_file:
raw_file.write(dataset_response.text)
# Convert the dataset into a format we can use. It's currently libsvm.
X, y = sklearn.datasets.load_svmlight_file(raw_filename)
# Encode labels.
y = sklearn.preprocessing.LabelEncoder().fit_transform(y)
# We'll just save it directly into an HDF5 file:
input_filename = os.path.join(tempdir, 'iris.h5')
with h5py.File(input_filename, 'w') as input_file:
input_file.create_dataset('features', data=X.toarray())
input_file.create_dataset('labels', data=y)
# We'll save output to this file:
output_base_filename = os.path.join(tempdir, 'iris_base.out')
output_unct_filename = os.path.join(tempdir, 'iris_unct.out')
# Run Acton.
logging.root.setLevel(logging.DEBUG)
acton.acton.main(
data_path=input_filename,
feature_cols=['features'],
label_col='labels',
output_path=output_base_filename,
n_epochs=200,
initial_count=10,
recommender='RandomRecommender',
predictor='LogisticRegression')
acton.acton.main(
data_path=input_filename,
feature_cols=['features'],
label_col='labels',
output_path=output_unct_filename,
n_epochs=200,
initial_count=10,
recommender='UncertaintyRecommender',
predictor='LogisticRegression')
# Plot the results.
with open(output_base_filename, 'rb') as predictions_base, \
open(output_unct_filename, 'rb') as predictions_unct:
acton.plot.plot([predictions_base, predictions_unct])
|
{
"content_hash": "d72330bff05809f763f7cfe12c39bbdd",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 74,
"avg_line_length": 35.622950819672134,
"alnum_prop": 0.6723423838011965,
"repo_name": "chengsoonong/acton",
"id": "1bd8792b7420830d4ef21b6f596d861bcfc595af",
"size": "2196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/multiclass_classification.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2218"
},
{
"name": "Python",
"bytes": "214687"
},
{
"name": "Shell",
"bytes": "594"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import argparse
import functools
import os
import re
import sys
import subprocess
import traceback
from multiprocessing import Pool
import multiprocessing
args = None
def parse_line(line):
question = line.find('?')
if question == -1:
return None, None
open_paren = line.find('(', question)
if open_paren == -1:
return None, None
    close_paren = line.rfind(')', open_paren)
    if close_paren == -1:
        return None, None
mangled = line[question : open_paren]
demangled = line[open_paren+1 : close_paren]
return mangled.strip(), demangled.strip()
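
# parse_line() extracts a (mangled, demangled) pair from one line of
# `llvm-objdump -t -demangle` output, where the mangled name starts at '?'
# and the demangled form follows in parentheses. Illustrative example
# (exact objdump output varies by platform and mangling scheme):
#   parse_line('... ?func@@YAXXZ (void __cdecl func(void))')
#   # -> ('?func@@YAXXZ', 'void __cdecl func(void)')
# Lines without this shape yield (None, None).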
class Result(object):
def __init__(self):
self.crashed = []
self.file = None
self.nsymbols = 0
self.errors = set()
self.nfiles = 0
class MapContext(object):
def __init__(self):
self.rincomplete = None
self.rcumulative = Result()
self.pending_objs = []
self.npending = 0
def process_file(path, objdump):
r = Result()
r.file = path
popen_args = [objdump, '-t', '-demangle', path]
p = subprocess.Popen(popen_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
r.crashed = [r.file]
return r
output = stdout.decode('utf-8')
for line in output.splitlines():
mangled, demangled = parse_line(line)
if mangled is None:
continue
r.nsymbols += 1
if "invalid mangled name" in demangled:
r.errors.add(mangled)
return r
def add_results(r1, r2):
r1.crashed.extend(r2.crashed)
r1.errors.update(r2.errors)
r1.nsymbols += r2.nsymbols
r1.nfiles += r2.nfiles
def print_result_row(directory, result):
print("[{0} files, {1} crashes, {2} errors, {3} symbols]: '{4}'".format(
result.nfiles, len(result.crashed), len(result.errors), result.nsymbols, directory))
def process_one_chunk(pool, chunk_size, objdump, context):
objs = []
incomplete = False
dir_results = {}
ordered_dirs = []
while context.npending > 0 and len(objs) < chunk_size:
this_dir = context.pending_objs[0][0]
ordered_dirs.append(this_dir)
        dir_result = Result()
        if context.rincomplete is not None:
            # resume the directory that was left incomplete last chunk
            dir_result = context.rincomplete
            context.rincomplete = None
        dir_results[this_dir] = dir_result
        dir_result.file = this_dir
nneeded = chunk_size - len(objs)
objs_this_dir = context.pending_objs[0][1]
navail = len(objs_this_dir)
ntaken = min(nneeded, navail)
objs.extend(objs_this_dir[0:ntaken])
remaining_objs_this_dir = objs_this_dir[ntaken:]
context.pending_objs[0] = (context.pending_objs[0][0], remaining_objs_this_dir)
context.npending -= ntaken
if ntaken == navail:
context.pending_objs.pop(0)
else:
incomplete = True
        dir_result.nfiles += ntaken
    assert len(objs) == chunk_size or context.npending == 0
copier = functools.partial(process_file, objdump=objdump)
mapped_results = list(pool.map(copier, objs))
for mr in mapped_results:
result_dir = os.path.dirname(mr.file)
result_entry = dir_results[result_dir]
add_results(result_entry, mr)
# It's only possible that a single item is incomplete, and it has to be the
# last item.
if incomplete:
context.rincomplete = dir_results[ordered_dirs[-1]]
ordered_dirs.pop()
# Now ordered_dirs contains a list of all directories which *did* complete.
for c in ordered_dirs:
        dir_result = dir_results[c]
        add_results(context.rcumulative, dir_result)
        print_result_row(c, dir_result)
def process_pending_files(pool, chunk_size, objdump, context):
while context.npending >= chunk_size:
process_one_chunk(pool, chunk_size, objdump, context)
def go():
global args
obj_dir = args.dir
extensions = args.extensions.split(',')
extensions = [x if x[0] == '.' else '.' + x for x in extensions]
pool_size = 48
pool = Pool(processes=pool_size)
try:
nfiles = 0
context = MapContext()
for root, dirs, files in os.walk(obj_dir):
root = os.path.normpath(root)
pending = []
for f in files:
file, ext = os.path.splitext(f)
                if ext not in extensions:
continue
nfiles += 1
full_path = os.path.join(root, f)
full_path = os.path.normpath(full_path)
pending.append(full_path)
# If this directory had no object files, just print a default
# status line and continue with the next dir
if len(pending) == 0:
print_result_row(root, Result())
continue
context.npending += len(pending)
context.pending_objs.append((root, pending))
# Drain the tasks, `pool_size` at a time, until we have less than
# `pool_size` tasks remaining.
process_pending_files(pool, pool_size, args.objdump, context)
        assert context.npending < pool_size
process_one_chunk(pool, pool_size, args.objdump, context)
total = context.rcumulative
nfailed = len(total.errors)
nsuccess = total.nsymbols - nfailed
ncrashed = len(total.crashed)
        if nfailed > 0:
print("Failures:")
for m in sorted(total.errors):
print(" " + m)
        if ncrashed > 0:
print("Crashes:")
for f in sorted(total.crashed):
print(" " + f)
print("Summary:")
        # guard against empty trees, which would otherwise divide by zero
        spct = float(nsuccess) / float(total.nsymbols) if total.nsymbols else 0.0
        fpct = float(nfailed) / float(total.nsymbols) if total.nsymbols else 0.0
        cpct = float(ncrashed) / float(nfiles) if nfiles else 0.0
print("Processed {0} object files.".format(nfiles))
print("{0}/{1} symbols successfully demangled ({2:.4%})".format(nsuccess, total.nsymbols, spct))
print("{0} symbols could not be demangled ({1:.4%})".format(nfailed, fpct))
print("{0} files crashed while demangling ({1:.4%})".format(ncrashed, cpct))
except:
traceback.print_exc()
pool.close()
pool.join()
if __name__ == "__main__":
def_obj = 'obj' if sys.platform == 'win32' else 'o'
parser = argparse.ArgumentParser(description='Demangle all symbols in a tree of object files, looking for failures.')
parser.add_argument('dir', type=str, help='the root directory at which to start crawling')
    parser.add_argument('--objdump', type=str, default='llvm-objdump',
                        help='path to llvm-objdump. If not specified, ' +
                             'the tool is located as if by `which llvm-objdump`.')
parser.add_argument('--extensions', type=str, default=def_obj,
help='comma separated list of extensions to demangle (e.g. `o,obj`). ' +
'By default this will be `obj` on Windows and `o` otherwise.')
args = parser.parse_args()
multiprocessing.freeze_support()
go()
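
# Example invocation (illustrative; assumes llvm-objdump is on PATH):
#   python demangle_tree.py /path/to/build --extensions=o,obj
# Prints one "[files, crashes, errors, symbols]" row per directory,
# followed by the overall failure/crash summary.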
|
{
"content_hash": "bb8f99211b1cb98f0d751405d5c6bfba",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 121,
"avg_line_length": 32.013513513513516,
"alnum_prop": 0.5923737160545941,
"repo_name": "endlessm/chromium-browser",
"id": "00de72b2a18d7158969c21bdf7d5ca62de1a7890",
"size": "7435",
"binary": false,
"copies": "40",
"ref": "refs/heads/master",
"path": "third_party/llvm/llvm/utils/demangle_tree.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
making borders for images - padding
create a border around the image (like photo frame) - cv2.copyMakeBorder()
- has more apps for convolution operation, zero padding, etc.
function arguments for cv2.copyMakeBorder() :
- src - input image
- top, bottom, left, right - border width in # of pixels in corresponding directions
- borderType - flag defining what kind of border to be added. Can have following types:
- cv2.
BORDER_CONSTANT - adds a constant colored border. value given as next argument
BORDER_REFLECT - border will be mirror reflection of the border elements, like this:
fedcba|abcdefgh|hgfedcb
BORDER_REFLECT_101 or BORDER_DEFAULT - same as above, but with slight change:
gfedcb|abcdefgh|gfedcba
BORDER_REPLICATE - last element is replicated throughout, like this:
aaaaaa|abcdefgh|hhhhhhh
BORDER_WRAP - looks like:
cdefgh|abcdefgh|abcdefg
- value - color of border if border type is cv2.BORDER_CONSTANT
"""
# example code for border types
import cv2
import numpy as np
from matplotlib import pyplot as plt
BLUE = [255, 0, 0]
img1 = cv2.imread('opencv_logo.png')
replicate = cv2.copyMakeBorder(img1, 10, 10, 10, 10, cv2.BORDER_REPLICATE)
reflect = cv2.copyMakeBorder(img1, 10, 10, 10, 10, cv2.BORDER_REFLECT)
reflect101 = cv2.copyMakeBorder(img1, 10, 10, 10, 10, cv2.BORDER_REFLECT_101)
wrap = cv2.copyMakeBorder(img1, 10, 10, 10, 10, cv2.BORDER_WRAP)
constant = cv2.copyMakeBorder(img1, 10, 10, 10, 10, cv2.BORDER_CONSTANT, value=BLUE)
plt.subplot(231), plt.imshow(img1, 'gray'), plt.title('ORIGINAL')
plt.subplot(232), plt.imshow(replicate, 'gray'), plt.title('REPLICATE')
plt.subplot(233), plt.imshow(reflect, 'gray'), plt.title('REFLECT')
plt.subplot(234), plt.imshow(reflect101, 'gray'), plt.title('REFLECT_101')
plt.subplot(235), plt.imshow(wrap, 'gray'), plt.title('WRAP')
plt.subplot(236), plt.imshow(constant, 'gray'), plt.title('CONSTANT')
plt.show()
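
# A quick numeric illustration (added; not in the original tutorial) of the
# subtle difference between BORDER_REFLECT and BORDER_REFLECT_101 on one row:
row = np.array([[1, 2, 3, 4, 5]], dtype=np.uint8)
print(cv2.copyMakeBorder(row, 0, 0, 2, 2, cv2.BORDER_REFLECT))      # [[2 1 1 2 3 4 5 5 4]]
print(cv2.copyMakeBorder(row, 0, 0, 2, 2, cv2.BORDER_REFLECT_101))  # [[3 2 1 2 3 4 5 4 3]]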
|
{
"content_hash": "168b6c43e0cea127472a98ea2c853ba7",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 96,
"avg_line_length": 46.22727272727273,
"alnum_prop": 0.6927236971484759,
"repo_name": "SSG-DRD-IOT/commercial-iot-security-system",
"id": "3826c0d633489ff0d1082d24229d0f348d762e25",
"size": "2034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opencv/tutorials/core/basic/padding.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "341"
},
{
"name": "Python",
"bytes": "278625"
}
],
"symlink_target": ""
}
|
"""Asynchronous WSGI_ Remote Procedure Calls middleware. It implements a
JSON-RPC_ server and client. Check out the
:ref:`json-rpc tutorial <tutorials-calculator>` if you want to get started
quickly with a working example.
To quickly set up a server::

    from pulsar.apps import rpc, wsgi

    class MyRpc(rpc.JSONRPC):

        def rpc_ping(self, request):
            return 'pong'

    class Wsgi(wsgi.LazyWsgi):

        def handler(self, environ=None):
            app = wsgi.Router('/',
                              post=MyRpc(),
                              response_content_types=['application/json'])
            return wsgi.WsgiHandler([app])

    if __name__ == '__main__':
        wsgi.WSGIServer(Wsgi()).start()
* The ``MyRpc`` handles the requests
* Routing is delegated to the :class:`.Router`, which handles only ``post``
  requests with content type ``application/json``.
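
A client can then call the server through :class:`JsonProxy`. A minimal
sketch, assuming the server above is listening on ``localhost:8060``::

    from pulsar.apps import rpc

    proxy = rpc.JsonProxy('http://localhost:8060')
    pong = proxy.ping()   # invokes the ``rpc_ping`` handler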
API
===========
.. module:: pulsar.apps.rpc.handlers
RpcHandler
~~~~~~~~~~~~~~
.. autoclass:: RpcHandler
:members:
:member-order: bysource
rpc method decorator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: rpc_method
.. module:: pulsar.apps.rpc.jsonrpc
JSON RPC
~~~~~~~~~~~~~~~~
.. autoclass:: JSONRPC
:members:
:member-order: bysource
JsonProxy
~~~~~~~~~~~~~~~~
.. autoclass:: JsonProxy
:members:
:member-order: bysource
.. module:: pulsar.apps.rpc.mixins
Server Commands
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: PulsarServerCommands
:members:
:member-order: bysource
.. _JSON-RPC: http://www.jsonrpc.org/specification
.. _WSGI: http://www.python.org/dev/peps/pep-3333/
"""
from .handlers import (
RpcHandler, rpc_method, InvalidRequest, InvalidParams,
NoSuchFunction, InternalError
)
from .jsonrpc import JSONRPC, JsonProxy, JsonBatchProxy
from .mixins import PulsarServerCommands
__all__ = [
'RpcHandler',
'rpc_method',
'InvalidRequest',
'InvalidParams',
'NoSuchFunction',
'InternalError',
'JSONRPC',
'JsonProxy',
'JsonBatchProxy',
'PulsarServerCommands'
]
|
{
"content_hash": "0ad004bdd960f3a712f8fb6c32f9ae4f",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 74,
"avg_line_length": 20.540816326530614,
"alnum_prop": 0.61698956780924,
"repo_name": "quantmind/pulsar",
"id": "b405a877ce83c8877cdfd061447e8d4743e76f71",
"size": "2013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pulsar/apps/rpc/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "C",
"bytes": "1366"
},
{
"name": "CSS",
"bytes": "1302"
},
{
"name": "HTML",
"bytes": "1085"
},
{
"name": "JavaScript",
"bytes": "116"
},
{
"name": "Makefile",
"bytes": "2272"
},
{
"name": "Python",
"bytes": "1140291"
},
{
"name": "Shell",
"bytes": "2164"
}
],
"symlink_target": ""
}
|
class Solution:
# if seeing the element again in dict, delete it
# things left are the single elements
def singleNumber(self, nums):
resultDict = {}
for i in nums:
            if i in resultDict:
del resultDict[i]
else:
resultDict[i] = 1
return list(resultDict.keys())
# use a dictionary to store counts for each element
def singleNumber(self, nums):
d = {}
for n in nums:
if n not in d:
d[n] = 1
else:
d[n] += 1
return [k for k, v in d.items() if v == 1]
# return [k for k in d if d[k] == 1]
# https://www.jianshu.com/p/c31bd59d7877
def singleNumber(self, nums):
both = set()
double = set()
for n in nums:
if n not in both:
both.add(n)
else:
double.add(n)
single = both - double
return list(single)
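
    # Added for reference (not part of the original solutions): the canonical
    # O(1)-space approach. XOR of all numbers equals a ^ b for the two unique
    # numbers a and b; any set bit of that XOR splits nums into two groups,
    # each containing exactly one of them.
    def singleNumber(self, nums):
        xor_all = 0
        for n in nums:
            xor_all ^= n
        lowest_bit = xor_all & (-xor_all)  # isolate one bit where a, b differ
        a = 0
        for n in nums:
            if n & lowest_bit:
                a ^= n
        return [a, a ^ xor_all]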
if __name__ == "__main__":
    print(Solution().singleNumber([1, 2, 1, 3, 2, 5]))
|
{
"content_hash": "a5b3fc94c10b3e1376c7d8f5fac79746",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 55,
"avg_line_length": 26.85,
"alnum_prop": 0.48417132216014896,
"repo_name": "gengwg/leetcode",
"id": "e7b09b4bcdb03db41bbd37a543fedce66687074d",
"size": "1455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "260_single_number_iii.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "779"
},
{
"name": "Python",
"bytes": "627348"
},
{
"name": "SQLPL",
"bytes": "779"
},
{
"name": "Shell",
"bytes": "4149"
}
],
"symlink_target": ""
}
|
import requests
import json
def main():
url = "http://localhost:4000/jsonrpc"
headers = {'content-type': 'application/json'}
# Example echo method
payload = {
"method": "echo",
"params": ["echome!"],
"jsonrpc": "2.0",
"id": 0,
}
response = requests.post(
url, data=json.dumps(payload), headers=headers).json()
assert response["result"] == "echome!"
assert response["jsonrpc"] == "2.0"
assert response["id"] == 0
# Example echo method JSON-RPC 1.0
payload = {
"method": "echo",
"params": ["echome!"],
"id": 0,
}
response = requests.post(
url, data=json.dumps(payload), headers=headers).json()
assert response["result"] == "echome!"
assert response["error"] is None
assert response["id"] == 0
assert "jsonrpc" not in response
# Example add method
payload = {
"method": "add",
"params": [1, 2],
"jsonrpc": "2.0",
"id": 1,
}
response = requests.post(
url, data=json.dumps(payload), headers=headers).json()
assert response["result"] == 3
assert response["jsonrpc"] == "2.0"
assert response["id"] == 1
# Example foobar method
payload = {
"method": "foobar",
"params": {"foo": "json", "bar": "-rpc"},
"jsonrpc": "2.0",
"id": 3,
}
response = requests.post(
url, data=json.dumps(payload), headers=headers).json()
assert response["result"] == "json-rpc"
assert response["jsonrpc"] == "2.0"
assert response["id"] == 3
# Example exception
payload = {
"method": "add",
"params": [0],
"jsonrpc": "2.0",
"id": 4,
}
response = requests.post(
url, data=json.dumps(payload), headers=headers).json()
assert response["error"]["message"] == "Invalid params"
assert response["error"]["code"] == -32602
assert response["jsonrpc"] == "2.0"
assert response["id"] == 4
if __name__ == "__main__":
main()
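
# A minimal matching server sketch (added for illustration, following the
# json-rpc library's documented Werkzeug example; handlers assumed to match
# the calls above). Kept as a comment so this client script stays runnable:
#
#     from werkzeug.wrappers import Request, Response
#     from werkzeug.serving import run_simple
#     from jsonrpc import JSONRPCResponseManager, dispatcher
#
#     @Request.application
#     def application(request):
#         dispatcher["echo"] = lambda s: s
#         dispatcher["add"] = lambda a, b: a + b
#         dispatcher["foobar"] = lambda **kwargs: kwargs["foo"] + kwargs["bar"]
#         response = JSONRPCResponseManager.handle(request.data, dispatcher)
#         return Response(response.json, mimetype='application/json')
#
#     run_simple('localhost', 4000, application)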
|
{
"content_hash": "b56fafa99d2d0b7fedc4e56b6e689e03",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 62,
"avg_line_length": 24.914634146341463,
"alnum_prop": 0.534997552618698,
"repo_name": "pavlov99/json-rpc",
"id": "97d97fb1d58ac0fb62d7323bbe33e84bdcff94d9",
"size": "2043",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "469"
},
{
"name": "Python",
"bytes": "138537"
}
],
"symlink_target": ""
}
|
"""This sample uses Rok as an example to show case how VolumeOp accepts
annotations as an extra argument, and how we can use arbitrary PipelineParams
to determine their contents.
The specific annotation is Rok-specific, but the use of annotations in
such way is widespread in storage systems integrated with K8s.
"""
import kfp.deprecated.dsl as dsl
@dsl.pipeline(
name="VolumeSnapshotOp RokURL",
description="The fifth example of the design doc.")
def volume_snapshotop_rokurl(rok_url):
vop1 = dsl.VolumeOp(
name="create_volume_1",
resource_name="vol1",
size="1Gi",
annotations={"rok/origin": rok_url},
modes=dsl.VOLUME_MODE_RWM)
step1 = dsl.ContainerOp(
name="step1_concat",
image="library/bash:4.4.23",
command=["sh", "-c"],
arguments=["cat /data/file*| gzip -c >/data/full.gz"],
pvolumes={"/data": vop1.volume})
step1_snap = dsl.VolumeSnapshotOp(
name="create_snapshot_1", resource_name="snap1", volume=step1.pvolume)
vop2 = dsl.VolumeOp(
name="create_volume_2",
resource_name="vol2",
data_source=step1_snap.snapshot,
size=step1_snap.outputs["size"])
step2 = dsl.ContainerOp(
name="step2_gunzip",
image="library/bash:4.4.23",
command=["gunzip", "-k", "/data/full.gz"],
pvolumes={"/data": vop2.volume})
step2_snap = dsl.VolumeSnapshotOp(
name="create_snapshot_2", resource_name="snap2", volume=step2.pvolume)
vop3 = dsl.VolumeOp(
name="create_volume_3",
resource_name="vol3",
data_source=step2_snap.snapshot,
size=step2_snap.outputs["size"])
step3 = dsl.ContainerOp(
name="step3_output",
image="library/bash:4.4.23",
command=["cat", "/data/full"],
pvolumes={"/data": vop3.volume})
if __name__ == "__main__":
import kfp.deprecated.compiler as compiler
compiler.Compiler().compile(volume_snapshotop_rokurl, __file__ + ".tar.gz")
|
{
"content_hash": "16c0d66094ef62625ebdade6f81fed85",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 79,
"avg_line_length": 31.984126984126984,
"alnum_prop": 0.630272952853598,
"repo_name": "kubeflow/pipelines",
"id": "4ba7d60ebc3d61e381d5e326e09374e131cf8f79",
"size": "2600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdk/python/tests/compiler/testdata/volume_snapshotop_rokurl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "CSS",
"bytes": "2171"
},
{
"name": "Dockerfile",
"bytes": "49331"
},
{
"name": "Go",
"bytes": "1903937"
},
{
"name": "HTML",
"bytes": "3656"
},
{
"name": "JavaScript",
"bytes": "544297"
},
{
"name": "Jinja",
"bytes": "938"
},
{
"name": "Jupyter Notebook",
"bytes": "359548"
},
{
"name": "Makefile",
"bytes": "22164"
},
{
"name": "Mustache",
"bytes": "23652"
},
{
"name": "PowerShell",
"bytes": "3194"
},
{
"name": "Python",
"bytes": "5684887"
},
{
"name": "Shell",
"bytes": "264595"
},
{
"name": "Smarty",
"bytes": "8295"
},
{
"name": "Starlark",
"bytes": "553"
},
{
"name": "TypeScript",
"bytes": "4294958"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from nospoil.constants import MAX_PLAYOFF_ROUNDS
from .models import Playoff
from .services import create_matches, update_matches
# Different serializers are used because the rounds and double fields are
# needed on creation but must not be changed when editing; likewise grid is
# needed when displaying a single playoff, but not in the list view.
class PlayoffListSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
rounds = serializers.IntegerField(min_value=2, max_value=MAX_PLAYOFF_ROUNDS)
class Meta:
model = Playoff
fields = ('url', 'id', 'title', 'sport', 'double', 'rounds',
'owner', 'private')
def create(self, validated_data):
playoff = Playoff.objects.create(**validated_data)
# the logic is more complex than nested serialization
create_matches(playoff, self.initial_data.get('matches', {}))
return playoff
class PlayoffDetailSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
grid = serializers.ReadOnlyField()
class Meta:
model = Playoff
fields = ('url', 'id', 'title', 'sport', 'owner', 'private', 'grid')
def update(self, instance, validated_data):
instance.title = validated_data.get('title', instance.title)
instance.sport = validated_data.get('sport', instance.sport)
instance.private = validated_data.get('private', instance.private)
instance.save()
# the logic is more complex than nested serialization
update_matches(instance, self.initial_data.get('matches', {}))
return instance
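
# Illustrative creation payload (added for clarity; the exact "matches"
# structure is defined by services.create_matches, which is not shown here):
#
#     {
#         "title": "Summer Cup",
#         "sport": "football",
#         "double": false,
#         "rounds": 3,
#         "private": false,
#         "matches": {...}
#     }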
|
{
"content_hash": "f5c74038d37000ae6707a104c9fcfbb8",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 80,
"avg_line_length": 40.69047619047619,
"alnum_prop": 0.6986541837331773,
"repo_name": "RomanOsadchuk/nospoil",
"id": "ea0936ce1e4a683db86f18923bbfcd4bd76de9c1",
"size": "1709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "playoffs/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3541"
},
{
"name": "HTML",
"bytes": "16164"
},
{
"name": "JavaScript",
"bytes": "13946"
},
{
"name": "Python",
"bytes": "21504"
}
],
"symlink_target": ""
}
|