text stringlengths 4 1.02M | meta dict |
|---|---|
"""Base GCP client which uses the discovery API."""
import json
import httplib2
from apiclient import discovery
from googleapiclient.errors import HttpError
from oauth2client.client import GoogleCredentials
from retrying import retry
from google.cloud import security as forseti_security
from google.cloud.security.common.gcp_api import _supported_apis
from google.cloud.security.common.gcp_api import errors as api_errors
from google.cloud.security.common.util import log_util
from google.cloud.security.common.util import retryable_exceptions
# TODO: The next editor must remove this disable and correct issues.
# pylint: disable=missing-type-doc,missing-return-type-doc,missing-return-doc
# pylint: disable=missing-param-doc,missing-raises-doc
LOGGER = log_util.get_logger(__name__)
def _attach_user_agent(request):
    """Append the Forseti UA string to a googleapiclient request's headers.

    Args:
        request: A googleapiclient request object carrying a ``headers`` dict.

    Returns:
        The same request object, with the Forseti package name/version
        appended to its 'user-agent' header.
    """
    forseti_ua = ', %s/%s ' % (forseti_security.__package_name__,
                               forseti_security.__version__)
    request.headers['user-agent'] = request.headers['user-agent'] + forseti_ua
    return request
class BaseClient(object):
    """Base client for a specified GCP API and credentials."""

    def __init__(self, credentials=None, api_name=None, **kwargs):
        """Thin client wrapper over the Google Discovery API.

        The intent for this class is to define the Google APIs expected by
        Forseti. While other APIs and versions can be specified, it may not
        be stable and could cause unknown issues in Forseti.

        Args:
            credentials: Google credentials for auth-ing to the API.
            api_name: The API name to wrap. More details here:
                https://developers.google.com/api-client-library/python/apis/
            kwargs: Additional args such as version.
        """
        if not credentials:
            # Fall back to the environment's Application Default Credentials.
            credentials = GoogleCredentials.get_application_default()
        self._credentials = credentials
        self.name = api_name

        # Look to see if the API is formally supported in Forseti.
        supported_api = _supported_apis.SUPPORTED_APIS.get(api_name)
        if not supported_api:
            LOGGER.warn('API "%s" is not formally supported in Forseti, '
                        'proceed at your own risk.', api_name)

        # See if the version is supported by Forseti.
        # If no version is specified, try to find the supported API's version.
        version = kwargs.get('version')
        if not version and supported_api:
            version = supported_api.get('version')
        self.version = version

        # Warn (but proceed) when an explicit version differs from the
        # formally supported one.
        if supported_api and supported_api.get('version') != version:
            LOGGER.warn('API "%s" version %s is not formally supported '
                        'in Forseti, proceed at your own risk.',
                        api_name, version)

        # None (not explicitly disabled) is passed through to discovery.build.
        should_cache_discovery = kwargs.get('cache_discovery')

        self.service = discovery.build(self.name,
                                       self.version,
                                       credentials=self._credentials,
                                       cache_discovery=should_cache_discovery)

    def __repr__(self):
        """Return a short description with the wrapped API name and version."""
        return 'API: name=%s, version=%s' % (self.name, self.version)

    @staticmethod
    # The wait time is (2^X * multiplier) milliseconds, where X is the retry
    # number.
    @retry(retry_on_exception=retryable_exceptions.is_retryable_exception,
           wait_exponential_multiplier=1000, wait_exponential_max=10000,
           stop_max_attempt_number=5)
    def _execute(request, rate_limiter=None):
        """Executes requests in a rate-limited way.

        Args:
            request: GCP API client request object.
            rate_limiter: An instance of RateLimiter to use. Will be None
                for api without any rate limits.

        Returns:
            API response object.

        Raises:
            When the retry is exceeded, exception will be thrown. This
            exception is not wrapped by the retry library, and will be handled
            upstream.
        """
        request = _attach_user_agent(request)
        try:
            if rate_limiter is not None:
                # The rate limiter context blocks until a slot is available.
                with rate_limiter:
                    return request.execute()
            return request.execute()
        except HttpError as e:
            # Only attempt to parse JSON-bodied 403s; anything else is
            # re-raised unchanged.
            if (e.resp.status == 403 and
                    e.resp.get('content-type', '').startswith(
                        'application/json')):

                # If a project doesn't have the necessary API enabled, Google
                # will return an error domain=usageLimits and
                # reason=accessNotConfigured. Clients may wish to handle this
                # error in some particular way. For instance, when listing
                # resources, it might be treated as "no resources of that type
                # are present", if the API would need to be enabled in order
                # to create the resources in question!
                #
                # So, if we find that specific error, raise a different
                # exception to indicate it to callers. Otherwise, propagate
                # the initial exception.
                error_details = json.loads(e.content)
                errors = error_details.get('error', {}).get('errors', [])
                api_disabled_errors = [
                    error for error in errors
                    if (error.get('domain') == 'usageLimits'
                        and error.get('reason') == 'accessNotConfigured')]
                # Only translate the exception when *every* error in the
                # response is an api-disabled error.
                if (api_disabled_errors and
                        len(api_disabled_errors) == len(errors)):
                    raise api_errors.ApiNotEnabledError(
                        api_disabled_errors[0].get('extendedHelp', ''),
                        e)
            raise

    def _build_paged_result(self, request, api_stub, rate_limiter,
                            next_stub=None):
        """Execute results and page through the results.

        Use of this method requires the API having a .list_next() method.

        Args:
            request: GCP API client request object.
            api_stub: The API stub used to build the request.
            rate_limiter: An instance of RateLimiter to use. Will be None
                for api without any rate limits.
            next_stub: The API stub used to get the next page of results.
                Defaults to api_stub.list_next when not provided.

        Returns:
            A list of paged API response objects.
            [{page 1 results}, {page 2 results}, {page 3 results}, ...]

        Raises:
            api_errors.ApiExecutionError when there is no list_next() method
            on the api_stub.
        """
        if next_stub is None:
            if not hasattr(api_stub, 'list_next'):
                raise api_errors.ApiExecutionError(
                    api_stub, 'No list_next() method.')
            next_stub = api_stub.list_next

        results = []
        # next_stub() returns None when there are no more pages, which ends
        # the loop.
        while request is not None:
            try:
                response = self._execute(request, rate_limiter)
                results.append(response)
                request = next_stub(request, response)
            except api_errors.ApiNotEnabledError:
                # If the API isn't enabled on the resource, there must
                # not be any resources. So, just swallow the error:
                # we're done!
                break
            except (HttpError, httplib2.HttpLib2Error) as e:
                raise api_errors.ApiExecutionError(api_stub, e)
        return results

    @staticmethod
    # pylint: disable=invalid-name
    def _flatten_aggregated_list_results(paged_results, item_key):
        # pylint: enable=invalid-name
        """Flatten a split-up list as returned by GCE "aggregatedList" API.

        The compute API's aggregatedList methods return a structure in
        the form:

            {
                items: {
                    $group_value_1: {
                        $item_key: [$items]
                    },
                    $group_value_2: {
                        $item_key: [$items]
                    },
                    $group_value_3: {
                        "warning": {
                            message: "There are no results for ..."
                        }
                    },
                    ...,
                    $group_value_n, {
                        $item_key: [$items]
                    },
                }
            }

        where each "$group_value_n" is a particular element in the
        aggregation, e.g. a particular zone or group or whatever, and
        "$item_key" is some type-specific resource name, e.g.
        "backendServices" for an aggregated list of backend services.

        This method takes such a structure and yields a simple list of
        all $items across all of the groups.

        Args:
            paged_results: A list of paged API response objects.
                [{page 1 results}, {page 2 results}, {page 3 results}, ...]
            item_key: The name of the key within the inner "items" lists
                containing the objects of interest.

        Returns:
            A list of items.
        """
        items = []
        for page in paged_results:
            aggregated_items = page.get('items', {})
            for items_for_grouping in aggregated_items.values():
                # Warning-only groups have no item_key, so they contribute
                # nothing here.
                for item in items_for_grouping.get(item_key, []):
                    items.append(item)
        return items

    @staticmethod
    def _flatten_list_results(paged_results, item_key):
        """Flatten a split-up list as returned by list_next() API.

        GCE 'list' APIs return results in the form:
            {item_key: [...]}
        with one dictionary for each "page" of results. This method flattens
        that to a simple list of items.

        Args:
            paged_results: A list of paged API response objects.
                [{page 1 results}, {page 2 results}, {page 3 results}, ...]
            item_key: The name of the key within the inner "items" lists
                containing the objects of interest.

        Returns:
            A list of GCE resources.
        """
        results = []
        for page in paged_results:
            results.extend(page.get(item_key, []))
        return results
| {
"content_hash": "78d497fee4cc75cf263cfaf105969562",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 78,
"avg_line_length": 38.71910112359551,
"alnum_prop": 0.5767072934803638,
"repo_name": "thenenadx/forseti-security",
"id": "d6d78926238a09c434e8a7c893f529ab6ce9ace9",
"size": "10913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google/cloud/security/common/gcp_api/_base_client.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5851"
},
{
"name": "Protocol Buffer",
"bytes": "10441"
},
{
"name": "Python",
"bytes": "1985604"
},
{
"name": "Shell",
"bytes": "2737"
}
],
"symlink_target": ""
} |
import sys
import time
import os
import unittest
# Make the package under test importable when running from the tests dir.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
# Directory holding the test fixture files.
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
# text_ = os.path.expanduser(os.path.join(TEST_DATA_DIR, 'text'))
# Output paths used by the generate/load file tests below.
seeds_ = os.path.expanduser(os.path.join(TEST_DATA_DIR, 'seeds'))
labelled_data_ = os.path.expanduser(os.path.join(TEST_DATA_DIR, 'labelled_data'))
from wedc.infrastructure import database
from wedc.infrastructure.model.labelled_data import LabelledData
from wedc.infrastructure.model.need_to_label_data import NeedToLabelData
from wedc.infrastructure.model.seed_dict import SeedDict
class TestDatabaseMethods(unittest.TestCase):
    """Smoke tests for database creation/teardown helpers.

    These tests only invoke the helpers; they assert nothing and will
    pass as long as no exception is raised.
    """

    def setUp(self):
        pass

    def test_create_database(self):
        """Creating the database should not raise."""
        database.create_database()

    def test_drop_database(self):
        """Dropping the database should not raise."""
        database.drop_database()

    def tearDown(self):
        pass
class TestLabelledDataMethods(unittest.TestCase):
    """Smoke tests for the LabelledData model (Python 2 style prints).

    Most tests only invoke the model API and print results; they pass as
    long as no exception is raised.
    """

    def setUp(self):
        pass

    def test_insert_data(self):
        """Insert a single labelled row."""
        LabelledData.insert(content='test_content', label=1, flag=2)

    def test_insert_from_csv(self):
        """Bulk-insert rows from the groundtruth CSV fixture."""
        csv_ = os.path.expanduser(os.path.join(TEST_DATA_DIR, 'groundtruth.csv'))
        LabelledData.insert_from_csv(csv_)

    def test_load_data(self):
        """Iterate loaded rows; ids are 1-based positions in the result."""
        for idx, data in enumerate(LabelledData.load_data()):
            post_id = idx + 1
            # print data.label, data.content
            print post_id, data.label, data.extraction

    def test_clear_data(self):
        print LabelledData.clear_data()

    def test_load_potential_seeds(self):
        """Load candidate seeds and insert each into the seed dictionary."""
        potential_seeds = LabelledData.load_potential_seeds()
        for seed, vec in potential_seeds.items():
            # vec looks like (weight, label) — TODO confirm against the model.
            weight = vec[0]
            label = vec[1]
            SeedDict.insert(seed, weight)

    def test_generate_labelled_data_file(self):
        LabelledData.generate_labelled_data_file(labelled_data_)

    def test_load_labelled_data_file(self):
        print LabelledData.load_labelled_data_file(labelled_data_)[0]

    def tearDown(self):
        pass
class TestSeedDictMethods(unittest.TestCase):
    """Smoke tests for the SeedDict model (Python 2 style prints)."""

    def setUp(self):
        pass

    def test_insert_from_txt(self):
        """Bulk-insert seeds from the weighted_seed_dict fixture."""
        txt_ = os.path.expanduser(os.path.join(TEST_DATA_DIR, 'weighted_seed_dict'))
        SeedDict.insert_from_txt(txt_)

    def test_load_data(self):
        """Load the seed dict and print every (seed, weight) pair."""
        seeds = SeedDict.load_data()
        for seed, weight in seeds.items():
            print seed, weight
        print len(seeds)

    def test_clear_data(self):
        print SeedDict.clear_data()

    def test_generate_seed_file(self):
        SeedDict.generate_seed_file(seeds_)

    def test_load_seed_file(self):
        print SeedDict.load_seed_file(seeds_)

    def tearDown(self):
        pass
if __name__ == '__main__':
    # unittest.main()
    def run_main_test():
        """Run a hand-picked subset of the tests.

        Uncomment addTest lines to enable additional cases; only the
        labelled-data file generation test is currently active.
        """
        suite = unittest.TestSuite()
        # suite.addTest(TestDatabaseMethods("test_create_database"))
        # suite.addTest(TestDatabaseMethods("test_drop_database"))
        # suite.addTest(TestLabelledDataMethods("test_insert_data"))
        # suite.addTest(TestLabelledDataMethods("test_insert_from_csv"))
        # suite.addTest(TestLabelledDataMethods("test_load_data"))
        # suite.addTest(TestLabelledDataMethods("test_clear_data"))
        # suite.addTest(TestLabelledDataMethods("test_load_potential_seeds"))
        suite.addTest(TestLabelledDataMethods("test_generate_labelled_data_file"))
        # suite.addTest(TestLabelledDataMethods("test_load_labelled_data_file"))
        # suite.addTest(TestSeedDictMethods("test_insert_from_txt"))
        # suite.addTest(TestSeedDictMethods("test_load_data"))
        # suite.addTest(TestSeedDictMethods("test_clear_data"))
        # suite.addTest(TestSeedDictMethods("test_generate_seed_file"))
        # suite.addTest(TestSeedDictMethods("test_load_seed_file"))
        runner = unittest.TextTestRunner()
        runner.run(suite)
    run_main_test()
| {
"content_hash": "15cf494757453a2be6fd1782512709d4",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 84,
"avg_line_length": 31.983870967741936,
"alnum_prop": 0.6613716591023702,
"repo_name": "usc-isi-i2/WEDC",
"id": "8b1e2ed336eba193ab273c7c74e8d803595018f0",
"size": "3966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_database.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "139106"
},
{
"name": "Shell",
"bytes": "677"
}
],
"symlink_target": ""
} |
from sqlalchemy.dialects.firebird import base, kinterbasdb, fdb

# fdb is the default DBAPI dialect for the firebird backend.
base.dialect = fdb.dialect

# BUG FIX: FLOAT was imported three times and listed three times in
# __all__; the duplicates are redundant and have been removed. The set of
# exported names is unchanged.
from sqlalchemy.dialects.firebird.base import \
    SMALLINT, BIGINT, FLOAT, DATE, TIME, \
    TEXT, NUMERIC, TIMESTAMP, VARCHAR, CHAR, BLOB, \
    dialect

__all__ = (
    'SMALLINT', 'BIGINT', 'FLOAT', 'DATE', 'TIME',
    'TEXT', 'NUMERIC', 'TIMESTAMP', 'VARCHAR', 'CHAR', 'BLOB',
    'dialect'
)
| {
"content_hash": "822f1b90c65733f84ed68f1381dc80f7",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 71,
"avg_line_length": 30.214285714285715,
"alnum_prop": 0.6524822695035462,
"repo_name": "michaelBenin/sqlalchemy",
"id": "094ac3e832de27b1f02f8dda8255e1f2a1d2149e",
"size": "662",
"binary": false,
"copies": "78",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/dialects/firebird/__init__.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from helper import TestHelper
import sublime
class TestJumpPrevIndent(TestHelper):
    """Behavioral tests for the jump_prev_indent Sublime Text command.

    Each test builds a buffer from `lines`, places the selection at
    `starting_selection`, runs the command via check_command, and asserts
    the selection ends at `ending_selection` (offsets are [anchor, cursor]
    character positions in the joined buffer).
    """

    def command(self):
        return 'jump_prev_indent'

    def test_empty_lines(self):
        lines = [
            'Lorem ipsum dolor sit amet',
            '',
            '',
            'Lorem ipsum dolor sit amet'
        ]
        starting_selection = [29, 29]
        ending_selection = [0, 0]
        self.check_command(lines, starting_selection, ending_selection)

    def test_indented_lines(self):
        lines = [
            'Lorem ipsum dolor sit amet',
            '  Lorem ipsum dolor sit amet',
            '  Lorem ipsum dolor sit amet',
            'Lorem ipsum dolor sit amet'
        ]
        starting_selection = [85, 85]
        ending_selection = [0, 0]
        self.check_command(lines, starting_selection, ending_selection)

    def test_beginning_of_file(self):
        # At the first matching line already: the selection must not move.
        lines = [
            '  Lorem ipsum dolor sit amet',
            '  Lorem ipsum dolor sit amet',
            'Lorem ipsum dolor sit amet'
        ]
        starting_selection = [58, 58]
        ending_selection = [58, 58]
        self.check_command(lines, starting_selection, ending_selection)

    def test_maintain_column(self):
        lines = [
            'Lorem ipsum dolor sit amet',
            'Lorem ipsum dolor sit amet',
            'Lorem ipsum dolor sit amet'
        ]
        starting_selection = [66, 66]
        ending_selection = [12, 12]
        # BUG FIX: this assertion call was missing, so the test set up its
        # fixtures but never exercised the command.
        self.check_command(lines, starting_selection, ending_selection)

    def test_jump_to_shorter_line(self):
        lines = [
            'Lorem ipsum dolor sit amet',
            '',
            'Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet'
        ]
        starting_selection = [81, 81]
        ending_selection = [26, 26]
        self.check_command(lines, starting_selection, ending_selection)

    def test_jump_to_first_intersection(self):
        lines = [
            '  Lorem ipsum dolor sit amet',
            '',
            '   Lorem ipsum dolor sit amet'
        ]
        starting_selection = [33, 33]
        ending_selection = [3, 3]
        self.check_command(lines, starting_selection, ending_selection)

    def test_create_selection(self):
        lines = [
            'Lorem ipsum dolor sit amet',
            '  Lorem ipsum dolor sit amet',
            '  Lorem ipsum dolor sit amet',
            'Lorem ipsum dolor sit amet'
        ]
        starting_selection = [85, 85]
        ending_selection = [85, 0]
        self.check_command(lines, starting_selection, ending_selection, extend_selection = True)

    def test_extend_selection(self):
        lines = [
            'Lorem ipsum dolor sit amet',
            '  Lorem ipsum dolor sit amet',
            '  Lorem ipsum dolor sit amet',
            'Lorem ipsum dolor sit amet'
        ]
        starting_selection = [85, 56]
        ending_selection = [85, 0]
        self.check_command(lines, starting_selection, ending_selection, extend_selection = True)

    def test_subtract_selection(self):
        lines = [
            'Lorem ipsum dolor sit amet',
            '  Lorem ipsum dolor sit amet',
            '  Lorem ipsum dolor sit amet',
            'Lorem ipsum dolor sit amet'
        ]
        starting_selection = [0, 85]
        ending_selection = [0, 0]
        self.check_command(lines, starting_selection, ending_selection, extend_selection = True)

    def test_respect_cursor_position(self):
        lines = [
            'Lorem ipsum dolor sit amet',
            ' Lorem ipsum dolor sit amet',
            ' Lorem ipsum dolor sit amet'
        ]
        starting_selection = [57, 57]
        ending_selection = [1, 1]
        self.check_command(lines, starting_selection, ending_selection)

    def test_disrespect_cursor_position(self):
        # Flip the setting for this test only, and restore it afterwards.
        settings = sublime.load_settings("jump_along_indent.sublime-settings")
        settings.set("respect_cursor_position", False)
        lines = [
            'Lorem ipsum dolor sit amet',
            ' Lorem ipsum dolor sit amet',
            ' Lorem ipsum dolor sit amet'
        ]
        starting_selection = [57, 57]
        ending_selection = [28, 28]
        self.check_command(lines, starting_selection, ending_selection)
        settings.set("respect_cursor_position", True)
| {
"content_hash": "b0ed694f5be65c2341e63e29d2fb6fe9",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 92,
"avg_line_length": 25.910344827586208,
"alnum_prop": 0.6334841628959276,
"repo_name": "mwean/sublime_jump_along_indent",
"id": "095e1c5944afbbc635178cc8a202577ad2f3d1e6",
"size": "3757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_jump_prev_indent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20528"
}
],
"symlink_target": ""
} |
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
from .compat import cookielib
from .cookies import cookiejar_from_dict
from .models import Request
from .hooks import dispatch_hook, default_hooks
from .utils import from_key_val_list, default_headers
from .exceptions import TooManyRedirects, InvalidSchema
from .compat import urlparse, urljoin
from .adapters import HTTPAdapter
from .utils import requote_uri, get_environ_proxies, get_netrc_auth
from .status_codes import codes
REDIRECT_STATI = (codes.moved, codes.found, codes.other, codes.temporary_moved)
DEFAULT_REDIRECT_LIMIT = 30
def merge_kwargs(local_kwarg, default_kwarg):
    """Merge a per-request kwarg with its session-level default.

    Non-dictionary values pass through unchanged (the local value wins
    when both are present). Dictionary-like values are merged key-by-key,
    with local keys overriding defaults; a local key explicitly set to
    ``None`` removes that key from the merged result.
    """
    if default_kwarg is None:
        return local_kwarg

    if isinstance(local_kwarg, str):
        return local_kwarg

    if local_kwarg is None:
        return default_kwarg

    # Bypass if not a dictionary (e.g. timeout)
    if not hasattr(default_kwarg, 'items'):
        return local_kwarg

    base = from_key_val_list(default_kwarg)
    overrides = from_key_val_list(local_kwarg)

    # Defaults first, then local overrides on top.
    merged = base.copy()
    merged.update(overrides)

    # A local value of None means "remove this key".
    for key in [k for (k, v) in overrides.items() if v is None]:
        del merged[key]

    return merged
class SessionRedirectMixin(object):
    """Mixin supplying redirect resolution; expects self.request and
    self.max_redirects from the mixing-in class."""

    def resolve_redirects(self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None):
        """Receives a Response. Returns a generator of Responses."""
        i = 0

        # ((resp.status_code is codes.see_other))
        while (('location' in resp.headers and resp.status_code in REDIRECT_STATI)):

            resp.content  # Consume socket so it can be released

            if i >= self.max_redirects:
                raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects)

            # Release the connection back into the pool.
            resp.close()

            url = resp.headers['location']
            method = req.method

            # Handle redirection without scheme (see: RFC 1808 Section 4)
            if url.startswith('//'):
                parsed_rurl = urlparse(resp.url)
                url = '%s:%s' % (parsed_rurl.scheme, url)

            # Facilitate non-RFC2616-compliant 'location' headers
            # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
            if not urlparse(url).netloc:
                # Compliant with RFC3986, we percent encode the url.
                url = urljoin(resp.url, requote_uri(url))

            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
            # NOTE(review): 'is' comparison on ints relies on CPython small-int
            # caching; '==' would be the robust spelling.
            if resp.status_code is codes.see_other:
                method = 'GET'

            # Do what the browsers do, despite standards...
            if resp.status_code in (codes.moved, codes.found) and req.method == 'POST':
                method = 'GET'

            # NOTE(review): 303 == codes.see_other, so this largely overlaps
            # the branch above; kept as-is to preserve behavior.
            if (resp.status_code == 303) and req.method != 'HEAD':
                method = 'GET'

            # Remove the cookie headers that were sent.
            headers = req.headers
            try:
                del headers['Cookie']
            except KeyError:
                pass

            # Issue the follow-up request ourselves with redirects disabled,
            # so this generator stays in control of the redirect chain.
            resp = self.request(
                url=url,
                method=method,
                headers=headers,
                params=req.params,
                auth=req.auth,
                cookies=req.cookies,
                allow_redirects=False,
                stream=stream,
                timeout=timeout,
                verify=verify,
                cert=cert,
                proxies=proxies
            )

            i += 1
            yield resp
class Session(SessionRedirectMixin):
    """A Requests session.

    Persists settings (headers, auth, proxies, cookies, SSL options) across
    requests and dispatches them through mounted transport adapters.
    """

    def __init__(self):

        #: A case-insensitive dictionary of headers to be sent on each
        #: :class:`Request <Request>` sent from this
        #: :class:`Session <Session>`.
        self.headers = default_headers()

        #: Authentication tuple or object to attach to
        #: :class:`Request <Request>`.
        self.auth = None

        #: Dictionary mapping protocol to the URL of the proxy (e.g.
        #: {'http': 'foo.bar:3128'}) to be used on each
        #: :class:`Request <Request>`.
        self.proxies = {}

        #: Event-handling hooks.
        self.hooks = default_hooks()

        #: Dictionary of querystring data to attach to each
        #: :class:`Request <Request>`. The dictionary values may be lists for
        #: representing multivalued query parameters.
        self.params = {}

        #: Stream response content.
        self.stream = False

        #: SSL Verification.
        self.verify = True

        #: SSL certificate.
        self.cert = None

        #: Maximum number of redirects to follow.
        self.max_redirects = DEFAULT_REDIRECT_LIMIT

        #: Should we trust the environment (env proxies, netrc, CA bundles)?
        self.trust_env = True

        # Set up a CookieJar to be used by default
        self.cookies = cookiejar_from_dict({})

        # Default connection adapters.
        self.adapters = {}
        self.mount('http://', HTTPAdapter())
        self.mount('https://', HTTPAdapter())

    def __enter__(self):
        """Support ``with Session() as s:`` usage."""
        return self

    def __exit__(self, *args):
        # Close all adapters on context exit.
        self.close()

    def request(self, method, url,
        params=None,
        data=None,
        headers=None,
        cookies=None,
        files=None,
        auth=None,
        timeout=None,
        allow_redirects=True,
        proxies=None,
        hooks=None,
        stream=None,
        verify=None,
        cert=None):
        """Construct, prepare and send a :class:`Request`; return the final
        :class:`Response` (after resolving redirects when allowed).

        Per-call kwargs are merged over the session-level defaults via
        merge_kwargs; session cookies are merged into the request's jar and
        response cookies persisted back onto the session.
        """

        cookies = cookies or {}
        proxies = proxies or {}

        # Bootstrap CookieJar.
        if not isinstance(cookies, cookielib.CookieJar):
            cookies = cookiejar_from_dict(cookies)

        # Bubble down session cookies.
        for cookie in self.cookies:
            cookies.set_cookie(cookie)

        # Gather clues from the surrounding environment.
        if self.trust_env:
            # Set environment's proxies.
            env_proxies = get_environ_proxies(url) or {}
            for (k, v) in env_proxies.items():
                proxies.setdefault(k, v)

            # Set environment's basic authentication.
            if not auth:
                auth = get_netrc_auth(url)

            # Look for configuration.
            if not verify and verify is not False:
                verify = os.environ.get('REQUESTS_CA_BUNDLE')

            # Curl compatibility.
            if not verify and verify is not False:
                verify = os.environ.get('CURL_CA_BUNDLE')

        # Merge all the kwargs.
        params = merge_kwargs(params, self.params)
        headers = merge_kwargs(headers, self.headers)
        auth = merge_kwargs(auth, self.auth)
        proxies = merge_kwargs(proxies, self.proxies)
        hooks = merge_kwargs(hooks, self.hooks)
        stream = merge_kwargs(stream, self.stream)
        verify = merge_kwargs(verify, self.verify)
        cert = merge_kwargs(cert, self.cert)

        # Create the Request.
        req = Request()
        req.method = method
        req.url = url
        req.headers = headers
        req.files = files
        req.data = data
        req.params = params
        req.auth = auth
        req.cookies = cookies
        req.hooks = hooks

        # Prepare the Request.
        prep = req.prepare()

        # Send the request.
        resp = self.send(prep, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies)

        # Persist cookies.
        for cookie in resp.cookies:
            self.cookies.set_cookie(cookie)

        # Redirect resolving generator.
        gen = self.resolve_redirects(resp, req, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies)

        # Resolve redirects if allowed.
        history = [r for r in gen] if allow_redirects else []

        # Shuffle things around if there's history.
        if history:
            history.insert(0, resp)
            resp = history.pop()
            resp.history = tuple(history)

        # Response manipulation hook.
        # NOTE(review): the hook's return value is stored on self.response but
        # the pre-hook `resp` is what gets returned, so a hook that replaces
        # the response is effectively ignored by callers — verify intent.
        self.response = dispatch_hook('response', hooks, resp)

        return resp

    def get(self, url, **kwargs):
        """Sends a GET request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('GET', url, **kwargs)

    def options(self, url, **kwargs):
        """Sends a OPTIONS request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('OPTIONS', url, **kwargs)

    def head(self, url, **kwargs):
        """Sends a HEAD request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', False)
        return self.request('HEAD', url, **kwargs)

    def post(self, url, data=None, **kwargs):
        """Sends a POST request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('POST', url, data=data, **kwargs)

    def put(self, url, data=None, **kwargs):
        """Sends a PUT request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('PUT', url, data=data, **kwargs)

    def patch(self, url, data=None, **kwargs):
        """Sends a PATCH request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('PATCH', url, data=data, **kwargs)

    def delete(self, url, **kwargs):
        """Sends a DELETE request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('DELETE', url, **kwargs)

    def send(self, request, **kwargs):
        """Send a given PreparedRequest."""
        # Route the prepared request through the adapter mounted for its URL.
        adapter = self.get_adapter(url=request.url)
        r = adapter.send(request, **kwargs)
        return r

    def get_adapter(self, url):
        """Returns the appropriate connnection adapter for the given URL."""
        for (prefix, adapter) in self.adapters.items():
            if url.startswith(prefix):
                return adapter

        # Nothing matches :-/
        raise InvalidSchema("No connection adapters were found for '%s'" % url)

    def close(self):
        """Closes all adapters and as such the session"""
        for _, v in self.adapters.items():
            v.close()

    def mount(self, prefix, adapter):
        """Registers a connection adapter to a prefix."""
        self.adapters[prefix] = adapter

    def __getstate__(self):
        # NOTE(review): __attrs__ is not defined in this chunk — presumably a
        # class-level list of picklable attribute names; confirm it exists.
        return dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)

    def __setstate__(self, state):
        for attr, value in state.items():
            setattr(self, attr, value)
def session():
    """Returns a new :class:`Session` for context-management.

    Kept as a module-level factory for API compatibility; equivalent to
    calling ``Session()`` directly.
    """
    return Session()
| {
"content_hash": "5b5e47a032e8a066d186ab9d278460bb",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 122,
"avg_line_length": 31.72422680412371,
"alnum_prop": 0.585831505402551,
"repo_name": "chushao/Gradesource-Uploader---GUI",
"id": "982e96f129ea9e8fc807f1dbbb71025c4a074993",
"size": "12334",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Gradesource-Uploader-master/requests/sessions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "2862"
},
{
"name": "Python",
"bytes": "880275"
},
{
"name": "Ruby",
"bytes": "4121"
},
{
"name": "Shell",
"bytes": "1134"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh._testing.util.api import verify_all
# Module under test
#import bokeh.sampledata.browsers as bsb
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# Names that bokeh.sampledata.browsers is expected to export.
ALL = (
    'browsers_nov_2013',
    'icons',
)

#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------

# Test (built by verify_all) checking the module's exports against ALL.
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.browsers", ALL))
@pytest.mark.sampledata
def test_browsers_nov_2013(pd):
    """The browsers_nov_2013 sample data loads as a 118-row DataFrame."""
    import bokeh.sampledata.browsers as bsb
    df = bsb.browsers_nov_2013
    assert isinstance(df, pd.DataFrame)
    # check detail for package data
    assert df.shape[0] == 118
@pytest.mark.sampledata
def test_icons():
    """The icons sample data is a dict keyed only by known browser names."""
    import bokeh.sampledata.browsers as bsb
    icons = bsb.icons
    assert isinstance(icons, dict)
    # check detail for package data
    known_browsers = {"Chrome", "Firefox", "Safari", "Opera", "IE"}
    assert all(name in known_browsers for name in icons)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| {
"content_hash": "effca779d2238c15f09bc080efe3de2b",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 94,
"avg_line_length": 32.2,
"alnum_prop": 0.36335403726708076,
"repo_name": "timsnyder/bokeh",
"id": "9c268637e3288b609a880b4dc33fbecaf08bbdef",
"size": "2436",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/sampledata/tests/test_browsers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "24877"
},
{
"name": "Dockerfile",
"bytes": "4099"
},
{
"name": "HTML",
"bytes": "54062"
},
{
"name": "JavaScript",
"bytes": "27797"
},
{
"name": "Makefile",
"bytes": "886"
},
{
"name": "PowerShell",
"bytes": "713"
},
{
"name": "Python",
"bytes": "3827067"
},
{
"name": "Roff",
"bytes": "495"
},
{
"name": "Shell",
"bytes": "9953"
},
{
"name": "TypeScript",
"bytes": "2145262"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from wtforms.fields.core import SelectField
from wtforms.fields.simple import TextAreaField
from wtforms.validators import DataRequired, ValidationError
from indico.modules.events.payment import payment_settings
from indico.util.i18n import _
from indico.web.forms.base import IndicoForm
from indico.web.forms.fields import MultipleItemsField
# Reference page for valid ISO-4217 currency codes, interpolated into the
# currencies field description of AdminSettingsForm.
CURRENCY_CODE_LINK = 'http://en.wikipedia.org/wiki/ISO_4217#Active_codes'
# Help text shared by the admin-level and event-level "conditions" fields.
CONDITIONS_DESC = _('The registrant must agree to these conditions before paying. When left empty, no confirmation '
                    'prompt is shown to the user.')
class AdminSettingsForm(IndicoForm):
    """Admin-level payment settings: available currencies, the default
    currency for new events, and the global payment conditions text."""

    currencies = MultipleItemsField(_('Currencies'), [DataRequired()],
                                    fields=[{'id': 'code', 'caption': _('Code')},
                                            {'id': 'name', 'caption': _('Name')}],
                                    unique_field='code',
                                    description=_("List of currencies that can be selected for an event. When deleting "
                                                  "a currency, existing events will keep using it. The currency code "
                                                  "must be a valid <a href='{0}'>ISO-4217</a> code such "
                                                  "as 'EUR' or 'CHF'.").format(CURRENCY_CODE_LINK))
    currency = SelectField(_('Currency'), [DataRequired()],
                           description=_('The default currency for new events. If you add a new currency, you need to '
                                         'save the settings first for it to show up here.'))
    conditions = TextAreaField(_('Conditions'), description=CONDITIONS_DESC)

    def __init__(self, *args, **kwargs):
        """Populate the currency selector from the stored currency list."""
        super(AdminSettingsForm, self).__init__(*args, **kwargs)
        self._set_currencies()

    def _set_currencies(self):
        # Build (code, "CODE (Name)") choices, sorted case-insensitively
        # by the display label.
        currencies = [(c['code'], '{0[code]} ({0[name]})'.format(c)) for c in payment_settings.get('currencies')]
        self.currency.choices = sorted(currencies, key=lambda x: x[1].lower())

    def validate_currency(self, field):
        # The default currency must be one of the currencies submitted in
        # this form (not a currency being deleted).
        if field.data not in {c['code'] for c in self.currencies.data}:
            raise ValidationError('Please select a different currency.')
class EventSettingsForm(IndicoForm):
    """Per-event payment settings form."""

    conditions = TextAreaField(_('Conditions'), description=CONDITIONS_DESC, render_kw={'rows': 10})
| {
"content_hash": "06add97c857bea4bfa0989f2258e9b9a",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 120,
"avg_line_length": 53.19565217391305,
"alnum_prop": 0.5986922762566408,
"repo_name": "mvidalgarcia/indico",
"id": "09a4d79b4fdc78497a014d5bb617ee13ab89b06a",
"size": "2661",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/modules/events/payment/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "538590"
},
{
"name": "HTML",
"bytes": "1345380"
},
{
"name": "JavaScript",
"bytes": "1781971"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4381847"
},
{
"name": "Shell",
"bytes": "3568"
},
{
"name": "TeX",
"bytes": "22182"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
"""
Implements operations on volumes residing on VMware datastores.
"""
import urllib
from oslo.utils import units
from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim_util
LOG = logging.getLogger(__name__)
# Clone type identifiers used by this driver.
LINKED_CLONE_TYPE = 'linked'
FULL_CLONE_TYPE = 'full'
# Fault names reported by vCenter when an entity or a file already exists.
ALREADY_EXISTS = 'AlreadyExists'
FILE_ALREADY_EXISTS = 'FileAlreadyExists'
def split_datastore_path(datastore_path):
    """Split the datastore path to components.

    Return the datastore name, relative folder path and the file name.

    E.g. datastore_path = [datastore1] my_volume/my_volume.vmdk, returns
    (datastore1, my_volume/, my_volume.vmdk).

    :param datastore_path: Datastore path of a file
    :return: Parsed datastore name, relative folder path and file name
    """
    splits = datastore_path.split('[', 1)[1].split(']', 1)
    datastore_name = None
    folder_path = None
    file_name = None
    if len(splits) == 1:
        datastore_name = splits[0]
    else:
        datastore_name, path = splits
        # Path is of the form my_volume/my_volume.vmdk; split it into the
        # folder part (my_volume/) and the file name (my_volume.vmdk).
        file_name = path.split('/')[-1]
        folder_path = path[:-len(file_name)] if file_name else path
    return (datastore_name.strip(), folder_path.strip(), file_name.strip())
class VirtualDiskPath(object):
    """Class representing paths of files comprising a virtual disk."""

    def __init__(self, ds_name, folder_path, disk_name):
        """Creates path object for the given disk.

        :param ds_name: name of the datastore where disk is stored
        :param folder_path: absolute path of the folder containing the disk
        :param disk_name: name of the virtual disk
        """
        descriptor = "{0}{1}.vmdk".format(folder_path, disk_name)
        self._descriptor_file_path = descriptor
        self._descriptor_ds_file_path = self.get_datastore_file_path(
            ds_name, descriptor)

    def get_datastore_file_path(self, ds_name, file_path):
        """Get datastore path corresponding to the given file path.

        :param ds_name: name of the datastore containing the file represented
                        by the given file path
        :param file_path: absolute path of the file
        :return: datastore file path
        """
        return "[{0}] {1}".format(ds_name, file_path)

    def get_descriptor_file_path(self):
        """Get absolute file path of the virtual disk descriptor."""
        return self._descriptor_file_path

    def get_descriptor_ds_file_path(self):
        """Get datastore file path of the virtual disk descriptor."""
        return self._descriptor_ds_file_path
class FlatExtentVirtualDiskPath(VirtualDiskPath):
    """Paths of files in a non-monolithic disk with a single flat extent."""

    def __init__(self, ds_name, folder_path, disk_name):
        """Creates path object for the given disk.

        :param ds_name: name of the datastore where disk is stored
        :param folder_path: absolute path of the folder containing the disk
        :param disk_name: name of the virtual disk
        """
        super(FlatExtentVirtualDiskPath, self).__init__(
            ds_name, folder_path, disk_name)
        # The flat extent file sits next to the descriptor with a
        # "-flat" suffix.
        flat_extent = "%s%s-flat.vmdk" % (folder_path, disk_name)
        self._flat_extent_file_path = flat_extent
        self._flat_extent_ds_file_path = self.get_datastore_file_path(
            ds_name, flat_extent)

    def get_flat_extent_file_path(self):
        """Get absolute file path of the flat extent."""
        return self._flat_extent_file_path

    def get_flat_extent_ds_file_path(self):
        """Get datastore file path of the flat extent."""
        return self._flat_extent_ds_file_path
class MonolithicSparseVirtualDiskPath(VirtualDiskPath):
    """Paths of file comprising a monolithic sparse disk."""
    # A monolithic sparse disk has only the descriptor file, so the base
    # class already provides everything that is needed.
    pass
class VirtualDiskType(object):
    """Supported virtual disk types."""

    EAGER_ZEROED_THICK = "eagerZeroedThick"
    PREALLOCATED = "preallocated"
    THIN = "thin"

    # thick in extra_spec means lazy-zeroed thick disk
    EXTRA_SPEC_DISK_TYPE_DICT = {'eagerZeroedThick': EAGER_ZEROED_THICK,
                                 'thick': PREALLOCATED,
                                 'thin': THIN
                                 }

    @staticmethod
    def is_valid(extra_spec_disk_type):
        """Check if the given disk type in extra_spec is valid.

        :param extra_spec_disk_type: disk type in extra_spec
        :return: True if valid
        """
        return (extra_spec_disk_type in
                VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT)

    @staticmethod
    def validate(extra_spec_disk_type):
        """Validate the given disk type in extra_spec.

        This method throws an instance of InvalidDiskTypeException if the given
        disk type is invalid.

        :param extra_spec_disk_type: disk type in extra_spec
        :raises: InvalidDiskTypeException
        """
        if VirtualDiskType.is_valid(extra_spec_disk_type):
            return
        raise error_util.InvalidDiskTypeException(
            disk_type=extra_spec_disk_type)

    @staticmethod
    def get_virtual_disk_type(extra_spec_disk_type):
        """Return disk type corresponding to the extra_spec disk type.

        :param extra_spec_disk_type: disk type in extra_spec
        :return: virtual disk type
        :raises: InvalidDiskTypeException
        """
        VirtualDiskType.validate(extra_spec_disk_type)
        mapping = VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT
        return mapping[extra_spec_disk_type]
class VirtualDiskAdapterType(object):
    """Supported virtual disk adapter types."""

    LSI_LOGIC = "lsiLogic"
    BUS_LOGIC = "busLogic"
    LSI_LOGIC_SAS = "lsiLogicsas"
    IDE = "ide"

    @staticmethod
    def is_valid(adapter_type):
        """Check if the given adapter type is valid.

        :param adapter_type: adapter type to check
        :return: True if valid
        """
        known_types = (VirtualDiskAdapterType.LSI_LOGIC,
                       VirtualDiskAdapterType.BUS_LOGIC,
                       VirtualDiskAdapterType.LSI_LOGIC_SAS,
                       VirtualDiskAdapterType.IDE)
        return adapter_type in known_types

    @staticmethod
    def validate(extra_spec_adapter_type):
        """Validate the given adapter type in extra_spec.

        This method throws an instance of InvalidAdapterTypeException if the
        given adapter type is invalid.

        :param extra_spec_adapter_type: adapter type in extra_spec
        :raises: InvalidAdapterTypeException
        """
        if VirtualDiskAdapterType.is_valid(extra_spec_adapter_type):
            return
        raise error_util.InvalidAdapterTypeException(
            invalid_type=extra_spec_adapter_type)

    @staticmethod
    def get_adapter_type(extra_spec_adapter_type):
        """Get the adapter type to be used in VirtualDiskSpec.

        :param extra_spec_adapter_type: adapter type in the extra_spec
        :return: adapter type to be used in VirtualDiskSpec
        """
        VirtualDiskAdapterType.validate(extra_spec_adapter_type)
        # We set the adapter type as lsiLogic for lsiLogicsas since it is not
        # supported by VirtualDiskManager APIs. This won't be a problem because
        # we attach the virtual disk to the correct controller type and the
        # disk adapter type is always resolved using its controller key.
        if extra_spec_adapter_type == VirtualDiskAdapterType.LSI_LOGIC_SAS:
            return VirtualDiskAdapterType.LSI_LOGIC
        return extra_spec_adapter_type
class ControllerType(object):
"""Encapsulate various controller types."""
LSI_LOGIC = 'VirtualLsiLogicController'
BUS_LOGIC = 'VirtualBusLogicController'
LSI_LOGIC_SAS = 'VirtualLsiLogicSASController'
IDE = 'VirtualIDEController'
CONTROLLER_TYPE_DICT = {
VirtualDiskAdapterType.LSI_LOGIC: LSI_LOGIC,
VirtualDiskAdapterType.BUS_LOGIC: BUS_LOGIC,
VirtualDiskAdapterType.LSI_LOGIC_SAS: LSI_LOGIC_SAS,
VirtualDiskAdapterType.IDE: IDE}
@staticmethod
def get_controller_type(adapter_type):
"""Get the disk controller type based on the given adapter type.
:param adapter_type: disk adapter type
:return: controller type corresponding to the given adapter type
:raises: InvalidAdapterTypeException
"""
if adapter_type in ControllerType.CONTROLLER_TYPE_DICT:
return ControllerType.CONTROLLER_TYPE_DICT[adapter_type]
raise error_util.InvalidAdapterTypeException(invalid_type=adapter_type)
@staticmethod
def is_scsi_controller(controller_type):
"""Check if the given controller is a SCSI controller.
:param controller_type: controller type
:return: True if the controller is a SCSI controller
"""
return controller_type in [ControllerType.LSI_LOGIC,
ControllerType.BUS_LOGIC,
ControllerType.LSI_LOGIC_SAS]
class VMwareVolumeOps(object):
"""Manages volume operations."""
    def __init__(self, session, max_objects):
        """Create a volume-ops object backed by the given vCenter session.

        :param session: session used to invoke VIM APIs
        :param max_objects: maximum number of objects retrieved per page of
                            RetrievePropertiesEx results
        """
        self._session = session
        self._max_objects = max_objects
def get_backing(self, name):
"""Get the backing based on name.
:param name: Name of the backing
:return: Managed object reference to the backing
"""
retrieve_result = self._session.invoke_api(vim_util, 'get_objects',
self._session.vim,
'VirtualMachine',
self._max_objects)
while retrieve_result:
vms = retrieve_result.objects
for vm in vms:
if vm.propSet[0].val == name:
# We got the result, so cancel further retrieval.
self.cancel_retrieval(retrieve_result)
return vm.obj
# Result not obtained, continue retrieving results.
retrieve_result = self.continue_retrieval(retrieve_result)
LOG.debug("Did not find any backing with name: %s" % name)
def delete_backing(self, backing):
"""Delete the backing.
:param backing: Managed object reference to the backing
"""
LOG.debug("Deleting the VM backing: %s." % backing)
task = self._session.invoke_api(self._session.vim, 'Destroy_Task',
backing)
LOG.debug("Initiated deletion of VM backing: %s." % backing)
self._session.wait_for_task(task)
LOG.info(_LI("Deleted the VM backing: %s.") % backing)
    # TODO(kartikaditya) Keep the methods not specific to volume in
    # a different file
    def get_host(self, instance):
        """Get host under which instance is present.

        :param instance: Managed object reference of the instance VM
        :return: Host managing the instance VM
        """
        return self._session.invoke_api(vim_util, 'get_object_property',
                                        self._session.vim, instance,
                                        'runtime.host')
    def get_hosts(self):
        """Get all host from the inventory.

        The result may be paginated; callers can fetch subsequent pages
        with continue_retrieval.

        :return: All the hosts from the inventory
        """
        return self._session.invoke_api(vim_util, 'get_objects',
                                        self._session.vim,
                                        'HostSystem', self._max_objects)
    def continue_retrieval(self, retrieve_result):
        """Continue retrieval of results if necessary.

        :param retrieve_result: Result from RetrievePropertiesEx
        :return: Next page of results, if any
        """
        return self._session.invoke_api(vim_util, 'continue_retrieval',
                                        self._session.vim, retrieve_result)
    def cancel_retrieval(self, retrieve_result):
        """Cancel retrieval of results if necessary.

        Releases server-side resources held by a paginated retrieval that
        is abandoned before all pages were read.

        :param retrieve_result: Result from RetrievePropertiesEx
        """
        self._session.invoke_api(vim_util, 'cancel_retrieval',
                                 self._session.vim, retrieve_result)
def _is_usable(self, mount_info):
"""Check if a datastore is usable as per the given mount info.
The datastore is considered to be usable for a host only if it is
writable, mounted and accessible.
:param mount_info: Host mount information
:return: True if datastore is usable
"""
writable = mount_info.accessMode == 'readWrite'
# If mounted attribute is not set, then default is True
mounted = getattr(mount_info, 'mounted', True)
# If accessible attribute is not set, then default is False
accessible = getattr(mount_info, 'accessible', False)
return writable and mounted and accessible
def get_connected_hosts(self, datastore):
"""Get all the hosts to which the datastore is connected and usable.
The datastore is considered to be usable for a host only if it is
writable, mounted and accessible.
:param datastore: Reference to the datastore entity
:return: List of managed object references of all connected
hosts
"""
summary = self.get_summary(datastore)
if not summary.accessible:
return []
host_mounts = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datastore,
'host')
if not hasattr(host_mounts, 'DatastoreHostMount'):
return []
connected_hosts = []
for host_mount in host_mounts.DatastoreHostMount:
if self._is_usable(host_mount.mountInfo):
connected_hosts.append(host_mount.key.value)
return connected_hosts
def _in_maintenance(self, summary):
"""Check if a datastore is entering maintenance or in maintenance.
:param summary: Summary information about the datastore
:return: True if the datastore is entering maintenance or in
maintenance
"""
if hasattr(summary, 'maintenanceMode'):
return summary.maintenanceMode in ['enteringMaintenance',
'inMaintenance']
return False
def _is_valid(self, datastore, host):
"""Check if the datastore is valid for the given host.
A datastore is considered valid for a host only if the datastore is
writable, mounted and accessible. Also, the datastore should not be
in maintenance mode.
:param datastore: Reference to the datastore entity
:param host: Reference to the host entity
:return: True if datastore can be used for volume creation
"""
summary = self.get_summary(datastore)
in_maintenance = self._in_maintenance(summary)
if not summary.accessible or in_maintenance:
return False
host_mounts = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datastore,
'host')
for host_mount in host_mounts.DatastoreHostMount:
if host_mount.key.value == host.value:
return self._is_usable(host_mount.mountInfo)
return False
    def get_dss_rp(self, host):
        """Get accessible datastores and resource pool of the host.

        :param host: Managed object reference of the host
        :return: Datastores accessible to the host and resource pool to which
                 the host belongs to
        :raises: VimException if no valid datastore is attached to the host
        """
        props = self._session.invoke_api(vim_util, 'get_object_properties',
                                         self._session.vim, host,
                                         ['datastore', 'parent'])
        # Get datastores and compute resource or cluster compute resource
        datastores = []
        compute_resource = None
        for elem in props:
            for prop in elem.propSet:
                if prop.name == 'datastore' and prop.val:
                    # Consider only if datastores are present under host
                    datastores = prop.val.ManagedObjectReference
                elif prop.name == 'parent':
                    compute_resource = prop.val
        LOG.debug("Datastores attached to host %(host)s are: %(ds)s.",
                  {'host': host, 'ds': datastores})
        # Filter datastores based on if it is accessible, mounted and writable
        valid_dss = []
        for datastore in datastores:
            if self._is_valid(datastore, host):
                valid_dss.append(datastore)
        # Get resource pool from compute resource or cluster compute resource
        resource_pool = self._session.invoke_api(vim_util,
                                                 'get_object_property',
                                                 self._session.vim,
                                                 compute_resource,
                                                 'resourcePool')
        if not valid_dss:
            msg = _("There are no valid datastores attached to %s.") % host
            LOG.error(msg)
            raise error_util.VimException(msg)
        else:
            LOG.debug("Valid datastores are: %s", valid_dss)
        return (valid_dss, resource_pool)
def _get_parent(self, child, parent_type):
"""Get immediate parent of given type via 'parent' property.
:param child: Child entity reference
:param parent_type: Entity type of the parent
:return: Immediate parent of specific type up the hierarchy via
'parent' property
"""
if not child:
return None
if child._type == parent_type:
return child
parent = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, child, 'parent')
return self._get_parent(parent, parent_type)
    def get_dc(self, child):
        """Get parent datacenter up the hierarchy via 'parent' property.

        :param child: Reference of the child entity
        :return: Parent Datacenter of the param child entity
        """
        return self._get_parent(child, 'Datacenter')
    def get_vmfolder(self, datacenter):
        """Get the vmFolder.

        :param datacenter: Reference to the datacenter entity
        :return: vmFolder property of the datacenter
        """
        return self._session.invoke_api(vim_util, 'get_object_property',
                                        self._session.vim, datacenter,
                                        'vmFolder')
def create_folder(self, parent_folder, child_folder_name):
"""Creates child folder with given name under the given parent folder.
The method first checks if a child folder already exists, if it does,
then it returns a moref for the folder, else it creates one and then
return the moref.
:param parent_folder: Reference to the folder entity
:param child_folder_name: Name of the child folder
:return: Reference to the child folder with input name if it already
exists, else create one and return the reference
"""
LOG.debug("Creating folder: %(child_folder_name)s under parent "
"folder: %(parent_folder)s." %
{'child_folder_name': child_folder_name,
'parent_folder': parent_folder})
# Get list of child entities for the parent folder
prop_val = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, parent_folder,
'childEntity')
child_entities = prop_val.ManagedObjectReference
# Return if the child folder with input name is already present
for child_entity in child_entities:
if child_entity._type != 'Folder':
continue
child_entity_name = self.get_entity_name(child_entity)
if child_entity_name and (urllib.unquote(child_entity_name) ==
child_folder_name):
LOG.debug("Child folder: %s already present.",
child_folder_name)
return child_entity
# Need to create the child folder
child_folder = self._session.invoke_api(self._session.vim,
'CreateFolder', parent_folder,
name=child_folder_name)
LOG.debug("Created child folder: %s." % child_folder)
return child_folder
def extend_virtual_disk(self, requested_size_in_gb, name, dc_ref,
eager_zero=False):
"""Extend the virtual disk to the requested size.
:param requested_size_in_gb: Size of the volume in GB
:param name: Name of the backing
:param dc_ref: Reference datacenter
:param eager_zero: Boolean determining if the free space
is zeroed out
"""
LOG.debug("Extending the volume %(name)s to %(size)s GB.",
{'name': name, 'size': requested_size_in_gb})
diskMgr = self._session.vim.service_content.virtualDiskManager
# VMWare API needs the capacity unit to be in KB, so convert the
# capacity unit from GB to KB.
size_in_kb = requested_size_in_gb * units.Mi
task = self._session.invoke_api(self._session.vim,
"ExtendVirtualDisk_Task",
diskMgr,
name=name,
datacenter=dc_ref,
newCapacityKb=size_in_kb,
eagerZero=eager_zero)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully extended the volume %(name)s to "
"%(size)s GB."),
{'name': name, 'size': requested_size_in_gb})
    def _create_controller_config_spec(self, adapter_type):
        """Returns config spec for adding a disk controller.

        :param adapter_type: disk adapter type
        :return: VirtualDeviceConfigSpec that adds the controller
        """
        cf = self._session.vim.client.factory
        controller_type = ControllerType.get_controller_type(adapter_type)
        controller_device = cf.create('ns0:%s' % controller_type)
        # Temporary negative key; referenced by the disk spec through its
        # controllerKey before the device is actually created.
        controller_device.key = -100
        controller_device.busNumber = 0
        if ControllerType.is_scsi_controller(controller_type):
            controller_device.sharedBus = 'noSharing'
        controller_spec = cf.create('ns0:VirtualDeviceConfigSpec')
        controller_spec.operation = 'add'
        controller_spec.device = controller_device
        return controller_spec
def _create_disk_backing(self, disk_type, vmdk_ds_file_path):
"""Creates file backing for virtual disk."""
cf = self._session.vim.client.factory
disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo')
if disk_type == VirtualDiskType.EAGER_ZEROED_THICK:
disk_device_bkng.eagerlyScrub = True
elif disk_type == VirtualDiskType.THIN:
disk_device_bkng.thinProvisioned = True
disk_device_bkng.fileName = vmdk_ds_file_path or ''
disk_device_bkng.diskMode = 'persistent'
return disk_device_bkng
    def _create_virtual_disk_config_spec(self, size_kb, disk_type,
                                         controller_key, vmdk_ds_file_path):
        """Returns config spec for adding a virtual disk.

        :param size_kb: disk size in KB
        :param disk_type: disk provisioning type
        :param controller_key: device key of the controller the disk attaches
                               to
        :param vmdk_ds_file_path: datastore file path of an existing virtual
                                  disk; if None, a new backing file is created
        :return: VirtualDeviceConfigSpec that adds the disk
        """
        cf = self._session.vim.client.factory
        disk_device = cf.create('ns0:VirtualDisk')
        # disk size should be at least 1024KB
        disk_device.capacityInKB = max(units.Ki, int(size_kb))
        # Pick a temporary negative device key that does not collide with
        # the controller's key.
        if controller_key < 0:
            disk_device.key = controller_key - 1
        else:
            disk_device.key = -101
        disk_device.unitNumber = 0
        disk_device.controllerKey = controller_key
        disk_device.backing = self._create_disk_backing(disk_type,
                                                        vmdk_ds_file_path)
        disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
        disk_spec.operation = 'add'
        if vmdk_ds_file_path is None:
            # No existing disk to reuse, so ask the server to create the file.
            disk_spec.fileOperation = 'create'
        disk_spec.device = disk_device
        return disk_spec
def _create_specs_for_disk_add(self, size_kb, disk_type, adapter_type,
vmdk_ds_file_path=None):
"""Create controller and disk config specs for adding a new disk.
:param size_kb: disk size in KB
:param disk_type: disk provisioning type
:param adapter_type: disk adapter type
:param vmdk_ds_file_path: Optional datastore file path of an existing
virtual disk. If specified, file backing is
not created for the virtual disk.
:return: list containing controller and disk config specs
"""
controller_spec = None
if adapter_type == 'ide':
# For IDE disks, use one of the default IDE controllers (with keys
# 200 and 201) created as part of backing VM creation.
controller_key = 200
else:
controller_spec = self._create_controller_config_spec(adapter_type)
controller_key = controller_spec.device.key
disk_spec = self._create_virtual_disk_config_spec(size_kb,
disk_type,
controller_key,
vmdk_ds_file_path)
specs = [disk_spec]
if controller_spec is not None:
specs.append(controller_spec)
return specs
    def _get_create_spec_disk_less(self, name, ds_name, profileId=None):
        """Return spec for creating disk-less backing.

        :param name: Name of the backing
        :param ds_name: Datastore name where the disk is to be provisioned
        :param profileId: storage profile ID for the backing
        :return: Spec for creation
        """
        cf = self._session.vim.client.factory
        vm_file_info = cf.create('ns0:VirtualMachineFileInfo')
        # Placing the VM files at the datastore root.
        vm_file_info.vmPathName = '[%s]' % ds_name

        create_spec = cf.create('ns0:VirtualMachineConfigSpec')
        create_spec.name = name
        create_spec.guestId = 'otherGuest'
        create_spec.numCPUs = 1
        create_spec.memoryMB = 128
        create_spec.files = vm_file_info
        # Set the hardware version to a compatible version supported by
        # vSphere 5.0. This will ensure that the backing VM can be migrated
        # without any incompatibility issues in a mixed cluster of ESX hosts
        # with versions 5.0 or above.
        create_spec.version = "vmx-08"

        if profileId:
            vmProfile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
            vmProfile.profileId = profileId
            create_spec.vmProfile = [vmProfile]

        return create_spec
    def get_create_spec(self, name, size_kb, disk_type, ds_name,
                        profileId=None, adapter_type='lsiLogic'):
        """Return spec for creating backing with a single disk.

        :param name: name of the backing
        :param size_kb: disk size in KB
        :param disk_type: disk provisioning type
        :param ds_name: datastore name where the disk is to be provisioned
        :param profileId: storage profile ID for the backing
        :param adapter_type: disk adapter type
        :return: spec for creation
        """
        create_spec = self._get_create_spec_disk_less(name, ds_name, profileId)
        # Attach the disk (and, if needed, its controller) to the disk-less
        # spec via device changes.
        create_spec.deviceChange = self._create_specs_for_disk_add(
            size_kb, disk_type, adapter_type)
        return create_spec
    def _create_backing_int(self, folder, resource_pool, host, create_spec):
        """Helper for create backing methods.

        Invokes CreateVM_Task and waits for its completion.

        :param folder: Folder where the backing VM is created
        :param resource_pool: Resource pool reference
        :param host: Host reference
        :param create_spec: VM creation spec
        :return: Reference to the created backing VM
        """
        LOG.debug("Creating volume backing with spec: %s.", create_spec)
        task = self._session.invoke_api(self._session.vim, 'CreateVM_Task',
                                        folder, config=create_spec,
                                        pool=resource_pool, host=host)
        task_info = self._session.wait_for_task(task)
        backing = task_info.result
        LOG.info(_LI("Successfully created volume backing: %s."), backing)
        return backing
    def create_backing(self, name, size_kb, disk_type, folder, resource_pool,
                       host, ds_name, profileId=None, adapter_type='lsiLogic'):
        """Create backing for the volume.

        Creates a VM with one VMDK based on the given inputs.

        :param name: Name of the backing
        :param size_kb: Size in KB of the backing
        :param disk_type: VMDK type for the disk
        :param folder: Folder, where to create the backing under
        :param resource_pool: Resource pool reference
        :param host: Host reference
        :param ds_name: Datastore name where the disk is to be provisioned
        :param profileId: storage profile ID to be associated with backing
        :param adapter_type: Disk adapter type
        :return: Reference to the created backing entity
        """
        LOG.debug("Creating volume backing with name: %(name)s "
                  "disk_type: %(disk_type)s size_kb: %(size_kb)s "
                  "adapter_type: %(adapter_type)s profileId: %(profile)s at "
                  "folder: %(folder)s resource_pool: %(resource_pool)s "
                  "host: %(host)s datastore_name: %(ds_name)s.",
                  {'name': name, 'disk_type': disk_type, 'size_kb': size_kb,
                   'folder': folder, 'resource_pool': resource_pool,
                   'ds_name': ds_name, 'profile': profileId, 'host': host,
                   'adapter_type': adapter_type})
        create_spec = self.get_create_spec(name, size_kb, disk_type, ds_name,
                                           profileId, adapter_type)
        return self._create_backing_int(folder, resource_pool, host,
                                        create_spec)
    def create_backing_disk_less(self, name, folder, resource_pool,
                                 host, ds_name, profileId=None):
        """Create disk-less volume backing.

        This type of backing is useful for creating volume from image. The
        downloaded image from the image service can be copied to a virtual
        disk of desired provisioning type and added to the backing VM.

        :param name: Name of the backing
        :param folder: Folder where the backing is created
        :param resource_pool: Resource pool reference
        :param host: Host reference
        :param ds_name: Name of the datastore used for VM storage
        :param profileId: Storage profile ID to be associated with backing
        :return: Reference to the created backing entity
        """
        LOG.debug("Creating disk-less volume backing with name: %(name)s "
                  "profileId: %(profile)s at folder: %(folder)s "
                  "resource pool: %(resource_pool)s host: %(host)s "
                  "datastore_name: %(ds_name)s.",
                  {'name': name, 'profile': profileId, 'folder': folder,
                   'resource_pool': resource_pool, 'host': host,
                   'ds_name': ds_name})
        create_spec = self._get_create_spec_disk_less(name, ds_name, profileId)
        return self._create_backing_int(folder, resource_pool, host,
                                        create_spec)
    def get_datastore(self, backing):
        """Get datastore where the backing resides.

        Only the first datastore of the backing's 'datastore' property is
        returned.

        :param backing: Reference to the backing
        :return: Datastore reference to which the backing belongs
        """
        return self._session.invoke_api(vim_util, 'get_object_property',
                                        self._session.vim, backing,
                                        'datastore').ManagedObjectReference[0]
    def get_summary(self, datastore):
        """Get datastore summary.

        :param datastore: Reference to the datastore
        :return: 'summary' property of the datastore
        """
        return self._session.invoke_api(vim_util, 'get_object_property',
                                        self._session.vim, datastore,
                                        'summary')
    def _create_relocate_spec_disk_locator(self, datastore, disk_type,
                                           disk_device):
        """Creates spec for disk type conversion during relocate.

        :param datastore: Reference to the target datastore
        :param disk_type: destination disk provisioning type
        :param disk_device: virtual disk device to convert
        :return: VirtualMachineRelocateSpecDiskLocator for the disk
        """
        cf = self._session.vim.client.factory
        disk_locator = cf.create("ns0:VirtualMachineRelocateSpecDiskLocator")
        disk_locator.datastore = datastore
        disk_locator.diskId = disk_device.key
        # A fresh backing of the desired type triggers the conversion.
        disk_locator.diskBackingInfo = self._create_disk_backing(disk_type,
                                                                 None)
        return disk_locator
    def _get_relocate_spec(self, datastore, resource_pool, host,
                           disk_move_type, disk_type=None, disk_device=None):
        """Return spec for relocating volume backing.

        :param datastore: Reference to the datastore
        :param resource_pool: Reference to the resource pool
        :param host: Reference to the host
        :param disk_move_type: Disk move type option
        :param disk_type: Destination disk type
        :param disk_device: Virtual device corresponding to the disk
        :return: Spec for relocation
        """
        cf = self._session.vim.client.factory
        relocate_spec = cf.create('ns0:VirtualMachineRelocateSpec')
        relocate_spec.datastore = datastore
        relocate_spec.pool = resource_pool
        relocate_spec.host = host
        relocate_spec.diskMoveType = disk_move_type

        # Disk type conversion happens only when both the destination disk
        # type and the current disk device are supplied.
        if disk_type is not None and disk_device is not None:
            disk_locator = self._create_relocate_spec_disk_locator(datastore,
                                                                   disk_type,
                                                                   disk_device)
            relocate_spec.disk = [disk_locator]

        LOG.debug("Spec for relocating the backing: %s.", relocate_spec)
        return relocate_spec
def relocate_backing(
self, backing, datastore, resource_pool, host, disk_type=None):
"""Relocates backing to the input datastore and resource pool.
The implementation uses moveAllDiskBackingsAndAllowSharing disk move
type.
:param backing: Reference to the backing
:param datastore: Reference to the datastore
:param resource_pool: Reference to the resource pool
:param host: Reference to the host
:param disk_type: destination disk type
"""
LOG.debug("Relocating backing: %(backing)s to datastore: %(ds)s "
"and resource pool: %(rp)s with destination disk type: "
"%(disk_type)s.",
{'backing': backing,
'ds': datastore,
'rp': resource_pool,
'disk_type': disk_type})
# Relocate the volume backing
disk_move_type = 'moveAllDiskBackingsAndAllowSharing'
disk_device = None
if disk_type is not None:
disk_device = self._get_disk_device(backing)
relocate_spec = self._get_relocate_spec(datastore, resource_pool, host,
disk_move_type, disk_type,
disk_device)
task = self._session.invoke_api(self._session.vim, 'RelocateVM_Task',
backing, spec=relocate_spec)
LOG.debug("Initiated relocation of volume backing: %s." % backing)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully relocated volume backing: %(backing)s "
"to datastore: %(ds)s and resource pool: %(rp)s.") %
{'backing': backing, 'ds': datastore, 'rp': resource_pool})
def move_backing_to_folder(self, backing, folder):
"""Move the volume backing to the folder.
:param backing: Reference to the backing
:param folder: Reference to the folder
"""
LOG.debug("Moving backing: %(backing)s to folder: %(fol)s." %
{'backing': backing, 'fol': folder})
task = self._session.invoke_api(self._session.vim,
'MoveIntoFolder_Task', folder,
list=[backing])
LOG.debug("Initiated move of volume backing: %(backing)s into the "
"folder: %(fol)s." % {'backing': backing, 'fol': folder})
self._session.wait_for_task(task)
LOG.info(_LI("Successfully moved volume "
"backing: %(backing)s into the "
"folder: %(fol)s.") % {'backing': backing, 'fol': folder})
def create_snapshot(self, backing, name, description, quiesce=False):
"""Create snapshot of the backing with given name and description.
:param backing: Reference to the backing entity
:param name: Snapshot name
:param description: Snapshot description
:param quiesce: Whether to quiesce the backing when taking snapshot
:return: Created snapshot entity reference
"""
LOG.debug("Snapshoting backing: %(backing)s with name: %(name)s." %
{'backing': backing, 'name': name})
task = self._session.invoke_api(self._session.vim,
'CreateSnapshot_Task',
backing, name=name,
description=description,
memory=False, quiesce=quiesce)
LOG.debug("Initiated snapshot of volume backing: %(backing)s "
"named: %(name)s." % {'backing': backing, 'name': name})
task_info = self._session.wait_for_task(task)
snapshot = task_info.result
LOG.info(_LI("Successfully created snapshot: %(snap)s for volume "
"backing: %(backing)s.") %
{'snap': snapshot, 'backing': backing})
return snapshot
@staticmethod
def _get_snapshot_from_tree(name, root):
"""Get snapshot by name from the snapshot tree root.
:param name: Snapshot name
:param root: Current root node in the snapshot tree
:return: None in the snapshot tree with given snapshot name
"""
if not root:
return None
if root.name == name:
return root.snapshot
if (not hasattr(root, 'childSnapshotList') or
not root.childSnapshotList):
# When root does not have children, the childSnapshotList attr
# is missing sometime. Adding an additional check.
return None
for node in root.childSnapshotList:
snapshot = VMwareVolumeOps._get_snapshot_from_tree(name, node)
if snapshot:
return snapshot
    def get_snapshot(self, backing, name):
        """Get snapshot of the backing with given name.

        :param backing: Reference to the backing entity
        :param name: Snapshot name
        :return: Snapshot entity of the backing with given name
        """
        snapshot = self._session.invoke_api(vim_util, 'get_object_property',
                                            self._session.vim, backing,
                                            'snapshot')
        if not snapshot or not snapshot.rootSnapshotList:
            return None
        for root in snapshot.rootSnapshotList:
            # NOTE: only the first root of the snapshot tree is searched;
            # the loop returns unconditionally on the first iteration.
            return VMwareVolumeOps._get_snapshot_from_tree(name, root)
def snapshot_exists(self, backing):
"""Check if the given backing contains snapshots."""
snapshot = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'snapshot')
if snapshot is None or snapshot.rootSnapshotList is None:
return False
return len(snapshot.rootSnapshotList) != 0
def delete_snapshot(self, backing, name):
"""Delete a given snapshot from volume backing.
:param backing: Reference to the backing entity
:param name: Snapshot name
"""
LOG.debug("Deleting the snapshot: %(name)s from backing: "
"%(backing)s." %
{'name': name, 'backing': backing})
snapshot = self.get_snapshot(backing, name)
if not snapshot:
LOG.info(_LI("Did not find the snapshot: %(name)s for backing: "
"%(backing)s. Need not delete anything.") %
{'name': name, 'backing': backing})
return
task = self._session.invoke_api(self._session.vim,
'RemoveSnapshot_Task',
snapshot, removeChildren=False)
LOG.debug("Initiated snapshot: %(name)s deletion for backing: "
"%(backing)s." %
{'name': name, 'backing': backing})
self._session.wait_for_task(task)
LOG.info(_LI("Successfully deleted snapshot: %(name)s of backing: "
"%(backing)s.") % {'backing': backing, 'name': name})
    def _get_folder(self, backing):
        """Get parent folder of the backing.

        :param backing: Reference to the backing entity
        :return: Reference to parent folder of the backing entity
        """
        # Delegates to _get_parent, which walks the ancestry looking for the
        # 'Folder' managed-object type.
        return self._get_parent(backing, 'Folder')
def _get_clone_spec(self, datastore, disk_move_type, snapshot, backing,
disk_type, host=None):
"""Get the clone spec.
:param datastore: Reference to datastore
:param disk_move_type: Disk move type
:param snapshot: Reference to snapshot
:param backing: Source backing VM
:param disk_type: Disk type of clone
:param host: Target host
:return: Clone spec
"""
if disk_type is not None:
disk_device = self._get_disk_device(backing)
else:
disk_device = None
relocate_spec = self._get_relocate_spec(datastore, None, host,
disk_move_type, disk_type,
disk_device)
cf = self._session.vim.client.factory
clone_spec = cf.create('ns0:VirtualMachineCloneSpec')
clone_spec.location = relocate_spec
clone_spec.powerOn = False
clone_spec.template = False
clone_spec.snapshot = snapshot
LOG.debug("Spec for cloning the backing: %s.", clone_spec)
return clone_spec
def clone_backing(self, name, backing, snapshot, clone_type, datastore,
disk_type=None, host=None):
"""Clone backing.
If the clone_type is 'full', then a full clone of the source volume
backing will be created. Else, if it is 'linked', then a linked clone
of the source volume backing will be created.
:param name: Name for the clone
:param backing: Reference to the backing entity
:param snapshot: Snapshot point from which the clone should be done
:param clone_type: Whether a full clone or linked clone is to be made
:param datastore: Reference to the datastore entity
:param disk_type: Disk type of the clone
:param host: Target host
"""
LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, "
"clone type: %(type)s from snapshot: %(snap)s on "
"host: %(host)s, datastore: %(ds)s with disk type: "
"%(disk_type)s.",
{'back': backing, 'name': name, 'type': clone_type,
'snap': snapshot, 'ds': datastore, 'disk_type': disk_type,
'host': host})
folder = self._get_folder(backing)
if clone_type == LINKED_CLONE_TYPE:
disk_move_type = 'createNewChildDiskBacking'
else:
disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
clone_spec = self._get_clone_spec(datastore, disk_move_type, snapshot,
backing, disk_type, host)
task = self._session.invoke_api(self._session.vim, 'CloneVM_Task',
backing, folder=folder, name=name,
spec=clone_spec)
LOG.debug("Initiated clone of backing: %s." % name)
task_info = self._session.wait_for_task(task)
new_backing = task_info.result
LOG.info(_LI("Successfully created clone: %s.") % new_backing)
return new_backing
def _reconfigure_backing(self, backing, reconfig_spec):
"""Reconfigure backing VM with the given spec."""
LOG.debug("Reconfiguring backing VM: %(backing)s with spec: %(spec)s.",
{'backing': backing,
'spec': reconfig_spec})
reconfig_task = self._session.invoke_api(self._session.vim,
"ReconfigVM_Task",
backing,
spec=reconfig_spec)
LOG.debug("Task: %s created for reconfiguring backing VM.",
reconfig_task)
self._session.wait_for_task(reconfig_task)
def attach_disk_to_backing(self, backing, size_in_kb, disk_type,
adapter_type, vmdk_ds_file_path):
"""Attach an existing virtual disk to the backing VM.
:param backing: reference to the backing VM
:param size_in_kb: disk size in KB
:param disk_type: virtual disk type
:param adapter_type: disk adapter type
:param vmdk_ds_file_path: datastore file path of the virtual disk to
be attached
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to add new disk: "
"%(path)s with size (KB): %(size)d and adapter type: "
"%(adapter_type)s.",
{'backing': backing,
'path': vmdk_ds_file_path,
'size': size_in_kb,
'adapter_type': adapter_type})
cf = self._session.vim.client.factory
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
specs = self._create_specs_for_disk_add(size_in_kb,
disk_type,
adapter_type,
vmdk_ds_file_path)
reconfig_spec.deviceChange = specs
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %s reconfigured with new disk.", backing)
def rename_backing(self, backing, new_name):
"""Rename backing VM.
:param backing: VM to be renamed
:param new_name: new VM name
"""
LOG.info(_LI("Renaming backing VM: %(backing)s to %(new_name)s."),
{'backing': backing,
'new_name': new_name})
rename_task = self._session.invoke_api(self._session.vim,
"Rename_Task",
backing,
newName=new_name)
LOG.debug("Task: %s created for renaming VM.", rename_task)
self._session.wait_for_task(rename_task)
LOG.info(_LI("Backing VM: %(backing)s renamed to %(new_name)s."),
{'backing': backing,
'new_name': new_name})
def change_backing_profile(self, backing, profile_id):
"""Change storage profile of the backing VM.
The current profile is removed if the new profile is None.
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to change profile to:"
" %(profile)s.",
{'backing': backing,
'profile': profile_id})
cf = self._session.vim.client.factory
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
if profile_id is None:
vm_profile = cf.create('ns0:VirtualMachineEmptyProfileSpec')
vm_profile.dynamicType = 'profile'
else:
vm_profile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
vm_profile.profileId = profile_id.uniqueId
reconfig_spec.vmProfile = [vm_profile]
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %(backing)s reconfigured with new profile: "
"%(profile)s.",
{'backing': backing,
'profile': profile_id})
def delete_file(self, file_path, datacenter=None):
"""Delete file or folder on the datastore.
:param file_path: Datastore path of the file or folder
"""
LOG.debug("Deleting file: %(file)s under datacenter: %(dc)s." %
{'file': file_path, 'dc': datacenter})
fileManager = self._session.vim.service_content.fileManager
task = self._session.invoke_api(self._session.vim,
'DeleteDatastoreFile_Task',
fileManager,
name=file_path,
datacenter=datacenter)
LOG.debug("Initiated deletion via task: %s." % task)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully deleted file: %s.") % file_path)
def get_path_name(self, backing):
"""Get path name of the backing.
:param backing: Reference to the backing entity
:return: Path name of the backing
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'config.files').vmPathName
def get_entity_name(self, entity):
"""Get name of the managed entity.
:param entity: Reference to the entity
:return: Name of the managed entity
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, entity, 'name')
def _get_disk_device(self, backing):
"""Get the virtual device corresponding to disk."""
hardware_devices = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
backing,
'config.hardware.device')
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if device.__class__.__name__ == "VirtualDisk":
return device
LOG.error(_LE("Virtual disk device of "
"backing: %s not found."), backing)
raise error_util.VirtualDiskNotFoundException()
def get_vmdk_path(self, backing):
"""Get the vmdk file name of the backing.
The vmdk file path of the backing returned is of the form:
"[datastore1] my_folder/my_vm.vmdk"
:param backing: Reference to the backing
:return: VMDK file path of the backing
"""
disk_device = self._get_disk_device(backing)
backing = disk_device.backing
if backing.__class__.__name__ != "VirtualDiskFlatVer2BackingInfo":
msg = _("Invalid disk backing: %s.") % backing.__class__.__name__
LOG.error(msg)
raise AssertionError(msg)
return backing.fileName
def get_disk_size(self, backing):
"""Get disk size of the backing.
:param backing: backing VM reference
:return: disk size in bytes
"""
disk_device = self._get_disk_device(backing)
return disk_device.capacityInKB * units.Ki
def _get_virtual_disk_create_spec(self, size_in_kb, adapter_type,
disk_type):
"""Return spec for file-backed virtual disk creation."""
cf = self._session.vim.client.factory
spec = cf.create('ns0:FileBackedVirtualDiskSpec')
spec.capacityKb = size_in_kb
spec.adapterType = VirtualDiskAdapterType.get_adapter_type(
adapter_type)
spec.diskType = VirtualDiskType.get_virtual_disk_type(disk_type)
return spec
def create_virtual_disk(self, dc_ref, vmdk_ds_file_path, size_in_kb,
adapter_type='busLogic', disk_type='preallocated'):
"""Create virtual disk with the given settings.
:param dc_ref: datacenter reference
:param vmdk_ds_file_path: datastore file path of the virtual disk
:param size_in_kb: disk size in KB
:param adapter_type: disk adapter type
:param disk_type: vmdk type
"""
virtual_disk_spec = self._get_virtual_disk_create_spec(size_in_kb,
adapter_type,
disk_type)
LOG.debug("Creating virtual disk with spec: %s.", virtual_disk_spec)
disk_manager = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'CreateVirtualDisk_Task',
disk_manager,
name=vmdk_ds_file_path,
datacenter=dc_ref,
spec=virtual_disk_spec)
LOG.debug("Task: %s created for virtual disk creation.", task)
self._session.wait_for_task(task)
LOG.debug("Created virtual disk with spec: %s.", virtual_disk_spec)
def create_flat_extent_virtual_disk_descriptor(
self, dc_ref, path, size_in_kb, adapter_type, disk_type):
"""Create descriptor for a single flat extent virtual disk.
To create the descriptor, we create a virtual disk and delete its flat
extent.
:param dc_ref: reference to the datacenter
:param path: descriptor datastore file path
:param size_in_kb: size of the virtual disk in KB
:param adapter_type: virtual disk adapter type
:param disk_type: type of the virtual disk
"""
LOG.debug("Creating descriptor: %(path)s with size (KB): %(size)s, "
"adapter_type: %(adapter_type)s and disk_type: "
"%(disk_type)s.",
{'path': path.get_descriptor_ds_file_path(),
'size': size_in_kb,
'adapter_type': adapter_type,
'disk_type': disk_type
})
self.create_virtual_disk(dc_ref, path.get_descriptor_ds_file_path(),
size_in_kb, adapter_type, disk_type)
self.delete_file(path.get_flat_extent_ds_file_path(), dc_ref)
LOG.debug("Created descriptor: %s.",
path.get_descriptor_ds_file_path())
def copy_vmdk_file(self, dc_ref, src_vmdk_file_path, dest_vmdk_file_path):
"""Copy contents of the src vmdk file to dest vmdk file.
During the copy also coalesce snapshots of src if present.
dest_vmdk_file_path will be created if not already present.
:param dc_ref: Reference to datacenter containing src and dest
:param src_vmdk_file_path: Source vmdk file path
:param dest_vmdk_file_path: Destination vmdk file path
"""
LOG.debug('Copying disk data before snapshot of the VM')
diskMgr = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'CopyVirtualDisk_Task',
diskMgr,
sourceName=src_vmdk_file_path,
sourceDatacenter=dc_ref,
destName=dest_vmdk_file_path,
destDatacenter=dc_ref,
force=True)
LOG.debug("Initiated copying disk data via task: %s." % task)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully copied disk at: %(src)s to: %(dest)s.") %
{'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path})
def delete_vmdk_file(self, vmdk_file_path, dc_ref):
"""Delete given vmdk files.
:param vmdk_file_path: VMDK file path to be deleted
:param dc_ref: Reference to datacenter that contains this VMDK file
"""
LOG.debug("Deleting vmdk file: %s." % vmdk_file_path)
diskMgr = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'DeleteVirtualDisk_Task',
diskMgr,
name=vmdk_file_path,
datacenter=dc_ref)
LOG.debug("Initiated deleting vmdk file via task: %s." % task)
self._session.wait_for_task(task)
LOG.info(_LI("Deleted vmdk file: %s.") % vmdk_file_path)
def get_all_profiles(self):
"""Get all profiles defined in current VC.
:return: PbmProfile data objects from VC
"""
LOG.debug("Get all profiles defined in current VC.")
pbm = self._session.pbm
profile_manager = pbm.service_content.profileManager
res_type = pbm.client.factory.create('ns0:PbmProfileResourceType')
res_type.resourceType = 'STORAGE'
profiles = []
profileIds = self._session.invoke_api(pbm, 'PbmQueryProfile',
profile_manager,
resourceType=res_type)
LOG.debug("Got profile IDs: %s", profileIds)
if profileIds:
profiles = self._session.invoke_api(pbm, 'PbmRetrieveContent',
profile_manager,
profileIds=profileIds)
return profiles
def retrieve_profile_id(self, profile_name):
"""Get the profile uuid from current VC for given profile name.
:param profile_name: profile name as string
:return: profile id as string
"""
LOG.debug("Trying to retrieve profile id for %s", profile_name)
for profile in self.get_all_profiles():
if profile.name == profile_name:
profileId = profile.profileId
LOG.debug("Got profile id %(id)s for profile %(name)s.",
{'id': profileId, 'name': profile_name})
return profileId
def filter_matching_hubs(self, hubs, profile_id):
"""Filter and return only hubs that match given profile.
:param hubs: PbmPlacementHub morefs candidates
:param profile_id: profile id string
:return: subset of hubs that match given profile_id
"""
LOG.debug("Filtering hubs %(hubs)s that match profile "
"%(profile)s.", {'hubs': hubs, 'profile': profile_id})
pbm = self._session.pbm
placement_solver = pbm.service_content.placementSolver
filtered_hubs = self._session.invoke_api(pbm, 'PbmQueryMatchingHub',
placement_solver,
hubsToSearch=hubs,
profile=profile_id)
LOG.debug("Filtered hubs: %s", filtered_hubs)
return filtered_hubs
| {
"content_hash": "cc1d33f54fb30483416d0587f1ef154e",
"timestamp": "",
"source": "github",
"line_count": 1376,
"max_line_length": 79,
"avg_line_length": 44.13953488372093,
"alnum_prop": 0.5691846680716544,
"repo_name": "hguemar/cinder",
"id": "58b81285e6e428cfda16c22a46eaf1c1fdde92fc",
"size": "61368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/vmware/volumeops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3322"
},
{
"name": "Python",
"bytes": "10010542"
},
{
"name": "Shell",
"bytes": "9917"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional free-text ``gender`` field to the user model."""

    dependencies = [
        ("users", "0007_superadmin"),
    ]

    operations = [
        migrations.AddField(
            model_name="user",
            name="gender",
            field=models.CharField(blank=True, max_length=255),
        ),
    ]
| {
"content_hash": "aeaf27c65c9e7a576752a471e99574da",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 63,
"avg_line_length": 22.857142857142858,
"alnum_prop": 0.59375,
"repo_name": "FinnStutzenstein/OpenSlides",
"id": "028c666ac6d7e03e63eaf82e9aeac9e605061576",
"size": "369",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "server/openslides/users/migrations/0008_user_gender.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "124087"
},
{
"name": "Dockerfile",
"bytes": "853"
},
{
"name": "HTML",
"bytes": "449533"
},
{
"name": "JavaScript",
"bytes": "159617"
},
{
"name": "Python",
"bytes": "1398362"
},
{
"name": "Smarty",
"bytes": "7293"
},
{
"name": "TypeScript",
"bytes": "2473991"
}
],
"symlink_target": ""
} |
"""Test sensor of Nettigo Air Monitor integration."""
from datetime import timedelta
from unittest.mock import AsyncMock, Mock, patch
from nettigo_air_monitor import ApiError
from homeassistant.components.nam.const import DOMAIN
from homeassistant.components.sensor import (
ATTR_STATE_CLASS,
DOMAIN as SENSOR_DOMAIN,
SensorDeviceClass,
SensorStateClass,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_MILLION,
PERCENTAGE,
PRESSURE_HPA,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
STATE_UNAVAILABLE,
TEMP_CELSIUS,
)
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from . import INCOMPLETE_NAM_DATA, init_integration, nam_data
from tests.common import async_fire_time_changed
_UNSET = object()  # sentinel: "do not check this attribute" (None is a valid expectation)


def _assert_sensor(hass, registry, entity_id, unique_id, value,
                   device_class=_UNSET, state_class=_UNSET, unit=_UNSET,
                   icon=_UNSET):
    """Assert state, selected attributes and registry unique_id of one sensor.

    Only attributes passed explicitly are checked, mirroring the per-sensor
    assertion blocks this helper replaces.
    """
    state = hass.states.get(entity_id)
    assert state
    assert state.state == value
    if device_class is not _UNSET:
        assert state.attributes.get(ATTR_DEVICE_CLASS) == device_class
    if state_class is not _UNSET:
        # Identity check: state_class attributes are enum members (or None).
        assert state.attributes.get(ATTR_STATE_CLASS) is state_class
    if unit is not _UNSET:
        assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == unit
    if icon is not _UNSET:
        assert state.attributes.get(ATTR_ICON) == icon
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == unique_id


async def test_sensor(hass):
    """Test states and registry entries of the integration's sensors."""
    registry = er.async_get(hass)
    # Pre-register the entities which are disabled by default so that they
    # are created enabled during setup.
    registry.async_get_or_create(
        SENSOR_DOMAIN,
        DOMAIN,
        "aa:bb:cc:dd:ee:ff-signal",
        suggested_object_id="nettigo_air_monitor_signal_strength",
        disabled_by=None,
    )
    registry.async_get_or_create(
        SENSOR_DOMAIN,
        DOMAIN,
        "aa:bb:cc:dd:ee:ff-uptime",
        suggested_object_id="nettigo_air_monitor_uptime",
        disabled_by=None,
    )
    # Patch return value from utcnow, with offset to make sure the patch is
    # correct.
    now = utcnow() - timedelta(hours=1)
    with patch("homeassistant.components.nam.sensor.utcnow", return_value=now):
        await init_integration(hass)

    measurement = SensorStateClass.MEASUREMENT
    ug_m3 = CONCENTRATION_MICROGRAMS_PER_CUBIC_METER
    # (entity object id, unique-id suffix, expected state, attribute checks)
    checks = [
        ("bme280_humidity", "bme280_humidity", "45.7",
         {"device_class": SensorDeviceClass.HUMIDITY,
          "state_class": measurement, "unit": PERCENTAGE}),
        ("bme280_temperature", "bme280_temperature", "7.6",
         {"device_class": SensorDeviceClass.TEMPERATURE,
          "state_class": measurement, "unit": TEMP_CELSIUS}),
        ("bme280_pressure", "bme280_pressure", "1011",
         {"device_class": SensorDeviceClass.PRESSURE,
          "state_class": measurement, "unit": PRESSURE_HPA}),
        ("bmp180_temperature", "bmp180_temperature", "7.6",
         {"device_class": SensorDeviceClass.TEMPERATURE,
          "state_class": measurement, "unit": TEMP_CELSIUS}),
        ("bmp180_pressure", "bmp180_pressure", "1032",
         {"device_class": SensorDeviceClass.PRESSURE,
          "state_class": measurement, "unit": PRESSURE_HPA}),
        ("bmp280_temperature", "bmp280_temperature", "5.6",
         {"device_class": SensorDeviceClass.TEMPERATURE,
          "state_class": measurement, "unit": TEMP_CELSIUS}),
        ("bmp280_pressure", "bmp280_pressure", "1022",
         {"device_class": SensorDeviceClass.PRESSURE,
          "state_class": measurement, "unit": PRESSURE_HPA}),
        ("sht3x_humidity", "sht3x_humidity", "34.7",
         {"device_class": SensorDeviceClass.HUMIDITY,
          "state_class": measurement, "unit": PERCENTAGE}),
        ("sht3x_temperature", "sht3x_temperature", "6.3",
         {"device_class": SensorDeviceClass.TEMPERATURE,
          "state_class": measurement, "unit": TEMP_CELSIUS}),
        ("dht22_humidity", "dht22_humidity", "46.2",
         {"device_class": SensorDeviceClass.HUMIDITY,
          "state_class": measurement, "unit": PERCENTAGE}),
        ("dht22_temperature", "dht22_temperature", "6.3",
         {"device_class": SensorDeviceClass.TEMPERATURE,
          "state_class": measurement, "unit": TEMP_CELSIUS}),
        ("heca_humidity", "heca_humidity", "50.0",
         {"device_class": SensorDeviceClass.HUMIDITY,
          "state_class": measurement, "unit": PERCENTAGE}),
        ("heca_temperature", "heca_temperature", "8.0",
         {"device_class": SensorDeviceClass.TEMPERATURE,
          "state_class": measurement, "unit": TEMP_CELSIUS}),
        ("signal_strength", "signal", "-72",
         {"device_class": SensorDeviceClass.SIGNAL_STRENGTH,
          "state_class": measurement,
          "unit": SIGNAL_STRENGTH_DECIBELS_MILLIWATT}),
        ("sds011_particulate_matter_10", "sds011_p1", "19",
         {"device_class": SensorDeviceClass.PM10,
          "state_class": measurement, "unit": ug_m3}),
        ("sds011_caqi", "sds011_caqi", "19", {"icon": "mdi:air-filter"}),
        ("sds011_caqi_level", "sds011_caqi_level", "very low",
         {"device_class": "nam__caqi_level", "icon": "mdi:air-filter"}),
        ("sds011_particulate_matter_2_5", "sds011_p2", "11",
         {"device_class": SensorDeviceClass.PM25,
          "state_class": measurement, "unit": ug_m3}),
        ("sps30_particulate_matter_1_0", "sps30_p0", "31",
         {"device_class": SensorDeviceClass.PM1,
          "state_class": measurement, "unit": ug_m3}),
        ("sps30_caqi", "sps30_caqi", "54", {"icon": "mdi:air-filter"}),
        ("sps30_caqi_level", "sps30_caqi_level", "medium",
         {"device_class": "nam__caqi_level", "icon": "mdi:air-filter"}),
        ("sps30_particulate_matter_10", "sps30_p1", "21",
         {"device_class": SensorDeviceClass.PM10,
          "state_class": measurement, "unit": ug_m3}),
        ("sps30_particulate_matter_2_5", "sps30_p2", "34",
         {"device_class": SensorDeviceClass.PM25,
          "state_class": measurement, "unit": ug_m3}),
        ("sps30_particulate_matter_4_0", "sps30_p4", "25",
         {"state_class": measurement, "unit": ug_m3,
          "icon": "mdi:molecule"}),
        ("mh_z14a_carbon_dioxide", "mhz14a_carbon_dioxide", "865",
         {"device_class": SensorDeviceClass.CO2,
          "state_class": measurement,
          "unit": CONCENTRATION_PARTS_PER_MILLION}),
    ]
    for object_id, suffix, value, attrs in checks:
        _assert_sensor(hass, registry,
                       f"sensor.nettigo_air_monitor_{object_id}",
                       f"aa:bb:cc:dd:ee:ff-{suffix}", value, **attrs)

    # The uptime sensor's state is derived from the patched utcnow value.
    _assert_sensor(
        hass,
        registry,
        "sensor.nettigo_air_monitor_uptime",
        "aa:bb:cc:dd:ee:ff-uptime",
        (now - timedelta(seconds=456987)).replace(microsecond=0).isoformat(),
        device_class=SensorDeviceClass.TIMESTAMP,
        state_class=None,
    )
async def test_sensor_disabled(hass):
    """Test sensor disabled by default."""
    await init_integration(hass)
    registry = er.async_get(hass)

    entry = registry.async_get("sensor.nettigo_air_monitor_signal_strength")
    assert entry
    assert entry.unique_id == "aa:bb:cc:dd:ee:ff-signal"
    assert entry.disabled
    assert entry.disabled_by is er.RegistryEntryDisabler.INTEGRATION

    # Enabling the entity must yield a new, enabled registry entry.
    enabled_entry = registry.async_update_entity(entry.entity_id,
                                                 disabled_by=None)
    assert enabled_entry != entry
    assert enabled_entry.disabled is False
async def test_incompleta_data_after_device_restart(hass):
    """Test states of the air_quality after device restart."""
    # NOTE(review): "incompleta" in the function name is a typo for
    # "incomplete"; renaming is left for a follow-up to keep this
    # change behavior-neutral.
    await init_integration(hass)

    state = hass.states.get("sensor.nettigo_air_monitor_heca_temperature")
    assert state
    assert state.state == "8.0"
    assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS

    # Simulate the device returning an incomplete payload on the next poll,
    # as happens right after a device restart.
    future = utcnow() + timedelta(minutes=6)
    response = Mock(json=AsyncMock(return_value=INCOMPLETE_NAM_DATA))
    with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
        "homeassistant.components.nam.NettigoAirMonitor._async_http_request",
        return_value=response,
    ):
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()

    state = hass.states.get("sensor.nettigo_air_monitor_heca_temperature")
    assert state
    assert state.state == STATE_UNAVAILABLE
async def test_availability(hass):
    """Ensure that we mark the entities unavailable correctly when device causes an error."""
    await init_integration(hass)

    # Baseline: sensor is available with a known value right after setup.
    state = hass.states.get("sensor.nettigo_air_monitor_bme280_temperature")
    assert state
    assert state.state != STATE_UNAVAILABLE
    assert state.state == "7.6"

    # First poll fails with an API error -> entity becomes unavailable.
    future = utcnow() + timedelta(minutes=6)
    with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
        "homeassistant.components.nam.NettigoAirMonitor._async_http_request",
        side_effect=ApiError("API Error"),
    ):
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()

        state = hass.states.get("sensor.nettigo_air_monitor_bme280_temperature")
        assert state
        assert state.state == STATE_UNAVAILABLE

    # Next poll succeeds again (nam_data is the module-level fixture payload)
    # -> entity recovers with its previous value.
    future = utcnow() + timedelta(minutes=12)
    update_response = Mock(json=AsyncMock(return_value=nam_data))
    with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
        "homeassistant.components.nam.NettigoAirMonitor._async_http_request",
        return_value=update_response,
    ):
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()

        state = hass.states.get("sensor.nettigo_air_monitor_bme280_temperature")
        assert state
        assert state.state != STATE_UNAVAILABLE
        assert state.state == "7.6"
async def test_manual_update_entity(hass):
    """Test manual update entity via service homeassistant/update_entity."""
    await init_integration(hass)

    # The update_entity service lives in the core "homeassistant" integration.
    await async_setup_component(hass, "homeassistant", {})

    update_response = Mock(json=AsyncMock(return_value=nam_data))
    with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
        "homeassistant.components.nam.NettigoAirMonitor._async_http_request",
        return_value=update_response,
    ) as mock_get_data:
        await hass.services.async_call(
            "homeassistant",
            "update_entity",
            {ATTR_ENTITY_ID: ["sensor.nettigo_air_monitor_bme280_temperature"]},
            blocking=True,
        )

    # Exactly one device fetch must have been triggered by the service call.
    assert mock_get_data.call_count == 1
async def test_unique_id_migration(hass):
    """Test that legacy unique_ids are rewritten to the dht22-prefixed scheme."""
    registry = er.async_get(hass)

    # Pre-create registry entries with the legacy unique_id format.
    for legacy_suffix, object_id in (
        ("temperature", "nettigo_air_monitor_dht22_temperature"),
        ("humidity", "nettigo_air_monitor_dht22_humidity"),
    ):
        registry.async_get_or_create(
            SENSOR_DOMAIN,
            DOMAIN,
            f"aa:bb:cc:dd:ee:ff-{legacy_suffix}",
            suggested_object_id=object_id,
            disabled_by=None,
        )

    await init_integration(hass)

    # Setup must have migrated both unique_ids to the new format.
    for sensor_kind in ("temperature", "humidity"):
        entry = registry.async_get(
            f"sensor.nettigo_air_monitor_dht22_{sensor_kind}"
        )
        assert entry
        assert entry.unique_id == f"aa:bb:cc:dd:ee:ff-dht22_{sensor_kind}"
| {
"content_hash": "72d69c7587a7c92c0941f541710dfd6f",
"timestamp": "",
"source": "github",
"line_count": 506,
"max_line_length": 93,
"avg_line_length": 39.46837944664031,
"alnum_prop": 0.711882229232387,
"repo_name": "w1ll1am23/home-assistant",
"id": "bee4c515cd0b3ccf5e95973de2369fee11836c34",
"size": "19971",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/nam/test_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
import itertools
import logging
import unittest
import urllib
import environment
import keyspace_util
import utils
from vtdb import dbexceptions
from vtdb import vtgate_cursor
from vtdb import vtgate_client
# Tablet handles populated by setUpModule; module-global so individual tests
# can issue direct MySQL queries (mquery) against specific shards.
shard_0_master = None
shard_1_master = None
lookup_master = None
# keyspace_util.TestEnv that owns the launched tablets; torn down in
# tearDownModule.
keyspace_env = None
# --- DDL for the sharded 'user' keyspace -----------------------------------
# vt_user: basic sharded table with an auto-increment backed by vt_user_seq.
create_vt_user = '''create table vt_user (
id bigint,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
# vt_user2: used to exercise the non-unique name_user2_map lookup vindex.
create_vt_user2 = '''create table vt_user2 (
id bigint,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
# vt_user_extra: sharded by user_id through the unowned user_index vindex.
create_vt_user_extra = '''create table vt_user_extra (
user_id bigint,
email varchar(64),
primary key (user_id)
) Engine=InnoDB'''
# vt_music: owner of the music_user_map lookup vindex.
create_vt_music = '''create table vt_music (
user_id bigint,
id bigint,
song varchar(64),
primary key (user_id, id)
) Engine=InnoDB'''
# vt_music_extra: resolves its shard via the unowned music_user_map vindex.
create_vt_music_extra = '''create table vt_music_extra (
music_id bigint,
user_id bigint,
artist varchar(64),
primary key (music_id)
) Engine=InnoDB'''
# join_user / join_user_extra / join_name_info: tables for cross-shard joins.
create_join_user = '''create table join_user (
id bigint,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
create_join_user_extra = '''create table join_user_extra (
user_id bigint,
email varchar(64),
primary key (user_id)
) Engine=InnoDB'''
create_join_name_info = '''create table join_name_info (
name varchar(128),
info varchar(128),
primary key (name)
) Engine=InnoDB'''
# twopc_user: sharded half of the two-phase-commit test pair.
create_twopc_user = '''create table twopc_user (
user_id bigint,
val varchar(128),
primary key (user_id)
) Engine=InnoDB'''
# --- DDL for the unsharded 'lookup' keyspace -------------------------------
# Sequence tables (comment 'vitess_sequence') plus their seed rows.
create_vt_user_seq = '''create table vt_user_seq (
id int,
next_id bigint,
cache bigint,
primary key(id)
) comment 'vitess_sequence' Engine=InnoDB'''
init_vt_user_seq = 'insert into vt_user_seq values(0, 1, 2)'
create_vt_music_seq = '''create table vt_music_seq (
id int,
next_id bigint,
cache bigint,
primary key(id)
) comment 'vitess_sequence' Engine=InnoDB'''
init_vt_music_seq = 'insert into vt_music_seq values(0, 1, 2)'
create_vt_main_seq = '''create table vt_main_seq (
id int,
next_id bigint,
cache bigint,
primary key(id)
) comment 'vitess_sequence' Engine=InnoDB'''
init_vt_main_seq = 'insert into vt_main_seq values(0, 1, 2)'
# Backing tables for the lookup vindexes declared in the vschema below.
create_name_user2_map = '''create table name_user2_map (
name varchar(64),
user2_id bigint,
primary key (name, user2_id)
) Engine=InnoDB'''
create_music_user_map = '''create table music_user_map (
music_id bigint,
user_id bigint,
primary key (music_id)
) Engine=InnoDB'''
# main: plain unsharded table with an auto-increment from vt_main_seq.
create_main = '''create table main (
id bigint,
val varchar(128),
primary key(id)
) Engine=InnoDB'''
# twopc_lookup: unsharded half of the two-phase-commit test pair.
create_twopc_lookup = '''create table twopc_lookup (
id bigint,
val varchar(128),
primary key (id)
) Engine=InnoDB'''
vschema = {
'user': '''{
"sharded": true,
"vindexes": {
"user_index": {
"type": "hash"
},
"unicode_hash": {
"type": "unicode_loose_md5"
},
"name_user2_map": {
"type": "lookup_hash",
"params": {
"table": "name_user2_map",
"from": "name",
"to": "user2_id"
},
"owner": "vt_user2"
},
"music_user_map": {
"type": "lookup_hash_unique",
"params": {
"table": "music_user_map",
"from": "music_id",
"to": "user_id"
},
"owner": "vt_music"
}
},
"tables": {
"vt_user": {
"column_vindexes": [
{
"column": "id",
"name": "user_index"
}
],
"auto_increment": {
"column": "id",
"sequence": "vt_user_seq"
}
},
"vt_user2": {
"column_vindexes": [
{
"column": "id",
"name": "user_index"
},
{
"column": "name",
"name": "name_user2_map"
}
]
},
"vt_user_extra": {
"column_vindexes": [
{
"column": "user_id",
"name": "user_index"
}
]
},
"vt_music": {
"column_vindexes": [
{
"column": "user_id",
"name": "user_index"
},
{
"column": "id",
"name": "music_user_map"
}
],
"auto_increment": {
"column": "id",
"sequence": "vt_music_seq"
}
},
"vt_music_extra": {
"column_vindexes": [
{
"column": "music_id",
"name": "music_user_map"
},
{
"column": "user_id",
"name": "user_index"
}
]
},
"join_user": {
"column_vindexes": [
{
"column": "id",
"name": "user_index"
}
]
},
"join_user_extra": {
"column_vindexes": [
{
"column": "user_id",
"name": "user_index"
}
]
},
"join_name_info": {
"column_vindexes": [
{
"column": "name",
"name": "unicode_hash"
}
]
},
"twopc_user": {
"column_vindexes": [
{
"column": "user_id",
"name": "user_index"
}
]
}
}
}''',
'lookup': '''{
"sharded": false,
"tables": {
"vt_user_seq": {
"type": "sequence"
},
"vt_music_seq": {
"type": "sequence"
},
"vt_main_seq": {
"type": "sequence"
},
"music_user_map": {},
"name_user2_map": {},
"main": {
"auto_increment": {
"column": "id",
"sequence": "vt_main_seq"
}
},
"twopc_lookup": {}
}
}''',
}
def setUpModule():
  """Launch topo server, tablets for both keyspaces, and a 2pc-enabled vtgate.

  Populates the module globals (keyspace_env, shard masters, lookup master)
  used by all tests. Any failure triggers a full teardown before re-raising.
  """
  global keyspace_env
  global shard_0_master
  global shard_1_master
  global lookup_master
  logging.debug('in setUpModule')
  try:
    environment.topo_server().setup()
    logging.debug('Setting up tablets')
    keyspace_env = keyspace_util.TestEnv()
    # Sharded 'user' keyspace with two shards; DDLs applied to every shard.
    keyspace_env.launch(
        'user',
        shards=['-80', '80-'],
        ddls=[
            create_vt_user,
            create_vt_user2,
            create_vt_user_extra,
            create_vt_music,
            create_vt_music_extra,
            create_join_user,
            create_join_user_extra,
            create_join_name_info,
            create_twopc_user,
        ],
        rdonly_count=1,  # to test SplitQuery
        twopc_coordinator_address='localhost:15028',  # enables 2pc
    )
    # Unsharded 'lookup' keyspace hosting sequences and lookup maps.
    keyspace_env.launch(
        'lookup',
        ddls=[
            create_vt_user_seq,
            create_vt_music_seq,
            create_vt_main_seq,
            create_music_user_map,
            create_name_user2_map,
            create_main,
            create_twopc_lookup,
        ],
        twopc_coordinator_address='localhost:15028',  # enables 2pc
    )
    shard_0_master = keyspace_env.tablet_map['user.-80.master']
    shard_1_master = keyspace_env.tablet_map['user.80-.master']
    lookup_master = keyspace_env.tablet_map['lookup.0.master']
    # VSchema must be in place before vtgate starts routing.
    utils.apply_vschema(vschema)
    utils.VtGate().start(
        tablets=[shard_0_master, shard_1_master, lookup_master],
        extra_args=['-transaction_mode', 'twopc'])
    utils.vtgate.wait_for_endpoints('user.-80.master', 1)
    utils.vtgate.wait_for_endpoints('user.80-.master', 1)
    utils.vtgate.wait_for_endpoints('lookup.0.master', 1)
  # NOTE(review): bare except is intentional here so that *any* setup failure
  # (including KeyboardInterrupt) tears down before re-raising.
  except:
    tearDownModule()
    raise
def tearDownModule():
  """Tear down tablets, the topo server, and temporary files (unless skipped)."""
  logging.debug('in tearDownModule')
  utils.required_teardown()
  if not utils.options.skip_teardown:
    logging.debug('Tearing down the servers and setup')
    if keyspace_env:
      keyspace_env.teardown()
    environment.topo_server().teardown()
    utils.kill_sub_processes()
    utils.remove_tmp_files()
def get_connection(timeout=10.0):
  """Open a vtgate client connection, logging any failure before re-raising."""
  proto, addr = utils.vtgate.rpc_endpoint(python=True)
  try:
    conn = vtgate_client.connect(proto, addr, timeout)
  except Exception:
    logging.exception('Connection to vtgate (timeout=%s) failed.', timeout)
    raise
  return conn
class TestVTGateFunctions(unittest.TestCase):
  """End-to-end tests of vtgate routing: vindexes, sequences, joins, 2pc."""

  # MySQL type codes as they appear in cursor description tuples.
  int_type = 265
  string_type = 6165
  def setUp(self):
    # Default tablet for tests that need a specific master; points at the
    # '80-' shard master launched in setUpModule.
    self.master_tablet = shard_1_master
def execute_on_master(self, vtgate_conn, sql, bind_vars):
return vtgate_conn._execute(
sql, bind_vars, tablet_type='master', keyspace_name=None)
  def test_health(self):
    """The vtgate /debug/health endpoint must report 'ok'."""
    f = urllib.urlopen('http://localhost:%d/debug/health' % utils.vtgate.port)
    response = f.read()
    f.close()
    self.assertEqual(response, 'ok')
  def test_srv_vschema(self):
    """Makes sure the SrvVSchema object is properly built."""
    v = utils.run_vtctl_json(['GetSrvVSchema', 'test_nj'])
    # Both keyspaces applied in setUpModule must appear in the served vschema.
    self.assertEqual(len(v['keyspaces']), 2, 'wrong vschema: %s' % str(v))
    self.assertIn('user', v['keyspaces'])
    self.assertIn('lookup', v['keyspaces'])
  def test_user(self):
    """CRUD against vt_user: hash vindex, sequence auto-inc, scatter, stream."""
    count = 4
    vtgate_conn = get_connection()
    cursor = vtgate_conn.cursor(
        tablet_type='master', keyspace=None, writable=True)
    # Initialize the sequence.
    # TODO(sougou): Use DDL when ready.
    cursor.begin()
    cursor.execute(init_vt_user_seq, {})
    cursor.commit()
    # Test insert: ids come from vt_user_seq, so lastrowid must equal i.
    for x in xrange(count):
      i = x+1
      cursor.begin()
      cursor.execute(
          'insert into vt_user (name) values (:name)',
          {'name': 'test %s' % i})
      self.assertEqual(
          (cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
           cursor.description),
          ([], 1L, i, []))
      cursor.commit()
    # Test select equal
    for x in xrange(count):
      i = x+1
      cursor.execute('select id, name from vt_user where id = :id', {'id': i})
      self.assertEqual(
          (cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
           cursor.description),
          ([(i, 'test %s' % i)], 1L, 0,
           [('id', self.int_type), ('name', self.string_type)]))
    # Test case sensitivity: column/vindex matching must be case-insensitive.
    cursor.execute('select Id, Name from vt_user where iD = :id', {'id': 1})
    self.assertEqual(
        (cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
         cursor.description),
        ([(1, 'test 1')], 1L, 0,
         [('Id', self.int_type), ('Name', self.string_type)]))
    # Test insert with no auto-inc
    vtgate_conn.begin()
    result = self.execute_on_master(
        vtgate_conn,
        'insert into vt_user (id, name) values (:id, :name)',
        {'id': 6, 'name': 'test 6'})
    self.assertEqual(result, ([], 1L, 0L, []))
    vtgate_conn.commit()
    # Verify values in db: rows hashed to the expected shards.
    result = shard_0_master.mquery('vt_user', 'select id, name from vt_user')
    self.assertEqual(result, ((1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3')))
    result = shard_1_master.mquery('vt_user', 'select id, name from vt_user')
    self.assertEqual(result, ((4L, 'test 4'), (6L, 'test 6')))
    # Test MultiValueInsert with no auto-inc
    vtgate_conn.begin()
    result = self.execute_on_master(
        vtgate_conn,
        'insert into vt_user (id, name) values (:id0, :name0), (:id1, :name1)',
        {'id0': 5, 'name0': 'test 5','id1': 7, 'name1': 'test 7'})
    self.assertEqual(result, ([], 2L, 0L, []))
    vtgate_conn.commit()
    # Verify values in db
    result = shard_0_master.mquery('vt_user', 'select id, name from vt_user')
    self.assertEqual(result, ((1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (5L, 'test 5')))
    result = shard_1_master.mquery('vt_user', 'select id, name from vt_user')
    self.assertEqual(result, ((4L, 'test 4'), (6L, 'test 6'), (7L, 'test 7')))
    # Test IN clause (rows may arrive in any order, hence the sort).
    result = self.execute_on_master(
        vtgate_conn,
        'select id, name from vt_user where id in (:a, :b)', {'a': 1, 'b': 4})
    result[0].sort()
    self.assertEqual(
        result,
        ([(1L, 'test 1'), (4L, 'test 4')], 2L, 0,
         [('id', self.int_type), ('name', self.string_type)]))
    result = self.execute_on_master(
        vtgate_conn,
        'select id, name from vt_user where id in (:a, :b)', {'a': 1, 'b': 2})
    result[0].sort()
    self.assertEqual(
        result,
        ([(1L, 'test 1'), (2L, 'test 2')], 2L, 0,
         [('id', self.int_type), ('name', self.string_type)]))
    # Test scatter
    result = vtgate_conn._execute(
        'select id, name from vt_user',
        {}, tablet_type='master', keyspace_name=None)
    result[0].sort()
    self.assertEqual(
        result,
        ([(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'),
          (5L, 'test 5'), (6L, 'test 6'), (7L, 'test 7')], 7L, 0,
         [('id', self.int_type), ('name', self.string_type)]))
    # Test stream over scatter: two interleaved streaming cursors.
    stream_cursor_1 = vtgate_conn.cursor(
        tablet_type='master', keyspace=None,
        cursorclass=vtgate_cursor.StreamVTGateCursor)
    stream_cursor_1.execute('select id, name from vt_user', {})
    stream_cursor_2 = vtgate_conn.cursor(
        tablet_type='master', keyspace=None,
        cursorclass=vtgate_cursor.StreamVTGateCursor)
    stream_cursor_2.execute('select id, name from vt_user', {})
    self.assertEqual(stream_cursor_1.description,
                     [('id', self.int_type), ('name', self.string_type)])
    self.assertEqual(stream_cursor_2.description,
                     [('id', self.int_type), ('name', self.string_type)])
    rows_1 = []
    rows_2 = []
    for row_1, row_2 in itertools.izip(stream_cursor_1, stream_cursor_2):
      rows_1.append(row_1)
      rows_2.append(row_2)
    self.assertEqual(
        sorted(rows_1),
        [(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'),
         (5L, 'test 5'),(6L, 'test 6'),(7L, 'test 7')])
    self.assertEqual(
        sorted(rows_2),
        [(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'),
         (5L, 'test 5'),(6L, 'test 6'),(7L, 'test 7')])
    # Test updates
    vtgate_conn.begin()
    result = self.execute_on_master(
        vtgate_conn,
        'update vt_user set name = :name where id = :id',
        {'id': 1, 'name': 'test one'})
    self.assertEqual(result, ([], 1L, 0L, []))
    result = self.execute_on_master(
        vtgate_conn,
        'update vt_user set name = :name where id = :id',
        {'id': 4, 'name': 'test four'})
    self.assertEqual(result, ([], 1L, 0L, []))
    vtgate_conn.commit()
    result = shard_0_master.mquery('vt_user', 'select id, name from vt_user')
    self.assertEqual(
        result, ((1L, 'test one'), (2L, 'test 2'), (3L, 'test 3'), (5L, 'test 5')))
    result = shard_1_master.mquery('vt_user', 'select id, name from vt_user')
    self.assertEqual(
        result, ((4L, 'test four'), (6L, 'test 6'), (7L, 'test 7')))
    # Test deletes
    vtgate_conn.begin()
    result = self.execute_on_master(
        vtgate_conn,
        'delete from vt_user where id = :id',
        {'id': 1})
    self.assertEqual(result, ([], 1L, 0L, []))
    result = self.execute_on_master(
        vtgate_conn,
        'delete from vt_user where id = :id',
        {'id': 4})
    self.assertEqual(result, ([], 1L, 0L, []))
    vtgate_conn.commit()
    result = shard_0_master.mquery('vt_user', 'select id, name from vt_user')
    self.assertEqual(result, ((2L, 'test 2'), (3L, 'test 3'), (5L, 'test 5')))
    result = shard_1_master.mquery('vt_user', 'select id, name from vt_user')
    self.assertEqual(result, ((6L, 'test 6'), (7L, 'test 7')))
    # test passing in the keyspace in the cursor: vt_user only exists in the
    # 'user' keyspace, so targeting 'lookup' must fail.
    lcursor = vtgate_conn.cursor(
        tablet_type='master', keyspace='lookup', writable=True)
    with self.assertRaisesRegexp(
        dbexceptions.DatabaseError, '.*table vt_user not found in schema.*'):
      lcursor.execute('select id, name from vt_user', {})
  def test_user2(self):
    """Exercise vt_user2 and its non-unique name_user2_map lookup vindex."""
    # user2 is for testing non-unique vindexes
    vtgate_conn = get_connection()
    vtgate_conn.begin()
    result = self.execute_on_master(
        vtgate_conn,
        'insert into vt_user2 (id, name) values (:id, :name)',
        {'id': 1, 'name': 'name1'})
    self.assertEqual(result, ([], 1L, 0L, []))
    result = self.execute_on_master(
        vtgate_conn,
        'insert into vt_user2 (id, name) values (:id, :name)',
        {'id': 7, 'name': 'name1'})
    self.assertEqual(result, ([], 1L, 0L, []))
    result = self.execute_on_master(
        vtgate_conn,
        'insert into vt_user2 (id, name) values (:id0, :name0),(:id1, :name1)',
        {'id0': 2, 'name0': 'name2','id1': 3, 'name1': 'name2'})
    self.assertEqual(result, ([], 2L, 0L, []))
    vtgate_conn.commit()
    # Rows land on shards by hash of id; the owned lookup table must have
    # been populated with one row per (name, id) pair.
    result = shard_0_master.mquery('vt_user', 'select id, name from vt_user2')
    self.assertEqual(result, ((1L, 'name1'), (2L, 'name2'), (3L, 'name2')))
    result = shard_1_master.mquery('vt_user', 'select id, name from vt_user2')
    self.assertEqual(result, ((7L, 'name1'),))
    result = lookup_master.mquery(
        'vt_lookup', 'select name, user2_id from name_user2_map')
    self.assertEqual(result, (('name1', 1L), ('name1', 7L), ('name2', 2L), ('name2', 3L)))
    # Test select by id
    result = self.execute_on_master(
        vtgate_conn,
        'select id, name from vt_user2 where id = :id', {'id': 1})
    self.assertEqual(
        result, ([(1, 'name1')], 1L, 0,
                 [('id', self.int_type), ('name', self.string_type)]))
    # Test select by lookup: one name can resolve to multiple rows.
    result = self.execute_on_master(
        vtgate_conn,
        'select id, name from vt_user2 where name = :name', {'name': 'name1'})
    result[0].sort()
    self.assertEqual(
        result,
        ([(1, 'name1'), (7, 'name1')], 2L, 0,
         [('id', self.int_type), ('name', self.string_type)]))
    # Test IN clause using non-unique vindex
    result = self.execute_on_master(
        vtgate_conn,
        "select id, name from vt_user2 where name in ('name1', 'name2')", {})
    result[0].sort()
    self.assertEqual(
        result,
        ([(1, 'name1'), (2, 'name2'), (3, 'name2'), (7, 'name1')], 4L, 0,
         [('id', self.int_type), ('name', self.string_type)]))
    result = self.execute_on_master(
        vtgate_conn,
        "select id, name from vt_user2 where name in ('name1')", {})
    result[0].sort()
    self.assertEqual(
        result,
        ([(1, 'name1'), (7, 'name1')], 2L, 0,
         [('id', self.int_type), ('name', self.string_type)]))
    # Test delete: owned lookup rows must be removed alongside the data rows.
    vtgate_conn.begin()
    result = self.execute_on_master(
        vtgate_conn,
        'delete from vt_user2 where id = :id',
        {'id': 1})
    self.assertEqual(result, ([], 1L, 0L, []))
    result = self.execute_on_master(
        vtgate_conn,
        'delete from vt_user2 where id = :id',
        {'id': 2})
    self.assertEqual(result, ([], 1L, 0L, []))
    vtgate_conn.commit()
    result = shard_0_master.mquery('vt_user', 'select id, name from vt_user2')
    self.assertEqual(result, ((3L, 'name2'),))
    result = shard_1_master.mquery('vt_user', 'select id, name from vt_user2')
    self.assertEqual(result, ((7L, 'name1'),))
    result = lookup_master.mquery(
        'vt_lookup', 'select name, user2_id from name_user2_map')
    self.assertEqual(result, (('name1', 7L), ('name2', 3L)))
    # Cleanup so later tests start from an empty vt_user2.
    vtgate_conn.begin()
    self.execute_on_master(
        vtgate_conn,
        'delete from vt_user2 where id = :id',
        {'id': 7})
    vtgate_conn.commit()
  def test_user_extra(self):
    """Exercise vt_user_extra, sharded via the unowned functional vindex."""
    # user_extra is for testing unowned functional vindex
    count = 4
    vtgate_conn = get_connection()
    for x in xrange(count):
      i = x+1
      vtgate_conn.begin()
      result = self.execute_on_master(
          vtgate_conn,
          'insert into vt_user_extra (user_id, email) '
          'values (:user_id, :email)',
          {'user_id': i, 'email': 'test %s' % i})
      self.assertEqual(result, ([], 1L, 0L, []))
      vtgate_conn.commit()
    for x in xrange(count):
      i = x+1
      result = self.execute_on_master(
          vtgate_conn,
          'select user_id, email from vt_user_extra where user_id = :user_id',
          {'user_id': i})
      self.assertEqual(
          result,
          ([(i, 'test %s' % i)], 1L, 0,
           [('user_id', self.int_type), ('email', self.string_type)]))
    # Verify shard placement directly on the underlying MySQL instances.
    result = shard_0_master.mquery(
        'vt_user', 'select user_id, email from vt_user_extra')
    self.assertEqual(result, ((1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3')))
    result = shard_1_master.mquery(
        'vt_user', 'select user_id, email from vt_user_extra')
    self.assertEqual(result, ((4L, 'test 4'),))
    # Updates routed by user_id.
    vtgate_conn.begin()
    result = self.execute_on_master(
        vtgate_conn,
        'update vt_user_extra set email = :email where user_id = :user_id',
        {'user_id': 1, 'email': 'test one'})
    self.assertEqual(result, ([], 1L, 0L, []))
    result = self.execute_on_master(
        vtgate_conn,
        'update vt_user_extra set email = :email where user_id = :user_id',
        {'user_id': 4, 'email': 'test four'})
    self.assertEqual(result, ([], 1L, 0L, []))
    vtgate_conn.commit()
    result = shard_0_master.mquery(
        'vt_user', 'select user_id, email from vt_user_extra')
    self.assertEqual(result, ((1L, 'test one'), (2L, 'test 2'), (3L, 'test 3')))
    result = shard_1_master.mquery(
        'vt_user', 'select user_id, email from vt_user_extra')
    self.assertEqual(result, ((4L, 'test four'),))
    # Deletes routed by user_id.
    vtgate_conn.begin()
    result = self.execute_on_master(
        vtgate_conn,
        'delete from vt_user_extra where user_id = :user_id',
        {'user_id': 1})
    self.assertEqual(result, ([], 1L, 0L, []))
    result = self.execute_on_master(
        vtgate_conn,
        'delete from vt_user_extra where user_id = :user_id',
        {'user_id': 4})
    self.assertEqual(result, ([], 1L, 0L, []))
    vtgate_conn.commit()
    result = shard_0_master.mquery(
        'vt_user', 'select user_id, email from vt_user_extra')
    self.assertEqual(result, ((2L, 'test 2'), (3L, 'test 3')))
    result = shard_1_master.mquery(
        'vt_user', 'select user_id, email from vt_user_extra')
    self.assertEqual(result, ())
    # Cleanup of the remaining rows for test isolation.
    vtgate_conn.begin()
    self.execute_on_master(
        vtgate_conn,
        'delete from vt_user_extra where user_id = :user_id',
        {'user_id': 2})
    self.execute_on_master(
        vtgate_conn,
        'delete from vt_user_extra where user_id = :user_id',
        {'user_id': 3})
    vtgate_conn.commit()
  def test_music(self):
    """Exercise vt_music: owned lookup vindex plus sequence auto-increment."""
    # music is for testing owned lookup index
    vtgate_conn = get_connection()
    # Initialize the sequence.
    # TODO(sougou): Use DDL when ready.
    vtgate_conn.begin()
    self.execute_on_master(vtgate_conn, init_vt_music_seq, {})
    vtgate_conn.commit()
    count = 4
    # Inserts without an explicit id draw from vt_music_seq (lastrowid == i).
    for x in xrange(count):
      i = x+1
      vtgate_conn.begin()
      result = self.execute_on_master(
          vtgate_conn,
          'insert into vt_music (user_id, song) values (:user_id, :song)',
          {'user_id': i, 'song': 'test %s' % i})
      self.assertEqual(result, ([], 1L, i, []))
      vtgate_conn.commit()
    # Select by id goes through the music_user_map lookup vindex.
    for x in xrange(count):
      i = x+1
      result = self.execute_on_master(
          vtgate_conn,
          'select user_id, id, song from vt_music where id = :id', {'id': i})
      self.assertEqual(
          result,
          ([(i, i, 'test %s' % i)], 1, 0,
           [('user_id', self.int_type),
            ('id', self.int_type),
            ('song', self.string_type)]))
    # Multi-value insert with explicit ids (no auto-inc).
    vtgate_conn.begin()
    result = self.execute_on_master(
        vtgate_conn,
        'insert into vt_music (user_id, id, song) '
        'values (:user_id0, :id0, :song0), (:user_id1, :id1, :song1)',
        {'user_id0': 5, 'id0': 6, 'song0': 'test 6','user_id1': 7, 'id1': 7, 'song1': 'test 7'})
    self.assertEqual(result, ([], 2L, 0L, []))
    vtgate_conn.commit()
    result = shard_0_master.mquery(
        'vt_user', 'select user_id, id, song from vt_music')
    self.assertEqual(
        result,
        ((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (3L, 3L, 'test 3'),
         (5L, 6L, 'test 6')))
    result = shard_1_master.mquery(
        'vt_user', 'select user_id, id, song from vt_music')
    self.assertEqual(
        result, ((4L, 4L, 'test 4'), (7L, 7L, 'test 7')))
    # The owned lookup table must mirror every (id, user_id) pair.
    result = lookup_master.mquery(
        'vt_lookup', 'select music_id, user_id from music_user_map')
    self.assertEqual(
        result,
        ((1L, 1L), (2L, 2L), (3L, 3L), (4L, 4L), (6L, 5L), (7L, 7L)))
    # Updates addressed by the lookup column id.
    vtgate_conn.begin()
    result = self.execute_on_master(
        vtgate_conn,
        'update vt_music set song = :song where id = :id',
        {'id': 6, 'song': 'test six'})
    self.assertEqual(result, ([], 1L, 0L, []))
    result = self.execute_on_master(
        vtgate_conn,
        'update vt_music set song = :song where id = :id',
        {'id': 4, 'song': 'test four'})
    self.assertEqual(result, ([], 1L, 0L, []))
    vtgate_conn.commit()
    result = shard_0_master.mquery(
        'vt_user', 'select user_id, id, song from vt_music')
    self.assertEqual(
        result, ((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (3L, 3L, 'test 3'),
                 (5L, 6L, 'test six')))
    result = shard_1_master.mquery(
        'vt_user', 'select user_id, id, song from vt_music')
    self.assertEqual(
        result, ((4L, 4L, 'test four'), (7L, 7L, 'test 7')))
    # Delete by lookup column (id) and by primary vindex (user_id).
    vtgate_conn.begin()
    result = self.execute_on_master(
        vtgate_conn,
        'delete from vt_music where id = :id',
        {'id': 3})
    self.assertEqual(result, ([], 1L, 0L, []))
    result = self.execute_on_master(
        vtgate_conn,
        'delete from vt_music where user_id = :user_id',
        {'user_id': 4})
    self.assertEqual(result, ([], 1L, 0L, []))
    vtgate_conn.commit()
    result = shard_0_master.mquery(
        'vt_user', 'select user_id, id, song from vt_music')
    self.assertEqual(
        result, ((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (5L, 6L, 'test six')))
    result = shard_1_master.mquery(
        'vt_user', 'select user_id, id, song from vt_music')
    self.assertEqual(result, ((7L, 7L, 'test 7'),))
    # Owned lookup rows for the deleted ids must be gone too.
    result = lookup_master.mquery(
        'vt_lookup', 'select music_id, user_id from music_user_map')
    self.assertEqual(result, ((1L, 1L), (2L, 2L), (6L, 5L), (7L, 7L)))
  def test_music_extra(self):
    """Exercise vt_music_extra, which resolves shards via an unowned lookup."""
    # music_extra is for testing unowned lookup index
    vtgate_conn = get_connection()
    vtgate_conn.begin()
    result = self.execute_on_master(
        vtgate_conn,
        'insert into vt_music_extra (music_id, user_id, artist) '
        'values (:music_id, :user_id, :artist)',
        {'music_id': 1, 'user_id': 1, 'artist': 'test 1'})
    self.assertEqual(result, ([], 1L, 0L, []))
    # user_id omitted: it is backfilled from the music_user_map lookup
    # populated by test_music's inserts.
    result = self.execute_on_master(
        vtgate_conn,
        'insert into vt_music_extra (music_id, artist) '
        'values (:music_id0, :artist0), (:music_id1, :artist1)',
        {'music_id0': 6, 'artist0': 'test 6', 'music_id1': 7, 'artist1': 'test 7'})
    self.assertEqual(result, ([], 2L, 0L, []))
    vtgate_conn.commit()
    result = self.execute_on_master(
        vtgate_conn,
        'select music_id, user_id, artist '
        'from vt_music_extra where music_id = :music_id',
        {'music_id': 6})
    self.assertEqual(
        result, ([(6L, 5L, 'test 6')], 1, 0,
                 [('music_id', self.int_type),
                  ('user_id', self.int_type),
                  ('artist', self.string_type)]))
    result = shard_0_master.mquery(
        'vt_user', 'select music_id, user_id, artist from vt_music_extra')
    self.assertEqual(result, ((1L, 1L, 'test 1'), (6L, 5L, 'test 6')))
    result = shard_1_master.mquery(
        'vt_user', 'select music_id, user_id, artist from vt_music_extra')
    self.assertEqual(result, ((7L, 7L, 'test 7'),))
    # Updates routed through the unowned lookup on music_id.
    vtgate_conn.begin()
    result = self.execute_on_master(
        vtgate_conn,
        'update vt_music_extra set artist = :artist '
        'where music_id = :music_id',
        {'music_id': 6, 'artist': 'test six'})
    self.assertEqual(result, ([], 1L, 0L, []))
    result = self.execute_on_master(
        vtgate_conn,
        'update vt_music_extra set artist = :artist '
        'where music_id = :music_id',
        {'music_id': 7, 'artist': 'test seven'})
    self.assertEqual(result, ([], 1L, 0L, []))
    vtgate_conn.commit()
    result = shard_0_master.mquery(
        'vt_user', 'select music_id, user_id, artist from vt_music_extra')
    self.assertEqual(result, ((1L, 1L, 'test 1'), (6L, 5L, 'test six')))
    result = shard_1_master.mquery(
        'vt_user', 'select music_id, user_id, artist from vt_music_extra')
    self.assertEqual(result, ((7L, 7L, 'test seven'),))
    # Deletes routed through the unowned lookup on music_id.
    vtgate_conn.begin()
    result = self.execute_on_master(
        vtgate_conn,
        'delete from vt_music_extra where music_id = :music_id',
        {'music_id': 6})
    self.assertEqual(result, ([], 1L, 0L, []))
    result = self.execute_on_master(
        vtgate_conn,
        'delete from vt_music_extra where music_id = :music_id',
        {'music_id': 7})
    self.assertEqual(result, ([], 1L, 0L, []))
    vtgate_conn.commit()
    result = shard_0_master.mquery(
        'vt_user', 'select music_id, user_id, artist from vt_music_extra')
    self.assertEqual(result, ((1L, 1L, 'test 1'),))
    result = shard_1_master.mquery(
        'vt_user', 'select music_id, user_id, artist from vt_music_extra')
    self.assertEqual(result, ())
  def test_main_seq(self):
    """Exercise the unsharded 'main' table with vt_main_seq auto-increment."""
    # main is an unsharded table whose ids come from vt_main_seq.
    vtgate_conn = get_connection()
    # Initialize the sequence.
    # TODO(sougou): Use DDL when ready.
    vtgate_conn.begin()
    self.execute_on_master(vtgate_conn, init_vt_main_seq, {})
    vtgate_conn.commit()
    count = 4
    # Each insert must report the sequence-generated id as lastrowid.
    for x in xrange(count):
      i = x+1
      vtgate_conn.begin()
      result = self.execute_on_master(
          vtgate_conn,
          'insert into main (val) values (:val)',
          {'val': 'test %s' % i})
      self.assertEqual(result, ([], 1L, i, []))
      vtgate_conn.commit()
    result = self.execute_on_master(
        vtgate_conn, 'select id, val from main where id = 4', {})
    self.assertEqual(
        result,
        ([(4, 'test 4')], 1, 0,
         [('id', self.int_type),
          ('val', self.string_type)]))
  def test_joins(self):
    """Exercise cross-shard joins, including left joins and join-on-name."""
    vtgate_conn = get_connection()
    vtgate_conn.begin()
    self.execute_on_master(
        vtgate_conn,
        'insert into join_user (id, name) values (:id, :name)',
        {'id': 1, 'name': 'name1'})
    self.execute_on_master(
        vtgate_conn,
        'insert into join_user_extra (user_id, email) '
        'values (:user_id, :email)',
        {'user_id': 1, 'email': 'email1'})
    self.execute_on_master(
        vtgate_conn,
        'insert into join_user_extra (user_id, email) '
        'values (:user_id, :email)',
        {'user_id': 2, 'email': 'email2'})
    self.execute_on_master(
        vtgate_conn,
        'insert into join_name_info (name, info) '
        'values (:name, :info)',
        {'name': 'name1', 'info': 'name test'})
    vtgate_conn.commit()
    # Plain join on matching key.
    result = self.execute_on_master(
        vtgate_conn,
        'select u.id, u.name, e.user_id, e.email '
        'from join_user u join join_user_extra e where e.user_id = u.id',
        {})
    self.assertEqual(
        result,
        ([(1L, 'name1', 1L, 'email1')],
         1,
         0,
         [('id', self.int_type),
          ('name', self.string_type),
          ('user_id', self.int_type),
          ('email', self.string_type)]))
    # Join with an expression (u.id+1), forcing cross-shard resolution.
    result = self.execute_on_master(
        vtgate_conn,
        'select u.id, u.name, e.user_id, e.email '
        'from join_user u join join_user_extra e where e.user_id = u.id+1',
        {})
    self.assertEqual(
        result,
        ([(1L, 'name1', 2L, 'email2')],
         1,
         0,
         [('id', self.int_type),
          ('name', self.string_type),
          ('user_id', self.int_type),
          ('email', self.string_type)]))
    # Left join with a matching right side.
    result = self.execute_on_master(
        vtgate_conn,
        'select u.id, u.name, e.user_id, e.email '
        'from join_user u left join join_user_extra e on e.user_id = u.id+1',
        {})
    self.assertEqual(
        result,
        ([(1L, 'name1', 2L, 'email2')],
         1,
         0,
         [('id', self.int_type),
          ('name', self.string_type),
          ('user_id', self.int_type),
          ('email', self.string_type)]))
    # Left join with no match: right columns must come back as NULLs.
    result = self.execute_on_master(
        vtgate_conn,
        'select u.id, u.name, e.user_id, e.email '
        'from join_user u left join join_user_extra e on e.user_id = u.id+2',
        {})
    self.assertEqual(
        result,
        ([(1L, 'name1', None, None)],
         1,
         0,
         [('id', self.int_type),
          ('name', self.string_type),
          ('user_id', self.int_type),
          ('email', self.string_type)]))
    # Inner join with no matching left row: empty result, fields preserved.
    result = self.execute_on_master(
        vtgate_conn,
        'select u.id, u.name, e.user_id, e.email '
        'from join_user u join join_user_extra e on e.user_id = u.id+2 '
        'where u.id = 2',
        {})
    self.assertEqual(
        result,
        ([],
         0,
         0,
         [('id', self.int_type),
          ('name', self.string_type),
          ('user_id', self.int_type),
          ('email', self.string_type)]))
    # Join on a varchar column through the unicode_hash vindex.
    result = self.execute_on_master(
        vtgate_conn,
        'select u.id, u.name, n.info '
        'from join_user u join join_name_info n on u.name = n.name '
        'where u.id = 1',
        {})
    self.assertEqual(
        result,
        ([(1L, 'name1', 'name test')],
         1,
         0,
         [('id', self.int_type),
          ('name', self.string_type),
          ('info', self.string_type)]))
    # Cleanup for test isolation.
    vtgate_conn.begin()
    self.execute_on_master(
        vtgate_conn,
        'delete from join_user where id = :id',
        {'id': 1})
    self.execute_on_master(
        vtgate_conn,
        'delete from join_user_extra where user_id = :user_id',
        {'user_id': 1})
    self.execute_on_master(
        vtgate_conn,
        'delete from join_user_extra where user_id = :user_id',
        {'user_id': 2})
    vtgate_conn.commit()
  def test_insert_value_required(self):
    """An insert missing its vindex column (user_id) must be rejected."""
    vtgate_conn = get_connection()
    try:
      vtgate_conn.begin()
      with self.assertRaisesRegexp(
          dbexceptions.DatabaseError, '.*value must be supplied.*'):
        self.execute_on_master(
            vtgate_conn,
            'insert into vt_user_extra (email) values (:email)',
            {'email': 'test 10'})
    finally:
      # Always roll back so the failed transaction does not leak.
      vtgate_conn.rollback()
  def test_transaction_modes(self):
    """Exercises single-db vs. 2pc transaction modes.

    A single_db cursor must refuse to touch a second database inside one
    transaction; a twopc cursor must commit across both and the writes
    must be visible (and then cleaned up) afterwards.
    """
    vtgate_conn = get_connection()
    # single_db=True: the transaction may only span one database.
    cursor = vtgate_conn.cursor(
        tablet_type='master', keyspace=None, writable=True, single_db=True)
    cursor.begin()
    cursor.execute(
        'insert into twopc_user (user_id, val) values(1, \'val\')', {})
    # Touching a second db (twopc_lookup) in the same transaction must fail.
    with self.assertRaisesRegexp(
        dbexceptions.DatabaseError, '.*multi-db transaction attempted.*'):
      cursor.execute(
          'insert into twopc_lookup (id, val) values(1, \'val\')', {})
    # twopc=True: the same cross-db write pattern must succeed.
    cursor = vtgate_conn.cursor(
        tablet_type='master', keyspace=None, writable=True, twopc=True)
    cursor.begin()
    cursor.execute(
        'insert into twopc_user (user_id, val) values(1, \'val\')', {})
    cursor.execute(
        'insert into twopc_lookup (id, val) values(1, \'val\')', {})
    cursor.commit()
    # Verify both writes committed.
    cursor.execute('select user_id, val from twopc_user where user_id = 1', {})
    self.assertEqual(cursor.fetchall(), [(1, 'val')])
    cursor.execute('select id, val from twopc_lookup where id = 1', {})
    self.assertEqual(cursor.fetchall(), [(1, 'val')])
    # Clean up in a second 2pc transaction and verify the deletes.
    cursor.begin()
    cursor.execute('delete from twopc_user where user_id = 1', {})
    cursor.execute('delete from twopc_lookup where id = 1', {})
    cursor.commit()
    cursor.execute('select user_id, val from twopc_user where user_id = 1', {})
    self.assertEqual(cursor.fetchall(), [])
    cursor.execute('select id, val from twopc_lookup where id = 1', {})
    self.assertEqual(cursor.fetchall(), [])
  def test_vtclient(self):
    """This test uses vtclient to send and receive various queries.

    Covers insert/select/update/streaming-select/delete round trips and
    checks that an invalid keyspace surfaces a vschema error.
    """
    # specify a good default keyspace for the connection here.
    utils.vtgate.vtclient(
        'insert into vt_user_extra(user_id, email) values (:v1, :v2)',
        keyspace='user',
        bindvars=[10, 'test 10'])
    out, _ = utils.vtgate.vtclient(
        'select user_id, email from vt_user_extra where user_id = :v1',
        bindvars=[10], json_output=True)
    self.assertEqual(out, {
        u'fields': [u'user_id', u'email'],
        u'rows': [[u'10', u'test 10']],
        })
    utils.vtgate.vtclient(
        'update vt_user_extra set email=:v2 where user_id = :v1',
        bindvars=[10, 'test 1000'])
    # Re-read with the streaming API; the updated row must be returned.
    out, _ = utils.vtgate.vtclient(
        'select user_id, email from vt_user_extra where user_id = :v1',
        bindvars=[10], streaming=True, json_output=True)
    self.assertEqual(out, {
        u'fields': [u'user_id', u'email'],
        u'rows': [[u'10', u'test 1000']],
        })
    utils.vtgate.vtclient(
        'delete from vt_user_extra where user_id = :v1', bindvars=[10])
    # After the delete the row set is None (no rows).
    out, _ = utils.vtgate.vtclient(
        'select user_id, email from vt_user_extra where user_id = :v1',
        bindvars=[10], json_output=True)
    self.assertEqual(out, {
        u'fields': [u'user_id', u'email'],
        u'rows': None,
        })
    # check that specifying an invalid keyspace is propagated and triggers an
    # error
    _, err = utils.vtgate.vtclient(
        'insert into vt_user_extra(user_id, email) values (:v1, :v2)',
        keyspace='invalid',
        bindvars=[10, 'test 10'],
        raise_on_error=False)
    self.assertIn('keyspace invalid not found in vschema', err)
  def test_vtctl_vtgate_execute(self):
    """This test uses 'vtctl VtGateExecute' to send and receive various queries.

    Exercises insert/select/update/delete plus the TYPE_ONLY
    included_fields execute option (field names omitted from results).
    """
    utils.vtgate.execute(
        'insert into vt_user_extra(user_id, email) values (:user_id, :email)',
        bindvars={'user_id': 11, 'email': 'test 11'})
    qr = utils.vtgate.execute(
        'select user_id, email from vt_user_extra where user_id = :user_id',
        bindvars={'user_id': 11})
    logging.debug('Original row: %s', str(qr))
    self.assertEqual(qr['fields'][0]['name'], 'user_id')
    self.assertEqual(len(qr['rows']), 1)
    v = qr['rows'][0][1]
    self.assertEqual(v, 'test 11')
    # test using exclude_field_names works.
    qr = utils.vtgate.execute(
        'select user_id, email from vt_user_extra where user_id = :user_id',
        bindvars={'user_id': 11}, execute_options='included_fields:TYPE_ONLY ')
    logging.debug('Original row: %s', str(qr))
    # With TYPE_ONLY, fields carry types but no 'name' entries.
    self.assertNotIn('name', qr['fields'][0])
    self.assertEqual(len(qr['rows']), 1)
    v = qr['rows'][0][1]
    self.assertEqual(v, 'test 11')
    utils.vtgate.execute(
        'update vt_user_extra set email=:email where user_id = :user_id',
        bindvars={'user_id': 11, 'email': 'test 1100'})
    qr = utils.vtgate.execute(
        'select user_id, email from vt_user_extra where user_id = :user_id',
        bindvars={'user_id': 11})
    logging.debug('Modified row: %s', str(qr))
    self.assertEqual(len(qr['rows']), 1)
    v = qr['rows'][0][1]
    self.assertEqual(v, 'test 1100')
    utils.vtgate.execute(
        'delete from vt_user_extra where user_id = :user_id',
        bindvars={'user_id': 11})
    # 'rows' may be absent/None after the delete, hence the `or []`.
    qr = utils.vtgate.execute(
        'select user_id, email from vt_user_extra where user_id = :user_id',
        bindvars={'user_id': 11})
    self.assertEqual(len(qr['rows'] or []), 0)
  def test_split_query(self):
    """This test uses 'vtctl VtGateSplitQuery' to validate the Map-Reduce APIs.

    We want to return KeyRange queries: splitting in 2 must yield exactly
    one query covering [, 0x80) and one covering [0x80, ).
    """
    sql = 'select id, name from vt_user'
    s = utils.vtgate.split_query(sql, 'user', 2)
    self.assertEqual(len(s), 2)
    first_half_queries = 0
    second_half_queries = 0
    for q in s:
      self.assertEqual(q['query']['sql'], sql)
      self.assertIn('key_range_part', q)
      self.assertEqual(len(q['key_range_part']['key_ranges']), 1)
      kr = q['key_range_part']['key_ranges'][0]
      # 'gA==' is base64 for the single byte 0x80, the keyspace midpoint.
      eighty_in_base64 = 'gA=='
      is_first_half = 'start' not in kr and kr['end'] == eighty_in_base64
      is_second_half = 'end' not in kr and kr['start'] == eighty_in_base64
      self.assertTrue(is_first_half or is_second_half,
                      'invalid keyrange %s' % str(kr))
      if is_first_half:
        first_half_queries += 1
      else:
        second_half_queries += 1
    # Each half must appear exactly once.
    self.assertEqual(first_half_queries, 1, 'invalid split %s' % str(s))
    self.assertEqual(second_half_queries, 1, 'invalid split %s' % str(s))
def test_vschema_vars(self):
v = utils.vtgate.get_vars()
self.assertIn('VtgateVSchemaCounts', v)
self.assertIn('Reload', v['VtgateVSchemaCounts'])
self.assertTrue(v['VtgateVSchemaCounts']['Reload'] > 0)
self.assertNotIn('Parsing', v['VtgateVSchemaCounts'])
self.assertNotIn('WatchError', v['VtgateVSchemaCounts'])
# Standard vitess test entry point: parses flags and runs this test module.
if __name__ == '__main__':
  utils.main()
| {
"content_hash": "a48162225367a70d02cb5e4031ec596c",
"timestamp": "",
"source": "github",
"line_count": 1235,
"max_line_length": 96,
"avg_line_length": 33.31336032388664,
"alnum_prop": 0.5543240484176754,
"repo_name": "theskyinflames/bpulse-go-client",
"id": "0a92e10efcdece6cd590b54f4cf0acc01c01b58e",
"size": "41181",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vendor/github.com/youtube/vitess/test/vtgatev3_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "133726"
},
{
"name": "Shell",
"bytes": "2415"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from hipster_api.fields.base import Field
from hipster_api.fields.str import StringList as _StringList
class Integer(Field):
    """Field that coerces its raw request value to ``int``."""

    def to_python(self):
        """Return the value as ``int``, falling back to ``self.default``.

        The cleaned value is also stored on the field via ``setitem``.
        """
        value = super(Integer, self).to_python()
        try:
            value = int(value)
        except (TypeError, ValueError):
            # BUG FIX: int(None) / int([]) raise TypeError, not ValueError;
            # previously a missing value crashed instead of using the default.
            value = self.default
        self.setitem(value)
        return value
class IntegerLarger(Integer):
    """Integer field that must be larger than (or equal to) a bound."""

    def __init__(self, larger, equals=False, **kwargs):
        # equals=True means the bound itself is acceptable.
        self.equals = equals
        self.larger = larger
        super(IntegerLarger, self).__init__(**kwargs)

    def to_python(self):
        """Return the int value, or the default when below the bound."""
        value = super(IntegerLarger, self).to_python()
        too_small = (value < self.larger) if self.equals \
            else (value <= self.larger)
        if too_small:
            value = self.default
        self.value = value
        return value
class IntegerList(_StringList):
    """String-list field whose items are coerced to ``int``."""

    def to_python(self):
        """Return the items as ints; on any bad item, re-parse the default."""
        items = super(IntegerList, self).to_python()
        try:
            parsed = [int(item) for item in items]
        except ValueError:
            # One unparsable element invalidates the whole list: swap in
            # the default and run the conversion once more over it.
            self.value = self.default
            return self.to_python()
        self.value = parsed
        return parsed
class IntegerLess(Integer):
    """Integer field that must be less than (or equal to) a bound."""

    def __init__(self, less, equals=False, **kwargs):
        # equals=True means the bound itself is acceptable.
        self.equals = equals
        self.less = less
        super(IntegerLess, self).__init__(**kwargs)

    def to_python(self):
        """Return the int value, or the default when above the bound."""
        value = super(IntegerLess, self).to_python()
        too_big = (value > self.less) if self.equals \
            else (value >= self.less)
        if too_big:
            value = self.default
        self.value = value
        return value
class Float(Field):
    """Field that coerces its raw value to ``float``.

    A comma is accepted as the decimal separator ("3,14" -> 3.14).
    """

    def to_python(self):
        """Return the value as ``float``, falling back to ``self.default``."""
        raw = super(Float, self).to_python()
        normalized = str(raw).replace(',', '.')
        try:
            result = float(normalized)
        except ValueError:
            result = self.default
        self.setitem(result)
        return self.value
class FloatLess(Float):
    """Float field that must be less than (or equal to) a bound."""

    def __init__(self, less, equals=False, **kwargs):
        # equals=True means the bound itself is acceptable.
        self.less = less
        self.equals = equals
        super(FloatLess, self).__init__(**kwargs)

    def to_python(self):
        """Return the float value, or the default when above the bound."""
        value = super(FloatLess, self).to_python()
        if self.equals:
            if value > self.less:
                value = self.default
        elif value >= self.less:
            value = self.default
        self.value = value
        # BUG FIX: the return was missing, so to_python() yielded None;
        # IntegerLess (the int counterpart) returns the cleaned value.
        return value
class FloatLarger(Float):
    """Float field that must be larger than (or equal to) a bound."""

    def __init__(self, larger, equals=False, **kwargs):
        # equals=True means the bound itself is acceptable.
        self.larger = larger
        self.equals = equals
        super(FloatLarger, self).__init__(**kwargs)

    def to_python(self):
        """Return the float value, or the default when below the bound."""
        value = super(FloatLarger, self).to_python()
        if self.equals:
            if value < self.larger:
                value = self.default
        elif value <= self.larger:
            value = self.default
        self.value = value
        # BUG FIX: the return was missing, so to_python() yielded None;
        # IntegerLarger (the int counterpart) returns the cleaned value.
        return value
class FloatList(_StringList):
    """String-list field whose items are coerced to ``float``.

    When the list separator is not the comma, a comma inside an item is
    treated as the decimal separator ("3,14" -> 3.14).
    """

    def to_python(self):
        """Return the items as floats; on any bad item, re-parse the default."""
        value = super(FloatList, self).to_python()
        try:
            if self.separator != ',':
                value = list(map(lambda x: float(x.replace(',', '.')), value))
            else:
                value = list(map(lambda x: float(x), value))
        except ValueError:
            # One unparsable element invalidates the whole list: swap in
            # the default and run the conversion once more over it.
            self.value = self.default
            return self.to_python()
        self.value = value
        # BUG FIX: the return was missing, so to_python() yielded None;
        # IntegerList (the int counterpart) returns the cleaned value.
        return value
| {
"content_hash": "0790cd2bae28a1ef7028cd308fed22f3",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 78,
"avg_line_length": 27.783333333333335,
"alnum_prop": 0.5560887822435513,
"repo_name": "RustoriaRu/hipster_api",
"id": "50be4538e103b750ca5abb1a40c863c4a12d2762",
"size": "3358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hipster_api/fields/number.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1616"
},
{
"name": "Python",
"bytes": "32128"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import defaultdict, namedtuple
from pants.util.dirutil import safe_mkdir
# A per-cache (hit_targets, miss_targets) pair; each element is a list of
# target address strings.
CacheStat = namedtuple('CacheStat', ['hit_targets', 'miss_targets'])
class ArtifactCacheStats(object):
  """Tracks the hits and misses in the artifact cache.

  If dir is specified, writes the hits and misses to files in that dir.
  """

  def __init__(self, dir=None):
    """
    :param dir: optional directory to which per-cache '<name>.hits' /
      '<name>.misses' files are appended; stats are kept in memory either way.
    """
    def init_stat():
      return CacheStat([], [])
    self.stats_per_cache = defaultdict(init_stat)
    self._dir = dir
    if self._dir:
      # BUG FIX: safe_mkdir used to be called unconditionally; with the
      # default dir=None that call fails instead of being a no-op.
      safe_mkdir(self._dir)

  def add_hit(self, cache_name, tgt):
    """Records a cache hit of tgt's address under cache_name."""
    self._add_stat(0, cache_name, tgt)

  def add_miss(self, cache_name, tgt):
    """Records a cache miss of tgt's address under cache_name."""
    self._add_stat(1, cache_name, tgt)

  def get_all(self):
    """Returns the cache stats as a list of dicts."""
    ret = []
    for cache_name, stat in self.stats_per_cache.items():
      ret.append({
        'cache_name': cache_name,
        'num_hits': len(stat.hit_targets),
        'num_misses': len(stat.miss_targets),
        'hits': stat.hit_targets,
        'misses': stat.miss_targets
      })
    return ret

  # hit_or_miss is the appropriate index in CacheStat, i.e., 0 for hit, 1 for miss.
  def _add_stat(self, hit_or_miss, cache_name, tgt):
    """Appends tgt's address to the in-memory stat and, when a stats dir
    exists, to the matching per-cache file."""
    self.stats_per_cache[cache_name][hit_or_miss].append(tgt.address.reference())
    if self._dir and os.path.exists(self._dir):  # Check existence in case of a clean-all.
      suffix = 'misses' if hit_or_miss else 'hits'
      with open(os.path.join(self._dir, '{}.{}'.format(cache_name, suffix)), 'a') as f:
        f.write(tgt.address.reference())
        f.write('\n')
| {
"content_hash": "16e0f782d1701011999034ad1eee7748",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 93,
"avg_line_length": 35.04,
"alnum_prop": 0.644406392694064,
"repo_name": "pgroudas/pants",
"id": "22066784a071fbb1addd0b4f62e806ec0b4b35ed",
"size": "1899",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/python/pants/goal/artifact_cache_stats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "10984"
},
{
"name": "GAP",
"bytes": "4818"
},
{
"name": "HTML",
"bytes": "68090"
},
{
"name": "Java",
"bytes": "297674"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "6172"
},
{
"name": "Python",
"bytes": "2868194"
},
{
"name": "Scala",
"bytes": "105948"
},
{
"name": "Shell",
"bytes": "39579"
},
{
"name": "Thrift",
"bytes": "2824"
}
],
"symlink_target": ""
} |
from fabric.api import *
from fabric.contrib.files import exists
from fabtools import require
import fabtools
import time
# Deployment settings -- fill these in before running any task.
MYSQL_PASSWORD = ""
WORDPRESS_USER_NAME = ""
# BUG FIX: every task in this fabfile (provision, install_wordpress,
# backup_wordpress, restore_backup) reads WORDPRESS_USER_PASSWORD, but the
# constant was declared as FFT_WORDPRESS_USER_PASSWORD, leaving all of them
# with a NameError at runtime.
WORDPRESS_USER_PASSWORD = ""
DB_PRODUCTION_HOST = ""
PRODUCTION_KEYNAME = ""
BLOG_NAME = ""
# Map fabric roles to hosts; the production host must be filled in.
env.roledefs = {
    'development': ['vagrant@127.0.0.1:2222'],
    'production': [''],
}
env.root_dir = '/home/%s' % WORDPRESS_USER_NAME
env.wp_config = "conf/wordpress/wp-config-sample"
# settings based on role development: reuse vagrant's generated ssh key.
if (env.roles[0]=="development"):
    result = local('vagrant ssh-config | grep IdentityFile', capture=True)
    env.key_filename = result.split()[1].strip('"')
    env.wp_config = "%s-development.php" % env.wp_config
#settings based on role production
if (env.roles[0]=="production"):
    # BUG FIX: the format string was '~/keys/' with no %s placeholder, so
    # the %-interpolation raised "not all arguments converted during
    # string formatting" instead of building the key path.
    env.key_filename = '~/keys/%s' % PRODUCTION_KEYNAME
    env.wp_config = "%s-production.php" % env.wp_config
def provision():
    """Provision a fresh host: packages, app user, MySQL, nginx, then deploy."""
    # update APT package definitions
    fabtools.deb.update_index(quiet=False)
    # Require a sudo-capable application user.
    fabtools.require.users.user(WORDPRESS_USER_NAME)
    fabtools.require.users.sudoer(WORDPRESS_USER_NAME, hosts='ALL', operators='ALL', passwd=False, commands='ALL')
    # Require a mysql server
    # NOTE(review): WORDPRESS_USER_PASSWORD must be defined at module level
    # (the constants block declares FFT_WORDPRESS_USER_PASSWORD) -- confirm.
    require.mysql.server(password=WORDPRESS_USER_PASSWORD)
    # Require a nginx server running
    require.nginx.server()
    # Require some Debian/Ubuntu packages
    require.deb.packages([
        'php5-fpm',
        'php5-mysql',
    ])
    # Upload the project tree to the host.
    transfer_project()
    # Download/configure wordpress and publish it under /var/www.
    install_wordpress()
    # Install configs and restart nginx + php-fpm.
    restart_servers()
def install_wordpress():
    """Download wordpress (once), apply project config, publish to /var/www."""
    # download and install wordpress if not previously done
    with cd(env.root_dir):
        if not exists("wordpress"):
            sudo("wget http://wordpress.org/latest.tar.gz")
            sudo("tar -xzvf latest.tar.gz")
            sudo("rm -f latest.tar.gz")
        if (env.roles[0]=="development"):
            # Seed the development database from the project's init script.
            # NOTE(review): relies on module-level WORDPRESS_USER_PASSWORD.
            sudo("mysql --user=root --password=%s < conf/init.sql" % WORDPRESS_USER_PASSWORD)
        # transfer wp-config.php
        sudo("cp -rf %s wordpress/wp-config.php" % env.wp_config)
        # transfer wordpress/languages to wordpress/wp-content/language
        sudo("cp -rf conf/wordpress/languages/ wordpress/wp-content/")
        # transfer wordpress/plugins to wordpress/wp-content/plugins
        sudo("cp -rf conf/wordpress/plugins/ wordpress/wp-content/")
        # transfer wordpress/themes to wordpress/wp-content/themes
        sudo("cp -rf conf/wordpress/themes/ wordpress/wp-content/")
        # create www-dir
        if not exists("/var/www/%s" % BLOG_NAME):
            sudo("mkdir -p /var/www/%s" % BLOG_NAME)
        # transfer wordpress both to the web root and the blog subdir
        sudo("cp -r wordpress/* /var/www/")
        sudo("cp -r wordpress/* /var/www/%s" % BLOG_NAME)
    with cd("/var/www/"):
        # Hand ownership to the web server; let the app user into its group.
        sudo("chown www-data:www-data * -R")
        sudo("usermod -a -G www-data %s" % WORDPRESS_USER_NAME)
    print "installed wordpress"
def restart_servers():
    """Install the nginx/php-fpm configuration and bounce both services."""
    commands = (
        "cp -f conf/nginx.conf /etc/nginx/sites-available/default",
        "cp -f conf/php.ini /etc/php5/fpm/php.ini",
        "service php5-fpm restart",
        "/etc/init.d/nginx restart",
    )
    with cd(env.root_dir):
        for command in commands:
            sudo(command)
def transfer_project():
    """Archive the git tree (git-archive-all), upload, and unpack on the host."""
    # make file structure for release; timestamp doubles as the release name
    release_name = time.strftime("%Y%m%d%H%M%S")
    with cd(env.root_dir):
        # makes an archive from git using git-archive-all https://github.com/Kentzo/git-archive-all
        # NOTE(review): local() is not affected by cd() (cd only applies to
        # run/sudo/put), so the archive is built in the local cwd -- confirm
        # that is intended.
        local("git-archive-all release_%s.tar.gz" % (release_name))
        put("release_%s.tar.gz" % (release_name), env.root_dir, use_sudo=True)
        sudo("tar zxf release_%s.tar.gz" % (release_name))
        sudo("rm -f release_%s.tar.gz" % (release_name))
        local("rm -f release_%s.tar.gz" % (release_name))
        # Publish the static www/ directory to nginx's share dir.
        sudo("cp -rf www /usr/share/nginx/")
def deploy():
    """Push the current tree to the host and restart the web stack."""
    transfer_project()
    install_wordpress()
    restart_servers()
def backup_conf():
    """Download the remote php.ini into conf/ so it can be versioned."""
    get('/etc/php5/fpm/php.ini','conf/php.ini.backup')
def backup_wordpress():
    """Dump the wordpress database, download the dump, and stage it in git."""
    backup_name = "wordpress_backup_%s.sql" % time.strftime("%Y%m%d%H%M%S")
    with cd(env.root_dir):
        if (env.roles[0]=="development"):
            # NOTE(review): relies on module-level WORDPRESS_USER_PASSWORD.
            sudo("mysqldump --add-drop-table --user=root --password=%s wordpress > backups/%s" % (WORDPRESS_USER_PASSWORD, backup_name))
        if (env.roles[0]=="production"):
            # Bare -p makes mysqldump prompt interactively for the password.
            sudo("mysqldump --add-drop-table -h %s -P 3306 -u master -p wordpress > backups/%s" % (DB_PRODUCTION_HOST, backup_name))
        get("backups/%s"%(backup_name),"backups/%s" % (backup_name))
        local("git add -A")
def restore_backup(name):
    """Load a previously taken SQL dump back into the wordpress database.

    :param name: file name under backups/ to restore.
    """
    with cd(env.root_dir):
        if (env.roles[0]=="development"):
            sudo("mysql --user=root --password=%s wordpress < backups/%s" % (WORDPRESS_USER_PASSWORD,name))
        if (env.roles[0]=="production"):
            # Bare -p makes mysql prompt interactively for the password.
            sudo("mysql -h %s -P 3306 -u master -p wordpress < backups/%s" % (DB_PRODUCTION_HOST, name))
"content_hash": "d791c8589cf98ee62ccb4407af3212e2",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 136,
"avg_line_length": 37.33587786259542,
"alnum_prop": 0.6321815579636066,
"repo_name": "mik4el/wordpress_bootstrap_se_fabric",
"id": "021e3ae42a0c22d3057e2e4574db6b76bfcd80b0",
"size": "4891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19834"
},
{
"name": "JavaScript",
"bytes": "53531"
},
{
"name": "PHP",
"bytes": "1647674"
}
],
"symlink_target": ""
} |
from pprint import isreadable, pformat
from re import search, sub
def search_text(rx, text):
    '''
    Safely run a regex search over ``text``.

    :param rx: regex to match on ``text``
    :param text: content to work on
    :return: ``None`` when either argument is empty/``None``, otherwise
        the result of ``re.search`` (a match-object or ``None``).
    '''
    if rx and text:
        return search(rx, text)
    return None
def replace_text(rx, replacement, text):
    '''
    Replace every ``rx`` match in ``text`` with ``replacement``.

    :param rx: regex to match on ``text``
    :param replacement: content to put into ``text`` on ``rx`` match
    :param text: content to work on
    :return str: ``text`` with replaced parts, or the unchanged ``text``
        when there is no match or ``replacement`` is empty
    '''
    # Inlined guard: equivalent to ``replacement and search_text(rx, text)``.
    if replacement and rx and text and search(rx, text):
        return sub(rx, replacement, text)
    return text
def make_pretty(data):
    '''
    :param data: ugly data
    :return str: data formatted with **pprint.pformat**, or ``None`` when
        **pprint.isreadable** rejects it
    '''
    if not isreadable(data):
        return None
    return pformat(data)
| {
"content_hash": "5ed23d2b271b45df6bac0f8e9c5e5f5f",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 68,
"avg_line_length": 26.125,
"alnum_prop": 0.600956937799043,
"repo_name": "spookey/ffflash",
"id": "c1800f39c776061a7bd08a53c54df566cd06763a",
"size": "1045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ffflash/lib/text.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1058"
},
{
"name": "Python",
"bytes": "92684"
},
{
"name": "Shell",
"bytes": "823"
}
],
"symlink_target": ""
} |
"""Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from ._base import make_dataset
from ._sag_fast import sag32, sag64
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.validation import _check_sample_weight
from ..utils.validation import _deprecate_positional_args
from ..utils.extmath import row_norms
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept,
                       n_samples=None,
                       is_saga=False):
    """Compute the automatic step size for the SAG/SAGA solvers.

    The step size is derived from an (inverse) Lipschitz constant L built
    from the max sum of squares over all samples.

    Parameters
    ----------
    max_squared_sum : float
        Maximum squared sum of X over samples.
    alpha_scaled : float
        Regularization constant, already scaled by 1. / n_samples.
    loss : string, in {"log", "multinomial", "squared"}
        The loss function used in SAG solver.
    fit_intercept : bool
        Whether a constant (bias) column is part of the decision function.
    n_samples : int, optional
        Number of rows in X. Useful if is_saga=True.
    is_saga : boolean, optional
        Whether to return the SAGA step size instead of the SAG one.

    Returns
    -------
    step_size : float
        Step size used in SAG solver.

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document

    Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives
    https://arxiv.org/abs/1407.0202
    """
    intercept_term = int(fit_intercept)
    if loss in ('log', 'multinomial'):
        # Logistic losses: gradient is 1/4-Lipschitz in the margin.
        lipschitz = 0.25 * (max_squared_sum + intercept_term) + alpha_scaled
    elif loss == 'squared':
        # inverse Lipschitz constant for squared loss
        lipschitz = max_squared_sum + intercept_term + alpha_scaled
    else:
        raise ValueError("Unknown loss function for SAG solver, got %s "
                         "instead of 'log' or 'squared'" % loss)
    if not is_saga:
        # SAG theoretical step size is 1/16L but 1/L is recommended; see
        # http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
        # slide 65.
        return 1. / lipschitz
    # SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n)),
    # see Defazio et al. 2014.
    mun = min(2 * n_samples * alpha_scaled, lipschitz)
    return 1. / (2 * lipschitz + mun)
@_deprecate_positional_args
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0.,
               max_iter=1000, tol=0.001, verbose=0, random_state=None,
               check_input=True, max_squared_sum=None,
               warm_start_mem=None,
               is_saga=False):
    """SAG solver for Ridge and LogisticRegression

    SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a constant learning rate.

    IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
    same scale. You can normalize the data by using
    sklearn.preprocessing.StandardScaler on your data before passing it to the
    fit method.

    This implementation works with data represented as dense numpy arrays or
    sparse scipy arrays of floating point values for the features. It will
    fit the data according to squared loss or log loss.

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using the squared euclidean norm L2.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data

    y : numpy array, shape (n_samples,)
        Target values. With loss='multinomial', y must be label encoded
        (see preprocessing.LabelEncoder).

    sample_weight : array-like, shape (n_samples,), optional
        Weights applied to individual samples (1. for unweighted).

    loss : 'log' | 'squared' | 'multinomial'
        Loss function that will be optimized:
        -'log' is the binary logistic loss, as used in LogisticRegression.
        -'squared' is the squared loss, as used in Ridge.
        -'multinomial' is the multinomial logistic loss, as used in
         LogisticRegression.

        .. versionadded:: 0.18
           *loss='multinomial'*

    alpha : float, optional
        L2 regularization term in the objective function
        ``(0.5 * alpha * || W ||_F^2)``. Defaults to 1.

    beta : float, optional
        L1 regularization term in the objective function
        ``(beta * || W ||_1)``. Only applied if ``is_saga`` is set to True.
        Defaults to 0.

    max_iter : int, optional
        The max number of passes over the training data if the stopping
        criteria is not reached. Defaults to 1000.

    tol : double, optional
        The stopping criteria for the weights. The iterations will stop when
        max(change in weights) / max(weights) < tol. Defaults to .001

    verbose : integer, optional
        The verbosity level.

    random_state : int, RandomState instance, default=None
        Used when shuffling the data. Pass an int for reproducible output
        across multiple function calls.
        See :term:`Glossary <random_state>`.

    check_input : bool, default True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default None
        Maximum squared sum of X over samples. If None, it will be computed,
        going through all the samples. The value should be precomputed
        to speed up cross validation.

    warm_start_mem : dict, optional
        The initialization parameters used for warm starting. Warm starting is
        currently used in LogisticRegression but not in Ridge.
        It contains:
            - 'coef': the weight vector, with the intercept in last line
                if the intercept is fitted.
            - 'gradient_memory': the scalar gradient for all seen samples.
            - 'sum_gradient': the sum of gradient over all seen samples,
                for each feature.
            - 'intercept_sum_gradient': the sum of gradient over all seen
                samples, for the intercept.
            - 'seen': array of boolean describing the seen samples.
            - 'num_seen': the number of seen samples.

    is_saga : boolean, optional
        Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
        better in the first epochs, and allow for l1 regularisation.

    Returns
    -------
    coef_ : array, shape (n_features)
        Weight vector.

    n_iter_ : int
        The number of full pass on all samples.

    warm_start_mem : dict
        Contains a 'coef' key with the fitted result, and possibly the
        fitted intercept at the end of the array. Contains also other keys
        used for warm starting.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(n_samples, n_features)
    >>> y = rng.randn(n_samples)
    >>> clf = linear_model.Ridge(solver='sag')
    >>> clf.fit(X, y)
    Ridge(solver='sag')

    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.LogisticRegression(
    ...     solver='sag', multi_class='multinomial')
    >>> clf.fit(X, y)
    LogisticRegression(multi_class='multinomial', solver='sag')

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document

    Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives
    https://arxiv.org/abs/1407.0202

    See also
    --------
    Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
    LogisticRegression, SGDClassifier, LinearSVC, Perceptron
    """
    if warm_start_mem is None:
        warm_start_mem = {}
    # Ridge default max_iter is None
    if max_iter is None:
        max_iter = 1000
    if check_input:
        _dtype = [np.float64, np.float32]
        X = check_array(X, dtype=_dtype, accept_sparse='csr', order='C')
        y = check_array(y, dtype=_dtype, ensure_2d=False, order='C')
    n_samples, n_features = X.shape[0], X.shape[1]
    # As in SGD, the alpha is scaled by n_samples.
    alpha_scaled = float(alpha) / n_samples
    beta_scaled = float(beta) / n_samples
    # if loss == 'multinomial', y should be label encoded.
    n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
    # initialization
    sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
    # Pull each piece of solver state out of warm_start_mem when present;
    # otherwise start it at zero.
    if 'coef' in warm_start_mem.keys():
        coef_init = warm_start_mem['coef']
    else:
        # assume fit_intercept is False
        coef_init = np.zeros((n_features, n_classes), dtype=X.dtype,
                             order='C')
    # coef_init contains possibly the intercept_init at the end.
    # Note that Ridge centers the data before fitting, so fit_intercept=False.
    fit_intercept = coef_init.shape[0] == (n_features + 1)
    if fit_intercept:
        intercept_init = coef_init[-1, :]
        coef_init = coef_init[:-1, :]
    else:
        intercept_init = np.zeros(n_classes, dtype=X.dtype)
    if 'intercept_sum_gradient' in warm_start_mem.keys():
        intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
    else:
        intercept_sum_gradient = np.zeros(n_classes, dtype=X.dtype)
    if 'gradient_memory' in warm_start_mem.keys():
        gradient_memory_init = warm_start_mem['gradient_memory']
    else:
        gradient_memory_init = np.zeros((n_samples, n_classes),
                                        dtype=X.dtype, order='C')
    if 'sum_gradient' in warm_start_mem.keys():
        sum_gradient_init = warm_start_mem['sum_gradient']
    else:
        sum_gradient_init = np.zeros((n_features, n_classes),
                                     dtype=X.dtype, order='C')
    if 'seen' in warm_start_mem.keys():
        seen_init = warm_start_mem['seen']
    else:
        seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
    if 'num_seen' in warm_start_mem.keys():
        num_seen_init = warm_start_mem['num_seen']
    else:
        num_seen_init = 0
    dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
    if max_squared_sum is None:
        max_squared_sum = row_norms(X, squared=True).max()
    step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
                                   fit_intercept, n_samples=n_samples,
                                   is_saga=is_saga)
    if step_size * alpha_scaled == 1:
        raise ZeroDivisionError("Current sag implementation does not handle "
                                "the case step_size * alpha_scaled == 1")
    # Dispatch to the Cython kernel matching the input dtype.
    sag = sag64 if X.dtype == np.float64 else sag32
    num_seen, n_iter_ = sag(dataset, coef_init,
                            intercept_init, n_samples,
                            n_features, n_classes, tol,
                            max_iter,
                            loss,
                            step_size, alpha_scaled,
                            beta_scaled,
                            sum_gradient_init,
                            gradient_memory_init,
                            seen_init,
                            num_seen_init,
                            fit_intercept,
                            intercept_sum_gradient,
                            intercept_decay,
                            is_saga,
                            verbose)
    if n_iter_ == max_iter:
        warnings.warn("The max_iter was reached which means "
                      "the coef_ did not converge", ConvergenceWarning)
    # Fold the fitted intercept back into the coef array so warm starts can
    # round-trip it through warm_start_mem['coef'].
    if fit_intercept:
        coef_init = np.vstack((coef_init, intercept_init))
    warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
                      'intercept_sum_gradient': intercept_sum_gradient,
                      'gradient_memory': gradient_memory_init,
                      'seen': seen_init, 'num_seen': num_seen}
    if loss == 'multinomial':
        coef_ = coef_init.T
    else:
        coef_ = coef_init[:, 0]
    return coef_, n_iter_, warm_start_mem
| {
"content_hash": "cc1bcd2b303ed5bbaade0431b934adc3",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 79,
"avg_line_length": 37.16811594202898,
"alnum_prop": 0.6031349918115886,
"repo_name": "bnaul/scikit-learn",
"id": "caa9b2d13300320cd71c6e29e38bd568a02f1b25",
"size": "12823",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/linear_model/_sag.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451996"
},
{
"name": "C++",
"bytes": "140322"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7229182"
},
{
"name": "Shell",
"bytes": "19938"
}
],
"symlink_target": ""
} |
'''-------------------------------------------------------------------------
Copyright IBM Corp. 2015, 2015 All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
Limitations under the License.
-------------------------------------------------------------------------'''
""" Implementation of SQLAlchemy backend. """
from sqlalchemy.orm import joinedload
from dragon.common import crypt
from dragon.db.sqlalchemy import models
from dragon.db.sqlalchemy.session import get_session
from dragon.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def model_query(context, *args):
    """Build a SQLAlchemy query over ``*args`` on the context's session."""
    return _session(context).query(*args)
def soft_delete_aware_query(context, *args, **kwargs):
    """Stack query helper that accounts for context's `show_deleted` field.

    :param show_deleted: if present, overrides context's show_deleted field.
    """
    query = model_query(context, *args)
    if not kwargs.get('show_deleted'):
        # Hide soft-deleted rows unless explicitly requested.
        query = query.filter_by(deleted_at=None)
    return query
def _session(context):
    """Return the context's session when available, else a fresh session."""
    explicit = context.session if context else None
    return explicit or get_session()
"""
def policy_types_get_all(context):
results = model_query(context, models.PolicyType).all()
if not results:
raise exception.NotFound('no policy types were found')
return results
"""
def _encrypt(value):
    """Encrypt a unicode value; ``None`` passes through as ``None``."""
    if value is None:
        return None
    return crypt.encrypt(value.encode('utf-8'))
def _decrypt(enc_value):
    """Decrypt to unicode; ``None`` when decryption yields ``None``."""
    decrypted = crypt.decrypt(enc_value)
    if decrypted is None:
        return None
    return unicode(decrypted, 'utf-8')
""" new dragon implementattion """
""" resources """
""" get all resources of given resource type """
def resources_get_by_type(context, tenant_id, resource_type):
    """Return every resource of ``resource_type`` owned by ``tenant_id``."""
    query = model_query(context, models.Resources)
    query = query.filter_by(tenant_id=tenant_id)
    query = query.filter_by(resource_type_id=resource_type)
    return query.all()
def resource_get(context, resource_id):
    """Fetch one resource by primary key, or ``None`` when absent."""
    # NOTE(review): warning-level logging of routine lookups looks like a
    # leftover debug aid -- kept as-is to preserve behavior; confirm intent.
    LOG.warning(resource_id)
    row = (model_query(context, models.Resources)
           .filter_by(id=resource_id)
           .first())
    LOG.warning(row)
    return row
def resource_delete(context, resource_id):
    """Delete the resource with id *resource_id*.

    A missing row is now a no-op instead of an AttributeError on None
    (consistent with workload_policy_execution_delete below).
    """
    result = (model_query(context, models.Resources).
              filter_by(id=resource_id).first())
    if result:
        result.delete()
def resource_get_all(context, tenant_id):
    """Return every resource owned by *tenant_id*."""
    LOG.debug("at resource_get_all ++++")
    return (model_query(context, models.Resources).
            filter_by(tenant_id=tenant_id).all())


def resource_create(context, values):
    """Insert a new Resources row built from *values* and return it."""
    ref = models.Resources()
    ref.update(values)
    ref.save(_session(context))
    return ref


""" resource_type """


def resource_type_create(context, values):
    """Insert a new Resource_type row built from *values* and return it."""
    ref = models.Resource_type()
    ref.update(values)
    ref.save(_session(context))
    return ref
def resource_type_get(context, resource_type_id):
    """Return the resource types matching *resource_type_id*.

    NOTE(review): despite the singular name this returns a list
    (``.all()``), not a single row -- callers must index into it.
    """
    result = (model_query(context, models.Resource_type).
              filter_by(id=resource_type_id).all())
    return result
def resource_type_update_action(context, resource_type_id):
    """Point a resource type's default_action_id at its first action.

    Returns the updated resource type row, or None when either the
    resource type or an action for it cannot be found.
    """
    # resource_type_get() and action_get_by_resource_type() both return
    # lists (.all()); the previous code treated them as single rows, so
    # `action_ref.id` / `rt_ref.update_and_save(...)` could never work.
    rt_refs = resource_type_get(context, resource_type_id)
    action_refs = action_get_by_resource_type(context, resource_type_id)
    if rt_refs and action_refs:
        rt_ref = rt_refs[0]
        values = {'default_action_id': action_refs[0].id}
        rt_ref.update_and_save(values, session=None)
        return rt_ref
    return None
def resource_type_get_by_name(context, resource_type_name):
    """Fetch one resource type by its name (None when absent)."""
    return (model_query(context, models.Resource_type).
            filter_by(name=resource_type_name).first())


def resource_type_get_all(context):
    """Return every resource type row."""
    LOG.debug(" at: resource_type_get_all +++ ")
    return model_query(context, models.Resource_type).all()


""" actions """
def action_get_by_resource_type(context, resource_type_id):
    """List all actions registered for *resource_type_id*."""
    # Lazy %-style logging argument: formatting only happens when DEBUG
    # is actually enabled.
    LOG.debug(" at: actions_get_by_resource +++ resource_type_id = %s ",
              resource_type_id)
    result = (model_query(context, models.Actions).
              filter_by(resource_type_id=resource_type_id).all())
    # LOG.debug("result = %s" % result[0]['name'])
    return result
def action_get_default_by_resource_type(context, resource_type_id):
    """Return the default action row for a resource type (or None).

    Guards against a missing resource type / default action instead of
    raising TypeError on a None row; logging args are now lazy.
    """
    LOG.debug("at:action_get_default_by_resource_type+++resource_type_id = %s ",
              resource_type_id)
    resource_type = (model_query(context, models.Resource_type).
                     filter_by(id=resource_type_id).first())
    if resource_type is None:
        return None
    result = (model_query(context, models.Actions).
              filter_by(id=resource_type['default_action_id']).first())
    if result is not None:
        LOG.debug("result = %s", result['name'])
    return result
def action_get(context, action_id):
    """Fetch one action by primary key (None when absent)."""
    return (model_query(context, models.Actions).
            filter_by(id=action_id).first())
def action_create(context, values):
    """Insert a new Actions row built from *values* and return it."""
    # Lazy logging argument instead of eager %-formatting.
    LOG.debug("at create_action : %s", values)
    action_ref = models.Actions()
    action_ref.update(values)
    action_ref.save(_session(context))
    return action_ref
def action_delete(context, action_id):
    """Delete the action with id *action_id*; a missing row is a no-op."""
    result = (model_query(context, models.Actions).
              filter_by(id=action_id).first())
    if result:
        result.delete()
""" resource_actions """
""" get actions of a given resource """
def resource_actions_get(context, workload_policy_id, resource_id):
result = (model_query(context, models.Action_resource).
options(joinedload('resource')).options(joinedload('action')).
filter_by(resource_id=resource_id).
filter_by(workload_policy_id=workload_policy_id).all())
return result
# bring table with pointed resources and actions
def resource_actions_get_by_workload(context, workload_policy_id):
LOG.debug(" at: resource_actions_get_by_workload")
result = (model_query(context, models.Action_resource).
options(joinedload('resource')).options(joinedload('action')).
filter_by(workload_policy_id=workload_policy_id).all())
return result
def resource_actions_get_by_resource_id(context, resource_id):
result = (model_query(context, models.Action_resource).
options(joinedload('resource')).options(joinedload('action')).
filter_by(resource_id=resource_id).all())
return result
def resource_actions_create(context, values):
action_resource_ref = models.Action_resource()
action_resource_ref.update(values)
action_resource_ref.save(_session(context))
return action_resource_ref
def resource_actions_update(context, tuple_id, values):
    """Apply *values* to the Action_resource row with id *tuple_id*."""
    query = (model_query(context, models.Action_resource).
             filter_by(id=tuple_id))
    query.update(values)
    return query
def resource_actions_delete(context, tuple_id):
    """Delete the Action_resource row with id *tuple_id*.

    A missing row is now a no-op; the log argument is passed lazily.
    """
    LOG.debug("sqlalchemy/api: resource_actions_delete: %s", tuple_id)
    result = (model_query(context, models.Action_resource).
              filter_by(id=tuple_id).first())
    if result:
        result.delete()
def resource_actions_delete_all_by_policy_id(context, workload_policy_id):
    """Delete every Action_resource row tied to *workload_policy_id*."""
    rows = (model_query(context, models.Action_resource).
            filter_by(workload_policy_id=workload_policy_id).all())
    # Iterating an empty result is simply a no-op.
    for row in rows:
        row.delete()
""" workload policy """
def workload_policy_create(context, values):
workload_policy_ref = models.Workload_policies()
workload_policy_ref.update(values)
workload_policy_ref.save(_session(context))
return workload_policy_ref
def workload_policy_delete(context, workload_policy_id):
    """Delete a workload policy and all of its dependent rows.

    Removes, in order: the action executions of every policy execution,
    the policy executions themselves, the policy's resource/action
    tuples, and finally the Workload_policies row.
    """
    # db delete of child table entries
    policy_execs = workload_policy_excution_get_by_workload(
        context, workload_policy_id)
    for policy_exec in policy_execs:
        exec_id = policy_exec['id']  # renamed: don't shadow builtin id()
        LOG.debug("workload_policy_delete/ workload_policy_exec_id %s ",
                  exec_id)
        action_excution_delete_all_by_policy_exec(context, exec_id)
        workload_policy_execution_delete(context, exec_id)
    resource_actions_delete_all_by_policy_id(context, workload_policy_id)
    result = (model_query(context, models.Workload_policies).
              filter_by(id=workload_policy_id).first())
    LOG.debug("workload_policy_delete/ workload_policy_id %s ",
              workload_policy_id)
    LOG.debug("workload_policy_delete / delete result %s", result)
    # finally delete the workload_policy itself; guard a missing row.
    if result:
        result.delete()
def workload_policy_get_all(context, tenant_id):
    """List a tenant's non-deleted workload policies."""
    # Lazy logging argument instead of eager %-formatting.
    LOG.debug("workload_policy_get_all . tenant_id = %s", tenant_id)
    result = (model_query(context, models.Workload_policies).
              filter_by(tenant_id=tenant_id).filter_by(deleted_at=None).all())
    return result
def workload_policy_get(context, workload_policy_id):
    """Fetch one non-deleted workload policy by id (None when absent)."""
    return (model_query(context, models.Workload_policies).
            filter_by(id=workload_policy_id).
            filter_by(deleted_at=None).first())
""" workload_policy_execution """
def workload_policy_excution_create(context, values):
workload_policy_execution_ref = models.Workload_policy_execution()
workload_policy_execution_ref.update(values)
workload_policy_execution_ref.save(_session(context))
return workload_policy_execution_ref
def workload_policy_excution_get_by_workload(context, workload_policy_id):
result = (model_query(context, models.Workload_policy_execution).filter_by(
workload_policy_id=workload_policy_id)).\
order_by(models.Workload_policy_execution.created_at.desc()).\
all()
return result
def workload_policy_excution_get(context, policy_execution_id):
result = (model_query(context, models.Workload_policy_execution).
filter_by(id=policy_execution_id).all())
return result
def workload_policy_execution_delete(context, workload_policy_exec_id):
    """Delete one Workload_policy_execution row; missing row is a no-op."""
    row = (model_query(context, models.Workload_policy_execution).
           filter_by(id=workload_policy_exec_id).first())
    if row:
        row.delete()
def workload_policy_execution_set_status(context,
                                         workload_policy_exec_id,
                                         policy_status):
    """Set a policy execution row's status and persist the change."""
    ref = (model_query(context, models.Workload_policy_execution).
           filter_by(id=workload_policy_exec_id).first())
    ref.update({'status': policy_status})
    ref.save(_session(context))
""" action execution """
def action_excution_create(context, values):
action_execution_ref = models.Action_execution()
action_execution_ref.update(values)
action_execution_ref.save(_session(context))
return action_execution_ref
def action_excution_update(context, workload_action_excution_id, resource_id,
                           action_id, values):
    """Apply *values* to the matching Action_execution rows."""
    # The previous code called update_and_save() on the *list* returned
    # by .all(), which cannot work; update through the query instead
    # (mirrors resource_actions_update()).
    query = (model_query(context, models.Action_execution).
             filter_by(id=workload_action_excution_id).
             filter_by(resource_id=resource_id).
             filter_by(action_id=action_id))
    query.update(values)
def action_excution_get_by_workload(context, policy_excution_id):
    """List a policy execution's action executions, oldest first."""
    query = (model_query(context, models.Action_execution).
             filter_by(workload_policy_execution_id=policy_excution_id).
             options(joinedload('resource')).
             options(joinedload('action')))
    return query.order_by(models.Action_execution.created_at).all()


def action_excution_get(context, workload_action_excution_id,
                        resource_id, action_id):
    """Return the action executions matching all three ids (a list)."""
    return (model_query(context, models.Action_execution).
            filter_by(id=workload_action_excution_id).
            filter_by(resource_id=resource_id).
            filter_by(action_id=action_id).all())
def action_excution_delete_all_by_policy_exec(context,
                                              workload_policy_execution_id):
    """Delete every Action_execution row of one policy execution."""
    rows = (model_query(context, models.Action_execution).
            filter_by(
                workload_policy_execution_id=workload_policy_execution_id).
            all())
    # An empty result simply means nothing to delete.
    for row in rows:
        row.delete()
| {
"content_hash": "f6e87c4f47081e5ee686860ed21fc162",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 79,
"avg_line_length": 31.568292682926828,
"alnum_prop": 0.6549486208761492,
"repo_name": "os-cloud-storage/openstack-workload-disaster-recovery",
"id": "cc18eb5233412d5bfdc86a8aeeb34d5057198688",
"size": "12988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dragon/db/sqlalchemy/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4930"
},
{
"name": "Python",
"bytes": "758400"
},
{
"name": "Shell",
"bytes": "24692"
}
],
"symlink_target": ""
} |
import argparse, sys, datetime
from menagerie.formatting import tsv
from menagerie.decorators import memoized
from mwutil import api
from mwutil.lib import title
from . import database
# Output column order for the TSV writer in run().
HEADERS = [
    'wiki_db',
    'user_id',
    'return_namespace',
    'return_title'
]
def read_user_returns(f):
    """Wrap *f* in a TSV reader.

    NOTE(review): when *f* is an interactive TTY this implicitly returns
    None (no reader) rather than raising -- confirm that is intended.
    """
    if not f.isatty():
        return tsv.Reader(f)
def main():
    """Parse command-line arguments and dispatch to run()."""
    parser = argparse.ArgumentParser(
        description="""
        Splits the returnTo field in ServerSideAccountCreation into a
        namespace ID and normalized page title.
        """,
        conflict_handler="resolve"
    )
    parser.add_argument(
        '--user-returns',
        help="The path to a file containing user returns",
        type=lambda path: read_user_returns(open(path)),
        default=read_user_returns(sys.stdin)
    )
    parsed = parser.parse_args()
    run(parsed.user_returns)
def run(user_returns):
    """Write one wiki/user/namespace/title TSV row per user return.

    Fixes: the original called the undefined name ``get_namespace`` --
    the memoized helper defined below is ``get_namespaces``.  The local
    is also renamed so it no longer shadows the imported ``title``
    module.
    """
    writer = tsv.Writer(sys.stdout, headers=HEADERS)
    for row in user_returns:
        namespaces = get_namespaces(row.wiki)
        if row.event_returnTo is not None:
            ns_id, page_title = namespaces.parse(row.event_returnTo)
        else:
            ns_id = None
            page_title = None
        writer.write([row.wiki, row.event_userId, ns_id, page_title])
@memoized
def get_namespaces(wiki):
    """Fetch the Namespaces lookup for *wiki* (memoized per wiki).

    NOTE(review): assumes the first two characters of *wiki* are a
    Wikipedia language code (e.g. "enwiki" -> en.wikipedia.org); other
    projects/dbs would resolve to the wrong API URL -- confirm.
    """
    prefix = wiki[:2]
    url = "https://{0}.wikipedia.org/w/api.php".format(prefix)
    session = api.Session(url)
    si_doc = session.site_info.query(properties={"namespaces", "namespacealiases"})
    return title.Namespaces.from_site_info(si_doc)
| {
"content_hash": "0a8329cb4fd040ba2b5646e2db10027d",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 80,
"avg_line_length": 21.424242424242426,
"alnum_prop": 0.7093352192362093,
"repo_name": "halfak/Measuring-the-impact-of-GettingStarted",
"id": "a4c74ae8e9e26b800134b3eb226e2095c851c0ab",
"size": "1414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gs/extract_user_return.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9806"
},
{
"name": "R",
"bytes": "23254"
},
{
"name": "Shell",
"bytes": "1121"
}
],
"symlink_target": ""
} |
import pytest
from aoc2019.day22 import shuffle
# Shuffle instruction samples; paired element-wise with CORRECT_SHUFFLES.
SAMPLE_INSTRUCTIONS = [
    """deal with increment 7
deal into new stack
deal into new stack""",
    """cut 6
deal with increment 7
deal into new stack""",
    """deal with increment 7
deal with increment 9
cut -2""",
    """deal into new stack
cut -2
deal with increment 7
cut 8
cut -4
deal with increment 7
cut 3
deal with increment 9
deal with increment 3
cut -1""",
]
# Expected deck orderings (10 cards) for each SAMPLE_INSTRUCTIONS entry.
CORRECT_SHUFFLES = [
    "0 3 6 9 2 5 8 1 4 7",
    "3 0 7 4 1 8 5 2 9 6",
    "6 3 0 7 4 1 8 5 2 9",
    "9 2 5 8 1 4 7 0 3 6",
]
@pytest.mark.parametrize('instructions,correct', zip(SAMPLE_INSTRUCTIONS, CORRECT_SHUFFLES))
def test_shuffle(instructions, correct):
    """Each sample spec must reproduce its documented 10-card ordering."""
    steps = [line.strip() for line in instructions.splitlines()]
    expected = [int(token) for token in correct.split(" ")]
    assert shuffle(steps, 10) == expected
| {
"content_hash": "bd42e26ecfcdf2c2eee0a28315b73620",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 92,
"avg_line_length": 21.953488372093023,
"alnum_prop": 0.6207627118644068,
"repo_name": "bertptrs/adventofcode",
"id": "78866a72d69e28f624a41c4e3ce468f73dfa1877",
"size": "944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2019/tests/test_day22.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "531"
},
{
"name": "C",
"bytes": "1729"
},
{
"name": "C#",
"bytes": "2880"
},
{
"name": "C++",
"bytes": "449"
},
{
"name": "Clojure",
"bytes": "2454"
},
{
"name": "CoffeeScript",
"bytes": "1765"
},
{
"name": "Go",
"bytes": "856"
},
{
"name": "Groovy",
"bytes": "1457"
},
{
"name": "Haskell",
"bytes": "603"
},
{
"name": "Java",
"bytes": "3238"
},
{
"name": "JavaScript",
"bytes": "1210"
},
{
"name": "Julia",
"bytes": "1144"
},
{
"name": "Kotlin",
"bytes": "1589"
},
{
"name": "Lex",
"bytes": "310"
},
{
"name": "Lua",
"bytes": "2480"
},
{
"name": "MATLAB",
"bytes": "646"
},
{
"name": "Makefile",
"bytes": "983"
},
{
"name": "PHP",
"bytes": "622"
},
{
"name": "Perl",
"bytes": "589"
},
{
"name": "Python",
"bytes": "92178"
},
{
"name": "R",
"bytes": "560"
},
{
"name": "Ruby",
"bytes": "738"
},
{
"name": "Rust",
"bytes": "372899"
},
{
"name": "Scala",
"bytes": "776"
},
{
"name": "Shell",
"bytes": "1957"
},
{
"name": "Swift",
"bytes": "337"
}
],
"symlink_target": ""
} |
"""
Merge two BST.
Tree1: 2 -> 1,3
Tree2: 7 -> 6,8
Approach:
1. Traverse in-order both the tree.
2. Merge the in-order Traverse
3. Ceate the BST from the merge tree
"""
from collections import deque
class Node(object):
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
def in_order_traverse(root, arr):
    """Append the in-order values of *root* to *arr* and return *arr*.

    Fix: the empty-tree case previously returned None; it now returns
    *arr*, so a top-level call on an empty tree yields a list like every
    other call.
    """
    if root is None:
        return arr
    in_order_traverse(root.left, arr)
    arr.append(root.val)
    in_order_traverse(root.right, arr)
    return arr
def merge_array(arr1, arr2):
    """Merge two sorted lists into one sorted list (ties favour arr2)."""
    merged = []
    i = j = 0
    while i < len(arr1) and j < len(arr2):
        if arr1[i] < arr2[j]:
            merged.append(arr1[i])
            i += 1
        else:
            merged.append(arr2[j])
            j += 1
    # One of the two slices below is empty; the other holds the tail.
    merged.extend(arr1[i:])
    merged.extend(arr2[j:])
    return merged
def create_tree(list, start, end):
    """Build a balanced BST from the sorted slice ``list[start:end + 1]``.

    Returns None for an empty range (start > end).
    NOTE(review): the first parameter shadows the builtin ``list``; the
    name is kept unchanged here to preserve the call signature.
    """
    if start > end:
        return
    # Midpoint element becomes the root of this subtree.
    mid = start + (end - start) // 2
    left_tree = create_tree(list, start, mid - 1)
    right_tree = create_tree(list, mid + 1, end)
    return Node(list[mid], left_tree, right_tree)
def level_order_traverse(root):
    """Print the tree breadth-first, one line of values per level."""
    if root is None:
        return
    queue = deque()
    queue.append(root)
    # current_level: nodes remaining on the level being printed;
    # next_level: children queued for the following level.
    current_level = 1
    next_level = 0
    while(queue):
        current_node = queue.popleft()
        current_level -= 1
        print(current_node.val, end = " ")
        if current_node.left is not None:
            queue.append(current_node.left)
            next_level += 1
        if current_node.right is not None:
            queue.append(current_node.right)
            next_level += 1
        # Level exhausted: start counting the next one and break the line.
        if current_level == 0:
            current_level = next_level
            next_level = 0
            print()
# Demo: merge two small BSTs and print the balanced result level by level.
tree1 = Node(2,Node(1),Node(3))
tree2 = Node(7,Node(6),Node(8))
in_order_arr1 = in_order_traverse(tree1, [])
print(in_order_arr1)
in_order_arr2 = in_order_traverse(tree2, [])
print(in_order_arr2)
merge_arr = merge_array(in_order_arr1,in_order_arr2)
print(merge_arr)
tree = create_tree(merge_arr,0 , len(merge_arr) - 1)
level_order_traverse(tree)
| {
"content_hash": "f9fb43254254d9c4d4285fcb1c3a9d51",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 53,
"avg_line_length": 23.395833333333332,
"alnum_prop": 0.568566340160285,
"repo_name": "bkpathak/HackerRank-Problems",
"id": "1a19b0c44c7a185a187354361de4155e7057a991",
"size": "2246",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/tree/merge_bst.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "48269"
},
{
"name": "Python",
"bytes": "168138"
}
],
"symlink_target": ""
} |
"""
Master configuration file for Evennia.
NOTE: NO MODIFICATIONS SHOULD BE MADE TO THIS FILE!
All settings changes should be done by copy-pasting the variable and
its value to <gamedir>/conf/settings.py.
Hint: Don't copy&paste over more from this file than you actually want
to change. Anything you don't copy&paste will thus retain its default
value - which may change as Evennia is developed. This way you can
always be sure of what you have changed and what is default behaviour.
"""
from builtins import range
import os
import sys
######################################################################
# Evennia base server config
######################################################################
# This is the name of your game. Make it catchy!
SERVERNAME = "Evennia"
# Activate telnet service
TELNET_ENABLED = True
# A list of ports the Evennia telnet server listens on Can be one or many.
TELNET_PORTS = [4000]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
TELNET_INTERFACES = ['0.0.0.0']
# OOB (out-of-band) telnet communication allows Evennia to communicate
# special commands and data with enabled Telnet clients. This is used
# to create custom client interfaces over a telnet connection. To make
# full use of OOB, you need to prepare functions to handle the data
# server-side (see OOB_FUNC_MODULE). TELNET_ENABLED is required for this
# to work.
TELNET_OOB_ENABLED = False
# Start the evennia django+twisted webserver so you can
# browse the evennia website and the admin interface
# (Obs - further web configuration can be found below
# in the section 'Config for Django web features')
WEBSERVER_ENABLED = True
# This is a security setting protecting against host poisoning
# attacks. It defaults to allowing all. In production, make
# sure to change this to your actual host addresses/IPs.
ALLOWED_HOSTS = ["*"]
# The webserver sits behind a Portal proxy. This is a list
# of tuples (proxyport,serverport) used. The proxyports are what
# the Portal proxy presents to the world. The serverports are
# the internal ports the proxy uses to forward data to the Server-side
# webserver (these should not be publicly open)
WEBSERVER_PORTS = [(8000, 5001)]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSERVER_INTERFACES = ['0.0.0.0']
# IP addresses that may talk to the server in a reverse proxy configuration,
# like NginX.
UPSTREAM_IPS = ['127.0.0.1']
# The webserver uses threadpool for handling requests. This will scale
# with server load. Set the minimum and maximum number of threads it
# may use as (min, max) (must be > 0)
WEBSERVER_THREADPOOL_LIMITS = (1, 20)
# Start the evennia webclient. This requires the webserver to be running and
# offers the fallback ajax-based webclient backbone for browsers not supporting
# the websocket one.
WEBCLIENT_ENABLED = True
# Activate Websocket support for modern browsers. If this is on, the
# default webclient will use this and only use the ajax version of the browser
# is too old to support websockets. Requires WEBCLIENT_ENABLED.
WEBSOCKET_CLIENT_ENABLED = True
# Server-side websocket port to open for the webclient.
WEBSOCKET_CLIENT_PORT = 8001
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSOCKET_CLIENT_INTERFACE = '0.0.0.0'
# Actual URL for webclient component to reach the websocket. You only need
# to set this if you know you need it, like using some sort of proxy setup.
# If given it must be on the form "ws://hostname" (WEBSOCKET_CLIENT_PORT will
# be automatically appended). If left at None, the client will itself
# figure out this url based on the server's hostname.
WEBSOCKET_CLIENT_URL = None
# Activate SSH protocol communication (SecureShell)
SSH_ENABLED = False
# Ports to use for SSH
SSH_PORTS = [8022]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSH_INTERFACES = ['0.0.0.0']
# Activate SSL protocol (SecureSocketLibrary)
SSL_ENABLED = False
# Ports to use for SSL
SSL_PORTS = [4001]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSL_INTERFACES = ['0.0.0.0']
# Activate custom websocket support. This is unrelated to the websocket client!
# This is intended to be used by optional third-party connections/applications
# or clients.
WEBSOCKET_ENABLED = False
# Ports to use for Websockets
WEBSOCKET_PORTS = [8021]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSOCKET_INTERFACES = ['0.0.0.0']
# This determine's whether Evennia's custom admin page is used, or if the
# standard Django admin is used.
EVENNIA_ADMIN = True
# Path to the lib directory containing the bulk of the codebase's code.
EVENNIA_DIR = os.path.dirname(os.path.abspath(__file__))
# Path to the game directory (containing the database file if using sqlite).
# Readability fix: the original condition was the convoluted
# `if sys.argv[1] == 'test' if len(sys.argv)>1 else False:` -- this is
# the equivalent short-circuit form.
if len(sys.argv) > 1 and sys.argv[1] == 'test':
    # unittesting mode
    GAME_DIR = os.getcwd()
else:
    # Fallback location (will be replaced by the actual game dir at runtime)
    GAME_DIR = os.path.join(EVENNIA_DIR, 'game_template')
# Place to put log files
LOG_DIR = os.path.join(GAME_DIR, 'server', 'logs')
SERVER_LOG_FILE = os.path.join(LOG_DIR, 'server.log')
PORTAL_LOG_FILE = os.path.join(LOG_DIR, 'portal.log')
HTTP_LOG_FILE = os.path.join(LOG_DIR, 'http_requests.log')
# Rotate log files when server and/or portal stops. This will keep log
# file sizes down. Turn off to get ever growing log files and never
# loose log info.
CYCLE_LOGFILES = True
# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/8.0/interactive/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = 'UTC'
# Activate time zone in datetimes
USE_TZ = True
# Authentication backends. This is the code used to authenticate a user.
AUTHENTICATION_BACKENDS = (
'evennia.web.utils.backends.CaseInsensitiveModelBackend',)
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
LANGUAGE_CODE = 'en-us'
# How long time (in seconds) a user may idle before being logged
# out. This can be set as big as desired. A user may avoid being
# thrown off by sending the empty system command 'idle' to the server
# at regular intervals. Set <=0 to deactivate idle timeout completely.
IDLE_TIMEOUT = 3600
# The idle command can be sent to keep your session active without actually
# having to spam normal commands regularly. It gives no feedback, only updates
# the idle timer.
IDLE_COMMAND = "idle"
# The set of encodings tried. A Player object may set an attribute "encoding" on
# itself to match the client used. If not set, or wrong encoding is
# given, this list is tried, in order, aborting on the first match.
# Add sets for languages/regions your players are likely to use.
# (see http://en.wikipedia.org/wiki/Character_encoding)
ENCODINGS = ["utf-8", "latin-1", "ISO-8859-1"]
# Regular expression applied to all output to a given session in order
# to strip away characters (usually various forms of decorations) for the benefit
# of users with screen readers. Note that ANSI/MXP doesn't need to
# be stripped this way, that is handled automatically.
SCREENREADER_REGEX_STRIP = r"\+-+|\+$|\+~|--+|~~+|==+"
# The game server opens an AMP port so that the portal can
# communicate with it. This is an internal functionality of Evennia, usually
# operating between two processes on the same machine. You usually don't need to
# change this unless you cannot use the default AMP port/host for
# whatever reason.
AMP_HOST = 'localhost'
AMP_PORT = 5000
AMP_INTERFACE = '127.0.0.1'
# Database objects are cached in what is known as the idmapper. The idmapper
# caching results in a massive speedup of the server (since it dramatically
# limits the number of database accesses needed) and also allows for
# storing temporary data on objects. It is however also the main memory
# consumer of Evennia. With this setting the cache can be capped and
# flushed when it reaches a certain size. Minimum is 50 MB but it is
# not recommended to set this to less than 100 MB for a distribution
# system.
# Empirically, N_objects_in_cache ~ ((RMEM - 35) / 0.0157):
# mem(MB) | objs in cache || mem(MB) | objs in cache
# 50 | ~1000 || 800 | ~49 000
# 100 | ~4000 || 1200 | ~75 000
# 200 | ~10 000 || 1600 | ~100 000
# 500 | ~30 000 || 2000 | ~125 000
# Note that the estimated memory usage is not exact (and the cap is only
# checked every 5 minutes), so err on the side of caution if
# running on a server with limited memory. Also note that Python
# will not necessarily return the memory to the OS when the idmapper
# flashes (the memory will be freed and made available to the Python
# process only). How many objects need to be in memory at any given
# time depends very much on your game so some experimentation may
# be necessary (use @server to see how many objects are in the idmapper
# cache at any time). Setting this to None disables the cache cap.
IDMAPPER_CACHE_MAXSIZE = 200 # (MB)
# This determines how many connections per second the Portal should
# accept, as a DoS countermeasure. If the rate exceeds this number, incoming
# connections will be queued to this rate, so none will be lost.
# Must be set to a value > 0.
MAX_CONNECTION_RATE = 2
# Determine how many commands per second a given Session is allowed
# to send to the Portal via a connected protocol. Too high rate will
# drop the command and echo a warning. Note that this will also cap
# OOB messages so don't set it too low if you expect a lot of events
# from the client! To turn the limiter off, set to <= 0.
MAX_COMMAND_RATE = 80
# The warning echoed back to users when they exceed MAX_COMMAND_RATE.
COMMAND_RATE_WARNING = ("You entered commands too fast. "
                        "Wait a moment and try again.")
######################################################################
# Evennia Database config
######################################################################
# Database config syntax:
# ENGINE - path to the the database backend. Possible choices are:
# 'django.db.backends.sqlite3', (default)
# 'django.db.backends.mysql',
# 'django.db.backends.'postgresql_psycopg2',
# 'django.db.backends.oracle' (untested).
# NAME - database name, or path to the db file for sqlite3
# USER - db admin (unused in sqlite3)
# PASSWORD - db admin password (unused in sqlite3)
# HOST - empty string is localhost (unused in sqlite3)
# PORT - empty string defaults to localhost (unused in sqlite3)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(GAME_DIR, 'server', 'evennia.db3'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}}
# How long the django-database connection should be kept open, in seconds.
# If you get errors about the database having gone away after long idle
# periods, shorten this value (e.g. MySQL defaults to a timeout of 8 hrs)
CONN_MAX_AGE = 3600 * 7
######################################################################
# Evennia pluggable modules
######################################################################
# Plugin modules extend Evennia in various ways. In the cases with no
# existing default, there are examples of many of these modules
# in contrib/examples.
# The command parser module to use. See the default module for which
# functions it must implement
COMMAND_PARSER = "evennia.commands.cmdparser.cmdparser"
# On a multi-match when search objects or commands, the user has the
# ability to search again with an index marker that differentiates
# the results. If multiple "box" objects are found, they can by
# default use 1-box, 2-box etc to refine the search. Below you
# can change the index separator character used.
SEARCH_MULTIMATCH_SEPARATOR = '-'
# The handler that outputs errors when using any API-level search
# (not manager methods). This function should correctly report errors
# both for command- and object-searches.
SEARCH_AT_RESULT = "evennia.utils.utils.at_search_result"
# The module holding text strings for the connection screen.
# This module should contain one or more variables
# with strings defining the look of the screen.
CONNECTION_SCREEN_MODULE = "server.conf.connection_screens"
# An optional module that, if existing, must hold a function
# named at_initial_setup(). This hook method can be used to customize
# the server's initial setup sequence (the very first startup of the system).
# The check will fail quietly if module doesn't exist or fails to load.
AT_INITIAL_SETUP_HOOK_MODULE = "server.conf.at_initial_setup"
# Module containing your custom at_server_start(), at_server_reload() and
# at_server_stop() methods. These methods will be called every time
# the server starts, reloads and resets/stops respectively.
AT_SERVER_STARTSTOP_MODULE = "server.conf.at_server_startstop"
# List of one or more module paths to modules containing a function start_
# plugin_services(application). This module will be called with the main
# Evennia Server application when the Server is initiated.
# It will be called last in the startup sequence.
SERVER_SERVICES_PLUGIN_MODULES = ["server.conf.server_services_plugins"]
# List of one or more module paths to modules containing a function
# start_plugin_services(application). This module will be called with the
# main Evennia Portal application when the Portal is initiated.
# It will be called last in the startup sequence.
PORTAL_SERVICES_PLUGIN_MODULES = ["server.conf.portal_services_plugins"]
# Module holding MSSP meta data. This is used by MUD-crawlers to determine
# what type of game you are running, how many players you have etc.
MSSP_META_MODULE = "server.conf.mssp"
# Tuple of modules implementing lock functions. All callable functions
# inside these modules will be available as lock functions.
LOCK_FUNC_MODULES = ("evennia.locks.lockfuncs", "server.conf.lockfuncs",)
# Module holding OOB (Out of Band) hook objects. This allows for customization
# and expansion of which hooks OOB protocols are allowed to call on the server
# protocols for attaching tracker hooks for when various object fields change
OOB_PLUGIN_MODULES = ["evennia.server.oob_cmds", "server.conf.oobfuncs"]
# Module holding settings/actions for the dummyrunner program (see the
# dummyrunner for more information)
DUMMYRUNNER_SETTINGS_MODULE = "evennia.server.profiling.dummyrunner_settings"
######################################################################
# Default command sets
######################################################################
# Note that with the exception of the unloggedin set (which is not
# stored anywhere in the database), changing these paths will only affect
# NEW created characters/objects, not those already in play. So if you plan to
# change this, it's recommended you do it before having created a lot of objects
# (or simply reset the database after the change for simplicity).
# Command set used on session before player has logged in
CMDSET_UNLOGGEDIN = "commands.default_cmdsets.UnloggedinCmdSet"
# Command set used on the logged-in session
CMDSET_SESSION = "commands.default_cmdsets.SessionCmdSet"
# Default set for logged in player with characters (fallback)
CMDSET_CHARACTER = "commands.default_cmdsets.CharacterCmdSet"
# Command set for players without a character (ooc)
CMDSET_PLAYER = "commands.default_cmdsets.PlayerCmdSet"
# Location to search for cmdsets if full path not given
CMDSET_PATHS = ["commands", "evennia", "contribs"]
######################################################################
# Typeclasses and other paths
######################################################################
# Server-side session class used.
SERVER_SESSION_CLASS = "evennia.server.serversession.ServerSession"
# These are paths that will be prefixed to the paths given if the
# immediately entered path fails to find a typeclass. It allows for
# shorter input strings. They must either base off the game directory
# or start from the evennia library.
TYPECLASS_PATHS = ["typeclasses", "evennia", "evennia.contrib", "evennia.contrib.tutorial_examples"]
# Typeclass for player objects (linked to a character) (fallback)
BASE_PLAYER_TYPECLASS = "typeclasses.players.Player"
# Typeclass and base for all objects (fallback)
BASE_OBJECT_TYPECLASS = "typeclasses.objects.Object"
# Typeclass for character objects linked to a player (fallback)
BASE_CHARACTER_TYPECLASS = "typeclasses.characters.Character"
# Typeclass for rooms (fallback)
BASE_ROOM_TYPECLASS = "typeclasses.rooms.Room"
# Typeclass for Exit objects (fallback).
BASE_EXIT_TYPECLASS = "typeclasses.exits.Exit"
# Typeclass for Channel (fallback).
BASE_CHANNEL_TYPECLASS = "typeclasses.channels.Channel"
# Typeclass for Scripts (fallback). You usually don't need to change this
# but create custom variations of scripts on a per-case basis instead.
BASE_SCRIPT_TYPECLASS = "typeclasses.scripts.Script"
# The default home location used for all objects. This is used as a
# fallback if an object's normal home location is deleted. Default
# is Limbo (#2).
DEFAULT_HOME = "#2"
# The start position for new characters. Default is Limbo (#2).
# MULTISESSION_MODE = 0, 1 - used by default unloggedin create command
# MULTISESSION_MODE = 2,3 - used by default character_create command
START_LOCATION = "#2"
# Lookups of Attributes, Tags, Nicks, Aliases can be aggressively
# cached to avoid repeated database hits. This often gives noticeable
# performance gains since they are called so often. Drawback is that
# if you are accessing the database from multiple processes (such as
# from a website -not- running Evennia's own webserver) data may go
# out of sync between the processes. Keep on unless you face such
# issues.
TYPECLASS_AGGRESSIVE_CACHE = True
######################################################################
# Batch processors
######################################################################
# Python path to a directory to be searched for batch scripts
# for the batch processors (.ev and/or .py files).
BASE_BATCHPROCESS_PATHS = ['world', 'evennia.contrib', 'evennia.contrib.tutorial_examples']
######################################################################
# Game Time setup
######################################################################
# You don't actually have to use this, but it affects the routines in
# evennia.utils.gametime.py and allows for a convenient measure to
# determine the current in-game time. You can of course interpret
# "week", "month" etc as your own in-game time units as desired.
# The time factor dictates if the game world runs faster (timefactor>1)
# or slower (timefactor<1) than the real world.
TIME_FACTOR = 2.0
# These measures might or might not make sense to your game world.
TIME_SEC_PER_MIN = 60
TIME_MIN_PER_HOUR = 60
TIME_HOUR_PER_DAY = 24
TIME_DAY_PER_WEEK = 7
TIME_WEEK_PER_MONTH = 4
TIME_MONTH_PER_YEAR = 12
######################################################################
# Inlinefunc
######################################################################
# Evennia supports inline function preprocessing. This allows
# users to supply {func() ... {/func in text, performing dynamic
# text formatting and manipulation on the fly. If disabled, such
# inline functions will not be parsed.
INLINEFUNC_ENABLED = False
# Only functions defined globally (and not starting with '_') in
# these modules will be considered valid inlinefuncs. The list
# is loaded from left-to-right, same-named functions will overload
INLINEFUNC_MODULES = ["evennia.utils.inlinefunc",
                      "evennia.utils.nested_inlinefuncs",
                      "server.conf.inlinefunc"]
######################################################################
# Default Player setup and access
######################################################################
# Different Multisession modes allow a player (=account) to connect to the
# game simultaneously with multiple clients (=sessions). In modes 0,1 there is
# only one character created to the same name as the account at first login.
# In modes 2,3 no default character will be created and the MAX_NR_CHARACTERS
# value (below) defines how many characters the default char_create command
# allows per player.
# 0 - single session, one player, one character, when a new session is
#     connected, the old one is disconnected
# 1 - multiple sessions, one player, one character, each session getting
#     the same data
# 2 - multiple sessions, one player, many characters, one session per
#     character (disconnects multiplets)
# 3 - like mode 2, except multiple sessions can puppet one character, each
#     session getting the same data.
MULTISESSION_MODE = 0
# The maximum number of characters allowed for MULTISESSION_MODE 2,3. This is
# checked by the default ooc char-creation command. Forced to 1 for
# MULTISESSION_MODE 0 and 1.
MAX_NR_CHARACTERS = 1
# The access hierarchy, in climbing order. A higher permission in the
# hierarchy includes access of all levels below it. Used by the perm()/pperm()
# lock functions.
PERMISSION_HIERARCHY = ["Guests",  # note: only used if GUEST_ENABLED=True
                        "Players",
                        "PlayerHelpers",
                        "Builders",
                        "Wizards",
                        "Immortals"]
# The default permission given to all new players
PERMISSION_PLAYER_DEFAULT = "Players"
# Default sizes for client window (in number of characters), if client
# is not supplying this on its own
CLIENT_DEFAULT_WIDTH = 78
CLIENT_DEFAULT_HEIGHT = 45  # telnet standard is 24 but does anyone use such
                            # low-res displays anymore?
######################################################################
# Guest accounts
######################################################################
# This enables guest logins, by default via "connect guest"
GUEST_ENABLED = False
# Typeclass for guest player objects (linked to a character)
BASE_GUEST_TYPECLASS = "typeclasses.players.Guest"
# The permission given to guests
PERMISSION_GUEST_DEFAULT = "Guests"
# The default home location used for guests.
GUEST_HOME = DEFAULT_HOME
# The start position used for guest characters.
GUEST_START_LOCATION = START_LOCATION
# The naming convention used for creating new guest
# players/characters. The size of this list also determines how many
# guests may be on the game at once. The default is a maximum of nine
# guests, named Guest1 through Guest9.
GUEST_LIST = ["Guest" + str(s+1) for s in range(9)]
######################################################################
# In-game Channels created from server start
######################################################################
# This is a list of global channels created by the
# initialization script the first time Evennia starts.
# The superuser (user #1) will be automatically subscribed
# to all channels in this list. Each channel is described by
# a dictionary keyed with the same keys valid as arguments
# to the evennia.create.create_channel() function.
# Note: Evennia will treat the first channel in this list as
# the general "public" channel and the second as the
# general "mud info" channel. Other channels beyond that
# are up to the admin to design and call appropriately.
DEFAULT_CHANNELS = [
    # public channel
    {"key": "Public",
     "aliases": ('ooc', 'pub'),
     "desc": "Public discussion",
     "locks": "control:perm(Wizards);listen:all();send:all()"},
    # connection/mud info
    {"key": "MudInfo",
     "aliases": "",
     "desc": "Connection log",
     "locks": "control:perm(Immortals);listen:perm(Wizards);send:false()"}
]
######################################################################
# External Channel connections
######################################################################
# Note: You do *not* have to make your MUD open to
# the public to use the external connections, they
# operate as long as you have an internet connection,
# just like stand-alone chat clients. IRC and IMC2
# requires that you have twisted.words installed.
# Evennia can connect to external IRC channels and
# echo what is said on the channel to IRC and vice
# versa. Obs - make sure the IRC network allows bots.
# When enabled, command @irc2chan will be available in-game
IRC_ENABLED = False
# RSS allows to connect RSS feeds (from forum updates, blogs etc) to
# an in-game channel. The channel will be updated when the rss feed
# updates. Use @rss2chan in game to connect if this setting is
# active. OBS: RSS support requires the python-feedparser package to
# be installed (through package manager or from the website
# http://code.google.com/p/feedparser/)
RSS_ENABLED = False
RSS_UPDATE_INTERVAL = 60*10  # 10 minutes
# IMC (Inter-MUD communication) allows to connect an Evennia channel
# to an IMC2 server. This lets them talk to people on other MUDs also
# using IMC. Evennia's IMC2 client was developed against MudByte's
# network. You must register your MUD on the network before you can
# use it, go to http://www.mudbytes.net/imc2-intermud-join-network.
# Choose 'Other unsupported IMC2 version' from the choices and
# enter your information there. You should enter the same 'short mud
# name' as your SERVERNAME above, then choose imc network server as
# well as client/server passwords same as below. When enabled, the
# command @imc2chan becomes available in-game and allows you to
# connect Evennia channels to IMC channels on the network. The Evennia
# discussion channel 'ievennia' is on server01.mudbytes.net:5000.
# NOTE - IMC2 is currently NOT FUNCTIONAL due to lack of testing means.
IMC2_ENABLED = False
IMC2_NETWORK = "server01.mudbytes.net"
IMC2_PORT = 5000  # this is the imc2 port, not on localhost
IMC2_CLIENT_PWD = ""
IMC2_SERVER_PWD = ""
######################################################################
# Django web features
######################################################################
# While DEBUG is False, show a regular server error page on the web
# stuff, email the traceback to the people in the ADMINS tuple
# below. If True, show a detailed traceback for the web
# browser to display. Note however that this will leak memory when
# active, so make sure to turn it off for a production server!
DEBUG = False
# While true, show "pretty" error messages for template syntax errors.
TEMPLATE_DEBUG = DEBUG
# Emails are sent to these people if the above DEBUG value is False. If you'd
# rather prefer nobody receives emails, leave this commented out or empty.
ADMINS = ()  # (('Your Name', 'your_email@domain.com'),)
# These guys get broken link notifications when SEND_BROKEN_LINK_EMAILS is True.
MANAGERS = ADMINS
# Absolute path to the directory that holds file uploads from web apps.
# Example: "/home/media/media.lawrence.com"
MEDIA_ROOT = os.path.join(GAME_DIR, "web", "media")
# It's safe to dis-regard this, as it's a Django feature we only half use as a
# dependency, not actually what it's primarily meant for.
SITE_ID = 1
# The age for sessions.
# Default: 1209600 (2 weeks, in seconds)
SESSION_COOKIE_AGE = 1209600
# Session cookie domain
# Default: None
SESSION_COOKIE_DOMAIN = None
# The name of the cookie to use for sessions.
# Default: 'sessionid'
SESSION_COOKIE_NAME = 'sessionid'
# Should the session expire when the browser closes?
# Default: False
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Where to find locales (no need to change this, most likely)
LOCALE_PATHS = [os.path.join(EVENNIA_DIR, "locale/")]
# This should be turned off unless you want to do tests with Django's
# development webserver (normally Evennia runs its own server)
SERVE_MEDIA = False
# The master urlconf file that contains all of the sub-branches to the
# applications. Change this to add your own URLs to the website.
ROOT_URLCONF = 'web.urls'  # src.web.urls?
# Where users are redirected after logging in via contrib.auth.login.
LOGIN_REDIRECT_URL = '/'
# Where to redirect users when using the @login_required decorator.
LOGIN_URL = '/accounts/login'
# Where to redirect users who wish to logout.
LOGOUT_URL = '/accounts/login'
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure
# to use a trailing slash. Django1.4+ will look for admin files under
# STATIC_URL/admin.
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(GAME_DIR, "web", "static")
# Directories from which static files will be gathered.
STATICFILES_DIRS = (
    os.path.join(GAME_DIR, "web", "static_overrides"),
    os.path.join(EVENNIA_DIR, "web", "static"),)
# Patterns of files in the static directories that should be collected
# but never served; keeps e.g. the readme file out of the output.
STATICFILES_IGNORE_PATTERNS = ('README.md',)
# The name of the currently selected web template. This corresponds to the
# directory names shown in the webtemplates directory.
ACTIVE_TEMPLATE = 'prosimii'
# We setup the location of the website template as well as the admin site.
# Earlier entries override later ones, so game-dir overrides take priority.
TEMPLATE_DIRS = (
    os.path.join(GAME_DIR, "web", "template_overrides", ACTIVE_TEMPLATE),
    os.path.join(GAME_DIR, "web", "template_overrides"),
    os.path.join(EVENNIA_DIR, "web", "templates", ACTIVE_TEMPLATE),
    os.path.join(EVENNIA_DIR, "web", "templates"),)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',)
# MiddleWare are semi-transparent extensions to Django's functionality.
# see http://www.djangoproject.com/documentation/middleware/ for a more detailed
# explanation.
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',  # 1.4?
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.admindocs.middleware.XViewMiddleware',
    'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',)
# Context processors define context variables, generally for the template
# system to use.
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.i18n',
    'django.core.context_processors.request',
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.media',
    'django.core.context_processors.debug',
    'evennia.web.utils.general_context.general_context',)
######################################################################
# Evennia components
######################################################################
# Global and Evennia-specific apps. This ties everything together so we can
# refer to app models and perform DB syncs.
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.sites',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.flatpages',
    'django.contrib.staticfiles',
    'evennia.utils.idmapper',
    'evennia.server',
    'evennia.typeclasses',
    'evennia.players',
    'evennia.objects',
    'evennia.comms',
    'evennia.help',
    'evennia.scripts',
    'evennia.web.webclient')
# The user profile extends the User object with more functionality;
# This should usually not be changed.
AUTH_USER_MODEL = "players.PlayerDB"
# Use a custom test runner that just tests Evennia-specific apps.
TEST_RUNNER = 'evennia.server.tests.EvenniaTestSuiteRunner'
######################################################################
# Django extensions
######################################################################
# Django extensions are useful third-party tools that are not
# always included in the default django distro. They are optional:
# silently skipped when the package is not installed.
try:
    import django_extensions
    INSTALLED_APPS = INSTALLED_APPS + ('django_extensions',)
except ImportError:
    pass
#######################################################################
# SECRET_KEY
#######################################################################
# This is the signing key for the cookies generated by Evennia's
# web interface.
#
# It is a fallback for the SECRET_KEY setting in settings.py, which
# is randomly seeded when settings.py is first created. If copying
# from here, make sure to change it!
SECRET_KEY = 'changeme!(*#&*($&*(#*(&SDFKJJKLS*(@#KJAS'
| {
"content_hash": "6dc5b0dd542636a2bbf47f5dc8903fc7",
"timestamp": "",
"source": "github",
"line_count": 681,
"max_line_length": 100,
"avg_line_length": 48.37444933920705,
"alnum_prop": 0.6892207752785113,
"repo_name": "shollen/evennia",
"id": "646cb8b641beb3e19d8c00fc32fb6129c4940bb0",
"size": "32943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evennia/settings_default.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19127"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "JavaScript",
"bytes": "22126"
},
{
"name": "Python",
"bytes": "2092384"
}
],
"symlink_target": ""
} |
import astropy.units as u
import numpy as np
import pytest
# Custom
from ....integrate import DOPRI853Integrator
from ....potential import (Hamiltonian, HernquistPotential, MilkyWayPotential,
ConstantRotatingFrame)
from ....units import galactic
from ...core import PhaseSpacePosition
# Project
from ..df import StreaklineStreamDF, FardalStreamDF, LagrangeCloudStreamDF
# DF classes under test and, in a parallel list, the extra kwargs each
# class needs at construction time.
_DF_CLASSES = [StreaklineStreamDF, FardalStreamDF, LagrangeCloudStreamDF]
_DF_KWARGS = [{}, {}, {'v_disp': 1*u.km/u.s}]
# Gravitational potentials the sampling tests are parametrized over.
_TEST_POTENTIALS = [HernquistPotential(m=1e12, c=5, units=galactic),
                    MilkyWayPotential()]
@pytest.mark.parametrize('DF,DF_kwargs', zip(_DF_CLASSES, _DF_KWARGS))
@pytest.mark.parametrize('pot', _TEST_POTENTIALS)
def test_init_sample(DF, DF_kwargs, pot):
    """Each DF class can be constructed and sampled in every arm mode."""
    ham = Hamiltonian(pot)
    parent_orbit = ham.integrate_orbit([10., 0, 0, 0, 0.2, 0], dt=1., n_steps=100)
    n_times = len(parent_orbit.t)
    # Default: both leading and trailing arms -> two particles per time step.
    dist = DF(**DF_kwargs)
    stream = dist.sample(parent_orbit, 1e4*u.Msun)
    assert len(stream.x) == 2 * n_times
    # Trailing arm only.
    dist = DF(lead=False, **DF_kwargs)
    stream = dist.sample(parent_orbit, 1e4*u.Msun)
    assert len(stream.x) == n_times
    # Leading arm only.
    dist = DF(trail=False, **DF_kwargs)
    stream = dist.sample(parent_orbit, 1e4*u.Msun)
    assert len(stream.x) == n_times
    # Fixed random states must give reproducible samples.
    dist_a = DF(random_state=np.random.RandomState(42), **DF_kwargs)
    stream_a = dist_a.sample(parent_orbit, 1e4*u.Msun)
    dist_b = DF(random_state=np.random.RandomState(42), **DF_kwargs)
    stream_b = dist_b.sample(parent_orbit, 1e4*u.Msun)
    assert u.allclose(stream_a.xyz, stream_b.xyz)
    assert u.allclose(stream_a.v_xyz, stream_b.v_xyz)
    assert len(stream_a.x) == 2 * n_times
@pytest.mark.parametrize('DF,DF_kwargs', zip(_DF_CLASSES, _DF_KWARGS))
def test_expected_failure(DF, DF_kwargs):
    """Disabling both the leading and trailing arm is invalid and must raise."""
    bad_kwargs = dict(lead=False, trail=False, **DF_kwargs)
    with pytest.raises(ValueError):
        DF(**bad_kwargs)
def test_rotating_frame():
    """Sampling must give the same stream in a static and a co-rotating frame."""
    DF = _DF_CLASSES[0]
    static_ham = Hamiltonian(_TEST_POTENTIALS[0])
    w0 = PhaseSpacePosition(pos=[10., 0, 0]*u.kpc,
                            vel=[0, 220, 0.]*u.km/u.s,
                            frame=static_ham.frame)
    integrate_kw = dict(w0=w0, dt=1, n_steps=100,
                        Integrator=DOPRI853Integrator)
    static_orbit = static_ham.integrate_orbit(**integrate_kw)
    rot_frame = ConstantRotatingFrame([0, 0, -40] * u.km/u.s/u.kpc,
                                      units=galactic)
    rotating_ham = Hamiltonian(_TEST_POTENTIALS[0],
                               frame=rot_frame)
    rotating_orbit = rotating_ham.integrate_orbit(**integrate_kw)
    # Sanity check: both orbits agree once mapped into the static frame.
    back_in_static = rotating_orbit.to_frame(static_ham.frame)
    assert u.allclose(back_in_static.xyz, static_orbit.xyz, atol=1e-13*u.kpc)
    assert u.allclose(back_in_static.v_xyz, static_orbit.v_xyz, atol=1e-13*u.km/u.s)
    # Sample the leading arm in each frame and compare in the static frame.
    static_df = DF(trail=False)
    static_sample = static_df.sample(static_orbit, 1e6*u.Msun)
    rotating_df = DF(trail=False)
    rotating_sample = rotating_df.sample(rotating_orbit, 1e6*u.Msun)
    mapped_sample = rotating_sample.to_frame(static_ham.frame,
                                             t=rotating_sample.release_time)
    assert u.allclose(static_sample.xyz, mapped_sample.xyz,
                      atol=1e-9*u.kpc)
    assert u.allclose(static_sample.v_xyz, mapped_sample.v_xyz,
                      atol=1e-9*u.kpc/u.Myr)
| {
"content_hash": "4a8a9866cbc10bd75f36403d1f7fdb5f",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 78,
"avg_line_length": 35.84615384615385,
"alnum_prop": 0.6232372777437155,
"repo_name": "adrn/gary",
"id": "60a7c5e510f35d6f2f6be1fe73c5090e72c349b2",
"size": "3276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gala/dynamics/mockstream/tests/test_df.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "61297"
},
{
"name": "C++",
"bytes": "7004"
},
{
"name": "Python",
"bytes": "523293"
}
],
"symlink_target": ""
} |
from django.forms import ModelForm
from django import forms
from modulos.ventas.models import Ventas
class FormVentas(ModelForm):
    """ModelForm for creating and editing ``Ventas`` records."""
    class Meta:
        model = Ventas
        # Django >= 1.8 raises ImproperlyConfigured for a ModelForm whose
        # Meta declares neither ``fields`` nor ``exclude``; '__all__'
        # preserves the older implicit "every model field" behaviour.
        fields = '__all__'
| {
"content_hash": "2c5d26c6388a6f6146c5247b2bb42515",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 40,
"avg_line_length": 19.11111111111111,
"alnum_prop": 0.7558139534883721,
"repo_name": "urkh/erp",
"id": "f3d38760fdcdfe543bbf1f76393810c75e6662c9",
"size": "172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modulos/ventas/forms.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "367922"
},
{
"name": "JavaScript",
"bytes": "1780907"
},
{
"name": "PHP",
"bytes": "170634"
},
{
"name": "Python",
"bytes": "14758"
}
],
"symlink_target": ""
} |
"""API test module."""
#
# (C) Pywikibot team, 2007-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
import datetime
import types
import pywikibot.data.api as api
import pywikibot.family
import pywikibot.login
import pywikibot.page
import pywikibot.site
from pywikibot.tools import (
MediaWikiVersion,
PY2,
UnicodeType,
)
from tests.aspects import (
unittest,
TestCase,
DefaultSiteTestCase,
DefaultDrySiteTestCase,
)
from tests.utils import allowed_failure, FakeLoginManager, PatchedHttp
if not PY2:
from urllib.parse import unquote_to_bytes
else:
from urllib import unquote_plus as unquote_to_bytes
class TestAPIMWException(DefaultSiteTestCase):
    """Test raising an APIMWException."""
    # Canned API error payload returned in place of a real HTTP response.
    data = {'error': {'code': 'internal_api_error_fake',
                      'info': 'Fake error message'},
            'servedby': 'unittest',
            }
    def _dummy_request(self, **kwargs):
        """Fake the http layer: validate the request, then return self.data.

        Returning False tells PatchedHttp to perform a real request
        instead. ``self.assert_parameters`` is set by the individual
        test methods before this hook runs.
        """
        self.assertIn('body', kwargs)
        self.assertIn('uri', kwargs)
        self.assertIn('site', kwargs)
        if kwargs['body'] is None:
            # use uri and remove script path
            parameters = kwargs['uri']
            prefix = kwargs['site'].scriptpath() + '/api.php?'
            self.assertEqual(prefix, parameters[:len(prefix)])
            parameters = parameters[len(prefix):]
        else:
            parameters = kwargs['body']
        parameters = parameters.encode('ascii') # it should be bytes anyway
        # Extract parameter data from the body, it's ugly but allows us
        # to verify that we actually test the right request
        parameters = [p.split(b'=', 1) for p in parameters.split(b'&')]
        keys = [p[0].decode('ascii') for p in parameters]
        values = [unquote_to_bytes(p[1]) for p in parameters]
        values = [v.decode(kwargs['site'].encoding()) for v in values]
        # '+' encodes a space in application/x-www-form-urlencoded bodies.
        values = [v.replace('+', ' ') for v in values]
        # Multi-valued API parameters are '|'-separated; compare as sets.
        values = [set(v.split('|')) for v in values]
        parameters = dict(zip(keys, values))
        if 'fake' not in parameters:
            return False # do an actual request
        if self.assert_parameters:
            for param, value in self.assert_parameters.items():
                self.assertIn(param, parameters)
                if value is not None:
                    if isinstance(value, UnicodeType):
                        value = value.split('|')
                    self.assertLessEqual(set(value), parameters[param])
        return self.data
    def test_API_error(self):
        """Test a static request."""
        req = api.Request(site=self.site, parameters={'action': 'query',
                                                      'fake': True})
        with PatchedHttp(api, self.data):
            self.assertRaises(api.APIMWException, req.submit)
    def test_API_error_encoding_ASCII(self):
        """Test a Page instance as parameter using ASCII chars."""
        page = pywikibot.page.Page(self.site, 'ASCII')
        req = api.Request(site=self.site, parameters={'action': 'query',
                                                      'fake': True,
                                                      'titles': page})
        self.assert_parameters = {'fake': ''}
        with PatchedHttp(api, self._dummy_request):
            self.assertRaises(api.APIMWException, req.submit)
    def test_API_error_encoding_Unicode(self):
        """Test a Page instance as parameter using non-ASCII chars."""
        page = pywikibot.page.Page(self.site, 'Ümlä üt')
        req = api.Request(site=self.site, parameters={'action': 'query',
                                                      'fake': True,
                                                      'titles': page})
        self.assert_parameters = {'fake': ''}
        with PatchedHttp(api, self._dummy_request):
            self.assertRaises(api.APIMWException, req.submit)
class TestApiFunctions(DefaultSiteTestCase):
    """Tests for api.Request construction against the default site."""
    def testObjectCreation(self):
        """api.Request() without an explicit site must bind the default site."""
        request = api.Request(action="test", foo="", bar="test")
        self.assertTrue(request)
        self.assertEqual(request.site, self.get_site())
class TestDryApiFunctions(DefaultDrySiteTestCase):
    """Dry-site tests for the api.Request object."""
    def testObjectCreation(self):
        """Construct api.Request() explicitly and probe its dict interface."""
        dry_site = self.get_site()
        request = api.Request(site=dry_site, action="test", foo="", bar="test")
        self.assertTrue(request)
        self.assertEqual(request.site, dry_site)
        self.assertIn("foo", request._params)
        self.assertEqual(request["bar"], ["test"])
        # Item assignment wraps scalar values in a list.
        request["one"] = "1"
        self.assertEqual(request._params['one'], ["1"])
        # Dict-interface compliance:
        # request.keys() should contain "action", "foo", "bar", "one"
        self.assertEqual(len(request.keys()), 4)
        self.assertIn("test", request._encoded_items().values())
        for item in request.items():
            self.assertEqual(len(item), 2, item)
    def test_mixed_mode(self):
        """A literal 'parameters' kwarg must coexist with the parameters dict."""
        kwargs_request = api.Request(site=self.site, action='test',
                                     parameters='foo')
        self.assertIn('parameters', kwargs_request._params)
        dict_request = api.Request(site=self.site,
                                   parameters={'action': 'test',
                                               'parameters': 'foo'})
        self.assertEqual(dict_request['parameters'], ['foo'])
        self.assertEqual(kwargs_request._params, dict_request._params)
class TestParamInfo(DefaultSiteTestCase):
"""Test ParamInfo."""
def test_init(self):
"""Test common initialization."""
site = self.get_site()
pi = api.ParamInfo(site)
self.assertEqual(len(pi), 0)
pi._init()
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
self.assertEqual(len(pi),
len(pi.preloaded_modules))
self.assertIn('info', pi.query_modules)
self.assertIn('login', pi._action_modules)
    def test_init_query_first(self):
        """Test init where it first adds query and then main."""
        def patched_generate_submodules(modules):
            # Change the query such that query is handled before main
            modules = set(modules)
            if 'main' in modules:
                assert 'query' in modules
                modules.discard('main')
                modules = list(modules) + ['main']
            else:
                assert 'query' not in modules
            # Late-bound closure: original_generate_submodules is assigned
            # below, before _init() triggers this patched version.
            original_generate_submodules(modules)
        pi = api.ParamInfo(self.site, set(['query', 'main']))
        self.assertEqual(len(pi), 0)
        # Save the real implementation, then install the reordering patch.
        original_generate_submodules = pi._generate_submodules
        pi._generate_submodules = patched_generate_submodules
        pi._init()
        self.assertIn('main', pi._paraminfo)
        self.assertIn('query', pi._paraminfo)
    def test_init_pageset(self):
        """Test initializing with only the pageset."""
        site = self.get_site()
        # Requesting 'pageset' must not pollute the class-level init set.
        self.assertNotIn('query', api.ParamInfo.init_modules)
        pi = api.ParamInfo(site, set(['pageset']))
        self.assertNotIn('query', api.ParamInfo.init_modules)
        self.assertEqual(len(pi), 0)
        pi._init()
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        self.assertIn('pageset', pi._paraminfo)
        # The remaining assertions need paraminfo support (MW >= 1.12).
        if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
            return
        # 'query' may or may not be preloaded depending on the MW version.
        if 'query' in pi.preloaded_modules:
            self.assertIn('query', pi._paraminfo)
            self.assertEqual(len(pi), 4)
        else:
            self.assertNotIn('query', pi._paraminfo)
            self.assertEqual(len(pi), 3)
        self.assertEqual(len(pi),
                         len(pi.preloaded_modules))
        if MediaWikiVersion(site.version()) >= MediaWikiVersion("1.21"):
            # 'generator' was added to 'pageset' in 1.21
            generators_param = pi.parameter('pageset', 'generator')
            self.assertGreater(len(generators_param['type']), 1)
def test_generators(self):
"""Test requesting the generator parameter."""
site = self.get_site()
pi = api.ParamInfo(site, set(['pageset', 'query']))
self.assertEqual(len(pi), 0)
pi._init()
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
self.assertIn('pageset', pi._paraminfo)
self.assertIn('query', pi._paraminfo)
if MediaWikiVersion(site.version()) >= MediaWikiVersion("1.21"):
# 'generator' was added to 'pageset' in 1.21
pageset_generators_param = pi.parameter('pageset', 'generator')
query_generators_param = pi.parameter('query', 'generator')
self.assertEqual(pageset_generators_param, query_generators_param)
    def test_with_module_info(self):
        """Test requesting the module info."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        self.assertEqual(len(pi), 0)
        pi.fetch(['info'])
        # Submodules are stored under their full 'query+' path.
        self.assertIn('query+info', pi._paraminfo)
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
            # Exactly one module beyond the preloaded set was fetched.
            self.assertEqual(len(pi),
                             1 + len(pi.preloaded_modules))
        self.assertEqual(pi['info']['prefix'], 'in')
        param = pi.parameter('info', 'prop')
        self.assertIsInstance(param, dict)
        self.assertEqual(param['name'], 'prop')
        self.assertNotIn('deprecated', param)
        self.assertIsInstance(param['type'], list)
        # The 'protection' value check needs paraminfo support (MW >= 1.12).
        if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
            return
        self.assertIn('protection', param['type'])
def test_with_module_revisions(self):
"""Test requesting the module revisions."""
site = self.get_site()
pi = api.ParamInfo(site)
self.assertEqual(len(pi), 0)
pi.fetch(['revisions'])
self.assertIn('query+revisions', pi._paraminfo)
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
self.assertEqual(len(pi),
1 + len(pi.preloaded_modules))
self.assertEqual(pi['revisions']['prefix'], 'rv')
param = pi.parameter('revisions', 'prop')
self.assertIsInstance(param, dict)
self.assertEqual(param['name'], 'prop')
self.assertNotIn('deprecated', param)
self.assertIsInstance(param['type'], list)
if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
return
self.assertIn('user', param['type'])
def test_multiple_modules(self):
"""Test requesting multiple modules in one fetch."""
site = self.get_site()
pi = api.ParamInfo(site)
self.assertEqual(len(pi), 0)
pi.fetch(['info', 'revisions'])
self.assertIn('query+info', pi._paraminfo)
self.assertIn('query+revisions', pi._paraminfo)
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
return
self.assertEqual(len(pi),
2 + len(pi.preloaded_modules))
def test_with_invalid_module(self):
"""Test requesting different kind of invalid modules."""
site = self.get_site()
pi = api.ParamInfo(site)
self.assertEqual(len(pi), 0)
pi.fetch('foobar')
self.assertNotIn('foobar', pi._paraminfo)
self.assertRaises(KeyError, pi.__getitem__, 'foobar')
self.assertRaises(KeyError, pi.__getitem__, 'foobar+foobar')
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
return
self.assertEqual(len(pi),
len(pi.preloaded_modules))
    def test_submodules(self):
        """Test another module apart from query having submodules."""
        pi = api.ParamInfo(self.site)
        self.assertFalse(pi._modules)
        pi.fetch(['query'])
        self.assertIn('query', pi._modules)
        self.assertIsInstance(pi._modules['query'], frozenset)
        self.assertIn('revisions', pi._modules['query'])
        self.assertEqual(pi.submodules('query'), pi.query_modules)
        # With path=True each submodule is returned as 'query+<name>'.
        for mod in pi.submodules('query', True):
            self.assertEqual(mod[:6], 'query+')
            self.assertEqual(mod[6:], pi[mod]['name'])
            self.assertEqual(mod, pi[mod]['path'])
        self.assertRaises(KeyError, pi.__getitem__, 'query+foobar')
        # 'edit' has no submodules, so asking for them must fail.
        self.assertRaises(KeyError, pi.submodules, 'edit')
def test_query_modules_with_limits(self):
"""Test query_modules_with_limits property."""
site = self.get_site()
pi = api.ParamInfo(site)
self.assertIn('revisions', pi.query_modules_with_limits)
self.assertNotIn('info', pi.query_modules_with_limits)
def test_modules(self):
"""Test v1.8 modules exist."""
site = self.get_site()
pi = api.ParamInfo(site)
self.assertIn('revisions', pi.modules)
self.assertIn('help', pi.modules)
self.assertIn('allpages', pi.modules)
for mod in pi.modules:
self.assertNotIn('+', mod)
def test_module_paths(self):
    """Test module paths use the complete paths."""
    pi = api.ParamInfo(self.site)
    self.assertIn('help', pi.module_paths)
    # Submodules only appear under their full 'query+' path, not bare.
    self.assertNotIn('revisions', pi.module_paths)
    self.assertIn('query+revisions', pi.module_paths)
    self.assertNotIn('allpages', pi.module_paths)
    self.assertIn('query+allpages', pi.module_paths)
def test_prefixes(self):
    """Test v1.8 module prefixes exist."""
    site = self.get_site()
    pi = api.ParamInfo(site)
    self.assertIn('revisions', pi.prefixes)
    self.assertIn('login', pi.prefixes)
    self.assertIn('allpages', pi.prefixes)
def test_prefix_map(self):
    """Test module prefixes use the path."""
    pi = api.ParamInfo(self.site)
    self.assertIn('query+revisions', pi.prefix_map)
    self.assertIn('login', pi.prefix_map)
    self.assertIn('query+allpages', pi.prefix_map)
    # Keys of the prefix map are full module paths.
    for mod in pi.prefix_map:
        self.assertEqual(mod, pi[mod]['path'])
def test_attributes(self):
    """Test attributes method."""
    pi = api.ParamInfo(self.site)
    attributes = pi.attributes('mustbeposted')
    self.assertIn('edit', attributes)
    # Keys are full module paths; 'mustbeposted' is a flag whose value is ''.
    for mod, value in attributes.items():
        self.assertEqual(mod, pi[mod]['path'])
        self.assertEqual(value, '')
def test_old_mode(self):
    """Test the old mode explicitly."""
    site = self.get_site()
    pi = api.ParamInfo(site, modules_only_mode=False)
    pi.fetch(['info'])
    # Even in old mode, 'info' is cached under its full 'query+info' path.
    self.assertIn('query+info', pi._paraminfo)
    self.assertIn('main', pi._paraminfo)
    self.assertIn('paraminfo', pi._paraminfo)
    if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
        # The fetched module plus all preloaded ones are cached.
        self.assertEqual(len(pi),
                         1 + len(pi.preloaded_modules))
    self.assertIn('revisions', pi.prefixes)
def test_new_mode(self):
    """Test the new modules-only mode explicitly."""
    site = self.get_site()
    # The 'modules' parameter of action=paraminfo needs MW 1.25wmf4+.
    if MediaWikiVersion(site.version()) < MediaWikiVersion('1.25wmf4'):
        raise unittest.SkipTest(
            "version %s doesn't support the new paraminfo api"
            % site.version())
    pi = api.ParamInfo(site, modules_only_mode=True)
    pi.fetch(['info'])
    self.assertIn('query+info', pi._paraminfo)
    self.assertIn('main', pi._paraminfo)
    self.assertIn('paraminfo', pi._paraminfo)
    # The fetched module plus all preloaded ones are cached.
    self.assertEqual(len(pi),
                     1 + len(pi.preloaded_modules))
    self.assertIn('revisions', pi.prefixes)
class TestOtherSubmodule(TestCase):

    """Test handling multiple different modules having submodules."""

    family = 'mediawiki'
    code = 'mediawiki'

    def test_other_submodule(self):
        """Test another module apart from query having submodules."""
        pi = api.ParamInfo(self.site)
        self.assertFalse(pi._modules)
        pi.fetch(['query'])
        # Fetching 'query' must not pull in unrelated module trees.
        self.assertNotIn('flow', pi._modules)
        pi.fetch(['flow'])
        self.assertIn('flow', pi._modules)
        # Collect every known submodule across all fetched parents.
        other_modules = set()
        for modules in pi._modules.values():
            self.assertIsInstance(modules, frozenset)
            other_modules |= modules
        # After removing the action and query trees, what is left should not
        # clash with the top-level module names.
        other_modules -= pi.action_modules
        other_modules -= pi.query_modules
        self.assertLessEqual(other_modules & pi.submodules('flow'),
                             pi.submodules('flow'))
        self.assertFalse(other_modules & pi.modules)
class TestParaminfoModules(DefaultSiteTestCase):

    """Test loading all paraminfo modules."""

    def test_action_modules(self):
        """Test loading all action modules."""
        self.site._paraminfo.fetch(self.site._paraminfo.action_modules)

    def test_query_modules(self):
        """Test loading all query modules."""
        self.site._paraminfo.fetch(self.site._paraminfo.query_modules)
class TestOptionSet(TestCase):

    """OptionSet class test class."""

    family = 'wikipedia'
    code = 'en'

    def test_non_lazy_load(self):
        """Test OptionSet with initialised site."""
        options = api.OptionSet(self.get_site(), 'recentchanges', 'show')
        # With a site attached, names and values are validated immediately.
        self.assertRaises(KeyError, options.__setitem__, 'invalid_name', True)
        self.assertRaises(ValueError, options.__setitem__, 'anon', 'invalid_value')
        options['anon'] = True
        self.assertCountEqual(['anon'], options._enabled)
        self.assertEqual(set(), options._disabled)
        self.assertEqual(1, len(options))
        self.assertEqual(['anon'], list(options))
        self.assertEqual(['anon'], list(options.api_iter()))
        options['bot'] = False
        self.assertCountEqual(['anon'], options._enabled)
        self.assertCountEqual(['bot'], options._disabled)
        self.assertEqual(2, len(options))
        self.assertEqual(['anon', 'bot'], list(options))
        # Disabled options are serialised with a '!' prefix for the API.
        self.assertEqual(['anon', '!bot'], list(options.api_iter()))
        options.clear()
        self.assertEqual(set(), options._enabled)
        self.assertEqual(set(), options._disabled)
        self.assertEqual(0, len(options))
        self.assertEqual([], list(options))
        self.assertEqual([], list(options.api_iter()))

    def test_lazy_load(self):
        """Test OptionSet with delayed site initialisation."""
        options = api.OptionSet()
        # Without a site, any option name is accepted unvalidated.
        options['invalid_name'] = True
        options['anon'] = True
        self.assertIn('invalid_name', options._enabled)
        self.assertEqual(2, len(options))
        # Strict validation rejects the unknown name and leaves state intact.
        self.assertRaises(KeyError, options._set_site, self.get_site(),
                          'recentchanges', 'show')
        self.assertEqual(2, len(options))
        # With the final flag set, the unknown name is silently removed.
        options._set_site(self.get_site(), 'recentchanges', 'show', True)
        self.assertEqual(1, len(options))
        # A site may only be attached once.
        self.assertRaises(TypeError, options._set_site, self.get_site(),
                          'recentchanges', 'show')
class TestDryOptionSet(DefaultDrySiteTestCase):

    """OptionSet class test class."""

    def test_mutable_mapping(self):
        """Test keys, values and items from MutableMapping."""
        options = api.OptionSet()
        options['a'] = True
        options['b'] = False
        # 'c' is set to None and does not appear in keys/values below.
        options['c'] = None
        self.assertCountEqual(['a', 'b'], list(options.keys()))
        self.assertCountEqual([True, False], list(options.values()))
        self.assertEqual(set(), set(options.values()) - set([True, False]))
        self.assertCountEqual([('a', True), ('b', False)], list(options.items()))
class TestDryPageGenerator(TestCase):

    """Dry API PageGenerator object test class."""

    family = 'wikipedia'
    code = 'en'

    dry = True

    # api.py sorts 'pages' using the string key, which is not a
    # numeric comparison.
    titles = ("Broadcaster (definition)", "Wiktionary", "Broadcaster.com",
              "Wikipedia:Disambiguation")

    def setUp(self):
        """Set up test case."""
        super(TestDryPageGenerator, self).setUp()
        mysite = self.get_site()
        self.gen = api.PageGenerator(site=mysite,
                                     generator="links",
                                     titles="User:R'n'B")
        # following test data is copied from an actual api.php response,
        # but that query no longer matches this dataset.
        # http://en.wikipedia.org/w/api.php?action=query&generator=links&titles=User:R%27n%27B
        # Replace the network call with a canned response bound to the
        # request object.
        self.gen.request.submit = types.MethodType(lambda self: {
            "query": {"pages": {"296589": {"pageid": 296589,
                                           "ns": 0,
                                           "title": "Broadcaster.com"
                                           },
                                "13918157": {"pageid": 13918157,
                                             "ns": 0,
                                             "title": "Broadcaster (definition)"
                                             },
                                "156658": {"pageid": 156658,
                                           "ns": 0,
                                           "title": "Wiktionary"
                                           },
                                "47757": {"pageid": 47757,
                                          "ns": 4,
                                          "title": "Wikipedia:Disambiguation"
                                          }
                                }
                      }
        }, self.gen.request)

        # On a dry site, the namespace objects only have canonical names.
        # Add custom_name for this site namespace, to match the live site.
        if 'Wikipedia' not in self.site.namespaces:
            self.site.namespaces[4].custom_name = 'Wikipedia'
            self.site.namespaces._namespace_names['wikipedia'] = self.site.namespaces[4]

    def test_results(self):
        """Test that PageGenerator yields pages with expected attributes."""
        self.assertPagelistTitles(self.gen, self.titles)

    def test_initial_limit(self):
        """Test the default limit."""
        self.assertEqual(self.gen.limit, None)  # limit is initially None

    def test_set_limit_as_number(self):
        """Test setting the limit using an int."""
        for i in range(-2, 4):
            self.gen.set_maximum_items(i)
            self.assertEqual(self.gen.limit, i)

    def test_set_limit_as_string(self):
        """Test setting the limit using an int cast into a string."""
        for i in range(-2, 4):
            self.gen.set_maximum_items(str(i))
            self.assertEqual(self.gen.limit, i)

    def test_set_limit_not_number(self):
        """Test setting the limit to not a number."""
        with self.assertRaisesRegex(
                ValueError,
                "invalid literal for int\(\) with base 10: 'test'"):
            self.gen.set_maximum_items('test')

    def test_limit_equal_total(self):
        """Test that PageGenerator yields the requested amount of pages."""
        self.gen.set_maximum_items(4)
        self.assertPagelistTitles(self.gen, self.titles)

    def test_limit_one(self):
        """Test that PageGenerator yields the requested amount of pages."""
        self.gen.set_maximum_items(1)
        self.assertPagelistTitles(self.gen, self.titles[0:1])

    def test_limit_zero(self):
        """Test that a limit of zero is the same as limit None."""
        self.gen.set_maximum_items(0)
        self.assertPagelistTitles(self.gen, self.titles)

    def test_limit_omit(self):
        """Test that limit omitted is the same as limit None."""
        self.gen.set_maximum_items(-1)
        self.assertPagelistTitles(self.gen, self.titles)

    def test_namespace(self):
        """Test PageGenerator set_namespace."""
        # set_namespace is expected to raise for any argument here.
        self.assertRaises(AssertionError, self.gen.set_namespace, 0)
        self.assertRaises(AssertionError, self.gen.set_namespace, 1)
        self.assertRaises(AssertionError, self.gen.set_namespace, None)
class TestPropertyGenerator(TestCase):

    """API PropertyGenerator object test class."""

    family = 'wikipedia'
    code = 'en'

    def test_info(self):
        """Test PropertyGenerator with prop 'info'."""
        mainpage = self.get_mainpage()
        links = list(self.site.pagelinks(mainpage, total=10))
        titles = [l.title(withSection=False)
                  for l in links]
        gen = api.PropertyGenerator(site=self.site,
                                    prop="info",
                                    titles='|'.join(titles))

        # Every linked page must yield exactly one data dict.
        count = 0
        for pagedata in gen:
            self.assertIsInstance(pagedata, dict)
            self.assertIn('pageid', pagedata)
            self.assertIn('lastrevid', pagedata)
            count += 1
        self.assertEqual(len(links), count)

    def test_one_continuation(self):
        """Test PropertyGenerator with prop 'revisions'."""
        mainpage = self.get_mainpage()
        links = list(self.site.pagelinks(mainpage, total=10))
        titles = [l.title(withSection=False)
                  for l in links]
        gen = api.PropertyGenerator(site=self.site,
                                    prop="revisions",
                                    titles='|'.join(titles))
        gen.set_maximum_items(-1)  # suppress use of "rvlimit" parameter

        count = 0
        for pagedata in gen:
            self.assertIsInstance(pagedata, dict)
            self.assertIn('pageid', pagedata)
            self.assertIn('revisions', pagedata)
            self.assertIn('revid', pagedata['revisions'][0])
            count += 1
        self.assertEqual(len(links), count)

    def test_two_continuations(self):
        """Test PropertyGenerator with prop 'revisions' and 'coordinates'."""
        mainpage = self.get_mainpage()
        links = list(self.site.pagelinks(mainpage, total=10))
        titles = [l.title(withSection=False)
                  for l in links]
        gen = api.PropertyGenerator(site=self.site,
                                    prop="revisions|coordinates",
                                    titles='|'.join(titles))
        gen.set_maximum_items(-1)  # suppress use of "rvlimit" parameter

        count = 0
        for pagedata in gen:
            self.assertIsInstance(pagedata, dict)
            self.assertIn('pageid', pagedata)
            self.assertIn('revisions', pagedata)
            self.assertIn('revid', pagedata['revisions'][0])
            count += 1
        self.assertEqual(len(links), count)

    @allowed_failure
    def test_many_continuations_limited(self):
        """Test PropertyGenerator with many limited props."""
        mainpage = self.get_mainpage()
        links = list(self.site.pagelinks(mainpage, total=30))
        titles = [l.title(withSection=False)
                  for l in links]
        gen = api.PropertyGenerator(site=self.site,
                                    prop="revisions|info|categoryinfo|langlinks|templates",
                                    rvprop="ids|flags|timestamp|user|comment|content",
                                    titles='|'.join(titles))

        # An APIError is raised if set_maximum_items is not called.
        gen.set_maximum_items(-1)  # suppress use of "rvlimit" parameter
        # Force the generator into continuation mode
        gen.set_query_increment(5)

        count = 0
        for pagedata in gen:
            self.assertIsInstance(pagedata, dict)
            self.assertIn('pageid', pagedata)
            count += 1
        self.assertEqual(len(links), count)
        # FIXME: AssertionError: 30 != 6150

    @allowed_failure
    def test_two_continuations_limited(self):
        """Test PropertyGenerator with many limited props and continuations."""
        # FIXME: test fails
        mainpage = self.get_mainpage()
        links = list(self.site.pagelinks(mainpage, total=30))
        titles = [l.title(withSection=False)
                  for l in links]
        gen = api.PropertyGenerator(site=self.site,
                                    prop="info|categoryinfo|langlinks|templates",
                                    titles='|'.join(titles))
        # Force the generator into continuation mode
        gen.set_query_increment(5)

        count = 0
        for pagedata in gen:
            self.assertIsInstance(pagedata, dict)
            self.assertIn('pageid', pagedata)
            count += 1
        self.assertEqual(len(links), count)
        # FIXME: AssertionError: 30 != 11550

    # FIXME: test disabled as it takes longer than 10 minutes
    def _test_two_continuations_limited_long_test(self):
        """Long duration test, with total & step that are a real scenario."""
        mainpage = self.get_mainpage()
        links = list(mainpage.backlinks(total=300))
        titles = [l.title(withSection=False)
                  for l in links]
        gen = api.PropertyGenerator(site=self.site,
                                    prop="info|categoryinfo|langlinks|templates",
                                    titles='|'.join(titles))
        # Force the generator into continuation mode
        gen.set_query_increment(50)

        count = 0
        for pagedata in gen:
            self.assertIsInstance(pagedata, dict)
            self.assertIn('pageid', pagedata)
            count += 1
        self.assertEqual(len(links), count)
class TestDryListGenerator(TestCase):

    """Test ListGenerator."""

    family = 'wikipedia'
    code = 'en'

    dry = True

    def setUp(self):
        """Set up test case."""
        super(TestDryListGenerator, self).setUp()
        mysite = self.get_site()
        # Seed just enough paraminfo so the generator can be built offline.
        mysite._paraminfo['query+allpages'] = {
            'prefix': 'ap',
            'limit': {'max': 10},
            'namespace': {'multi': True}
        }
        mysite._paraminfo.query_modules_with_limits = set(['allpages'])
        self.gen = api.ListGenerator(listaction="allpages", site=mysite)

    def test_namespace_none(self):
        """Test ListGenerator set_namespace with None."""
        self.assertRaises(TypeError, self.gen.set_namespace, None)

    def test_namespace_zero(self):
        """Test ListGenerator set_namespace with 0."""
        self.gen.set_namespace(0)
class TestCachedRequest(DefaultSiteTestCase):

    """Test API Request caching.

    This test class does not use the forced test caching.
    """

    cached = False

    def test_normal_use(self):
        """Test the caching of CachedRequest with an ordinary request."""
        mysite = self.get_site()
        mainpage = self.get_mainpage()
        # Run the cached query three times to ensure the
        # data returned is equal, and the last two have
        # the same cache time.
        params = {'action': 'query',
                  'prop': 'info',
                  'titles': mainpage.title(),
                  }
        req1 = api.CachedRequest(datetime.timedelta(minutes=10),
                                 site=mysite, **params)
        data1 = req1.submit()
        req2 = api.CachedRequest(datetime.timedelta(minutes=10),
                                 site=mysite, **params)
        data2 = req2.submit()
        req3 = api.CachedRequest(datetime.timedelta(minutes=10),
                                 site=mysite, **params)
        data3 = req3.submit()
        self.assertEqual(data1, data2)
        self.assertEqual(data2, data3)
        # A non-None _cachetime indicates the request was served from cache.
        self.assertIsNotNone(req2._cachetime)
        self.assertIsNotNone(req3._cachetime)
        self.assertEqual(req2._cachetime, req3._cachetime)

    def test_internals(self):
        """Test the caching of CachedRequest by faking a unique request."""
        mysite = self.get_site()
        # Run tests on a missing page unique to this test run so it can
        # not be cached the first request, but will be cached after.
        now = datetime.datetime.now()
        params = {'action': 'query',
                  'prop': 'info',
                  'titles': 'TestCachedRequest_test_internals ' + str(now),
                  }
        req = api.CachedRequest(datetime.timedelta(minutes=10),
                                site=mysite, **params)
        # Before the first submit there is nothing to load from the cache.
        rv = req._load_cache()
        self.assertFalse(rv)
        self.assertIsNone(req._data)
        self.assertIsNone(req._cachetime)

        # Submitting populates _data but not _cachetime (fresh request).
        data = req.submit()

        self.assertIsNotNone(req._data)
        self.assertIsNone(req._cachetime)

        # Now the cache holds the response and stamps _cachetime.
        rv = req._load_cache()

        self.assertTrue(rv)
        self.assertIsNotNone(req._data)
        self.assertIsNotNone(req._cachetime)
        self.assertGreater(req._cachetime, now)
        self.assertEqual(req._data, data)
class TestLazyLoginBase(TestCase):

    """
    Test that it tries to login when read API access is denied.

    Because there is no such family configured it creates an AutoFamily and
    BaseSite on its own. It is testing against steward.wikimedia.org.

    These tests are split into two subclasses as only the first failed login
    behaves as expected. All subsequent logins will raise an APIError, making
    it impossible to test two scenarios with the same APISite object.
    """

    hostname = 'steward.wikimedia.org'

    @classmethod
    def setUpClass(cls):
        """Set up steward Family."""
        super(TestLazyLoginBase, cls).setUpClass()
        fam = pywikibot.family.AutoFamily(
            'steward', 'https://steward.wikimedia.org/w/api.php')
        cls.site = pywikibot.site.APISite('steward', fam)
class TestLazyLoginNotExistUsername(TestLazyLoginBase):

    """Test missing username."""

    # FIXME: due to limitations of LoginManager, it will ask the user
    # for a password even if the username does not exist, and even if
    # pywikibot is not connected to a tty. T100964

    def setUp(self):
        """Patch the LoginManager to avoid UI interaction."""
        super(TestLazyLoginNotExistUsername, self).setUp()
        self.orig_login_manager = pywikibot.data.api.LoginManager
        pywikibot.data.api.LoginManager = FakeLoginManager

    def tearDown(self):
        """Restore the original LoginManager."""
        pywikibot.data.api.LoginManager = self.orig_login_manager
        super(TestLazyLoginNotExistUsername, self).tearDown()

    def test_access_denied_notexist_username(self):
        """Test the query with a username which does not exist."""
        self.site._username = ['Not registered username', None]
        req = api.Request(site=self.site, action='query')
        # First failed login raises NoUsername ...
        self.assertRaises(pywikibot.NoUsername, req.submit)
        # ... any retry raises APIError instead.
        # FIXME: T100965
        self.assertRaises(api.APIError, req.submit)
class TestLazyLoginNoUsername(TestLazyLoginBase):

    """Test no username."""

    def test_access_denied_no_username(self):
        """Test the query without a username."""
        self.site._username = [None, None]

        # FIXME: The following prevents LoginManager
        # from loading the username from the config when the site
        # username is None. i.e. site.login(user=None) means load
        # username from the configuration.
        if 'steward' in pywikibot.config.usernames:
            del pywikibot.config.usernames['steward']

        req = api.Request(site=self.site, action='query')
        # First failed login raises NoUsername; retries raise APIError.
        self.assertRaises(pywikibot.NoUsername, req.submit)
        # FIXME: T100965
        self.assertRaises(api.APIError, req.submit)
class TestBadTokenRecovery(TestCase):

    """Test that the request recovers from bad tokens."""

    family = 'wikipedia'
    code = 'test'

    write = True

    def test_bad_token(self):
        """Test the bad token recovery by corrupting the cache."""
        site = self.get_site()
        # Poison the cached edit token; the save below should still succeed
        # by fetching a fresh token after the badtoken error.
        site.tokens._tokens.setdefault(site.user(), {})['edit'] = 'INVALID'
        page = pywikibot.Page(site, 'Pywikibot bad token test')
        page.text = ('This page is testing whether pywikibot-core rerequests '
                     'a token when a badtoken error was received.')
        page.save(summary='Bad token test')
class TestUrlEncoding(TestCase):

    """Test encode_url() function."""

    net = False

    def test_url_encoding_from_list(self):
        """Test moving 'token' parameters from a list to the end."""
        query = [('action', 'edit'), ('token', 'a'), ('supertoken', 'b'),
                 ('text', 'text')]
        expect = 'action=edit&text=text&token=a&supertoken=b'
        result = api.encode_url(query)
        self.assertEqual(result, expect)
        self.assertIsInstance(result, str)

    def test_url_encoding_from_dict(self):
        """Test moving 'token' parameters from a dict to the end."""
        # do not add other keys because dictionary is not deterministic
        query = {'supertoken': 'b', 'text': 'text'}
        expect = 'text=text&supertoken=b'
        result = api.encode_url(query)
        self.assertEqual(result, expect)
        self.assertIsInstance(result, str)

    def test_url_encoding_from_unicode(self):
        """Test encoding unicode values."""
        query = {'token': 'токен'}
        expect = 'token=%D1%82%D0%BE%D0%BA%D0%B5%D0%BD'
        result = api.encode_url(query)
        self.assertEqual(result, expect)
        self.assertIsInstance(result, str)

    def test_url_encoding_from_basestring(self):
        """Test encoding basestring values."""
        # On Python 2 the value is a UTF-8 byte string; on Python 3 the
        # same code points as a (latin-1-ish) text string.
        if PY2:
            query = {'token': str('test\xe2\x80\x94test'.encode('utf-8'))}
        else:
            query = {'token': 'test\xe2\x80\x94test'}
        expect = str('token=test%C3%A2%C2%80%C2%94test')
        result = api.encode_url(query)
        self.assertEqual(result, expect)
        self.assertIsInstance(result, str)

    def test_moving_special_tokens(self):
        """Test moving wpEditToken to the very end."""
        query = {'wpEditToken': 'c', 'token': 'b', 'text': 'a'}
        expect = 'text=a&token=b&wpEditToken=c'
        result = api.encode_url(query)
        self.assertEqual(result, expect)
        self.assertIsInstance(result, str)
if __name__ == '__main__':
    try:
        unittest.main()
    except SystemExit:
        # unittest.main() raises SystemExit when finished; swallow it so a
        # direct invocation exits quietly.
        pass
| {
"content_hash": "0fab8cf04ec70bb3cf3c02fe6b2a34d9",
"timestamp": "",
"source": "github",
"line_count": 1030,
"max_line_length": 94,
"avg_line_length": 37.74951456310679,
"alnum_prop": 0.5853608353479759,
"repo_name": "darthbhyrava/pywikibot-local",
"id": "1b0ee7d83efac31a56add85526bf41c9a384358b",
"size": "38915",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/api_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "4195172"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.db import models
class Category(models.Model):
    """A named blog category; listings are ordered newest-first."""

    name = models.CharField(max_length=255)
    # Creation/modification timestamps are managed by Django automatically.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        # The original built '{name}'.format(id=self.id, name=self.name),
        # passing a dead ``id`` keyword that str.format silently ignored.
        # The result is simply the name, so return it directly.
        return self.name

    class Meta:
        verbose_name = 'Категория'
        verbose_name_plural = 'Категории'
        ordering = ('-created_at',)
class BlogQuerySet(models.QuerySet):
    """Queryset for Blog with a fetch-optimised variant."""

    def optimized(self):
        """Return this queryset with the owner joined and categories prefetched."""
        return self.select_related('owner').prefetch_related('categories')
class Blog(models.Model):
    # A user-owned blog grouping posts; listings are ordered newest-first.
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='blogs')
    title = models.CharField(max_length=255)
    description = models.TextField()
    # NOTE(review): related_name='categories' names the reverse accessor on
    # Category 'categories' as well, which is confusing — 'blogs' would be
    # conventional. Renaming changes the public API, so it is only flagged.
    categories = models.ManyToManyField('Category', related_name='categories')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    # Custom manager exposing Blog.objects.optimized().
    objects = BlogQuerySet.as_manager()

    def __str__(self):
        return '{title}'.format(title=self.title)

    class Meta:
        verbose_name = 'Блог'
        verbose_name_plural = 'Блоги'
        ordering = ('-created_at',)
class Post(models.Model):
    # A single article belonging to a blog.
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0, where
    # CASCADE was the implicit default — confirm the pinned Django version.
    author = models.ForeignKey(settings.AUTH_USER_MODEL)
    blog = models.ForeignKey('blogs.Blog', related_name='posts')
    title = models.CharField(max_length=255)
    content = models.TextField()
    is_published = models.BooleanField(default=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return '{title}'.format(title=self.title)

    class Meta:
        verbose_name = 'Пост'
        verbose_name_plural = 'Посты'
        ordering = ('-created_at',)
class Like(models.Model):
    # A user's like on a post; unique-together is NOT enforced here, so the
    # same user may like the same post more than once.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    author = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='likes')
    post = models.ForeignKey(Post, related_name='likes')

    def __str__(self):
        return 'Лайк от "{author}" к посту "{post}"'.format(author=self.author, post=self.post)

    class Meta:
        verbose_name = 'Лайк'
        verbose_name_plural = 'Лайки'
        ordering = ('-created_at',)
| {
"content_hash": "af9e36d3a44ab685536df3b39eedcf06",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 95,
"avg_line_length": 28.695121951219512,
"alnum_prop": 0.6566085847853803,
"repo_name": "WeitBelou/technotrack-web1-spring-2017",
"id": "a3ebd04875d900beee8ff30730390c3f77aff38f",
"size": "2410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blogs/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29"
},
{
"name": "HTML",
"bytes": "12209"
},
{
"name": "JavaScript",
"bytes": "1612"
},
{
"name": "Python",
"bytes": "32896"
}
],
"symlink_target": ""
} |
import unittest
import copy
from IPython.display import Markdown, display
import numpy as np
from frozenlake import FrozenLakeEnv
def printmd(string):
    """Render *string* as Markdown in the notebook output area."""
    display(Markdown(string))
def policy_evaluation_soln(env, policy, gamma=1, theta=1e-8):
    """Iteratively evaluate *policy* on *env*.

    Sweeps the state space in place until the largest single-state change
    drops below *theta*, then returns the state-value array.
    """
    values = np.zeros(env.nS)
    converged = False
    while not converged:
        largest_change = 0
        for state in range(env.nS):
            # Expected return over actions and their transition outcomes.
            new_value = sum(
                action_prob * prob * (reward + gamma * values[successor])
                for action, action_prob in enumerate(policy[state])
                for prob, successor, reward, _done in env.P[state][action])
            largest_change = max(largest_change, np.abs(values[state] - new_value))
            values[state] = new_value
        converged = largest_change < theta
    return values
def q_from_v_soln(env, V, s, gamma=1):
    """Return the action-value vector for state *s* given state values *V*."""
    action_values = np.zeros(env.nA)
    for action in range(env.nA):
        # One-step lookahead over the transition outcomes of this action.
        total = 0.0
        for prob, successor, reward, _done in env.P[s][action]:
            total += prob * (reward + gamma * V[successor])
        action_values[action] = total
    return action_values
def policy_improvement_soln(env, V, gamma=1):
    """Return the greedy policy w.r.t. *V*, splitting ties uniformly."""
    greedy = np.zeros([env.nS, env.nA]) / env.nA
    identity = np.eye(env.nA)
    for state in range(env.nS):
        action_values = q_from_v_soln(env, V, state, gamma)
        # All argmax actions share the probability mass equally.
        best_actions = np.argwhere(action_values == np.max(action_values)).flatten()
        greedy[state] = np.sum([identity[a] for a in best_actions],
                               axis=0) / len(best_actions)
    return greedy
def policy_iteration_soln(env, gamma=1, theta=1e-8):
    """Alternate evaluation and greedy improvement until the policy is stable.

    Returns the converged (policy, state-values) pair.
    """
    current = np.ones([env.nS, env.nA]) / env.nA  # start equiprobable
    while True:
        state_values = policy_evaluation_soln(env, current, gamma, theta)
        improved = policy_improvement_soln(env, state_values)
        # Stable policy: improvement changed nothing, so we are done.
        if (improved == current).all():
            return current, state_values
        current = copy.copy(improved)
# Shared FrozenLake environment and an equiprobable-random policy used by
# every grader below.
env = FrozenLakeEnv()
random_policy = np.ones([env.nS, env.nA]) / env.nA
class Tests(unittest.TestCase):
    """Graders comparing a student implementation against the reference."""

    def policy_evaluation_check(self, policy_evaluation):
        # The student's evaluation must match the reference numerically.
        soln = policy_evaluation_soln(env, random_policy)
        to_check = policy_evaluation(env, random_policy)
        np.testing.assert_array_almost_equal(soln, to_check)

    def q_from_v_check(self, q_from_v):
        # Compare the full state-action value table against the reference.
        V = policy_evaluation_soln(env, random_policy)
        soln = np.zeros([env.nS, env.nA])
        to_check = np.zeros([env.nS, env.nA])
        for s in range(env.nS):
            soln[s] = q_from_v_soln(env, V, s)
            to_check[s] = q_from_v(env, V, s)
        np.testing.assert_array_almost_equal(soln, to_check)

    def policy_improvement_check(self, policy_improvement):
        # The improved policy must be at least as good in every state
        # (policy improvement theorem).
        V = policy_evaluation_soln(env, random_policy)
        new_policy = policy_improvement(env, V)
        new_V = policy_evaluation_soln(env, new_policy)
        self.assertTrue(np.all(new_V >= V))

    def policy_iteration_check(self, policy_iteration):
        # Policies may differ, but their evaluated values must agree.
        policy_soln, _ = policy_iteration_soln(env)
        policy_to_check, _ = policy_iteration(env)
        soln = policy_evaluation_soln(env, policy_soln)
        to_check = policy_evaluation_soln(env, policy_to_check)
        np.testing.assert_array_almost_equal(soln, to_check)

    def truncated_policy_iteration_check(self, truncated_policy_iteration):
        # Truncated variant is graded by the same value-agreement criterion.
        self.policy_iteration_check(truncated_policy_iteration)

    def value_iteration_check(self, value_iteration):
        # Value iteration is graded by the same value-agreement criterion.
        self.policy_iteration_check(value_iteration)
# Single shared grader instance used by run_check below.
check = Tests()


def run_check(check_name, func):
    """Run the named grader against *func* and print a pass/fail banner."""
    try:
        getattr(check, check_name)(func)
    except check.failureException as e:
        printmd('**<span style="color: red;">PLEASE TRY AGAIN</span>**')
        return
    printmd('**<span style="color: green;">PASSED</span>**')
"content_hash": "ce33a46eaaf9135a2457d858713b059f",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 83,
"avg_line_length": 35.84375,
"alnum_prop": 0.6140656785818076,
"repo_name": "hetaodie/hetaodie.github.io",
"id": "9d3146b0cc3d2cd292f9b092b563aef9d4f6f755",
"size": "3441",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "assets/media/uda-ml/qinghua/dongtaiguihua/迷你项目:动态规划(第 2 部分)/check_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6473"
},
{
"name": "HTML",
"bytes": "3209874"
},
{
"name": "JavaScript",
"bytes": "5139"
},
{
"name": "Jupyter Notebook",
"bytes": "12180442"
},
{
"name": "Python",
"bytes": "161358"
},
{
"name": "Shell",
"bytes": "6554"
}
],
"symlink_target": ""
} |
from taskplus.core.shared.action import Action
from taskplus.core.shared.response import ResponseSuccess
from taskplus.core.shared.request import Request
class AssignUserToTaskAction(Action):
    """Action that records a user as the doer of a task."""

    def __init__(self, tasks_repo, users_repo):
        super().__init__()
        self.tasks_repo = tasks_repo
        self.users_repo = users_repo

    def process_request(self, request):
        """Resolve the user and task, assign, persist and wrap the result."""
        doer = self.users_repo.one(request.user_id)
        task = self.tasks_repo.one(request.task_id)

        self._call_before_execution_hooks(dict(request=request, task=task))

        task.doer = doer
        updated_task = self.tasks_repo.update(task)

        self._call_after_execution_hooks(dict(request=request, task=updated_task))
        return ResponseSuccess(updated_task)
class AssignUserToTaskRequest(Request):
    """Request carrying the ids needed to assign a user to a task."""

    def __init__(self, task_id, user_id):
        super().__init__()
        self.task_id = task_id
        self.user_id = user_id

    def _validate(self):
        """Collect errors: both ids are required and must be integers."""
        self.errors = []

        # Validate task_id first, then user_id — error order matters to
        # callers inspecting self.errors.
        for field_name in ('task_id', 'user_id'):
            value = getattr(self, field_name)
            if not value:
                self._add_error(field_name, 'is required')
            elif not isinstance(value, int):
                self._add_error(field_name, 'expected int, got {}({})'.format(
                    value.__class__.__name__, value
                ))
| {
"content_hash": "bc4dc42c4601adba4556332c0fd356c4",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 32.08,
"alnum_prop": 0.594139650872818,
"repo_name": "Himon-SYNCRAFT/taskplus",
"id": "c8663fd656f19018e75d07b5afa5500054dfec4a",
"size": "1604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taskplus/core/actions/assign_user_to_task.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "246743"
}
],
"symlink_target": ""
} |
from ....testing import assert_equal
from ..tracks import StreamlineTractography
def test_StreamlineTractography_inputs():
    # Auto-generated interface test: the expected input traits and their
    # metadata must match the current interface spec exactly.
    input_map = dict(gfa_thresh=dict(mandatory=True,
                                     usedefault=True,
                                     ),
                     ignore_exception=dict(nohash=True,
                                           usedefault=True,
                                           ),
                     in_file=dict(mandatory=True,
                                  ),
                     in_model=dict(),
                     in_peaks=dict(),
                     min_angle=dict(mandatory=True,
                                    usedefault=True,
                                    ),
                     multiprocess=dict(mandatory=True,
                                       usedefault=True,
                                       ),
                     num_seeds=dict(mandatory=True,
                                    usedefault=True,
                                    ),
                     out_prefix=dict(),
                     peak_threshold=dict(mandatory=True,
                                         usedefault=True,
                                         ),
                     save_seeds=dict(mandatory=True,
                                     usedefault=True,
                                     ),
                     seed_coord=dict(),
                     seed_mask=dict(),
                     tracking_mask=dict(),
                     )
    inputs = StreamlineTractography.input_spec()

    # Yield one comparison per (trait, metadata) pair for nose-style runners.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_StreamlineTractography_outputs():
    # Auto-generated interface test: the expected output traits must match
    # the current interface spec.
    output_map = dict(gfa=dict(),
                      odf_peaks=dict(),
                      out_seeds=dict(),
                      tracks=dict(),
                      )
    outputs = StreamlineTractography.output_spec()

    # Yield one comparison per (trait, metadata) pair for nose-style runners.
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| {
"content_hash": "2894f91b70c382ffa2a1159d29404958",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 78,
"avg_line_length": 26.32075471698113,
"alnum_prop": 0.646594982078853,
"repo_name": "FCP-INDI/nipype",
"id": "b4c4dae67933410cc1da056ce3ca6b3b12308a24",
"size": "1449",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2063"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "5280923"
},
{
"name": "Shell",
"bytes": "1958"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.oauthlib.provider import OAuth2Provider
from flask.ext.login import LoginManager
# Shared, unbound extension singletons; each is expected to be bound to the
# application elsewhere via init_app() (application-factory pattern).
# NOTE(review): the 'flask.ext' import namespace used by this module was
# removed in Flask 1.0 — confirm the pinned Flask version or migrate to
# 'flask_sqlalchemy' / 'flask_oauthlib' / 'flask_login' when upgrading.
db = SQLAlchemy()
oauth = OAuth2Provider()
login_manager = LoginManager()
| {
"content_hash": "6d3d11e4e5c4c068d3f003bb6b2b5774",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 54,
"avg_line_length": 27,
"alnum_prop": 0.8148148148148148,
"repo_name": "taras1k/flask_auth_service",
"id": "b32b322b0781912eac9028623eaa6593a1140d70",
"size": "216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/extensions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "18161"
}
],
"symlink_target": ""
} |
class Agent:
    """Tabular Q-learning bookkeeping.

    Tracks Q-value estimates, per-pair update counts and per-pair decaying
    learning rates, all keyed by a (grid, action) tuple.
    """

    def __init__(self):
        self.initQ()

    def initQ(self):
        """Reset all tables to empty."""
        self.nbQ = {}    # (grid, action) -> number of setQ updates
        self.q = {}      # (grid, action) -> current Q-value estimate
        self.alpha = {}  # (grid, action) -> learning rate, decayed 0.99x per update

    def QValue(self, grid, action):
        """Return the stored Q-value, defaulting to 0.0 for unseen pairs."""
        # dict.get replaces the original LBYL membership check.
        return self.q.get((grid, action), 0.0)

    def countQ(self, grid, action):
        """Return how many times the pair has been updated (0 if never)."""
        return self.nbQ.get((grid, action), 0)

    def alphaValue(self, grid, action):
        """Return the learning rate for the pair, defaulting to 1.0."""
        return self.alpha.get((grid, action), 1.0)

    def setQ(self, grid, action, value):
        """Store *value* for the pair, bump its count and decay its alpha.

        The first update initialises alpha to 1.0; each later update
        multiplies it by 0.99.
        """
        key = (grid, action)
        self.q[key] = value
        self.nbQ[key] = self.nbQ.get(key, 0) + 1
        if key in self.alpha:
            self.alpha[key] *= 0.99
        else:
            # Was the int 1 in the original; 1.0 keeps the table float-typed.
            self.alpha[key] = 1.0

    def getMaxQAction(self, grid, possibleActions):
        """Return the action with the highest Q-value.

        Ties break toward the largest action, matching the original
        tuple-max behaviour, without building an intermediate list.
        """
        return max(possibleActions, key=lambda a: (self.QValue(grid, a), a))

    def getMaxQValue(self, grid, possibleActions):
        """Return the best Q-value over the actions, or 0 when none given."""
        if not possibleActions:
            return 0
        return max(self.QValue(grid, a) for a in possibleActions)

    def getQValues(self, grid, possibleActions):
        """Return (action, Q-value) pairs in the given order."""
        return [(a, self.QValue(grid, a)) for a in possibleActions]
"content_hash": "220e658964b9eba4185066f28142f397",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 74,
"avg_line_length": 28.510204081632654,
"alnum_prop": 0.5433070866141733,
"repo_name": "RaphaelLapierre/INF4215",
"id": "7f616f104ab93453d0104300453a092d4b8a3ab2",
"size": "1513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TP3/RiskAI/Agent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Prolog",
"bytes": "7775"
},
{
"name": "Python",
"bytes": "15991"
},
{
"name": "TeX",
"bytes": "6672"
}
],
"symlink_target": ""
} |
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime, time
import logging
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.api import taskqueue
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import TeeShirtSize
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import BooleanMessage
from models import ConflictException
from models import StringMessage
from models import Session
from models import SessionForm
from models import SessionForms
from models import SpeakerForm
from settings import WEB_CLIENT_ID
from utils import getUserId
# OAuth scope and client ID used to authorize API callers.
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# Memcache keys for the announcement and featured-speaker cache entries.
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT ANNOUNCEMENTS"
MEMCACHE_FEAT_SPKR_KEY = "FEATURED SPEAKER"
# Default field values applied when a new Conference request omits them.
DEFAULTS = {
    "city": "Default City",
    "maxAttendees": 0,
    "seatsAvailable": 0,
    "topics": ["Default", "Topic"],
}
# Mapping from query-form operator names to Datastore filter operators.
OPERATORS = {
    'EQ': '=',
    'GT': '>',
    'GTEQ': '>=',
    'LT': '<',
    'LTEQ': '<=',
    'NE': '!='
}
# Mapping from query-form field names to Conference property names.
FIELDS = {
    'CITY': 'city',
    'TOPIC': 'topics',
    'MONTH': 'month',
    'MAX_ATTENDEES': 'maxAttendees',
}
# ResourceContainers bundle a request message with URL path/query parameters.
CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
    ConferenceForm,
    websafeConferenceKey=messages.StringField(1),
)
SESSION_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1, required=True),
    sessionType=messages.StringField(2),
    speaker=messages.StringField(3)
)
SESSION_POST_REQUEST = endpoints.ResourceContainer(
    SessionForm,
    websafeConferenceKey=messages.StringField(1, required=True),
)
SPEAKER_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    speaker=messages.StringField(1, required=True),
)
WISHLIST_POST_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeSessionKey=messages.StringField(1, required=True),
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api( name='conference',
                version='v1',
                allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
                scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
    """Conference API v0.1"""

    # - - - Conference objects - - - - - - - - - - - - - - - - -
    def _copyConferenceToForm(self, conf, displayName):
        """Copy relevant fields from Conference to ConferenceForm."""
        cf = ConferenceForm()
        for field in cf.all_fields():
            if hasattr(conf, field.name):
                # convert Date to date string; just copy others
                if field.name.endswith('Date'):
                    setattr(cf, field.name, str(getattr(conf, field.name)))
                else:
                    setattr(cf, field.name, getattr(conf, field.name))
            elif field.name == "websafeKey":
                # form-only field: expose the datastore key in URL-safe form
                setattr(cf, field.name, conf.key.urlsafe())
        if displayName:
            setattr(cf, 'organizerDisplayName', displayName)
        cf.check_initialized()
        return cf

    def _createConferenceObject(self, request):
        """Create or update Conference object, returning ConferenceForm/request."""
        # preload necessary data items
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        if not request.name:
            raise endpoints.BadRequestException("Conference 'name' field required")
        # copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        # form-only fields that have no counterpart on the Conference model
        del data['websafeKey']
        del data['organizerDisplayName']
        # add default values for those missing (both data model & outbound Message)
        for df in DEFAULTS:
            if data[df] in (None, []):
                data[df] = DEFAULTS[df]
                setattr(request, df, DEFAULTS[df])
        # convert dates from strings to Date objects; set month based on start_date
        if data['startDate']:
            data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
            data['month'] = data['startDate'].month
        else:
            data['month'] = 0
        if data['endDate']:
            data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
        # set seatsAvailable to be same as maxAttendees on creation
        # both for data model & outbound Message
        if data["maxAttendees"] > 0:
            data["seatsAvailable"] = data["maxAttendees"]
            setattr(request, "seatsAvailable", data["maxAttendees"])
        # make Profile Key from user ID
        p_key = ndb.Key(Profile, user_id)
        # allocate new Conference ID with Profile key as parent
        c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
        # make Conference key from ID
        c_key = ndb.Key(Conference, c_id, parent=p_key)
        data['key'] = c_key
        data['organizerUserId'] = request.organizerUserId = user_id
        # create Conference & return (modified) ConferenceForm
        # send confirmation email to organizer
        Conference(**data).put()
        taskqueue.add(params={'email': user.email(),
            'conferenceInfo': repr(request)},
            url='/tasks/send_confirmation_email'
        )
        return request

    @ndb.transactional()
    def _updateConferenceObject(self, request):
        """Update an existing Conference; only its creator may modify it."""
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        # copy ConferenceForm/ProtoRPC Message into dict
        # NOTE(review): this dict is never read — `data` is rebound per-field in
        # the loop below, so this line appears to be dead work.
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        # update existing conference
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        # check that conference exists
        if not conf:
            raise endpoints.NotFoundException(
                'No conf found with key: %s' % request.websafeConferenceKey)
        # check that user is owner
        if user_id != conf.organizerUserId:
            raise endpoints.ForbiddenException(
                'Only the owner can update the conference.')
        # Not getting all the fields, so don't create a new object; just
        # copy relevant fields from ConferenceForm to Conference object
        for field in request.all_fields():
            data = getattr(request, field.name)
            # only copy fields where we get data
            if data not in (None, []):
                # special handling for dates (convert string to Date)
                if field.name in ('startDate', 'endDate'):
                    data = datetime.strptime(data, "%Y-%m-%d").date()
                    if field.name == 'startDate':
                        conf.month = data.month
                # write to Conference object
                setattr(conf, field.name, data)
        conf.put()
        prof = ndb.Key(Profile, user_id).get()
        return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))

    # --Conference related endpoints --------------------------------------------
    @endpoints.method(ConferenceForm, ConferenceForm, path='conference',
            http_method='POST', name='createConference')
    def createConference(self, request):
        """Create new conference."""
        return self._createConferenceObject(request)

    @endpoints.method(CONF_POST_REQUEST, ConferenceForm,
            path='conference/{websafeConferenceKey}',
            http_method='PUT', name='updateConference')
    def updateConference(self, request):
        """Update conference w/provided fields & return w/updated info."""
        return self._updateConferenceObject(request)

    @endpoints.method(CONF_GET_REQUEST, ConferenceForm,
            path='conference/{websafeConferenceKey}',
            http_method='GET', name='getConference')
    def getConference(self, request):
        """Return requested conference (by websafeConferenceKey)."""
        # get Conference object from request; bail if not found
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conf found with key: %s' % request.websafeConferenceKey)
        # organizer Profile is the conference's datastore parent
        prof = conf.key.parent().get()
        # return ConferenceForm
        return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))

    @endpoints.method(message_types.VoidMessage, ConferenceForms,
            path='getConferencesCreated',
            http_method='POST', name='getConferencesCreated')
    def getConferencesCreated(self, request):
        """Return conferences created by user."""
        # make sure user is authed
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        # create ancestor query for all key matches for this user
        confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
        prof = ndb.Key(Profile, user_id).get()
        # return set of ConferenceForm objects per Conference
        return ConferenceForms(
            items=[self._copyConferenceToForm(conf, getattr(prof, 'displayName')) for conf in confs]
        )

    def _getQuery(self, request):
        """Return formatted query from the submitted filters."""
        q = Conference.query()
        inequality_filter, filters = self._formatFilters(request.filters)
        # If exists, sort on inequality filter first
        # (Datastore requires the inequality property to be the first sort order)
        if not inequality_filter:
            q = q.order(Conference.name)
        else:
            q = q.order(ndb.GenericProperty(inequality_filter))
            q = q.order(Conference.name)
        for filtr in filters:
            # integer-valued properties arrive as strings from the form
            if filtr["field"] in ["month", "maxAttendees"]:
                filtr["value"] = int(filtr["value"])
            formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
            q = q.filter(formatted_query)
        return q

    def _formatFilters(self, filters):
        """Parse, check validity and format user supplied filters."""
        formatted_filters = []
        inequality_field = None
        for f in filters:
            filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
            try:
                filtr["field"] = FIELDS[filtr["field"]]
                filtr["operator"] = OPERATORS[filtr["operator"]]
            except KeyError:
                raise endpoints.BadRequestException("Filter contains invalid field or operator.")
            # Every operation except "=" is an inequality
            if filtr["operator"] != "=":
                # check if inequality operation has been used in previous filters
                # disallow the filter if inequality was performed on a different field before
                # track the field on which the inequality operation is performed
                if inequality_field and inequality_field != filtr["field"]:
                    raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
                else:
                    inequality_field = filtr["field"]
            formatted_filters.append(filtr)
        return (inequality_field, formatted_filters)

    @endpoints.method(ConferenceQueryForms, ConferenceForms,
            path='queryConferences',
            http_method='POST',
            name='queryConferences')
    def queryConferences(self, request):
        """Query for conferences."""
        conferences = self._getQuery(request)
        # need to fetch organiser displayName from profiles
        # get all keys and use get_multi for speed
        organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]
        profiles = ndb.get_multi(organisers)
        # put display names in a dict for easier fetching
        names = {}
        for profile in profiles:
            names[profile.key.id()] = profile.displayName
        # return individual ConferenceForm object per Conference
        return ConferenceForms(
                items=[self._copyConferenceToForm(conf, names[conf.organizerUserId]) for conf in
                conferences]
        )

    @endpoints.method(CONF_GET_REQUEST, SessionForms,
            path='conference/{websafeConferenceKey}/sessions',
            http_method='GET', name='getConferenceSessions')
    def getConferenceSessions(self, request):
        """Given a conference, returns all sessions."""
        # copy ConferenceForm/ProtoRPC Message into dict
        # NOTE(review): `data` is unused in this method
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        # fetch conference
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        # check that conference exists
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % request.websafeConferenceKey)
        # create ancestor query for all key matches for this conference
        sessions = Session.query(ancestor=conf.key).fetch()
        # return set of SessionForm objects per Session
        return SessionForms(
            items=[self._copySessionToForm(session) for session in sessions]
        )

    @endpoints.method(SESSION_GET_REQUEST, SessionForms,
            path='conference/{websafeConferenceKey}/sessions/by_type/{sessionType}',
            http_method='GET', name='getConferenceSessionsByType')
    def getConferenceSessionsByType(self, request):
        """For a conference ,Return all sessions of
        a specified type(eg of types: lecture, workshop)"""
        # copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        sessionType = data['sessionType']
        # fetch existing conference
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        # check that conference exists
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % request.websafeConferenceKey)
        # create ancestor query for all key matches for this conference
        sessions = Session.query(ancestor=conf.key).filter(Session.sessionType == sessionType)
        # return set of SessionForm objects per session
        return SessionForms(
            items=[self._copySessionToForm(session) for session in sessions]
        )

    @endpoints.method(SESSION_GET_REQUEST, SessionForms,
            path='conference/{websafeConferenceKey}/sessions/by_speaker/{speaker}',
            http_method='GET', name='getConferenceSessionsBySpeaker')
    def getConferenceSessionsBySpeaker(self, request):
        """For a conference ,Return all sessions of
        by a specified speaker """
        # copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        sessionSpeaker = data['speaker']
        # fetch existing conference
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        # check that conference exists
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % request.websafeConferenceKey)
        # create ancestor query for all key matches for this conference
        sessions = Session.query(ancestor=conf.key).filter(Session.speaker == sessionSpeaker)
        # return set of SessionForm objects per session
        return SessionForms(
            items=[self._copySessionToForm(session) for session in sessions]
        )

    # - - - Session objects - - - - - - - - - - - - - - - - - - -
    def _createSessionObject(self, request):
        """Create or update Session object, returning SessionForm/request."""
        # preload necessary data items
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        if not request.name:
            raise endpoints.BadRequestException("Session'name' field required")
        # fetch and check conference
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        # check that conference exists
        if not conf:
            raise endpoints.NotFoundException(
                'Conference not found for key: %s' % request.websafeConferenceKey)
        # check that user is owner
        if user_id != conf.organizerUserId:
            raise endpoints.ForbiddenException(
                'You need to be the owner to add sessions.')
        # copy SessionForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        # convert dates from strings to Date objects
        if data['date']:
            data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date()
        # convert time from strings to Time object (date-independent)
        if data['startTime']:
            data['startTime'] = datetime.strptime(data['startTime'][:5], "%H:%M").time()
        # make session key from conf key
        p_key = conf.key
        # allocate new session id with conf
        s_id = Session.allocate_ids(size=1, parent=p_key)[0]
        s_key = ndb.Key(Session, s_id, parent=p_key)
        data['key'] = s_key
        data['organizerUserId'] = user_id
        # drop form-only fields with no Session model counterpart
        del data['websafeConferenceKey']
        del data['websafeKey']
        Session(**data).put()
        # Add to the task queue a task for setting cache
        # Task will check if speaker is in more than one session
        # If yes, will cache results
        # NOTE(review): 'confKey' is passed as a raw ndb.Key; taskqueue params
        # are stringified — confirm the task handler expects that form (vs.
        # p_key.urlsafe()).
        taskqueue.add(
            params={
                'confKey': p_key,
                'speaker': data['speaker']
            },
            url='/tasks/set_featured_speaker'
        )
        return request

    def _copySessionToForm(self, session):
        """Copy relevant fields from Session to SessionForm."""
        sf = SessionForm()
        for field in sf.all_fields():
            if hasattr(session, field.name):
                # convert Date and Time to date string; just copy others
                if field.name in ['startTime', 'date']:
                    setattr(sf, field.name, str(getattr(session, field.name)))
                else:
                    setattr(sf, field.name, getattr(session, field.name))
            elif field.name == "websafeKey":
                setattr(sf, field.name, session.key.urlsafe())
        sf.check_initialized()
        return sf

    # Session end points
    @endpoints.method(SessionForm, SessionForm,
            path='sessions',
            http_method='POST', name='createSession')
    def createSession(self, request):
        """create new session"""
        return self._createSessionObject(request)

    @endpoints.method(SPEAKER_GET_REQUEST, SessionForms,
            path='sessions/{speaker}',
            http_method='GET', name='getSessionsBySpeaker')
    def getSessionsBySpeaker(self, request):
        """Return all sessions for a speaker across all conferences"""
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        speaker = data['speaker']
        # filter query sessions for a speaker
        sessions = Session.query(Session.speaker == speaker)
        # return session forms
        return SessionForms(
            items=[self._copySessionToForm(session) for session in sessions]
        )

    @endpoints.method(CONF_GET_REQUEST, SessionForms,
            http_method='GET', name='getConferenceSessionsToDate')
    def getConferenceSessionsToDate(self, request):
        """Returns a conference's sessions to date sorted by date & time."""
        # copy ConferenceForm/ProtoRPC Message into dict
        # NOTE(review): `data` is unused in this method
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        # fetch existing conference
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        # check that conference exists
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % request.websafeConferenceKey)
        # NOTE(review): Session.date is stored as a date (see
        # _createSessionObject's strptime(...).date()) but is compared to a
        # full datetime here — confirm ndb accepts this comparison.
        sessions = Session.query(ancestor=conf.key)\
            .filter(Session.date <= datetime.now())\
            .order(Session.date, Session.startTime)
        # return set of SessionForm objects per Session
        return SessionForms(
            items=[self._copySessionToForm(session) for session in sessions]
        )

    # - - - Profile objects - - - - - - - - - - - - - - - - - - -
    def _copyProfileToForm(self, prof):
        """Copy relevant fields from Profile to ProfileForm."""
        # copy relevant fields from Profile to ProfileForm
        pf = ProfileForm()
        for field in pf.all_fields():
            if hasattr(prof, field.name):
                # convert t-shirt string to Enum; just copy others
                if field.name == 'teeShirtSize':
                    setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
                else:
                    setattr(pf, field.name, getattr(prof, field.name))
        pf.check_initialized()
        return pf

    def _getProfileFromUser(self):
        """Return user Profile from datastore, creating new one if non-existent."""
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        p_key = ndb.Key(Profile, user_id)
        profile = p_key.get()
        if not profile:
            # first time this user is seen: create a default Profile
            profile = Profile(
                key=p_key,
                displayName=user.nickname(),
                mainEmail=user.email(),
                teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED),
            )
            profile.put()
        return profile      # return Profile

    def _doProfile(self, save_request=None):
        """Get user Profile and return to user, possibly updating it first."""
        # get user Profile
        prof = self._getProfileFromUser()
        # if saveProfile(), process user-modifyable fields
        if save_request:
            for field in ('displayName', 'teeShirtSize'):
                if hasattr(save_request, field):
                    val = getattr(save_request, field)
                    if val:
                        setattr(prof, field, str(val))
            prof.put()
        # return ProfileForm
        return self._copyProfileToForm(prof)

    @endpoints.method(message_types.VoidMessage, ProfileForm,
            path='profile', http_method='GET', name='getProfile')
    def getProfile(self, request):
        """Return user profile."""
        return self._doProfile()

    @endpoints.method(ProfileMiniForm, ProfileForm,
            path='profile', http_method='POST', name='saveProfile')
    def saveProfile(self, request):
        """Update & return user profile."""
        return self._doProfile(request)

    @endpoints.method(WISHLIST_POST_REQUEST, SessionForm,
            http_method='POST', name='addSessionToWishlist')
    def addSessionToWishlist(self, request):
        """Adds a session to a user's wishlist"""
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        # fetch and check session
        session = ndb.Key(urlsafe=request.websafeSessionKey).get()
        # check that session exists
        if not session:
            raise endpoints.NotFoundException(
                'No session found with key: %s' % request.websafeSessionKey)
        # fetch profile
        prof = self._getProfileFromUser()
        # check if session already added to wishlist
        if session.key in prof.sessionWishlist:
            raise endpoints.BadRequestException(
                'Session already saved to wishlist: %s' % request.websafeSessionKey)
        # append to user profile's wishlist
        prof.sessionWishlist.append(session.key)
        prof.put()
        return self._copySessionToForm(session)

    @endpoints.method(message_types.VoidMessage, SessionForms,
            http_method='GET', name='getSessionsInWishlist')
    def getSessionsInWishlist(self, request):
        """Returns a user's wishlist of sessions to attend"""
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        # fetch profile and wishlist
        prof = self._getProfileFromUser()
        session_keys = prof.sessionWishlist
        sessions = [session_key.get() for session_key in session_keys]
        # return sessions set
        return SessionForms(
            items=[self._copySessionToForm(session) for session in sessions]
        )

    @endpoints.method(message_types.VoidMessage, SpeakerForm,
            http_method='GET', name='getFeaturedSpeaker')
    def getFeaturedSpeaker(self, request):
        """Returns the sessions of the featured speaker"""
        # attempt to get data from memcache
        data = memcache.get(MEMCACHE_FEAT_SPKR_KEY)
        speaker = None
        sessionNames = []
        # NOTE: dict.has_key() is Python 2 only (fine on classic GAE runtime)
        if data and data.has_key('speaker') and data.has_key('sessionNames'):
            speaker = data['speaker']
            sessionNames = data['sessionNames']
        # populate speaker form
        sf = SpeakerForm()
        for field in sf.all_fields():
            if field.name == 'sessionNames':
                setattr(sf, field.name, sessionNames)
            elif field.name == 'speaker':
                setattr(sf, field.name, speaker)
        sf.check_initialized()
        return sf

    @endpoints.method(message_types.VoidMessage, SessionForms,
            http_method='GET', name='getNonWorkshopDaySessions')
    def getNonWorkshopDaySessions(self, request):
        """Returns non-workshop sessions scheduled before 7pm"""
        day_sessions = Session.query(ndb.AND(
            Session.startTime <= time(hour=19),
            Session.startTime != None)
        )
        # loop thru to filter out workshop type sessions
        qualified_sessions = []
        for session in day_sessions:
            # NOTE(review): other queries filter on Session.sessionType —
            # confirm 'typeOfSession' is a real Session attribute here.
            if 'workshop' in session.typeOfSession:
                continue
            else:
                qualified_sessions.append(session)
        return SessionForms(
            items=[self._copySessionToForm(session) for session in qualified_sessions]
        )

    # - - - Registration - - - - - - - - - - - - - - - - - - - -
    @ndb.transactional(xg=True)
    def _conferenceRegistration(self, request, reg=True):
        """Register or unregister user for selected conference."""
        retval = None
        # get user Profile
        prof = self._getProfileFromUser()
        # check if conf exists given websafeConfKey
        # get conference; check that it exists
        wsck = request.websafeConferenceKey
        conf = ndb.Key(urlsafe=wsck).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % wsck)
        # register
        if reg:
            # check if user already registered otherwise add
            if wsck in prof.conferenceKeysToAttend:
                raise ConflictException(
                    "You have already registered for this conference")
            # check if seats avail
            if conf.seatsAvailable <= 0:
                raise ConflictException(
                    "There are no seats available.")
            # register user, take away one seat
            prof.conferenceKeysToAttend.append(wsck)
            conf.seatsAvailable -= 1
            retval = True
        # unregister
        else:
            # check if user already registered
            if wsck in prof.conferenceKeysToAttend:
                # unregister user, add back one seat
                prof.conferenceKeysToAttend.remove(wsck)
                conf.seatsAvailable += 1
                retval = True
            else:
                retval = False
        # write things back to the datastore & return
        prof.put()
        conf.put()
        return BooleanMessage(data=retval)

    @endpoints.method(message_types.VoidMessage, ConferenceForms,
            path='conferences/attending',
            http_method='GET', name='getConferencesToAttend')
    def getConferencesToAttend(self, request):
        """Get list of conferences that user has registered for."""
        prof = self._getProfileFromUser() # get user Profile
        conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend]
        conferences = ndb.get_multi(conf_keys)
        # get organizers
        organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
        profiles = ndb.get_multi(organisers)
        # put display names in a dict for easier fetching
        names = {}
        for profile in profiles:
            names[profile.key.id()] = profile.displayName
        # return set of ConferenceForm objects per Conference
        return ConferenceForms(items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])
         for conf in conferences])

    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
            path='conference/{websafeConferenceKey}',
            http_method='POST', name='registerForConference')
    def registerForConference(self, request):
        """Register user for selected conference."""
        return self._conferenceRegistration(request)

    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
            path='conference/{websafeConferenceKey}',
            http_method='DELETE', name='unregisterFromConference')
    def unregisterFromConference(self, request):
        """Unregister user for selected conference."""
        return self._conferenceRegistration(request, reg=False)

    # - - - Announcements - - - - - - - - - - - - - - - - - - - -
    @staticmethod
    def _cacheAnnouncement():
        """Create Announcement & assign to memcache; used by
        memcache cron job & putAnnouncement().
        """
        confs = Conference.query(ndb.AND(
            Conference.seatsAvailable <= 5,
            Conference.seatsAvailable > 0)
        ).fetch(projection=[Conference.name])
        if confs:
            # If there are almost sold out conferences,
            # format announcement and set it in memcache
            announcement = '%s %s' % (
                'Last chance to attend! The following conferences '
                'are nearly sold out:',
                ', '.join(conf.name for conf in confs))
            memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
        else:
            # If there are no sold out conferences,
            # delete the memcache announcements entry
            announcement = ""
            memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
        return announcement

    @endpoints.method(message_types.VoidMessage, StringMessage,
            path='conference/announcement/get',
            http_method='GET', name='getAnnouncement')
    def getAnnouncement(self, request):
        """Return Announcement from memcache."""
        # return an existing announcement from Memcache or an empty string.
        announcement = (memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or "")
        return StringMessage(data=announcement)
# Register the API service so the endpoints framework can serve it.
api = endpoints.api_server([ConferenceApi])
| {
"content_hash": "b642ccaa3527194c5df3ba00747ea800",
"timestamp": "",
"source": "github",
"line_count": 829,
"max_line_length": 110,
"avg_line_length": 39.49698431845597,
"alnum_prop": 0.6138716672265828,
"repo_name": "AjayRad/project4_conf_central",
"id": "71d8b113821d792ef0d2c231af93828f785f8213",
"size": "32766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conference.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "23913"
},
{
"name": "JavaScript",
"bytes": "32836"
},
{
"name": "Python",
"bytes": "41870"
}
],
"symlink_target": ""
} |
import hmac
from datetime import datetime
from time import mktime
from handler import *
from model import *
def genInvite():
    """Build a signed invite code for the current user.

    The payload is "<user_id>.<unix_time>"; an 8-hex-digit HMAC tag keyed
    on the site secret is appended so the code can be verified later.
    """
    token = '%i.%i' % (session.user.id, mktime(datetime.now().timetuple()))
    tag = hmac.new(Config.getString('secret_key'), token).hexdigest()[:8]
    return token + tag
@handler('invite/index')
def get_index():
    # Render the invite page with an absolute URL carrying a freshly
    # signed invite code for the current user.
    return dict(invite_url='https://questcompanions.com' + get_accept.url(code=genInvite()))
@handler('invite/index')
def post_index(email):
    """Validate the address and email an invite code to a friend."""
    # Reject malformed addresses and header-injection characters.
    has_bad_chars = '\n' in email or '\r' in email or ',' in email
    if u'@' not in email or has_bad_chars:
        return dict(error='Invalid email')
    if len(User.some(email=email)):
        return dict(alert='Your friend is already a QuestCompanions member')
    code = genInvite()
    # Admin invites are sent without a referring username.
    sender = None if session.user.admin else session.user.username
    handler.email(email, 'invite', code=code, username=sender)
    return dict(alert='Invite sent successfully!')
def codeCheck(code):
    """Verify the 8-hex-digit HMAC tag appended to an invite code."""
    payload = code[:-8]
    tag = code[-8:]
    expected = hmac.new(Config.getString('secret_key'), payload).hexdigest()[:8]
    return expected == tag
@handler('invite/accept', authed=False)
def get_accept(code=None):
    """Show the invite-acceptance page after validating the signed code.

    Fix: compare against None with `is`/`is not` instead of `==`/`!=`
    (PEP 8; equality can be overridden, identity cannot).
    """
    if session.user is not None:
        redirect(handler.index.get_index.url(error='You\'re already registered'))
    elif code is None or not codeCheck(code):
        return dict(error='Invalid invitation code')
    return dict(code=code)
@handler('invite/accept', authed=False)
def post_accept(code, username, password, email):
    """Create the invited user's account, log them in and redirect home.

    Fix: compare against None with `is`/`is not` instead of `==`/`!=`
    (PEP 8; equality can be overridden, identity cannot).
    """
    if session.user is not None:
        redirect(handler.index.get_index.url(error='You\'re already registered'))
    elif code is None or not codeCheck(code):
        return dict(error='Invalid invitation code')
    # Validate the registration fields; report the first problem found.
    error = None
    if User.one(username=username):
        error = u'Username is taken'
    elif len(password) < 6:
        error = u'Password must be at least 6 characters'
    elif u'@' not in email or '\n' in email or '\r' in email or ',' in email:
        error = u'Invalid email'
    if error:
        # Re-render the form with the submitted values preserved.
        return dict(code=code, username=username, email=email, error=error)
    user = User.add(username, password, False)
    user.change(email=email)
    # Log the new user in by storing their id in the session.
    session['userId'] = user.id
    redirect(handler.index.get_index.url(alert='Congratulations! You\'re all set up. Check your email for the verification.'))
| {
"content_hash": "c70df189155de21c8231cc5c8b4e06db",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 125,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.7161716171617162,
"repo_name": "daeken/QuestCompanions",
"id": "911531ab8afbb782f0df3e56b8b311e8e4c24d1e",
"size": "2121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handlers/invite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "5775"
},
{
"name": "JavaScript",
"bytes": "7110"
},
{
"name": "Python",
"bytes": "89114"
},
{
"name": "Ruby",
"bytes": "1833"
}
],
"symlink_target": ""
} |
"""Trim every FASTA record in a file to a [START:END] slice.

Usage: trim.py <fasta_file> <start> <end>
Writes the trimmed records to stdout in FASTA format.
"""
from Bio import SeqIO
import sys

# 0-based, half-open slice bounds (standard Python slicing semantics).
START = int(sys.argv[2])
END = int(sys.argv[3])

# Fix: the input file was opened without ever being closed; use a context
# manager so the handle is released even if parsing fails.
with open(sys.argv[1]) as handle:
    for rec in SeqIO.parse(handle, "fasta"):
        SeqIO.write([rec[START:END]], sys.stdout, "fasta")
| {
"content_hash": "274832cf93e3c66609a69ee99327d8f8",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 51,
"avg_line_length": 20.88888888888889,
"alnum_prop": 0.675531914893617,
"repo_name": "zibraproject/zika-pipeline",
"id": "5a966388af836db384e58630c3a899bb37c10fff",
"size": "211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/trim.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2383"
},
{
"name": "Makefile",
"bytes": "1978"
},
{
"name": "Python",
"bytes": "117335"
},
{
"name": "R",
"bytes": "1158"
},
{
"name": "Shell",
"bytes": "46567"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy as np
np.set_printoptions(precision=3, suppress=True)
from vehicle_core.model import vehicle_model as vm
from vehicle_core.model import dynamic_model as dm
from vehicle_core.util import conversions as cnv
# controller modes
MODE_POSITION = 0
MODE_VELOCITY = 1
MODE_STATION = 2
MAX_PITCH = np.deg2rad(60) # max pitch (rad)
CONSOLE_STATUS = """controller:
pos: %s
des_p: %s
vel: %s
des_v: %s
mode: %s
"""
class VehicleController(object):
    """Parent class describing the behaviour of a generic vehicle controller.

    Wraps the low-level control logic shared by all controllers: measured
    and requested 6-DOF states, the active control mode and the velocity
    limits. Concrete strategies subclass this and override the interface
    methods (update_config, update).
    """

    def __init__(self, dt, config, **kwargs):
        # timing and configuration
        self.dt = dt
        self.config = config

        # start out tracking position requests
        self.ctrl_mode = MODE_POSITION

        # measured state (6-DOF vectors)
        self.pos = np.zeros(6)
        self.vel = np.zeros(6)

        # requested state (6-DOF vectors)
        self.des_pos = np.zeros(6)
        self.des_vel = np.zeros(6)

        # per-axis velocity limits (default: 10 on every axis)
        default_limits = 10 * np.ones(6)
        self.lim_vel = kwargs.get('lim_vel', default_limits)

    def update_config(self, ctrl_config, model_config):
        # interface hook: subclasses reload their gains/parameters here
        pass

    def update(self, position, velocity):
        # interface hook: subclasses compute the control forces here
        return np.zeros(6)

    def __str__(self):
        return CONSOLE_STATUS % (
            self.pos, self.des_pos, self.vel, self.des_vel, self.ctrl_mode
        )
class CascadedController(VehicleController):
    """Model-free cascaded PID controller.

    An outer PID on position produces a velocity request; an inner PID on
    velocity turns that request into a 6-DOF force/torque vector (tau).
    Optionally a feed-forward model OR a linearizing model (mutually
    exclusive) refines the requested forces.
    """

    # per-DOF config section names, in (x, y, z, k, m, n) / (u, v, w, p, q, r) order
    _POS_KEYS = ('pos_x', 'pos_y', 'pos_z', 'pos_k', 'pos_m', 'pos_n')
    _VEL_KEYS = ('vel_u', 'vel_v', 'vel_w', 'vel_p', 'vel_q', 'vel_r')

    def __init__(self, dt, ctrl_config, model_config, **kwargs):
        super(CascadedController, self).__init__(dt, ctrl_config, **kwargs)

        # PID gains (populated by update_config)
        self.pos_Kp = np.zeros(6)
        self.pos_Kd = np.zeros(6)
        self.pos_Ki = np.zeros(6)
        self.vel_Kp = np.zeros(6)
        self.vel_Kd = np.zeros(6)
        self.vel_Ki = np.zeros(6)

        # integral-term clamps (populated by update_config)
        self.pos_lim = np.zeros(6)
        self.vel_lim = np.zeros(6)

        # static trimming offsets applied to depth / pitch forces
        self.offset_z = 0.0
        self.offset_m = 0.0

        # model flags; update() treats them as mutually exclusive
        self.feedforward_model = False
        self.linearized_model = False

        # intermediate requests
        self.req_vel = np.zeros(6)
        self.tau_ctrl = np.zeros(6)
        self.tau_prev = np.zeros(6)

        # position-loop error terms
        self.err_pos = np.zeros(6)
        self.err_pos_prev = np.zeros(6)
        self.err_pos_der = np.zeros(6)
        self.err_pos_int = np.zeros(6)

        # velocity-loop error terms
        self.err_vel = np.zeros(6)
        self.err_vel_prev = np.zeros(6)
        self.err_vel_der = np.zeros(6)
        self.err_vel_int = np.zeros(6)

        # scalar error terms -- not referenced anywhere in this class,
        # kept so existing external readers (if any) keep working
        self.err_intermediate = 0.0
        self.err_intermediate_prev = 0.0
        self.err_intermediate_der = 0.0
        self.err_intermediate_int = 0.0

        # jacobian (body-to-earth velocity transform) and its inverse
        self.J = np.zeros((6, 6))
        self.J_inv = np.zeros((6, 6))

    @staticmethod
    def _config_vector(cfg, keys, field):
        """Gather cfg[key][field] for each per-DOF key into a 6-element array.

        Raises:
            KeyError: if a section or field is missing from the config.
        """
        return np.array([cfg[key][field] for key in keys])

    def update_config(self, ctrl_config, model_config):
        """Reload gains, limits, offsets and model flags from config dicts."""
        # trimming offsets
        self.offset_z = float(ctrl_config.get('offset_z', 0.0))
        self.offset_m = float(ctrl_config.get('offset_m', 0.0))

        # vehicle model (instantiated only if one of the model modes is on)
        self.feedforward_model = bool(ctrl_config.get('feedforward_model', False))
        self.linearized_model = bool(ctrl_config.get('linearized_model', False))

        if self.feedforward_model or self.linearized_model:
            self.model = vm.VehicleModel(model_config)

        # pid parameters (position loop)
        self.pos_Kp = self._config_vector(ctrl_config, self._POS_KEYS, 'kp')
        self.pos_Kd = self._config_vector(ctrl_config, self._POS_KEYS, 'kd')
        self.pos_Ki = self._config_vector(ctrl_config, self._POS_KEYS, 'ki')
        self.pos_lim = self._config_vector(ctrl_config, self._POS_KEYS, 'lim')

        # pid parameters (velocity loop)
        self.vel_Kp = self._config_vector(ctrl_config, self._VEL_KEYS, 'kp')
        self.vel_Kd = self._config_vector(ctrl_config, self._VEL_KEYS, 'kd')
        self.vel_Ki = self._config_vector(ctrl_config, self._VEL_KEYS, 'ki')
        self.vel_lim = self._config_vector(ctrl_config, self._VEL_KEYS, 'lim')
        self.vel_input_lim = self._config_vector(ctrl_config, self._VEL_KEYS, 'input_lim')

        # pitch controller parameters (used only by the disabled pitch
        # compensation line in update())
        self.pitch_surge_coeff = float(ctrl_config.get('pitch_surge_coeff', 0.0))
        self.pitch_rest_coeff = float(ctrl_config.get('pitch_rest_coeff', 0.0))

    def update(self, position, velocity):
        """Run one control step and return the 6-DOF force/torque request."""
        # store nav updates
        self.pos = position
        self.vel = velocity

        # update jacobians (pinv tolerates a singular orientation)
        self.J = dm.update_jacobian(self.J, self.pos[3], self.pos[4], self.pos[5])
        self.J_inv = np.linalg.pinv(self.J)

        # outer loop: position error expressed in the body frame
        self.err_pos = self.pos - self.des_pos
        self.err_pos = np.dot(self.J_inv, self.err_pos.reshape((6, 1))).flatten()

        # wrap angles and limit the pitch error
        self.err_pos[3:6] = cnv.wrap_pi(self.err_pos[3:6])
        self.err_pos[4] = np.clip(self.err_pos[4], -MAX_PITCH, MAX_PITCH)

        # derivative and clamped integral of the position error
        self.err_pos_der = (self.err_pos - self.err_pos_prev) / self.dt
        self.err_pos_int = np.clip(self.err_pos_int + self.err_pos, -self.pos_lim, self.pos_lim)

        # reset the integral term where the error changed sign, to avoid
        # oscillations (depth excluded)
        pos_changed = np.sign(self.err_pos) != np.sign(self.err_pos_prev)
        pos_changed[2] = False  # ignore the depth
        self.err_pos_int[pos_changed] = 0.0

        # update previous error
        self.err_pos_prev = self.err_pos

        # outer PID output: requested velocity
        self.req_vel = (-self.pos_Kp * self.err_pos) + (-self.pos_Kd * self.err_pos_der) + (
            -self.pos_Ki * self.err_pos_int)

        # in velocity mode bypass the outer loop entirely
        if self.ctrl_mode == MODE_VELOCITY:
            self.req_vel = self.des_vel

        # apply user velocity limits (if any)
        self.req_vel = np.clip(self.req_vel, -self.lim_vel, self.lim_vel)

        # inner loop: input-limited velocity error
        self.err_vel = np.clip(self.vel - self.req_vel, -self.vel_input_lim, self.vel_input_lim)
        self.err_vel_der = (self.err_vel - self.err_vel_prev) / self.dt
        self.err_vel_int = np.clip(self.err_vel_int + self.err_vel, -self.vel_lim, self.vel_lim)

        # reset the integral term where the error changed sign (depth excluded)
        vel_changed = np.sign(self.err_vel) != np.sign(self.err_vel_prev)
        vel_changed[2] = False  # ignore the depth
        self.err_vel_int[vel_changed] = 0.0

        # update previous error
        self.err_vel_prev = self.err_vel

        # inner PID output: requested forces
        self.tau_ctrl = (-self.vel_Kp * self.err_vel) + (-self.vel_Kd * self.err_vel_der) + (-self.vel_Ki * self.err_vel_int)

        # plant linearization (only when the feed-forward mode is off):
        # treat the PID output as an acceleration and convert it to forces
        # through the dynamic model
        if self.linearized_model and not self.feedforward_model:
            self.acc = self.tau_ctrl
            self.tau_ctrl = self.model.update_tau(self.pos, self.vel, self.acc)

        # feed-forward model (only when the linearized mode is off)
        if self.feedforward_model and not self.linearized_model:
            self.tau_model = self.model.update_forward_model(self.des_pos, self.req_vel)
            self.tau_model[3] = 0  # ignore roll

            # feed-forward controller
            self.tau_ctrl = self.tau_ctrl + self.tau_model

        # pitch controller (NOT WORKING!)
        #self.tau_ctrl[4] = self.tau_ctrl[4] + self.pitch_surge_coeff * np.abs(self.vel[0]) * self.vel[0] + self.pitch_rest_coeff * np.sin(self.pos[4])

        # hard limits on forces
        # default: no roll allowed
        self.tau_ctrl[3] = 0.0

        # trimming forces: add offsets from config (if any)
        self.tau_ctrl[2] += self.offset_z  # depth
        self.tau_ctrl[4] += self.offset_m  # pitch

        return self.tau_ctrl

    def __str__(self):
        model = 'disabled'

        if self.feedforward_model:
            model = 'feedforward'  # (fixed typo: was 'feedfoward')

        if self.linearized_model:
            model = 'linearized'

        return """%s
model: %s
req_v: %s
lim_v: %s
ep: %s
ed: %s
ei: %s
evp: %s
evd: %s
evi: %s
tau_c: %s
""" % (
            super(CascadedController, self).__str__(),
            model,
            self.req_vel, self.lim_vel,
            self.err_pos, self.err_pos_der, self.err_pos_int,
            self.err_vel, self.err_vel_der, self.err_vel_int,
            self.tau_ctrl
        )
class AutoTuningController(CascadedController):
    """Cascaded PID controller with on-line (adaptive) gain tuning.

    The gains of both loops are adjusted every step proportionally to the
    current error and its integral/derivative, then clipped to the limits
    loaded from the config.
    """

    def __init__(self, dt, ctrl_config, model_config, **kwargs):
        super(AutoTuningController, self).__init__(dt, ctrl_config, model_config, **kwargs)

        # adaptation coefficients for each DOF of the vehicle
        self.adapt_coeff_pos = np.zeros(6)  # position loop
        self.adapt_coeff_vel = np.zeros(6)  # velocity loop

        # gain magnitude limits, ordered (p, i, d)
        self.adapt_limit_pos = np.zeros(3)
        self.adapt_limit_vel = np.zeros(3)

        # pitch compensation coefficients
        self.pitch_surge_coeff = 0.0
        self.pitch_rest_coeff = 0.0

        self.tau_ctrl_prev = np.zeros(6)

    def update_config(self, ctrl_config, model_config):
        """Reload the cascaded-controller parameters plus adaptation terms."""
        # load parameters shared with the default cascaded controller
        super(AutoTuningController, self).update_config(ctrl_config, model_config)

        # adaptation coefficients (per DOF: x y z k m n / u v w p q r)
        self.adapt_coeff_pos = np.array([ctrl_config['adapt_coeff_pos'][dof] for dof in 'xyzkmn'])
        self.adapt_coeff_vel = np.array([ctrl_config['adapt_coeff_vel'][dof] for dof in 'uvwpqr'])

        # gain limits, ordered (p, i, d)
        self.adapt_limit_pos = np.array([ctrl_config['adapt_limit_pos'][g] for g in 'pid'])
        self.adapt_limit_vel = np.array([ctrl_config['adapt_limit_vel'][g] for g in 'pid'])

        # pitch controller parameters
        self.pitch_surge_coeff = float(ctrl_config.get('pitch_surge_coeff', 0.0))
        self.pitch_rest_coeff = float(ctrl_config.get('pitch_rest_coeff', 0.0))

    def update(self, position, velocity):
        """Run one adaptive control step and return the force request."""
        # store nav updates
        self.pos = position
        self.vel = velocity

        # update jacobians
        self.J = dm.update_jacobian(self.J, self.pos[3], self.pos[4], self.pos[5])
        # NOTE(review): plain inv() here (pinv() in the sibling controllers)
        # raises on a singular jacobian -- confirm this is intended
        self.J_inv = np.linalg.inv(self.J)

        # PI position controller: body-frame error
        self.err_pos = self.pos - self.des_pos
        self.err_pos = np.dot(self.J_inv, self.err_pos.reshape(-1, 1)).flatten()

        # wrap angles and limit pitch
        self.err_pos[3:6] = cnv.wrap_pi(self.err_pos[3:6])
        self.err_pos[4] = np.clip(self.err_pos[4], -MAX_PITCH, MAX_PITCH)

        # update the errors
        self.err_pos_int = np.clip(self.err_pos_int + self.err_pos, -self.pos_lim, self.pos_lim)
        self.err_pos_der = (self.err_pos - self.err_pos_prev) / self.dt

        # detect sign changes BEFORE overwriting the previous error.
        # Bug fix: the original assigned err_pos_prev = err_pos first, so the
        # sign comparison below compared the array with itself, was always
        # false, and the anti-oscillation integral reset never fired
        # (CascadedController.update() does this in the correct order).
        pos_changed = np.sign(self.err_pos) != np.sign(self.err_pos_prev)
        pos_changed[2] = False  # ignore the depth

        self.err_pos_prev = self.err_pos

        # adaptive tuning of position gains
        self.pos_Kp += self.adapt_coeff_pos * self.err_pos * np.abs(self.err_pos)
        self.pos_Ki += self.adapt_coeff_pos * self.err_pos * self.err_pos_int
        self.pos_Kd += self.adapt_coeff_pos * self.err_pos * self.err_pos_der

        self.pos_Kp = np.clip(self.pos_Kp, -self.adapt_limit_pos[0], self.adapt_limit_pos[0])
        self.pos_Ki = np.clip(self.pos_Ki, -self.adapt_limit_pos[1], self.adapt_limit_pos[1])
        self.pos_Kd = np.clip(self.pos_Kd, -self.adapt_limit_pos[2], self.adapt_limit_pos[2])

        # position integral terms set to zero to avoid oscillations
        self.err_pos_int[pos_changed] = 0.0

        # PI controller limited (outer loop on position) - velocity
        self.req_vel = (-np.abs(self.pos_Kp) * self.err_pos) + (-np.abs(self.pos_Ki) * self.err_pos_int) + (
            -np.abs(self.pos_Kd) * self.err_pos_der)

        # if running in velocity mode ignore the first pid
        if self.ctrl_mode == MODE_VELOCITY:
            self.req_vel = self.des_vel

        # apply user velocity limits (if any)
        self.req_vel = np.clip(self.req_vel, -self.lim_vel, self.lim_vel)

        # velocity errors
        self.err_vel = np.clip(self.vel - self.req_vel, -self.vel_input_lim, self.vel_input_lim)
        self.err_vel_int = np.clip(self.err_vel_int + self.err_vel, -self.vel_lim, self.vel_lim)
        self.err_vel_der = (self.err_vel - self.err_vel_prev) / self.dt

        # detect sign changes before overwriting the previous error (same
        # ordering bug fix as the position loop above)
        vel_changed = np.sign(self.err_vel) != np.sign(self.err_vel_prev)
        vel_changed[2] = False  # ignore the depth

        self.err_vel_prev = self.err_vel

        # adaptive tuning of velocity gains
        self.vel_Kp += self.adapt_coeff_vel * self.err_vel * np.abs(self.err_vel)
        self.vel_Ki += self.adapt_coeff_vel * self.err_vel * self.err_vel_int
        self.vel_Kd += self.adapt_coeff_vel * self.err_vel * self.err_vel_der

        self.vel_Kp = np.clip(self.vel_Kp, -self.adapt_limit_vel[0], self.adapt_limit_vel[0])
        self.vel_Ki = np.clip(self.vel_Ki, -self.adapt_limit_vel[1], self.adapt_limit_vel[1])
        self.vel_Kd = np.clip(self.vel_Kd, -self.adapt_limit_vel[2], self.adapt_limit_vel[2])

        # velocity integral terms set to zero to avoid oscillations
        self.err_vel_int[vel_changed] = 0.0

        # PI controller velocity
        self.tau_ctrl = (-np.abs(self.vel_Kp) * self.err_vel) + (-np.abs(self.vel_Ki) * self.err_vel_int) + (
            -np.abs(self.vel_Kd) * self.err_vel_der)

        # use feed-forward controller only if the linearized model is disabled
        if self.feedforward_model and not self.linearized_model:
            self.tau_model = self.model.update_forward_model(self.des_pos, self.req_vel)
            self.tau_model[3] = 0  # ignore roll

            # feed-forward controller
            self.tau_ctrl = self.tau_ctrl + self.tau_model

        if self.linearized_model and not self.feedforward_model:
            # treat the PID output as an acceleration and convert it to
            # forces through the dynamic model (plant linearization)
            self.acc = self.tau_ctrl
            self.tau_ctrl = self.model.update_tau(self.pos, self.vel, self.acc)

        # pitch controller
        self.tau_ctrl[4] = self.tau_ctrl[4] + self.pitch_surge_coeff * np.abs(self.vel[0]) * self.vel[0] + self.pitch_rest_coeff * np.sin(self.pos[4])

        # hard limits on forces
        # default: no roll allowed
        self.tau_ctrl[3] = 0.0

        # trimming forces: add offsets from config (if any)
        self.tau_ctrl[2] += self.offset_z  # depth
        self.tau_ctrl[4] += self.offset_m  # pitch

        return self.tau_ctrl

    def __str__(self):
        return """%s
pos_kp: %s
pos_kd: %s
pos_ki: %s
vel_kp: %s
vel_kd: %s
vel_ki: %s
""" % (
            super(AutoTuningController, self).__str__(),
            self.pos_Kp, self.pos_Kd, self.pos_Ki,
            self.vel_Kp, self.vel_Kd, self.vel_Ki,
        )
class CoupledModelController(VehicleController):
    """Model-based controller built around the coupled dynamic model.

    A PID on position produces a velocity request; the force request is the
    sum of a PID acceleration term and the forces predicted by the coupled
    dynamic model for the current state.
    """

    # per-DOF config section names, in (x, y, z, k, m, n) / (u, v, w, p, q, r) order
    _POS_KEYS = ('pos_x', 'pos_y', 'pos_z', 'pos_k', 'pos_m', 'pos_n')
    _VEL_KEYS = ('vel_u', 'vel_v', 'vel_w', 'vel_p', 'vel_q', 'vel_r')

    def __init__(self, dt, ctrl_config, model_config, **kwargs):
        super(CoupledModelController, self).__init__(dt, ctrl_config, **kwargs)

        # gains and limits (populated by update_config)
        self.pos_Kp = np.zeros(6)
        self.pos_Kd = np.zeros(6)
        self.pos_Ki = np.zeros(6)
        self.pos_lim = np.zeros(6)
        self.vel_lim = np.zeros(6)

        # bug fix: this was initialised as 'coupl_lim' while update() and
        # update_config() use 'couple_lim', which made update() raise
        # AttributeError if called before update_config()
        self.couple_lim = np.zeros(6)

        # trimming offsets
        self.offset_z = 0.0
        self.offset_m = 0.0

        # intermediate requests
        self.req_tau = np.zeros(6)
        self.tau_ctrl = np.zeros(6)
        self.tau_prev = np.zeros(6)

        # previous navigation state (for numerical differentiation)
        self.pos_prev = np.zeros(6)
        self.vel_prev = np.zeros(6)

        # position-loop error terms
        self.err_pos = np.zeros(6)
        self.err_pos_prev = np.zeros(6)
        self.err_pos_der = np.zeros(6)
        self.err_pos_int = np.zeros(6)

        # velocity-loop error terms
        self.err_vel = np.zeros(6)
        self.err_vel_prev = np.zeros(6)
        self.err_vel_der = np.zeros(6)
        self.err_vel_int = np.zeros(6)

        # jacobian (body-to-earth velocity transform) and its inverse
        self.J = np.zeros((6, 6))
        self.J_inv = np.zeros((6, 6))

        # estimated acceleration (derivative of the measured velocity)
        self.des_acc = np.zeros(6)

    @staticmethod
    def _config_vector(cfg, keys, field):
        """Gather cfg[key][field] for each per-DOF key into a 6-element array.

        Raises:
            KeyError: if a section or field is missing from the config.
        """
        return np.array([cfg[key][field] for key in keys])

    def update_config(self, ctrl_config, model_config):
        """Reload gains, limits and offsets, and (re)build the vehicle model."""
        # trimming offsets
        self.offset_z = float(ctrl_config.get('offset_z', 0.0))
        self.offset_m = float(ctrl_config.get('offset_m', 0.0))

        # vehicle model (always required by this controller)
        self.model = vm.VehicleModel(model_config)

        # pid parameters (position loop)
        self.pos_Kp = self._config_vector(ctrl_config, self._POS_KEYS, 'kp')
        self.pos_Kd = self._config_vector(ctrl_config, self._POS_KEYS, 'kd')
        self.pos_Ki = self._config_vector(ctrl_config, self._POS_KEYS, 'ki')
        self.pos_lim = self._config_vector(ctrl_config, self._POS_KEYS, 'lim')

        # pid parameters (velocity loop)
        self.vel_Kp = self._config_vector(ctrl_config, self._VEL_KEYS, 'kp')
        self.vel_Kd = self._config_vector(ctrl_config, self._VEL_KEYS, 'kd')
        self.vel_Ki = self._config_vector(ctrl_config, self._VEL_KEYS, 'ki')
        self.vel_lim = self._config_vector(ctrl_config, self._VEL_KEYS, 'lim')
        self.vel_input_lim = self._config_vector(ctrl_config, self._VEL_KEYS, 'input_lim')

        # clamp applied to the model-predicted forces
        self.couple_lim = self._config_vector(ctrl_config, self._VEL_KEYS, 'couple_lim')

        # pitch controller parameters (not used in update() here)
        self.pitch_surge_coeff = float(ctrl_config.get('pitch_surge_coeff', 0.0))
        self.pitch_rest_coeff = float(ctrl_config.get('pitch_rest_coeff', 0.0))

    def update(self, position, velocity):
        """Run one control step and return the 6-DOF force/torque request."""
        self.pos = position
        self.vel = velocity

        # update jacobians
        self.J = dm.update_jacobian(self.J, self.pos[3], self.pos[4], self.pos[5])
        self.J_inv = np.linalg.pinv(self.J)

        # outer loop: position error expressed in the body frame
        self.err_pos = self.pos - self.des_pos
        self.err_pos = np.dot(self.J_inv, self.err_pos.reshape((6, 1))).flatten()

        # wrap angles and limit pitch
        self.err_pos[3:6] = cnv.wrap_pi(self.err_pos[3:6])
        self.err_pos[4] = np.clip(self.err_pos[4], -MAX_PITCH, MAX_PITCH)

        # update errors
        self.err_pos_der = (self.err_pos - self.err_pos_prev) / self.dt
        self.err_pos_int = np.clip(self.err_pos_int + self.err_pos, -self.pos_lim, self.pos_lim)

        # reset the integral term where the error changed sign (depth excluded)
        pos_changed = np.sign(self.err_pos) != np.sign(self.err_pos_prev)
        pos_changed[2] = False  # ignore the depth
        self.err_pos_int[pos_changed] = 0.0

        # update previous error
        self.err_pos_prev = self.err_pos

        # first pid output (plus speed limits if requested by the user)
        self.req_vel = (-self.pos_Kp * self.err_pos) + (-self.pos_Kd * self.err_pos_der) + (
            -self.pos_Ki * self.err_pos_int)

        # numerical differentiation of the measured velocity
        self.des_acc = (self.vel - self.vel_prev) / self.dt
        self.pos_prev = self.pos
        self.vel_prev = self.vel

        # if running in velocity mode ignore the first pid
        if self.ctrl_mode == MODE_VELOCITY:
            self.req_vel = self.des_vel

        # apply user velocity limits (if any)
        self.req_vel = np.clip(self.req_vel, -self.lim_vel, self.lim_vel)

        # inner loop: input-limited velocity error
        self.err_vel = np.clip(self.vel - self.req_vel, -self.vel_input_lim, self.vel_input_lim)
        self.err_vel_der = (self.err_vel - self.err_vel_prev) / self.dt
        self.err_vel_int = np.clip(self.err_vel_int + self.err_vel, -self.vel_lim, self.vel_lim)

        # reset the integral term where the error changed sign (depth excluded)
        vel_changed = np.sign(self.err_vel) != np.sign(self.err_vel_prev)
        vel_changed[2] = False  # ignore the depth
        self.err_vel_int[vel_changed] = 0.0

        # update previous error
        self.err_vel_prev = self.err_vel

        # coupled-model based controller: clamped model forces plus a PID
        # acceleration term
        self.tau_prev = self.model.update_coupled_model(self.pos, self.vel, self.des_acc, self.req_vel)
        self.tau_prev = np.clip(self.tau_prev, -self.couple_lim, self.couple_lim)

        self.req_tau = self.des_acc - self.vel_Kp * self.err_vel - self.vel_Ki * self.err_vel_int - self.vel_Kd * self.err_vel_der
        self.tau_ctrl = self.req_tau + self.tau_prev

        # hard limits on forces
        # default: no roll allowed
        self.tau_ctrl[3] = 0.0

        # NOTE(review): offset_z / offset_m are loaded in update_config() but
        # never applied here, unlike the other controllers -- confirm intended

        return self.tau_ctrl

    def __str__(self):
        return """%s
tau_prev: %s
tau_ctrl: %s
""" % (
            super(CoupledModelController, self).__str__(),
            self.tau_prev, self.tau_ctrl,
        )
| {
"content_hash": "89e4d787be3d339825f8f836eec39d6c",
"timestamp": "",
"source": "github",
"line_count": 763,
"max_line_length": 151,
"avg_line_length": 36.17955439056357,
"alnum_prop": 0.5599710197428002,
"repo_name": "decabyte/vehicle_core",
"id": "bc390ff1aa67e7c435678f700b9ac0d79dc2647d",
"size": "29377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/vehicle_core/control/vehicle_controller.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "1203"
},
{
"name": "Makefile",
"bytes": "43"
},
{
"name": "Python",
"bytes": "327502"
},
{
"name": "Shell",
"bytes": "38601"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import bson
import mock
from st2common.constants.triggers import TIMER_TRIGGER_TYPES
from st2common.models.db.trigger import TriggerDB
from st2common.models.system.common import ResourceReference
from st2common.persistence.trigger import TriggerType
from st2common.persistence.trigger import Trigger
from st2reactor.timer.base import St2Timer
from st2tests.base import CleanDbTestCase
class St2TimerTestCase(CleanDbTestCase):
    """Tests for St2Timer trigger-type registration and trigger dispatch."""

    def test_trigger_types_are_registered_on_start(self):
        """start() must create one TriggerType per entry in TIMER_TRIGGER_TYPES."""
        timer = St2Timer()
        # stub the scheduler so start() does not actually schedule jobs
        timer._scheduler = mock.Mock()
        # Verify there are no TriggerType in the db when we start
        # NOTE(review): assertItemsEqual exists only in Python 2 unittest
        # (assertCountEqual on Python 3) -- confirm the base class provides it
        self.assertItemsEqual(TriggerType.get_all(), [])
        timer.start()
        # Verify TriggerType objects have been created
        trigger_type_dbs = TriggerType.get_all()
        self.assertEqual(len(trigger_type_dbs), len(TIMER_TRIGGER_TYPES))
        timer_trigger_type_refs = list(TIMER_TRIGGER_TYPES.keys())
        # each created TriggerType must correspond to a known timer type ref
        for trigger_type in trigger_type_dbs:
            ref = ResourceReference(pack=trigger_type.pack, name=trigger_type.name).ref
            self.assertIn(ref, timer_trigger_type_refs)

    def test_existing_rules_are_loaded_on_start(self):
        """start() must dispatch a create event for every pre-existing Trigger."""
        # Assert that we dispatch message for every existing Trigger object
        St2Timer._handle_create_trigger = mock.Mock()
        timer = St2Timer()
        timer._scheduler = mock.Mock()
        timer._trigger_watcher.run = mock.Mock()
        # Verify there are no Trigger and TriggerType in the db when we start
        self.assertItemsEqual(Trigger.get_all(), [])
        self.assertItemsEqual(TriggerType.get_all(), [])
        # Add a dummy timer Trigger object
        type_ = list(TIMER_TRIGGER_TYPES.keys())[0]
        parameters = {"unit": "seconds", "delta": 1000}
        trigger_db = TriggerDB(
            id=bson.ObjectId(),
            name="test_trigger_1",
            pack="dummy",
            type=type_,
            parameters=parameters,
        )
        trigger_db = Trigger.add_or_update(trigger_db)
        # Verify object has been added
        self.assertEqual(len(Trigger.get_all()), 1)
        timer.start()
        # block until the watcher thread has loaded the existing triggers
        timer._trigger_watcher._load_thread.wait()
        # Verify handlers are called
        timer._handle_create_trigger.assert_called_with(trigger_db)

    @mock.patch("st2common.transport.reactor.TriggerDispatcher.dispatch")
    def test_timer_trace_tag_creation(self, dispatch_mock):
        """Emitted instances must carry a '<type name>-<trigger name>' trace tag."""
        timer = St2Timer()
        timer._scheduler = mock.Mock()
        timer._trigger_watcher = mock.Mock()
        # Add a dummy timer Trigger object
        type_ = list(TIMER_TRIGGER_TYPES.keys())[0]
        parameters = {"unit": "seconds", "delta": 1}
        trigger_db = TriggerDB(
            name="test_trigger_1", pack="dummy", type=type_, parameters=parameters
        )
        timer.add_trigger(trigger_db)
        timer._emit_trigger_instance(trigger=trigger_db.to_serializable_dict())
        # the trace tag is asserted via the patched dispatcher's kwargs
        self.assertEqual(
            dispatch_mock.call_args[1]["trace_context"].trace_tag,
            "%s-%s" % (TIMER_TRIGGER_TYPES[type_]["name"], trigger_db.name),
        )
| {
"content_hash": "2101a68ee030f37e273fc4004c894d38",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 87,
"avg_line_length": 37.07058823529412,
"alnum_prop": 0.6521739130434783,
"repo_name": "Plexxi/st2",
"id": "f4311d18d838bcddae24a01d9e2dde6d8f88cdc2",
"size": "3779",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "st2reactor/tests/unit/test_timer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
} |
'''
Funcion on this file isn't sync. It receive data from configNode and send to structureDevice
It's DONE
****Waste-time = 10 hour ************************
Because : wrong perceptions about Node. It should save inNode on every Device or Sensor
Take advantage of : Learn deep about Dict,list in Python
[Bad Code]:
# tempvalueItems = databaseNode[dataNodeSlave01][tempnameItems]
# listNode = databaseNode.keys()
# for x in range(0,len(listNode)):
# if tempnameNode[listFloorsOnStructure[numberfloor]]["name"] == listNode[x]:
# databaseNode[listNode[x]][]
# return 0
# Hoi bien change
#...
#Hoi floor co phai Node khong, tra ve 1 dict chua cac Floor,Room la Node
tempnameNode={}
for x in range(0,len(importantDatabase)):
namefloor=stringInsert0InFirst("floor_",x,2)
tempnameNode[namefloor]={}
tempnameNode[namefloor].update({"name": importantDatabase[namefloor]["name"]})
for y in range(0,len(importantDatabase[namefloor])-1):
nameroom=stringInsert0InFirst("room_",y,2)
tempnameNode[namefloor][nameroom]={}
tempnameNode[namefloor][nameroom].update({"name": importantDatabase[namefloor][nameroom]["name"]})
if tempnameNode[namefloor][nameroom]["name"] == "NullNode":
del tempnameNode[namefloor][nameroom]
#Delete if Floor is empty && Room is empty
if tempnameNode[namefloor]["name"] == "NullNode":
if(len(tempnameNode[namefloor])==1):
del tempnameNode[namefloor]
#print tempnameNode
'''
import data.structureDevice
reload(data.structureDevice)
from data.structureDevice import importantDatabase
import data.configNode
reload(data.configNode)
from data.configNode import databaseNode
import json
# relative paths to the IoT application folder and its data-module sub-folder
iotDirectory="iot/"
dataDirectory= "data/"
def stringInsert0InFirst(firststring, number, lenWant):
    """Return *firststring* followed by *number* zero-padded to *lenWant* digits.

    Example: stringInsert0InFirst("floor_", 3, 2) -> "floor_03"

    Raises:
        ValueError: if str(number) is already longer than lenWant.
    """
    if len(str(number)) > lenWant:
        raise ValueError("number must be > lenWant")
    # str.zfill replaces the original manual zero-padding loop
    return firststring + str(number).zfill(lenWant)
# Walk the whole structure (floor -> room -> device/sensor -> item) and copy
# the live value of every item from the node database (databaseNode) into
# importantDatabase, then rewrite data/structureDevice.py with the result.
# NOTE(review): Python 2 script (print statements; dict.keys() indexed as a
# list) -- do not run under Python 3 without porting.
listFloorsOnStructure=[]
# list of floors
listFloorsOnStructure=importantDatabase.keys()
#print listFloorsOnStructure
for h in range(0,len(listFloorsOnStructure)):
    listRoomsOnStructure=[]
    listRoomsOnStructure=importantDatabase[listFloorsOnStructure[h]].keys()
    # "nameOnWeb" is a display label, not a room entry
    listRoomsOnStructure.remove("nameOnWeb")
    for k in range(0,len(listRoomsOnStructure)):
        listDSOnStructure=[]
        listDSOnStructure= importantDatabase[listFloorsOnStructure[h]][listRoomsOnStructure[k]].keys()
        listDSOnStructure.remove("nameOnWeb") #Device / Sensor
        for i in range(0,len(listDSOnStructure)):
            listItemsOnStructure=[]
            listItemsOnStructure = importantDatabase[listFloorsOnStructure[h]][listRoomsOnStructure[k]][listDSOnStructure[i]].keys()
            # print listItemsOnStructure
            for x in range(0,len(listItemsOnStructure)):
                # print "OK"
                tempnameItems = importantDatabase[listFloorsOnStructure[h]][listRoomsOnStructure[k]][listDSOnStructure[i]][listItemsOnStructure[x]]['name']
                typeNode = importantDatabase[listFloorsOnStructure[h]][listRoomsOnStructure[k]][listDSOnStructure[i]][listItemsOnStructure[x]]['inNode']
                # print typeNode
                # look the item up in the node database under its node type
                tempvalueItems = databaseNode["d"][typeNode][tempnameItems]
                importantDatabase[listFloorsOnStructure[h]][listRoomsOnStructure[k]][listDSOnStructure[i]][listItemsOnStructure[x]]['value'] = tempvalueItems
                # print listRoomsOnStructure[k],listItemsOnStructure[x],tempvalueItems
# serialize the updated structure back into its generated Python module
dataString=json.dumps(importantDatabase, sort_keys=True,indent=4, separators=(',', ': '))
with open(iotDirectory+dataDirectory+"structureDevice.py", 'w') as configNodefile:
    configNodefile.write("importantDatabase = "+dataString)
    # NOTE(review): close() is redundant inside a with block
    configNodefile.close()
print "Transfering data from Node to StructureDevice is done!"
| {
"content_hash": "1452a4bd162c4b7503950fe102410d6d",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 157,
"avg_line_length": 48.38823529411765,
"alnum_prop": 0.6892778993435449,
"repo_name": "lnanhkhoa/thesis-iot-khoa",
"id": "b221372bb7c6da0ca3fec1299e8375b7e4319041",
"size": "4114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iot/syncconfig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1905319"
},
{
"name": "HTML",
"bytes": "852899"
},
{
"name": "JavaScript",
"bytes": "2193045"
},
{
"name": "Python",
"bytes": "831985"
}
],
"symlink_target": ""
} |
import subprocess
import os
from ..core import InstallFailed
from .pip import PIP_INSTALLER
from ..installers import PackageManagerInstaller
from .source import SOURCE_INSTALLER
from ..shell_utils import read_stdout
# OS key and installer keys used to register the Slackware platform
SLACKWARE_OS_NAME = 'slackware'
SBOTOOLS_INSTALLER = 'sbotools'
SLACKPKG_INSTALLER = 'slackpkg'
def register_installers(context):
    """Expose the Slackware installer backends on *context*."""
    for key, installer in (
        (SBOTOOLS_INSTALLER, SbotoolsInstaller()),
        (SLACKPKG_INSTALLER, SlackpkgInstaller()),
    ):
        context.set_installer(key, installer)
def register_platforms(context):
    """Register Slackware, its installers and the default installer key."""
    for installer_key in (SBOTOOLS_INSTALLER, PIP_INSTALLER,
                          SOURCE_INSTALLER, SLACKPKG_INSTALLER):
        context.add_os_installer_key(SLACKWARE_OS_NAME, installer_key)
    context.set_default_os_installer_key(SLACKWARE_OS_NAME, lambda self: SBOTOOLS_INSTALLER)
def sbotools_available():
    """Return True if the sbotools ``sboinstall`` binary is installed."""
    # direct boolean return replaces the if/return-False/return-True pattern
    return os.path.exists('/usr/sbin/sboinstall')
def sbotools_detect_single(p):
    """Return True if package *p* appears installed, i.e. some entry in
    /var/log/packages starts with *p* (case-insensitive grep)."""
    pkg_list = read_stdout(['ls', '/var/log/packages'])
    # NOTE(review): 'p' is rebound here from the package name to the Popen
    # handle; also communicate() needs bytes on Python 3 -- confirm
    # read_stdout returns bytes
    p = subprocess.Popen(['grep', '-i', '^' + p], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p.communicate(pkg_list)
    # grep exits 0 on a match, so a zero returncode means "installed"
    return not p.returncode
def sbotools_detect(packages):
    """Filter *packages* down to the ones already installed on the system."""
    return list(filter(sbotools_detect_single, packages))
class SbotoolsInstaller(PackageManagerInstaller):
    """Installer backend driving sbotools' ``sboinstall``."""

    def __init__(self):
        super(SbotoolsInstaller, self).__init__(sbotools_detect)

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """Build one elevated ``sboinstall`` command per missing package."""
        if not sbotools_available():
            raise InstallFailed((SBOTOOLS_INSTALLER, 'sbotools is not installed'))

        to_install = self.get_packages_to_install(resolved, reinstall=reinstall)
        if not to_install:
            return []

        # -j: answer "yes" automatically, keeping the install non-interactive
        return [self.elevate_priv(['sboinstall', pkg, '-j']) for pkg in to_install]
def slackpkg_available():
    """Return True if the ``slackpkg`` binary is installed."""
    # direct boolean return replaces the if/return-False/return-True pattern
    return os.path.exists('/usr/sbin/slackpkg')
def slackpkg_detect_single(p):
    """Return True when ``slackpkg search`` finds the package (exit code 0)."""
    rc = subprocess.call(['slackpkg', 'search', p],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return rc == 0
def slackpkg_detect(packages):
    """Filter *packages* down to the ones slackpkg knows about."""
    return list(filter(slackpkg_detect_single, packages))
class SlackpkgInstaller(PackageManagerInstaller):
    """Installer backend driving ``slackpkg install``."""

    def __init__(self):
        super(SlackpkgInstaller, self).__init__(slackpkg_detect)

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """Build one elevated ``slackpkg install`` command per missing package."""
        # slackpkg does not provide non-interactive mode
        pending = self.get_packages_to_install(resolved, reinstall=reinstall)
        if not pending:
            return []
        return [self.elevate_priv(['slackpkg', 'install', pkg]) for pkg in pending]
| {
"content_hash": "97750007523c0ec4016150e5263ae95c",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 120,
"avg_line_length": 32.07954545454545,
"alnum_prop": 0.7059865391427559,
"repo_name": "ros-infrastructure/rosdep",
"id": "d6a199e26c002ae1e70f9320cf083797dc5ea176",
"size": "4453",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/rosdep2/platforms/slackware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "466"
},
{
"name": "Python",
"bytes": "503079"
},
{
"name": "Shell",
"bytes": "296"
}
],
"symlink_target": ""
} |
from datetime import datetime
import pytest
from dateutil import tz as dateutil_tz
from arrow import arrow, factory, formatter, locales, parser
@pytest.fixture(scope="class")
def time_utcnow(request):
    """Attach the current UTC moment as ``self.arrow`` on the test class."""
    now = arrow.Arrow.utcnow()
    request.cls.arrow = now
@pytest.fixture(scope="class")
def time_2013_01_01(request):
    """Provide ``now`` plus an Arrow and a datetime for 2013-01-01."""
    cls = request.cls
    cls.now = arrow.Arrow.utcnow()
    cls.arrow = arrow.Arrow(2013, 1, 1)
    cls.datetime = datetime(2013, 1, 1)
@pytest.fixture(scope="class")
def time_2013_02_03(request):
    """Attach a fixed Arrow (2013-02-03 12:30:45.000001) to the test class."""
    fixed = arrow.Arrow(2013, 2, 3, 12, 30, 45, 1)
    request.cls.arrow = fixed
@pytest.fixture(scope="class")
def time_2013_02_15(request):
    """Attach a fixed datetime and the Arrow built from it."""
    cls = request.cls
    cls.datetime = datetime(2013, 2, 15, 3, 41, 22, 8923)
    cls.arrow = arrow.Arrow.fromdatetime(cls.datetime)
@pytest.fixture(scope="class")
def time_1975_12_25(request):
    """Attach a timezone-aware datetime (America/New_York) and its Arrow."""
    cls = request.cls
    cls.datetime = datetime(
        1975, 12, 25, 14, 15, 16, tzinfo=dateutil_tz.gettz("America/New_York")
    )
    cls.arrow = arrow.Arrow.fromdatetime(cls.datetime)
@pytest.fixture(scope="class")
def arrow_formatter(request):
    """Give the test class a fresh DateTimeFormatter."""
    dt_formatter = formatter.DateTimeFormatter()
    request.cls.formatter = dt_formatter
@pytest.fixture(scope="class")
def arrow_factory(request):
    """Give the test class a fresh ArrowFactory."""
    af = factory.ArrowFactory()
    request.cls.factory = af
@pytest.fixture(scope="class")
def lang_locales(request):
    """Expose arrow's internal name -> locale-class map to the test class."""
    locale_map = locales._locale_map
    request.cls.locales = locale_map
@pytest.fixture(scope="class")
def lang_locale(request):
    """Attach the locale matching the requesting test class.

    Locale test classes are prefixed with ``Test`` (TestEnglishLocale ->
    EnglishLocale), so the locale is resolved dynamically from the class name.
    """
    class_name = request.cls.__name__
    locale_cls_name = class_name[len("Test"):]
    request.cls.locale = locales.get_locale_by_class_name(locale_cls_name)
@pytest.fixture(scope="class")
def dt_parser(request):
    """Give the test class a fresh DateTimeParser."""
    dt = parser.DateTimeParser()
    request.cls.parser = dt
@pytest.fixture(scope="class")
def dt_parser_regex(request):
    """Expose the DateTimeParser format-token regex for direct inspection."""
    pattern = parser.DateTimeParser._FORMAT_RE
    request.cls.format_regex = pattern
@pytest.fixture(scope="class")
def tzinfo_parser(request):
    """Give the test class a fresh TzinfoParser."""
    tz_parser = parser.TzinfoParser()
    request.cls.parser = tz_parser
| {
"content_hash": "d18653f1551c9dfa240bda28e5e0325b",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 114,
"avg_line_length": 27.026666666666667,
"alnum_prop": 0.7192895905278737,
"repo_name": "crsmithdev/arrow",
"id": "5d5b9980e78cd4aa5ec0733a312914e99844a52e",
"size": "2027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "863"
},
{
"name": "Python",
"bytes": "357746"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the luma.led_matrix documentation.
import os
import sys
from datetime import datetime

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))

# Imported after the path tweak so the local checkout wins over any
# installed copy of the package.
from luma.led_matrix import __version__ as version
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.autosectionlabel',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.inheritance_diagram',
    'sphinx.ext.extlinks'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Luma.LED_Matrix: Display driver for MAX7219, WS2812'
author = u'Richard Hull and contributors'
# The copyright year range is kept current automatically at build time.
copyright = u'2015-{0}, {1}'.format(datetime.now().year, author)

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'luma.led_matrix_doc'
# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'luma.led_matrix.tex', u'Luma.LED_Matrix Documentation',
     author, 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'luma.led_matrix', u'Luma.LED_Matrix Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'luma.led_matrix', u'Luma.LED_Matrix Documentation',
     author, 'luma.led_matrix', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# Configuration for intersphinx: cross-link into the docs of Python, Pillow
# and the sibling luma projects.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
    'pillow': ('https://pillow.readthedocs.io/en/latest', None),
    'luma.core': ('https://luma-core.readthedocs.io/en/latest', None),
    'luma.emulator': ('https://luma-emulator.readthedocs.io/en/latest', None)
}
| {
"content_hash": "e0fbee5eceeddfe3c240d118a13d947b",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 79,
"avg_line_length": 32.06390977443609,
"alnum_prop": 0.7038339781920506,
"repo_name": "rm-hull/max7219",
"id": "48afa3ad8f244f1021b609d1e9d48e013ace7eb1",
"size": "8957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95292"
}
],
"symlink_target": ""
} |
import json
import logging
import create_client
def main():
    """Transfer from standard Cloud Storage to Cloud Storage Nearline."""
    logging.getLogger().setLevel(logging.DEBUG)

    client = create_client.create_transfer_client()

    # Edit this template with desired parameters.
    # Specify times below using US Pacific Time Zone.
    transfer_job = '''
    {
        "description": "YOUR DESCRIPTION",
        "status": "ENABLED",
        "projectId": "YOUR_PROJECT_ID",
        "schedule": {
            "scheduleStartDate": {
                "day": 1,
                "month": 1,
                "year": 2015
            },
            "startTimeOfDay": {
                "hours": 1,
                "minutes": 1
            }
        },
        "transferSpec": {
            "gcsDataSource": {
                "bucketName": "YOUR_SOURCE_BUCKET"
            },
            "gcsDataSink": {
                "bucketName": "YOUR_SINK_BUCKET"
            },
            "objectConditions": {
                "minTimeElapsedSinceLastModification": "2592000s"
            },
            "transferOptions": {
                "deleteObjectsFromSourceAfterTransfer": true
            }
        }
    }
    '''

    # Parse the template and submit it through the Storage Transfer API.
    job_body = json.loads(transfer_job)
    result = client.transferJobs().create(body=job_body).execute()
    logging.info('Returned transferJob: %s', json.dumps(result, indent=4))
# Script entry point: submit the transfer job defined above.
if __name__ == '__main__':
    main()
# [END all]
| {
"content_hash": "bd2f3df3adf3efa564f4374e9adb3afc",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 75,
"avg_line_length": 28.21153846153846,
"alnum_prop": 0.5173824130879345,
"repo_name": "kocicjelena/python-docs-samples",
"id": "d31bc19072004c1b2cad84b419e9e045bdd7ada1",
"size": "2057",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "storage/storage_transfer/nearline_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "128560"
}
],
"symlink_target": ""
} |
import atexit
import random
import shutil
import subprocess
import tempfile
import time
import pymongo
import pytest
class MongoTemporaryInstance(object):
    """Singleton to manage a temporary MongoDB instance

    Use this for testing purpose only. The instance is automatically destroyed
    at the end of the program.
    """

    _instance = None

    @classmethod
    def get_instance(cls):
        """Return the shared instance, creating it on first use.

        The instance's shutdown is registered with ``atexit`` so the
        temporary mongod is torn down when the interpreter exits.
        """
        if cls._instance is None:
            cls._instance = cls()
            atexit.register(cls._instance.shutdown)
        return cls._instance

    def __init__(self):
        self._tmpdir = tempfile.mkdtemp()
        self._port = 27017
        # Keep a handle on the log file so shutdown() can close it; the
        # original opened it inline and leaked the file descriptor.
        self._logfile = open('/tmp/mongo-temp.log', 'wb')
        self._process = subprocess.Popen(
            ['mongod', '--bind_ip', 'localhost',
             '--port', str(self._port),
             '--dbpath', self._tmpdir,
             '--nojournal', '--nohttpinterface',
             '--noauth', '--smallfiles',
             '--syncdelay', '0',
             '--nssize', '1', ],
            stdout=self._logfile,
            stderr=subprocess.STDOUT)

        # XXX: wait for the instance to be ready
        # Mongo is ready in a glance, we just wait to be able to open a
        # Connection.
        for i in range(10):
            time.sleep(0.2)
            try:
                self._conn = pymongo.MongoClient('localhost', self._port)
            except pymongo.errors.ConnectionFailure:
                continue
            else:
                break
        else:
            self.shutdown()
            # Raise explicitly instead of `assert False`, which would be
            # stripped when running under ``python -O``.
            raise AssertionError('Cannot connect to the mongodb test instance')

    @property
    def conn(self):
        """pymongo client connected to the temporary instance."""
        return self._conn

    @property
    def port(self):
        """TCP port the temporary mongod listens on."""
        return self._port

    def shutdown(self):
        """Terminate mongod, close its log file and remove the data dir."""
        if self._process:
            self._process.terminate()
            self._process.wait()
            self._process = None
        if getattr(self, '_logfile', None) is not None:
            self._logfile.close()
            self._logfile = None
        shutil.rmtree(self._tmpdir, ignore_errors=True)

    def get_uri(self):
        """
        Convenience function to get a mongodb URI to the temporary database.

        :return: URI
        """
        return 'mongodb://localhost:{port!s}'.format(port=self.port)
# NOTE(review): pytest.yield_fixture is deprecated (removed in pytest 6);
# plain pytest.fixture supports yield since pytest 3.0 — confirm the pinned
# pytest version before switching.
@pytest.yield_fixture
def mongodb_instance():
    """Yield a fresh temporary MongoDB instance and always shut it down.

    The try/finally guarantees the mongod process and its temp dir are
    cleaned up even if the consuming test errors out.
    """
    tmp_db = MongoTemporaryInstance()
    try:
        yield tmp_db
    finally:
        tmp_db.shutdown()
| {
"content_hash": "276423a5a1554ebdd3d6fe012558884a",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 82,
"avg_line_length": 29.654761904761905,
"alnum_prop": 0.513849859494179,
"repo_name": "its-dirg/pyop",
"id": "15f46b88bae3a816b14b0c13bbd4c1c806517132",
"size": "2491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/pyop/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "146364"
}
],
"symlink_target": ""
} |
from watcher.notifications import base as notificationbase
from watcher.objects import base
from watcher.objects import fields as wfields
@base.WatcherObjectRegistry.register_notification
class StrategyPayload(notificationbase.NotificationPayloadBase):
    """Notification payload exposing the fields of a Strategy object."""

    # Maps each payload field to the (schema key, source attribute) pair
    # consumed by populate_schema().
    SCHEMA = {
        'uuid': ('strategy', 'uuid'),
        'name': ('strategy', 'name'),
        'display_name': ('strategy', 'display_name'),
        'parameters_spec': ('strategy', 'parameters_spec'),

        'created_at': ('strategy', 'created_at'),
        'updated_at': ('strategy', 'updated_at'),
        'deleted_at': ('strategy', 'deleted_at'),
    }

    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'uuid': wfields.UUIDField(),
        'name': wfields.StringField(),
        'display_name': wfields.StringField(),
        'parameters_spec': wfields.FlexibleDictField(nullable=True),
        'created_at': wfields.DateTimeField(nullable=True),
        'updated_at': wfields.DateTimeField(nullable=True),
        'deleted_at': wfields.DateTimeField(nullable=True),
    }

    def __init__(self, strategy, **kwargs):
        """Build the payload by copying SCHEMA-listed fields from *strategy*."""
        super(StrategyPayload, self).__init__(**kwargs)
        self.populate_schema(strategy=strategy)
| {
"content_hash": "da1a2bf8d15b47eaf044c06343ff85de",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 68,
"avg_line_length": 34.77142857142857,
"alnum_prop": 0.6409202958093673,
"repo_name": "openstack/watcher",
"id": "f7da1091996997f7d49b32594150f60fe41a023d",
"size": "1877",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "watcher/notifications/strategy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "2791159"
},
{
"name": "Shell",
"bytes": "19951"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class ApplicationGatewayBackendHealthServer(Model):
    """Application gateway backendhealth http settings.

    :param address: IP address or FQDN of backend server.
    :type address: str
    :param ip_configuration: Reference of IP configuration of backend server.
    :type ip_configuration: :class:`SubResource
     <azure.mgmt.network.v2017_03_01.models.SubResource>`
    :param health: Health of backend server. Possible values include:
     'Unknown', 'Up', 'Down', 'Partial', 'Draining'
    :type health: str or :class:`ApplicationGatewayBackendHealthServerHealth
     <azure.mgmt.network.v2017_03_01.models.ApplicationGatewayBackendHealthServerHealth>`
    """

    _attribute_map = {
        'address': {'key': 'address', 'type': 'str'},
        'ip_configuration': {'key': 'ipConfiguration', 'type': 'SubResource'},
        'health': {'key': 'health', 'type': 'str'},
    }

    def __init__(self, address=None, ip_configuration=None, health=None):
        # Initialize the msrest Model machinery first, consistent with the
        # other generated models in this SDK; the original skipped this call.
        super(ApplicationGatewayBackendHealthServer, self).__init__()
        self.address = address
        self.ip_configuration = ip_configuration
        self.health = health
| {
"content_hash": "cc344b5997fa6a5714367f5dbc6a3515",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 89,
"avg_line_length": 41.148148148148145,
"alnum_prop": 0.684968496849685,
"repo_name": "v-iam/azure-sdk-for-python",
"id": "b7af66689b6d89e6fc9f29c7962b63d85af4df6f",
"size": "1585",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/application_gateway_backend_health_server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19856874"
}
],
"symlink_target": ""
} |
import unittest
from ...compatibility import StringIO
from ...relationships import Relationships
class TestInitialisation(unittest.TestCase):
    """
    Test initialisation of the Relationships class and call a method.
    """

    def setUp(self):
        # Write into an in-memory buffer so the output can be inspected.
        self.fh = StringIO()
        self.relationships = Relationships()
        self.relationships._set_filehandle(self.fh)

    def test_xml_declaration(self):
        """Test Relationships xml_declaration()"""
        self.relationships._xml_declaration()

        expected = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n"""
        actual = self.fh.getvalue()

        self.assertEqual(actual, expected)
| {
"content_hash": "20f101b37ecff9b1ebbad29bcfb07e8c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 77,
"avg_line_length": 26.48,
"alnum_prop": 0.6540785498489426,
"repo_name": "jkyeung/XlsxWriter",
"id": "760160bc9298eb11b4889bb38f99c73ceff7152d",
"size": "835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xlsxwriter/test/relationships/test_initialisation.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7819"
},
{
"name": "Perl",
"bytes": "3504"
},
{
"name": "Python",
"bytes": "2430294"
},
{
"name": "Shell",
"bytes": "6064"
}
],
"symlink_target": ""
} |
from common import Common, logged
from fleet import Fleet
from sikuli import *
from config import Config
from status import Status
class ResupplyRunner(Common):
    """Resupplies the given fleets through the game's supply screen.

    Drives the UI via Sikuli image matching (``exists``/``sleep`` come from
    ``sikuli import *``); inherits the click helpers from ``Common``.
    """

    def __init__(self, fleets, from_small_resuppy=False, enable_expedition_check=False, message=None):
        # fleets: Fleet objects, resupplied in the given order.
        # from_small_resuppy: click the small supply button image instead of
        #   the large one (name keeps the original's typo for API stability).
        # enable_expedition_check: skip fleets currently out on expedition.
        # message: optional status object told whether a later expedition
        #   check is required.
        self.fleets = fleets
        self.from_small_resuppy = from_small_resuppy
        self.enable_expedition_check = enable_expedition_check
        self.message = message
        if self.enable_expedition_check:
            self.expedition_img = Status(["on_expedition"]).get_images()[0]

    @logged
    def run(self):
        """Open the supply screen and resupply each eligible fleet."""
        if not self.from_small_resuppy:
            supply_btn = "supply.png"
        else:
            supply_btn = "supply_small.png"
        if self.message is not None:
            self.message.set_need_check(False)
        self.clickWithRandomLocationAndResetMouse(supply_btn)
        for fleet in self.fleets:
            # Select the fleet's tab, then resupply it unless it is out on
            # expedition (when the check is enabled).
            self.clickWithRandomOffset(fleet.getNotSelectedImage())
            if not self.__need_resupply():
                continue
            if self.message is not None:  # need record expedition check
                self.message.set_need_check(True)
            self.__resupply_fleet()
        self.back_home_port()
        return True

    def __need_resupply(self):
        # A fleet needs resupply unless the check is on AND the on-expedition
        # marker is currently visible on screen.
        return not self.enable_expedition_check or not exists(self.expedition_img)

    @logged
    def __resupply_fleet(self):
        # Click "resupply all" and give the UI time to settle.
        self.clickWithRandomOffset("resupply_all.png")
        sleep(3)
# Manual entry point for running this script directly inside the Sikuli IDE.
if __name__ == "__main__":
    #runner = ResupplyRunner([Fleet(1)], from_small_resuppy=False)
    runner = ResupplyRunner([Fleet(1), Fleet(2), Fleet(3), Fleet(4)], from_small_resuppy=False, enable_expedition_check=True)
    runner.run()
"content_hash": "31da0232e3395d73137a25fcd405e01f",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 125,
"avg_line_length": 35.15686274509804,
"alnum_prop": 0.6157278304517568,
"repo_name": "tantinevincent/Onegai-ooyodosan",
"id": "43168038cf134f96c38c8137e6866b5ff95c830a",
"size": "1793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resupply_runner.sikuli/resupply_runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30257"
}
],
"symlink_target": ""
} |
from phonedusk.app import create_app
from phonedusk.settings import ProdConfig, DevConfig
def test_production_config():
    """The production config must disable all debug tooling."""
    app = create_app(ProdConfig)
    cfg = app.config
    assert cfg['ENV'] == 'prod'
    assert cfg['DEBUG'] is False
    assert cfg['DEBUG_TB_ENABLED'] is False
    assert cfg['ASSETS_DEBUG'] is False
def test_dev_config():
    """The development config must enable debugging aids."""
    app = create_app(DevConfig)
    cfg = app.config
    assert cfg['ENV'] == 'dev'
    assert cfg['DEBUG'] is True
    assert cfg['ASSETS_DEBUG'] is True
| {
"content_hash": "e16d2a5226285f6622e09496a755e61d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 52,
"avg_line_length": 31.9375,
"alnum_prop": 0.6966731898238747,
"repo_name": "kevana/phonedusk-server",
"id": "b8fca8a084814df625fc5209cb4da679ec855a5d",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "199065"
},
{
"name": "JavaScript",
"bytes": "240956"
},
{
"name": "Python",
"bytes": "43992"
}
],
"symlink_target": ""
} |
"""
Command-line utility for administrative tasks.
"""
import os
import sys
if __name__ == "__main__":
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE",
"blog.settings"
)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "d42b211332b8b32dc0216a303ee30002",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 64,
"avg_line_length": 21.133333333333333,
"alnum_prop": 0.6277602523659306,
"repo_name": "iharsh234/eElectronics",
"id": "07eda594efc1e2a6bf736113573c28f664131055",
"size": "340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "30568"
},
{
"name": "HTML",
"bytes": "43815"
},
{
"name": "JavaScript",
"bytes": "18858"
},
{
"name": "Python",
"bytes": "58096"
}
],
"symlink_target": ""
} |
from import_export import resources, fields
from import_export.admin import ImportExportModelAdmin
from django import forms
from django.conf import settings
from django.contrib import admin
from django.core.exceptions import PermissionDenied
from django.db import models
from django.http import HttpRequest
from import_export.widgets import ForeignKeyWidget
from public_interface.models import FlickrImages, LocalImages, GeneSets, Genes, \
TaxonSets, Sequences, Vouchers
from public_interface.views import change_selected
from public_interface.forms import SequencesAdminForm
class ImageInLine(admin.StackedInline):
    """Inline editor for locally stored voucher images."""
    model = LocalImages
    fields = ['voucher_image']
class FlickImageInLine(admin.StackedInline):
    """Inline editor for Flickr-hosted voucher images."""
    model = FlickrImages
    fields = ['image_file', 'voucher_image']
class BatchImportVouchersResource(resources.ModelResource):
    """django-import-export resource for batch importing Vouchers rows."""

    class Meta:
        model = Vouchers
        import_id_fields = ('code',)
        fields = ('code',
                  'flickr_photo_url',
                  'orden', 'superfamily', 'family', 'subfamily', 'tribe',
                  'subtribe', 'genus', 'species', 'subspecies', 'author',
                  'hostorg', 'type_species', 'country', 'specific_locality',
                  'collector', 'date_collection', 'date_collection_end',
                  'latitude', 'longitude', 'max_altitude', 'min_altitude',
                  'voucher_code', 'voucher', 'voucher_locality',
                  'determined_by', 'sex', 'extraction', 'extraction_tube',
                  'date_extraction', 'published_in', 'notes',
                  )

    def save_instance(self, instance, using_transactions, dry_run=False):
        """Validate coordinates during dry runs; otherwise persist the row.

        :raises Exception: when latitude/longitude cannot be parsed as a
            float (e.g. a decimal comma was used).
        """
        if dry_run:
            if instance.latitude and not coordinate_validated(instance.latitude):
                raise Exception("Latitude is in wrong format: {!r}. "
                                "Use decimal point.".format(instance.latitude))
            if instance.longitude and not coordinate_validated(instance.longitude):
                # Bug fix: this message previously interpolated the latitude.
                raise Exception("Longitude is in wrong format: {!r}. "
                                "Use decimal point.".format(instance.longitude))
        else:
            instance.save()
def coordinate_validated(coord):
    """Return True if *coord* parses as a float.

    Sometimes users input coordinates with a decimal comma ("12,5") or the
    value is missing entirely (None); both are rejected.  The two original
    except clauses are merged into one.
    """
    try:
        float(coord)
    except (TypeError, ValueError):
        return False
    return True
class BatchImportSequencesResource(resources.ModelResource):
    """django-import-export resource for batch importing Sequences rows."""

    # Resolve the imported 'gene' column to a Genes row via its gene_code.
    gene = fields.Field(
        column_name='gene',
        attribute='gene',
        widget=ForeignKeyWidget(Genes, 'gene_code'),
    )

    class Meta:
        model = Sequences
        # A sequence is identified by the (voucher code, gene) pair.
        import_id_fields = ('code', 'gene')
        fields = (
            'code', 'gene', 'accession', 'lab_person', 'genbank', 'notes',
            'sequences',
        )
# Customize what and the way you show it
# Customize what and the way you show it
class VouchersAdmin(ImportExportModelAdmin):
    """Admin for Vouchers with batch import/export and batch editing."""

    import_template_name = 'admin/public_interface/vouchers/batch_import.html'
    list_display = ['code', 'genus', 'species', 'sex', 'voucher', 'country', 'collector']
    ordering = ['code']
    search_fields = ['code', 'genus', 'species']
    actions = ['batch_changes']
    fieldsets = [('Voucher Information', {'fields': ['code', 'voucher', 'voucher_locality',
                                                     'voucher_code']}
                  ),
                 ('Specimen Information', {'fields': ['orden', 'superfamily', 'family',
                                                      'subfamily', 'tribe', 'subtribe',
                                                      'genus', 'species', 'subspecies',
                                                      'hostorg', 'author', 'type_species',
                                                      ],
                                           'classes': ['collapse']}),
                 ('Collection Information', {'fields': ['country', 'specific_locality',
                                                        'latitude', 'longitude',
                                                        'max_altitude', 'min_altitude',
                                                        'collector', 'code_bold',
                                                        'date_collection',
                                                        'date_collection_end',
                                                        'determined_by',
                                                        'sex', 'extractor', 'extraction',
                                                        'extraction_tube', 'notes',
                                                        'published_in', 'date_extraction',
                                                        'edits', 'latest_editor',
                                                        ],
                                             'classes': ['collapse']}),
                 ]

    def batch_changes(self, request, queryset):
        """Admin action redirecting the selected rows to the batch-change view."""
        # Check that the user has change permission for the actual model
        if not self.has_change_permission(request):
            raise PermissionDenied
        else:
            selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)
            # NOTE(review): a bare HttpRequest (no user/session) is handed to
            # change_selected — confirm that view never touches request.user.
            new_request = HttpRequest()
            new_request.method = 'GET'
            return change_selected(new_request, ",".join(selected))
    batch_changes.short_description = "Change selected in batch"

    # Render TextFields as single-line inputs in the admin form.
    formfield_overrides = {
        models.TextField: {'widget': forms.TextInput}
    }

    # Pick the image inline matching the configured photo repository.
    if settings.PHOTOS_REPOSITORY == 'flickr':
        inlines = [FlickImageInLine, ]
    else:
        inlines = [ImageInLine, ]

    resource_class = BatchImportVouchersResource
class SequencesAdmin(ImportExportModelAdmin):
    """Admin for Sequences with batch import/export."""

    import_template_name = 'admin/public_interface/sequences/batch_import.html'

    # TODO let users know that code and genecode keywords act as AND boolean search
    search_fields = ['code__code', 'gene__gene_code', 'accession']
    list_display = ['code', 'gene', 'genbank', 'accession', 'lab_person',
                    'notes', 'time_edited', 'time_created']
    fields = ['code', 'gene', 'sequences', 'genbank', 'accession',
              'lab_person', 'notes']
    form = SequencesAdminForm
    resource_class = BatchImportSequencesResource
class TaxonSetsAdmin(admin.ModelAdmin):
    """Plain admin listing for taxon sets."""
    list_display = ['taxonset_name', 'taxonset_creator', 'taxonset_description']
class GeneSetsAdmin(admin.ModelAdmin):
    """Plain admin listing for gene sets."""
    list_display = ['geneset_name', 'geneset_creator', 'geneset_description']
class GenesAdmin(admin.ModelAdmin):
    """Plain admin listing for genes."""
    list_display = ['gene_code', 'description', 'genetic_code', 'length',
                    'reading_frame', 'aligned', 'intron', 'prot_code',
                    'gene_type', 'notes']
# Register each model with its customized admin class.
admin.site.register(Sequences, SequencesAdmin)
admin.site.register(GeneSets, GeneSetsAdmin)
admin.site.register(Genes, GenesAdmin)
admin.site.register(TaxonSets, TaxonSetsAdmin)
admin.site.register(Vouchers, VouchersAdmin)
| {
"content_hash": "9ede5ebeb4f2d53e5ec1aa1c56b5b4bd",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 91,
"avg_line_length": 41.099415204678365,
"alnum_prop": 0.5507968127490039,
"repo_name": "carlosp420/VoSeq",
"id": "e29d92f29af80ced0e1c36729cf46a3066d173fb",
"size": "7028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "public_interface/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19319"
},
{
"name": "HTML",
"bytes": "95764"
},
{
"name": "Makefile",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "357630"
},
{
"name": "Shell",
"bytes": "11587"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ReversescaleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``parcats.line.reversescale`` property."""

    def __init__(
        self, plotly_name="reversescale", parent_name="parcats.line", **kwargs
    ):
        # Pull the overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "style")
        super(ReversescaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| {
"content_hash": "c8a6902e6a5e8a07955ca923e7ceedb7",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 34.07142857142857,
"alnum_prop": 0.6058700209643606,
"repo_name": "plotly/python-api",
"id": "7696beebcbc1c139dd0a85b2216310b484d6d202",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/parcats/line/_reversescale.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
# Module metadata (Python 2 script).
__author__ = 'weezhard'
__license__ = 'GPL'
__version__ = '1.0.0'
import libvirt
import sys
import struct
def getConnection():
    # Open a read-write connection to the local QEMU hypervisor; exit the
    # process with status 1 if libvirt refuses the connection.
    try:
        conn=libvirt.open("qemu:///system")
        return conn
    except libvirt.libvirtError, e:
        print e.get_error_message()
        sys.exit(1)
def delConnection(conn):
try:
conn.close()
except:
print get_error_message()
sys.exit(1)
def getAllDomains(conn):
    """Print the name of each domain on the connection, or 'None' if empty."""
    domains = conn.listAllDomains(0)
    if not domains:
        print('None')
        return
    for domain in domains:
        print(domain.name())
# BUG FIX: the original called getAllSecrets(), which is not defined
# anywhere in this script; the listing helper defined above is
# getAllDomains().
if __name__ == '__main__':
    conn = getConnection()
    getAllDomains(conn)
    delConnection(conn)
| {
"content_hash": "60efd3854a715fd34756eaefefdb5302",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 39,
"avg_line_length": 17.8,
"alnum_prop": 0.6243980738362761,
"repo_name": "skylost/heap",
"id": "0ec84d52c9e6bc46276214b9dd34abe488c6e759",
"size": "646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libvirt/libvirt_list_domains.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11206"
},
{
"name": "Ruby",
"bytes": "9778"
},
{
"name": "Shell",
"bytes": "561"
}
],
"symlink_target": ""
} |
from django import forms
from .models import User
class UserForm(forms.ModelForm):
    """Model-backed form exposing only a user's name fields."""

    class Meta:
        # Back the form with the local User model, restricted to the
        # two name fields.
        model = User
        fields = ("first_name", "last_name")
class InstagramUserForm(forms.Form):
    """Plain form that asks for an Instagram user name."""

    username = forms.CharField(
        label='UserName',
        required=True,
        widget=forms.TextInput(attrs={'placeholder': 'UserName'}),
    )
class InstagramUserIDForm(forms.Form):
    """Plain form that asks for an Instagram user id (pk)."""

    pk = forms.CharField(
        label='Id',
        required=True,
        widget=forms.TextInput(attrs={'placeholder': 'UserId'}),
    )
"content_hash": "0e697b799071609640db0b345e614037",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 64,
"avg_line_length": 27.51851851851852,
"alnum_prop": 0.5464333781965006,
"repo_name": "devrishik/timepost",
"id": "0c0331ef324ec037a2c3160cbad3eb594b8461b8",
"size": "767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "post_web/core/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1210"
},
{
"name": "JavaScript",
"bytes": "104551"
},
{
"name": "Python",
"bytes": "41328"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
} |
import unittest
from test_parallel_dygraph_dataparallel import TestMultipleGpus
class TestParallelizer(TestMultipleGpus):
    """Two-GPU integration test for the auto-parallel parallelizer."""

    # check sharding logic as well as the accuracy with single mode
    def test_parallelizer_logic(self):
        # Launches auto_parallel_parallelizer.py across 2 GPUs via the
        # helper inherited from TestMultipleGpus.
        self.run_mnist_2gpu('auto_parallel_parallelizer.py')
# Standard unittest entry point.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "117f729bce5745b45293040bd5438c51",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 67,
"avg_line_length": 24.5,
"alnum_prop": 0.7317784256559767,
"repo_name": "luotao1/Paddle",
"id": "2937dc33dde7868b2fee796ca13a66576ce4cd3c",
"size": "954",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/collective/fleet/test_auto_parallel_parallelizer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
} |
import balanced

# Balanced *test-mode* API secret (note the 'ak-test-' prefix); not a
# live credential.
balanced.configure('ak-test-2eKlj1ZDfAcZSARMf3NMhBHywDej0avSY')

# Look up an existing bank account by its resource path and credit it.
# NOTE(review): amount is presumably in cents (USD 50.00) per the
# Balanced API convention — confirm against the API docs.
bank_account = balanced.BankAccount.fetch('/bank_accounts/BA45anEaEr8g0lOhzhcE9VAN')
bank_account.credit(
    amount=5000
)
"content_hash": "eca5d58a31f43a0a2ee9ef01f656e563",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 84,
"avg_line_length": 25.375,
"alnum_prop": 0.8275862068965517,
"repo_name": "balanced/balanced-python",
"id": "6231551d1ff5cdc01750a6fc44587e3830e7c05a",
"size": "203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scenarios/bank_account_credit/executable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5581"
},
{
"name": "Python",
"bytes": "142737"
}
],
"symlink_target": ""
} |
"""
Download the JSON results file of pp-smokey from Jenkins. Exit with a code
which can be interpreted as a Sensu check:
http://sensuapp.org/docs/0.12/checks
Example: check-smokey-test.py smoke_test_name
To run locally, do this first:
```
export JENKINS_URL=https://deploy.preview.performance.service.gov.uk
```
Exit codes:
0 = OK
1 = WARNING
2 = CRITICAL
3+ = UNKNOWN
"""
import sys
import json
import os
import unittest
import urllib2
from collections import namedtuple
from operator import add
# Lightweight, read-only views of the cucumber results JSON structure.
Step = namedtuple('Step', 'name, status')
Scenario = namedtuple('Scenario', 'name, steps')
Feature = namedtuple('Feature', 'name, uri, scenarios')


class CheckSmokeyError(StandardError):
    """Raised when the results JSON lacks the expected structure."""
    # NOTE(review): StandardError exists only in Python 2; under
    # Python 3 this would need to inherit from Exception.
    pass
def main():
    """Download smokey results, report one feature, exit as a Sensu check.

    Exits 0 (OK) when no step failed, 2 (CRITICAL) otherwise; raises
    ValueError if the named feature is absent from the results.
    """
    # BUG FIX: the usage string contained a bare '{}' that was never
    # formatted; interpolate the script name.
    assert len(sys.argv) == 2, "Usage: {} <feature name>".format(sys.argv[0])
    feature_name = sys.argv[1]
    jenkins_url = os.environ.get('JENKINS_URL', 'http://jenkins:8080')
    feature = get_feature(
        load_json(download_results_json(jenkins_url)),
        feature_name)
    if feature is None:
        raise ValueError("No such feature: {}".format(feature_name))
    print_result(feature)
    sys.exit(get_exit_status(feature))
# Utils
def ascii(value):
    """Best-effort ASCII encoding of *value*, dropping non-ASCII chars.

    NOTE: shadows the built-in ``ascii`` within this module; kept
    because the rest of the module calls it by this name.
    """
    return value.encode(encoding='ascii', errors='ignore')
def download_results_json(jenkins_url):
    """Fetch the raw results.json artifact of the last pp-smokey build.

    NOTE(review): uses urllib2, so this module runs on Python 2 only.
    """
    response = urllib2.urlopen(
        jenkins_url + '/job/pp-smokey/lastBuild/artifact/results.json')
    return response.read()
def load_json(json_content):
    """Parse a JSON document string into Python objects."""
    parsed = json.loads(json_content)
    return parsed
# Loading from JSON
def get_feature(smokey_json, feature_name):
    """Find and parse the named feature; returns None when absent."""
    target_uri = get_feature_uri(feature_name)
    for entry in smokey_json:
        if entry['uri'] != target_uri:
            continue
        return Feature(
            ascii(entry['name']),
            ascii(entry['uri']),
            map(get_scenario, find_scenarios(entry)))
def get_feature_uri(feature_name):
    """Map a feature name to its path within the smokey repository."""
    return 'features/%s.feature' % (feature_name,)
def find_scenarios(feature_json):
    """Return the 'Scenario' elements of a parsed feature.

    Raises:
        CheckSmokeyError: if the feature JSON has no 'elements' key.
    """
    if 'elements' not in feature_json:
        # FIX: error message typo ("is feature" -> "in feature").
        raise CheckSmokeyError("No scenarios in feature: {}".format(
            feature_json))
    return [element for element in feature_json['elements']
            if element['keyword'] == 'Scenario']
def get_scenario(scenario_json):
    """Build a Scenario tuple from its JSON element."""
    steps = map(get_step, scenario_json['steps'])
    return Scenario(ascii(scenario_json['name']), steps)
def get_step(step):
    """Build a Step tuple; the name joins the Gherkin keyword and text."""
    full_name = "{}{}".format(ascii(step['keyword']), ascii(step['name']))
    return Step(full_name, step['result']['status'])
# Counting steps
def count_failing_steps(feature):
    """Number of steps in *feature* whose status is 'failed'."""
    return count_steps_by_status(feature, 'failed')
def count_passing_steps(feature):
    """Number of steps in *feature* whose status is 'passed'."""
    return count_steps_by_status(feature, 'passed')
def count_steps_by_status(feature, status):
    """Count the feature's steps with exactly the given status string."""
    return sum(1 for step in get_feature_steps(feature)
               if step.status == status)
def get_feature_steps(feature):
    """Return every step across all of the feature's scenarios.

    BUG FIX: the original used ``reduce(add, [...])`` with no
    initializer, which raises TypeError on a feature with zero
    scenarios (and requires a functools import under Python 3); a
    flattening comprehension returns [] instead.
    """
    return [step for scenario in feature.scenarios
            for step in scenario.steps]
# Rendering as text
def feature_message(feature):
    """Multi-line summary: failure counts, feature name/uri, scenarios."""
    # FIX: use the named field instead of the magic index feature[2]
    # (same value on the Feature namedtuple, but fragile and
    # inconsistent with the .name/.uri accesses below).
    return ('{failing} failed, {passing} passed;\n'
            '{name} ({uri})\n{scenarios}').format(
        failing=count_failing_steps(feature),
        passing=count_passing_steps(feature),
        name=feature.name,
        uri=feature.uri,
        scenarios="\n".join(map(scenario_message, feature.scenarios)))
def scenario_message(scenario):
    """Render one scenario and its steps as indented text."""
    rendered_steps = "\n".join(map(step_message, scenario.steps))
    return " Scenario: {name}\n{steps}".format(
        name=scenario.name, steps=rendered_steps)
def step_message(step):
    """One formatted line describing a step and its pass/fail state."""
    status_label = 'PASS' if step.status == "passed" else 'FAIL'
    return ' Step: [{status}] {name}'.format(
        name=step.name, status=status_label)
def print_result(feature):
    """
    Status message for Sensu - this will show up in any alerts.
    """
    message = feature_message(feature)
    print(message)
def get_exit_status(feature):
    """2 (CRITICAL) if any step failed, otherwise 0 (OK); logs the code."""
    if count_failing_steps(feature) > 0:
        exit_status = 2
    else:
        exit_status = 0
    print("Exiting with code: {0}".format(exit_status))
    return exit_status
# Run the Sensu check when executed directly.
if __name__ == '__main__':
    main()
_PASS_JSON = """
[
{
"keyword": "Feature",
"name": "admin_uploader",
"line": 1,
"description": "",
"id": "admin-uploader",
"uri": "features/admin_uploader.feature",
"elements": [
{
"keyword": "Scenario",
"name": "Quickly loading the admin home page",
"line": 4,
"description": "",
"tags": [
{
"name": "@normal",
"line": 3
}
],
"id": "admin-uploader;quickly-loading-the-admin-home-page",
"type": "scenario",
"steps": [
{
"keyword": "Given ",
"name": "the admin application has booted",
"line": 5,
"match": {
"arguments": [
{
"offset": 5,
"val": "admin"
}
],
"location": "features/step_definitions/smokey_steps.rb:1"
},
"result": {
"status": "passed",
"duration": 85938650
}
},
{
"keyword": "And ",
"name": "I am benchmarking",
"line": 6,
"match": {
"location": "features/step_definitions/benchmarking_steps.rb:1"
},
"result": {
"status": "passed",
"duration": 439216
}
},
{
"keyword": "When ",
"name": "I visit the admin home page",
"line": 7,
"match": {
"location": "features/step_definitions/admin_steps.rb:11"
},
"result": {
"status": "passed",
"duration": 58540642
}
},
{
"keyword": "Then ",
"name": "the elapsed time should be less than 1 seconds",
"line": 8,
"match": {
"arguments": [
{
"offset": 37,
"val": "1"
}
],
"location": "features/step_definitions/benchmarking_steps.rb:5"
},
"result": {
"status": "passed",
"duration": 593808
}
}
]
}
]
}
]
"""
_FAIL_JSON = """
[
{
"keyword": "Feature",
"name": "admin_uploader",
"line": 1,
"description": "",
"id": "admin-uploader",
"uri": "features/admin_uploader.feature",
"elements": [
{
"keyword": "Scenario",
"name": "Quickly loading the admin home page",
"line": 4,
"description": "",
"tags": [
{
"name": "@normal",
"line": 3
}
],
"id": "admin-uploader;quickly-loading-the-admin-home-page",
"type": "scenario",
"steps": [
{
"keyword": "Given ",
"name": "the admin application has booted",
"line": 5,
"match": {
"arguments": [
{
"offset": 5,
"val": "admin"
}
],
"location": "features/step_definitions/smokey_steps.rb:1"
},
"result": {
"status": "failed",
"duration": 85938650
}
},
{
"keyword": "And ",
"name": "I am benchmarking",
"line": 6,
"match": {
"location": "features/step_definitions/benchmarking_steps.rb:1"
},
"result": {
"status": "passed",
"duration": 439216
}
},
{
"keyword": "When ",
"name": "I visit the admin home page",
"line": 7,
"match": {
"location": "features/step_definitions/admin_steps.rb:11"
},
"result": {
"status": "passed",
"duration": 58540642
}
},
{
"keyword": "Then ",
"name": "the elapsed time should be less than 1 seconds",
"line": 8,
"match": {
"arguments": [
{
"offset": 37,
"val": "1"
}
],
"location": "features/step_definitions/benchmarking_steps.rb:5"
},
"result": {
"status": "passed",
"duration": 593808
}
}
]
}
]
}
]
"""
class JsonParsingTestCase(unittest.TestCase):
    """Exit-status derivation from the canned smokey JSON fixtures."""

    def test_correctly_identifies_successful_test(self):
        passing = get_feature(load_json(_PASS_JSON), 'admin_uploader')
        assert get_exit_status(passing) == 0

    def test_correctly_identifies_failed_test(self):
        failing = get_feature(load_json(_FAIL_JSON), 'admin_uploader')
        assert get_exit_status(failing) == 2
| {
"content_hash": "aea230c25dc96138c67d21e779959ffd",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 77,
"avg_line_length": 25.48314606741573,
"alnum_prop": 0.49834656084656087,
"repo_name": "alphagov/pp-puppet",
"id": "9f0644c6d09bef34f7c0d9c7cd1c95a200551108",
"size": "9113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/performanceplatform/files/check-smokey-test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "27856"
},
{
"name": "PLpgSQL",
"bytes": "833"
},
{
"name": "Puppet",
"bytes": "90831"
},
{
"name": "Python",
"bytes": "11412"
},
{
"name": "Ruby",
"bytes": "16839"
},
{
"name": "Shell",
"bytes": "10367"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from supra import views as supra
import forms
import models
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views import generic
from django.views.generic.edit import UpdateView
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from cuser.middleware import CuserMiddleware
from django.views.generic import View, DeleteView
from django.contrib.auth import authenticate, login
from django.shortcuts import redirect, get_object_or_404, HttpResponse
from empresa import models as empresa
from django.utils import timezone
from django.db.models import Q
class ListEmpresa(supra.SupraListView):
    """JSON list of active companies supervised by the current user."""
    model = models.Empresa
    search_key = 'q'
    list_display = ['id', 'nit', 'first_name', 'direccion', 'telefono',
                    'celular', 'ciudad__nombre', 'tiendas', 'servicios']
    search_fields = ['id']
    paginate_by = 100

    def servicios(self, obj, row):
        # Per-row action URLs consumed by the front-end.
        edit = "/empresa/edit/empresa/%d/" % (obj.id)
        delete = "/empresa/delete/empresa/%d/" % (obj.id)
        return {'edit': edit, 'delete': delete}
    # end def

    def tiendas(self, obj, row):
        # Number of stores belonging to this company; count() avoids
        # fetching every row just to take len() of it.
        return models.Tienda.objects.filter(empresa__id=obj.id).count()
    # end def

    def get_queryset(self):
        queryset = super(ListEmpresa, self).get_queryset()
        user = CuserMiddleware.get_user()
        confi = models.Empresa.objects.filter(supervisor__id=user.id, active=True)
        busqueda = self.request.GET.get('search', '')
        # BUG FIX: QuerySet.filter() returns a *new* queryset; the
        # original discarded the result, so the search never narrowed
        # anything. Reassign so the filter takes effect.
        confi = confi.filter(
            Q(ciudad__nombre__contains=busqueda) |
            Q(first_name__contains=busqueda) |
            Q(nit__contains=busqueda))
        return confi
    # end def

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(ListEmpresa, self).dispatch(*args, **kwargs)
    # end def
# end class
class Empresas(TemplateView):
    """Render the company admin page with every city (for the filter UI)."""

    def dispatch(self, request, *args, **kwargs):
        # NOTE(review): `user` is fetched but never used here; responds
        # to every HTTP method with the rendered template.
        user = CuserMiddleware.get_user()
        ciu = models.Ciudad.objects.all()
        return render(request, 'empresa/empresa.html',{'ciudad':ciu})
    # end def
# end class
class ListCiudad(supra.SupraListView):
    """JSON list of distinct cities with stores for the user's companies.

    Optionally restricted to one company via the ``empresa`` GET param.
    """
    # NOTE(review): `model` says Empresa but get_queryset returns
    # Ciudad rows; confirm how the supra list view uses `model` before
    # changing it.
    model = models.Empresa
    search_key = 'q'
    list_display = ['id', 'nombre', 'tienda__empresa__id']
    search_fields = ['id']
    paginate_by = 100

    def get_queryset(self):
        # FIX: removed the Python-2-only debug statement
        # `print self.request`, which leaked request data to stdout and
        # is a syntax error under Python 3.
        queryset = super(ListCiudad, self).get_queryset()
        user = CuserMiddleware.get_user()
        empresa = self.request.GET.get('empresa', False)
        if empresa:
            confi = models.Ciudad.objects.filter(
                tienda__empresa__supervisor__id=user.id, status=True,
                tienda__empresa__id=int(empresa)).distinct('id')
        else:
            confi = models.Ciudad.objects.filter(
                tienda__empresa__supervisor__id=user.id,
                status=True).distinct('id')
        # end if
        return confi
    # end def

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(ListCiudad, self).dispatch(*args, **kwargs)
    # end def
# end class
class AddEmpresa(supra.SupraFormView):
    """Render/process the company creation form (CSRF-exempt)."""
    model = models.Empresa
    form_class = forms.EmpresaForm
    template_name = 'empresa/addempresa.html'

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(AddEmpresa, self).dispatch(*args, **kwargs)
    # end def
# end class
class EditEmpresa(supra.SupraFormView):
    """Render/process the company edit form (CSRF-exempt).

    Shares the template with AddEmpresa but uses the edit form class.
    """
    model = models.Empresa
    form_class = forms.EmpresaEditForm
    template_name = 'empresa/addempresa.html'

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(EditEmpresa, self).dispatch(*args, **kwargs)
    # end def
# end class
class DeleteEmpresa(View):
    """Soft-delete a company (sets ``active=False``) via GET with a pk kwarg.

    Responds 200 with ``[{"status":true}]`` on success, otherwise 202
    with ``[{"status":false}]``.
    """

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(DeleteEmpresa, self).dispatch(*args, **kwargs)
    # end def

    def get(self, request, *args, **kwargs):
        # FIX: removed stray Python-2 debug prints (`print request,kwargs`,
        # `print 1/2/3`), which polluted stdout and break under Python 3.
        empr = kwargs['pk']
        if empr:
            empre = models.Empresa.objects.filter(id=empr).first()
            if empre:
                empre.active = False
                empre.save()
                return HttpResponse('[{"status":true}]', content_type='application/json', status=200)
            # end if
        # end if
        return HttpResponse('[{"status":false}]', content_type='application/json', status=202)
    # end def
# end class
class SetPassWordEmpresa(View):
    """Set a new password for an Empresa account.

    GET params: ``password`` and ``identificador`` (the Empresa pk).
    """
    # NOTE(review): changing a password over GET without CSRF or auth
    # checks is unsafe; should be a POST behind authentication. Left
    # as-is to keep the external interface unchanged.

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super(SetPassWordEmpresa, self).dispatch(request, *args, **kwargs)
    # end def

    def get(self, request, *args, **kwargs):  # 359291054481645
        password = request.GET.get('password', False)
        identificador = request.GET.get('identificador', False)
        if password and identificador:
            # FIX: reuse the value read above instead of re-reading the
            # GET param with a different default ('0'), as the original
            # did; behavior is identical since identificador is truthy.
            emp = models.Empresa.objects.filter(id=identificador).first()
            if emp:
                emp.set_password(raw_password=password)
                emp.save()
                return HttpResponse('{"r":"Ok"}', content_type="application/json", status=200)
            # end if
            return HttpResponse('{"r":"Campos invalidos"}', content_type="application/json", status=400)
        # end if
        return HttpResponse('{"r":"Campos requeridos"}', content_type="application/json", status=400)
    # end def
#
class ListTienda(supra.SupraListView):
    """JSON list of the user's stores, filterable by company and city.

    Both ``empresa`` and ``ciudad`` GET params must be present for the
    narrowed query; otherwise all of the supervisor's stores are listed.
    """
    model = models.Tienda
    search_key = 'q'
    list_display = ['id','nombre','direccion','ciudad','empresa','fijo']
    search_fields = ['id']
    paginate_by = 10

    def get_queryset(self):
        # NOTE(review): the super() queryset is computed but unused; the
        # method builds its own filtered queryset instead.
        queryset = super(ListTienda, self).get_queryset()
        user = CuserMiddleware.get_user()
        empresa = self.request.GET.get('empresa',False)
        ciudad = self.request.GET.get('ciudad',False)
        if empresa and ciudad:
            confi = models.Tienda.objects.filter(empresa__supervisor__id=user.id,empresa__id=int(empresa),ciudad__id=int(ciudad),status=True)
        else:
            confi = models.Tienda.objects.filter(empresa__supervisor__id=user.id,status=True)
        #end if
        return confi

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(ListTienda, self).dispatch(*args, **kwargs)
    # end def
# end class
class AddTienda(supra.SupraFormView):
    """Render/process the store creation form (CSRF-exempt)."""
    model = models.Tienda
    form_class = forms.TiendaForm
    template_name = 'empresa/addtienda.html'

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(AddTienda, self).dispatch(*args, **kwargs)
    # end def
# end class
class DeleteTienda(View):
    """Soft-delete a store (sets ``status=False``) via GET with a pk kwarg.

    Responds 200 with ``[{"status":true}]`` on success, otherwise 202
    with ``[{"status":false}]``.
    """

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(DeleteTienda, self).dispatch(*args, **kwargs)
    # end def

    def get(self, request, *args, **kwargs):
        ti = kwargs['pk']
        if ti:
            # BUG FIX: the original filtered on the undefined name `empr`
            # (copy-paste from DeleteEmpresa), which raised NameError at
            # runtime; also removed Python-2 debug prints.
            tien = models.Tienda.objects.filter(id=ti).first()
            if tien:
                tien.status = False
                tien.save()
                return HttpResponse('[{"status":true}]', content_type='application/json', status=200)
            # end if
        # end if
        return HttpResponse('[{"status":false}]', content_type='application/json', status=202)
    # end def
# end class
| {
"content_hash": "04ce7ab099be0ef49583c663a9b8b5ba",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 149,
"avg_line_length": 34.10267857142857,
"alnum_prop": 0.6407906794082995,
"repo_name": "darkdrei/GestionRegistro",
"id": "f29c518a88ff9e30bb657250112a7ecbfe2c2124",
"size": "7663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "empresa/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "64020"
},
{
"name": "HTML",
"bytes": "64352"
},
{
"name": "JavaScript",
"bytes": "291490"
},
{
"name": "Python",
"bytes": "91676"
}
],
"symlink_target": ""
} |
from io import BytesIO
from logging import getLogger
from typing import Mapping
from attr import define, field, fields_dict
from requests import Response
from urllib3.response import ( # type: ignore # import location false positive
HTTPHeaderDict,
HTTPResponse,
is_fp_closed,
)
from . import RichMixin
logger = getLogger(__name__)
@define(auto_attribs=False, slots=False)
class CachedHTTPResponse(HTTPResponse, RichMixin):
    """A serializable dataclass that emulates :py:class:`~urllib3.response.HTTPResponse`.
    Supports streaming requests and generator usage.
    The only action this doesn't support is explicitly calling :py:meth:`.read` with
    ``decode_content=False``.
    """

    decode_content: bool = field(default=None)
    # These headers are redundant and not serialized; copied in init and CachedResponse post-init
    headers: HTTPHeaderDict = None  # type: ignore
    reason: str = field(default=None)
    request_url: str = field(default=None)
    status: int = field(default=0)
    strict: int = field(default=0)
    version: int = field(default=0)

    def __init__(self, *args, body: bytes = None, headers: Mapping = None, **kwargs):
        """First initialize via HTTPResponse, then via attrs"""
        # Drop explicit None values so HTTPResponse/attrs defaults apply.
        kwargs = {k: v for k, v in kwargs.items() if v is not None}
        super().__init__(body=BytesIO(body or b''), preload_content=False, **kwargs)
        # Keep the raw bytes alongside the file-like wrapper so reset()
        # can rebuild the read pointer later.
        self._body = body
        self.headers = HTTPHeaderDict(headers)
        self.__attrs_init__(*args, **kwargs)  # type: ignore # False positive in mypy 0.920+?

    @classmethod
    def from_response(cls, original_response: Response):
        """Create a CachedHTTPResponse based on an original response"""
        # Copy basic attributes
        raw = original_response.raw
        copy_attrs = list(fields_dict(cls).keys()) + ['headers']
        kwargs = {k: getattr(raw, k, None) for k in copy_attrs}
        # Note: _request_url is not available in urllib <=1.21
        kwargs['request_url'] = getattr(raw, '_request_url', None)

        # Copy response data and restore response object to its original state
        if hasattr(raw, '_fp') and not is_fp_closed(raw._fp):
            body = raw.read(decode_content=False)
            kwargs['body'] = body
            raw._fp = BytesIO(body)
            original_response.content  # This property reads, decodes, and stores response content
            # After reading, reset file pointer on original raw response
            raw._fp = BytesIO(body)
            raw._fp_bytes_read = 0
            raw.length_remaining = len(body)

        return cls(**kwargs)  # type: ignore # False positive in mypy 0.920+?

    def release_conn(self):
        """No-op for compatibility"""

    def read(self, amt=None, decode_content=None, **kwargs):
        """Simplified reader for cached content that emulates
        :py:meth:`urllib3.response.HTTPResponse.read()`
        """
        if 'content-encoding' in self.headers and decode_content is False:
            logger.warning('read(decode_content=False) is not supported for cached responses')
        data = self._fp.read(amt)
        # "close" the file to inform consumers to stop reading from it
        if not data:
            self._fp.close()
        return data

    def reset(self, body: bytes = None):
        """Reset raw response file pointer, and optionally update content"""
        if body is not None:
            self._body = body
        self._fp = BytesIO(self._body or b'')

    def set_content(self, body: bytes):
        # Replace the cached body and rewind the read pointer.
        self._body = body
        self.reset()

    def stream(self, amt=None, **kwargs):
        """Simplified generator over cached content that emulates
        :py:meth:`urllib3.response.HTTPResponse.stream()`
        """
        # read() closes self._fp at EOF, which terminates this loop.
        while not self._fp.closed:
            yield self.read(amt=amt, **kwargs)
| {
"content_hash": "6ed9701e6435f9669dbe938c5ea6df26",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 98,
"avg_line_length": 38.118811881188115,
"alnum_prop": 0.64,
"repo_name": "reclosedev/requests-cache",
"id": "9c01b8846fbb7027602cbc19d1753335e3d63431",
"size": "3850",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "requests_cache/models/raw_response.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "292209"
}
],
"symlink_target": ""
} |
import json
import os
from aldryn_client import forms
SYSTEM_FIELD_WARNING = 'WARNING: this field is auto-written. Please do not change it here.'
class Form(forms.BaseForm):
    """Aldryn addon configuration form for django CMS.

    ``to_settings`` takes the cleaned form ``data`` and the current
    Django ``settings`` dict, mutates the settings to wire up django CMS
    and its companion packages, and returns the dict.
    """

    permissions_enabled = forms.CheckboxField(
        'Enable permission checks',
        required=False,
        initial=True,
    )
    cms_templates = forms.CharField(
        'CMS Templates',
        required=True,
        initial='[["default.html", "Default"]]',
        help_text=SYSTEM_FIELD_WARNING,
    )
    boilerplate_name = forms.CharField(
        'Boilerplate Name',
        required=False,
        initial='',
        help_text=SYSTEM_FIELD_WARNING,
    )

    def to_settings(self, data, settings):
        """Apply all CMS-related settings; returns the mutated ``settings``."""
        from functools import partial
        from django.core.urlresolvers import reverse_lazy
        from aldryn_addons.utils import boolean_ish, djsenv
        env = partial(djsenv, settings=settings)

        # Need to detect if these settings are for Django 1.8+
        # Is there a better way? Can't import django to check version =(
        is_django_18_or_later = ('TEMPLATES' in settings)

        # Core CMS stuff
        settings['INSTALLED_APPS'].extend([
            'cms',
            # 'aldryn_django_cms' must be after 'cms', otherwise we get
            # import time exceptions on other packages (e.g alryn-bootstrap3
            # returns:
            # link_page = cms.models.fields.PageField(
            # AttributeError: 'module' object has no attribute 'fields'
            # )
            'aldryn_django_cms',
            'menus',
            'sekizai',
            'treebeard',
            'reversion',
        ])
        # TODO: break out this stuff into other addons
        settings['INSTALLED_APPS'].extend([
            'parler',
        ])
        settings['INSTALLED_APPS'].insert(
            settings['INSTALLED_APPS'].index('django.contrib.admin'),
            'djangocms_admin_style',
        )

        if is_django_18_or_later:
            settings['MIGRATION_MODULES'] = {
                'cmsplugin_filer_file': 'cmsplugin_filer_file.migrations_django',
                'cmsplugin_filer_image': 'cmsplugin_filer_image.migrations_django',
                'cmsplugin_filer_folder': 'cmsplugin_filer_folder.migrations_django',
                'cmsplugin_filer_link': 'cmsplugin_filer_link.migrations_django',
                'cmsplugin_filer_teaser': 'cmsplugin_filer_teaser.migrations_django',
                'cmsplugin_filer_video': 'cmsplugin_filer_video.migrations_django',
            }

        if is_django_18_or_later:
            settings['TEMPLATES'][0]['OPTIONS']['context_processors'].extend([
                'sekizai.context_processors.sekizai',
                'cms.context_processors.cms_settings',
            ])
        else:
            settings['TEMPLATE_CONTEXT_PROCESSORS'].extend([
                'sekizai.context_processors.sekizai',
                'cms.context_processors.cms_settings',
            ])

        settings['MIDDLEWARE_CLASSES'].extend([
            'cms.middleware.user.CurrentUserMiddleware',
            'cms.middleware.page.CurrentPageMiddleware',
            'cms.middleware.toolbar.ToolbarMiddleware',
            'cms.middleware.language.LanguageCookieMiddleware',
        ])
        settings['MIDDLEWARE_CLASSES'].insert(
            settings['MIDDLEWARE_CLASSES'].index(
                'django.middleware.common.CommonMiddleware'
            ),
            'cms.middleware.utils.ApphookReloadMiddleware',
        )

        settings['ADDON_URLS_I18N_LAST'] = 'cms.urls'

        settings['CMS_PERMISSION'] = data['permissions_enabled']

        old_cms_templates_json = os.path.join(settings['BASE_DIR'], 'cms_templates.json')
        if os.path.exists(old_cms_templates_json):
            # Backwards compatibility with v2
            with open(old_cms_templates_json) as fobj:
                templates = json.load(fobj)
        else:
            templates = settings.get('CMS_TEMPLATES', json.loads(data['cms_templates']))
        settings['CMS_TEMPLATES'] = templates

        # languages
        language_codes = [code for code, lang in settings['LANGUAGES']]
        settings['CMS_LANGUAGES'] = {
            'default': {
                'fallbacks': [fbcode for fbcode in language_codes],
                'redirect_on_fallback': True,
                'public': True,
                'hide_untranslated': False,
            },
            1: [
                {
                    'code': code,
                    'name': settings['ALL_LANGUAGES_DICT'][code],
                    'fallbacks': [fbcode for fbcode in language_codes if fbcode != code],
                    'public': True
                } for code in language_codes
            ]
        }

        settings['PARLER_LANGUAGES'] = {}
        for site_id, languages in settings['CMS_LANGUAGES'].items():
            # Integer keys are per-site language lists; 'default' is
            # handled separately below.
            if isinstance(site_id, int):
                langs = [
                    {
                        'code': lang['code'],
                        'fallbacks': [fbcode for fbcode in language_codes if fbcode != lang['code']]
                    } for lang in languages
                ]
                settings['PARLER_LANGUAGES'].update({site_id: langs})
        parler_defaults = {'fallback': settings['LANGUAGE_CODE']}
        for k, v in settings['CMS_LANGUAGES'].get('default', {}).items():
            if k in ['hide_untranslated', ]:
                parler_defaults.update({k: v})
        settings['PARLER_LANGUAGES'].update({'default': parler_defaults})

        # aldryn-boilerplates and aldryn-snake
        # FIXME: Make ALDRYN_BOILERPLATE_NAME a configurable parameter
        settings['ALDRYN_BOILERPLATE_NAME'] = env(
            'ALDRYN_BOILERPLATE_NAME',
            data.get('boilerplate_name', 'legacy'),
        )
        settings['INSTALLED_APPS'].append('aldryn_boilerplates')

        if is_django_18_or_later:
            settings['TEMPLATES'][0]['OPTIONS']['context_processors'].extend([
                'aldryn_boilerplates.context_processors.boilerplate',
                'aldryn_snake.template_api.template_processor',
            ])
            # BUG FIX: the insertion index must come from the list we
            # insert into (TEMPLATES[0]['OPTIONS']['loaders']); the
            # original indexed the legacy TEMPLATE_LOADERS setting,
            # which can be absent on Django 1.8+ (KeyError) or out of
            # sync with the new-style loaders list.
            loaders = settings['TEMPLATES'][0]['OPTIONS']['loaders']
            loaders.insert(
                loaders.index(
                    'django.template.loaders.app_directories.Loader'),
                'aldryn_boilerplates.template_loaders.AppDirectoriesLoader'
            )
        else:
            settings['TEMPLATE_CONTEXT_PROCESSORS'].extend([
                'aldryn_boilerplates.context_processors.boilerplate',
                'aldryn_snake.template_api.template_processor',
            ])
            settings['TEMPLATE_LOADERS'].insert(
                settings['TEMPLATE_LOADERS'].index(
                    'django.template.loaders.app_directories.Loader'),
                'aldryn_boilerplates.template_loaders.AppDirectoriesLoader'
            )

        settings['STATICFILES_FINDERS'].insert(
            settings['STATICFILES_FINDERS'].index('django.contrib.staticfiles.finders.AppDirectoriesFinder'),
            'aldryn_boilerplates.staticfile_finders.AppDirectoriesFinder',
        )

        # django sitemap support
        settings['INSTALLED_APPS'].append('django.contrib.sitemaps')

        # django-compressor
        settings['INSTALLED_APPS'].append('compressor')
        settings['STATICFILES_FINDERS'].append('compressor.finders.CompressorFinder')

        # django-robots
        settings['INSTALLED_APPS'].append('robots')

        # django-filer
        settings['INSTALLED_APPS'].extend([
            'filer',
            'easy_thumbnails',
            'mptt',
            'polymorphic',
        ])
        settings['FILER_DEBUG'] = boolean_ish(env('FILER_DEBUG', settings['DEBUG']))
        settings['FILER_ENABLE_LOGGING'] = boolean_ish(env('FILER_ENABLE_LOGGING', True))
        settings['FILER_IMAGE_USE_ICON'] = True
        settings['ADDON_URLS'].append(
            'filer.server.urls'
        )

        # easy-thumbnails
        settings['INSTALLED_APPS'].extend([
            'easy_thumbnails',
        ])
        settings['THUMBNAIL_QUALITY'] = env('THUMBNAIL_QUALITY', 90)
        # FIXME: enabling THUMBNAIL_HIGH_RESOLUTION causes timeouts/500!
        settings['THUMBNAIL_HIGH_RESOLUTION'] = False
        settings['THUMBNAIL_PRESERVE_EXTENSIONS'] = ['png', 'gif']
        settings['THUMBNAIL_PROCESSORS'] = (
            'easy_thumbnails.processors.colorspace',
            'easy_thumbnails.processors.autocrop',
            'filer.thumbnail_processors.scale_and_crop_with_subject_location',
            'easy_thumbnails.processors.filters',
        )
        settings['THUMBNAIL_SOURCE_GENERATORS'] = (
            'easy_thumbnails.source_generators.pil_image',
        )
        settings['THUMBNAIL_CACHE_DIMENSIONS'] = True

        # commented out because fix-tree has a major bug
        # this should be ok with CMS >=3.1.4
        # settings['MIGRATION_COMMANDS'].append(
        #     'python manage.py cms fix-tree --noinput'
        # )

        # default plugins
        settings['INSTALLED_APPS'].extend([
            'djangocms_text_ckeditor',
            'djangocms_link',
            'djangocms_snippet',
            'djangocms_googlemap',
            # cmsplugin-filer
            'cmsplugin_filer_file',
            'cmsplugin_filer_image',
            # required by aldryn-forms
            'captcha',
        ])

        # boilerplate must provide /static/js/modules/ckeditor.wysiwyg.js and /static/css/base.css
        CKEDITOR_SETTINGS = {
            'height': 300,
            'language': '{{ language }}',
            'toolbar': 'CMS',
            'skin': 'moono',
            'extraPlugins': 'cmsplugins',
            'toolbar_HTMLField': [
                ['Undo', 'Redo'],
                ['cmsplugins', '-', 'ShowBlocks'],
                ['Format', 'Styles'],
                ['TextColor', 'BGColor', '-', 'PasteText', 'PasteFromWord'],
                ['Maximize', ''],
                '/',
                ['Bold', 'Italic', 'Underline', '-', 'Subscript', 'Superscript', '-', 'RemoveFormat'],
                ['JustifyLeft', 'JustifyCenter', 'JustifyRight'],
                ['HorizontalRule'],
                ['Link', 'Unlink'],
                ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Table'],
                ['Source'],
                ['Link', 'Unlink', 'Anchor'],
            ],
        }
        boilerplate_name = settings['ALDRYN_BOILERPLATE_NAME']
        if boilerplate_name == 'bootstrap3':
            CKEDITOR_SETTINGS['stylesSet'] = 'default:/static/js/addons/ckeditor.wysiwyg.js'
            CKEDITOR_SETTINGS['contentsCss'] = ['/static/css/base.css']
        else:
            CKEDITOR_SETTINGS['stylesSet'] = 'default:/static/js/modules/ckeditor.wysiwyg.js'
            CKEDITOR_SETTINGS['contentsCss'] = ['/static/css/base.css']
        # NOTE(review): CKEDITOR_SETTINGS is built but never written into
        # ``settings`` (no settings['CKEDITOR_SETTINGS'] = ... assignment).
        # Looks like a bug, but assigning it would change the emitted
        # settings — confirm before changing.

        # select2 (required by djangocms_link plugin)
        settings['INSTALLED_APPS'].extend([
            'django_select2',
        ])

        settings['ADDON_URLS'].append('aldryn_django_cms.urls')
        settings['ADDON_URLS_I18N'].append('aldryn_django_cms.urls_i18n')

        if 'ALDRYN_SSO_LOGIN_WHITE_LIST' in settings:
            # stage sso enabled
            # add internal endpoints that do not require authentication
            settings['ALDRYN_SSO_LOGIN_WHITE_LIST'].append(reverse_lazy('cms-check-uninstall'))
            # this is an internal django-cms url
            # which gets called when a user logs out from toolbar
            settings['ALDRYN_SSO_LOGIN_WHITE_LIST'].append(reverse_lazy('admin:cms_page_resolve'))

        return settings
| {
"content_hash": "f0cf8be452da892b487ae054536eac00",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 109,
"avg_line_length": 39.17785234899329,
"alnum_prop": 0.5640256959314776,
"repo_name": "JimyRyan/jimyryan-djangocms",
"id": "40f6ea1c1976e7d203352cc3a3a20323a88d0650",
"size": "11699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/addons/aldryn-django-cms/aldryn_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "252376"
},
{
"name": "HTML",
"bytes": "195079"
},
{
"name": "JavaScript",
"bytes": "69122"
},
{
"name": "Nginx",
"bytes": "1214"
},
{
"name": "Python",
"bytes": "48234"
},
{
"name": "Shell",
"bytes": "4639"
}
],
"symlink_target": ""
} |
import ShareYourSystem as SYS
# Build a Pointer and point it directly at a grandchild node, giving the
# target a special Key str as its new name.
MyPointer=SYS.PointerClass().point(
	# PointingToGetVariable: path of the node to point to
	'/ChildPointer/GrandChildPointer',
	# PointingToSetKeyVariable: the Key str under which the target is set
	'MyGrandChildPointer'
)
# Print the resulting pointer structure for inspection.
print('MyPointer is')
SYS._print(MyPointer)
| {
"content_hash": "9d1321d42d804f2adc30c3b9fa9bd1e8",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 37,
"avg_line_length": 16.58823529411765,
"alnum_prop": 0.7624113475177305,
"repo_name": "Ledoux/ShareYourSystem",
"id": "be728698d0b18bea7c59fc19271fa61545de4ede",
"size": "298",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Pythonlogy/ShareYourSystem/Standards/Itemizers/Pointer/draft/01_ExampleDoc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "86"
},
{
"name": "C++",
"bytes": "4244220"
},
{
"name": "CSS",
"bytes": "142769"
},
{
"name": "CoffeeScript",
"bytes": "37331"
},
{
"name": "HTML",
"bytes": "36211676"
},
{
"name": "JavaScript",
"bytes": "2147968"
},
{
"name": "Jupyter Notebook",
"bytes": "7930602"
},
{
"name": "Makefile",
"bytes": "6362"
},
{
"name": "PHP",
"bytes": "11096341"
},
{
"name": "Python",
"bytes": "5700092"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "Scala",
"bytes": "2412"
},
{
"name": "Shell",
"bytes": "2525"
},
{
"name": "Swift",
"bytes": "154"
},
{
"name": "TeX",
"bytes": "2556"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
} |
'''Test the command output action.'''
import datetime
import unittest
import action
class TestSpeakShellCommandOutput(unittest.TestCase):
    """Check that SpeakShellCommandOutput speaks the command's output,
    falling back to the failure text when the command produces none.
    """
    def setUp(self):
        # Reset the captured text before each test.
        self._say_text = None
    def _say(self, text):
        # Stand-in for the real TTS callback: just record what was spoken.
        self._say_text = text
    def test_say_receives_output(self):
        speak = action.SpeakShellCommandOutput(self._say, 'echo test', None)
        speak.run(None)
        self.assertEqual('test', self._say_text)
    def test_say_receives_failure_text(self):
        speak = action.SpeakShellCommandOutput(self._say, 'echo', 'failure')
        speak.run(None)
        self.assertEqual('failure', self._say_text)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "dbf2993a2c946a98a3583b54adcd668e",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 78,
"avg_line_length": 23.925925925925927,
"alnum_prop": 0.6563467492260062,
"repo_name": "hanmy75/voice-recognizer",
"id": "dd0c2a92605a09d61f09dd4d46f41bd4ee1fdcb4",
"size": "1222",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_speak_shell_command_output.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "357"
},
{
"name": "Python",
"bytes": "164594"
},
{
"name": "Shell",
"bytes": "5841"
}
],
"symlink_target": ""
} |
from tempest.api.compute import base
from tempest import exceptions
from tempest.test import attr
class ServicesAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
    """
    Tests Services API. List and Enable/Disable require admin privileges.
    """
    _interface = 'json'
    @classmethod
    def setUpClass(cls):
        super(ServicesAdminNegativeTestJSON, cls).setUpClass()
        # Admin client for baseline listings; non-admin for the auth check.
        cls.client = cls.os_adm.services_client
        cls.non_admin_client = cls.services_client
    @attr(type=['negative', 'gate'])
    def test_list_services_with_non_admin_user(self):
        # A non-admin caller must be rejected outright.
        self.assertRaises(exceptions.Unauthorized,
                          self.non_admin_client.list_services)
    @attr(type=['negative', 'gate'])
    def test_get_service_by_invalid_params(self):
        # return all services if send the request with invalid parameter
        resp, all_services = self.client.list_services()
        bogus_filter = {'xxx': 'nova-compute'}
        resp, filtered = self.client.list_services(bogus_filter)
        self.assertEqual(200, resp.status)
        self.assertEqual(len(all_services), len(filtered))
    @attr(type=['negative', 'gate'])
    def test_get_service_by_invalid_service_and_valid_host(self):
        # A real host paired with a bogus binary matches nothing.
        resp, services = self.client.list_services()
        query = {'host': services[0]['host'], 'binary': 'xxx'}
        resp, services = self.client.list_services(query)
        self.assertEqual(200, resp.status)
        self.assertEqual(0, len(services))
    @attr(type=['negative', 'gate'])
    def test_get_service_with_valid_service_and_invalid_host(self):
        # A real binary paired with a bogus host matches nothing.
        resp, services = self.client.list_services()
        query = {'host': 'xxx', 'binary': services[0]['binary']}
        resp, services = self.client.list_services(query)
        self.assertEqual(200, resp.status)
        self.assertEqual(0, len(services))
class ServicesAdminNegativeTestXML(ServicesAdminNegativeTestJSON):
    # Re-run every inherited negative test through the XML interface.
    _interface = 'xml'
| {
"content_hash": "bde65f1cdfc8fa776788419d99c9fda9",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 73,
"avg_line_length": 37.129629629629626,
"alnum_prop": 0.6623441396508728,
"repo_name": "BeenzSyed/tempest",
"id": "a1809c4e22e9074ac9c6374c203b507077e7ad51",
"size": "2636",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/api/compute/admin/test_services_negative.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2613370"
},
{
"name": "Shell",
"bytes": "8687"
}
],
"symlink_target": ""
} |
"""Handles database requests from other nova services."""
import copy
import itertools
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import timeutils
import six
from nova.api.ec2 import ec2utils
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute_api
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.db import base
from nova import exception
from nova.i18n import _, _LE, _LW
from nova import image
from nova import manager
from nova import network
from nova.network.security_group import openstack_driver
from nova import objects
from nova.objects import base as nova_object
from nova import quota
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import utils
LOG = logging.getLogger(__name__)
# Instead of having a huge list of arguments to instance_update(), we just
# accept a dict of fields to update and use this whitelist to validate it.
# NOTE: 'root_device_name' was previously listed twice; the duplicate has
# been removed (membership checks are unaffected).
allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
                   'power_state', 'access_ip_v4', 'access_ip_v6',
                   'launched_at', 'terminated_at', 'host', 'node',
                   'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
                   'instance_type_id', 'root_device_name', 'launched_on',
                   'progress', 'vm_mode', 'default_ephemeral_device',
                   'default_swap_device', 'system_metadata', 'updated_at'
                   ]
# Fields that we want to convert back into a datetime object.
datetime_fields = ['launched_at', 'terminated_at', 'updated_at']
class ConductorManager(manager.Manager):
    """Mission: Conduct things.
    The methods in the base API for nova-conductor are various proxy operations
    performed on behalf of the nova-compute service running on compute nodes.
    Compute nodes are not allowed to directly access the database, so this set
    of methods allows them to get specific work done without locally accessing
    the database.
    The nova-conductor service also exposes an API in the 'compute_task'
    namespace. See the ComputeTaskManager class for details.
    """
    # RPC API version advertised by this manager.
    target = messaging.Target(version='2.1')
    def __init__(self, *args, **kwargs):
        super(ConductorManager, self).__init__(service_name='conductor',
                                               *args, **kwargs)
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())
        # network/compute APIs are built lazily via the properties below to
        # avoid circular dependencies at init time.
        self._network_api = None
        self._compute_api = None
        self.compute_task_mgr = ComputeTaskManager()
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
        # Serve the 'compute_task' namespace from the same RPC server.
        self.additional_endpoints.append(self.compute_task_mgr)
    @property
    def network_api(self):
        # NOTE(danms): We need to instantiate our network_api on first use
        # to avoid the circular dependency that exists between our init
        # and network_api's
        if self._network_api is None:
            self._network_api = network.API()
        return self._network_api
    @property
    def compute_api(self):
        # Lazily constructed for the same reason as network_api above.
        if self._compute_api is None:
            self._compute_api = compute_api.API()
        return self._compute_api
    @messaging.expected_exceptions(KeyError, ValueError,
                                   exception.InvalidUUID,
                                   exception.InstanceNotFound,
                                   exception.UnexpectedTaskStateError)
    def instance_update(self, context, instance_uuid,
                        updates, service):
        """Validate ``updates`` against the module-level whitelist, apply
        them to the instance and save it, returning the result as a
        primitive.  Raises KeyError for any non-whitelisted field.
        """
        for key, value in six.iteritems(updates):
            if key not in allowed_updates:
                LOG.error(_LE("Instance update attempted for "
                              "'%(key)s' on %(instance_uuid)s"),
                          {'key': key, 'instance_uuid': instance_uuid})
                raise KeyError("unexpected update keyword '%s'" % key)
            if key in datetime_fields and isinstance(value, six.string_types):
                # Datetimes arrive over RPC as strings; convert them back.
                updates[key] = timeutils.parse_strtime(value)
        instance = objects.Instance(context=context, uuid=instance_uuid,
                                    **updates)
        instance.obj_reset_changes(['uuid'])
        instance.save()
        return nova_object.obj_to_primitive(instance)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    @messaging.expected_exceptions(exception.InstanceNotFound)
    def instance_get_by_uuid(self, context, instance_uuid,
                             columns_to_join):
        # DB proxy: fetch one instance and serialize it for RPC.
        return jsonutils.to_primitive(
            self.db.instance_get_by_uuid(context, instance_uuid,
                                         columns_to_join))
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def instance_get_all_by_host(self, context, host, node,
                                 columns_to_join):
        # When a node is given, columns_to_join is ignored by the
        # host-and-node DB call below.
        if node is not None:
            result = self.db.instance_get_all_by_host_and_node(
                context.elevated(), host, node)
        else:
            result = self.db.instance_get_all_by_host(context.elevated(), host,
                                                      columns_to_join)
        return jsonutils.to_primitive(result)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def migration_get_in_progress_by_host_and_node(self, context,
                                                   host, node):
        # DB proxy: in-progress migrations for a given host/node pair.
        migrations = self.db.migration_get_in_progress_by_host_and_node(
            context, host, node)
        return jsonutils.to_primitive(migrations)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    @messaging.expected_exceptions(exception.AggregateHostExists)
    def aggregate_host_add(self, context, aggregate, host):
        # DB proxy: add a host to an aggregate (elevated context required).
        host_ref = self.db.aggregate_host_add(context.elevated(),
                                              aggregate['id'], host)
        return jsonutils.to_primitive(host_ref)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    @messaging.expected_exceptions(exception.AggregateHostNotFound)
    def aggregate_host_delete(self, context, aggregate, host):
        # DB proxy: remove a host from an aggregate; returns nothing.
        self.db.aggregate_host_delete(context.elevated(),
                                      aggregate['id'], host)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def aggregate_metadata_get_by_host(self, context, host,
                                       key='availability_zone'):
        # DB proxy: aggregate metadata visible to a host, filtered by key.
        result = self.db.aggregate_metadata_get_by_host(context, host, key)
        return jsonutils.to_primitive(result)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def bw_usage_update(self, context, uuid, mac, start_period,
                        bw_in, bw_out, last_ctr_in, last_ctr_out,
                        last_refreshed, update_cells):
        # Only write when at least one counter value was supplied; always
        # read back and return the current usage row.
        if [bw_in, bw_out, last_ctr_in, last_ctr_out].count(None) != 4:
            self.db.bw_usage_update(context, uuid, mac, start_period,
                                    bw_in, bw_out, last_ctr_in, last_ctr_out,
                                    last_refreshed,
                                    update_cells=update_cells)
        usage = self.db.bw_usage_get(context, uuid, start_period, mac)
        return jsonutils.to_primitive(usage)
    def provider_fw_rule_get_all(self, context):
        # DB proxy: all provider-level firewall rules.
        rules = self.db.provider_fw_rule_get_all(context)
        return jsonutils.to_primitive(rules)
    # NOTE(danms): This can be removed in version 3.0 of the RPC API
    def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
        # DB proxy: agent build lookup by (hypervisor, os, architecture).
        info = self.db.agent_build_get_by_triple(context, hypervisor, os,
                                                 architecture)
        return jsonutils.to_primitive(info)
    # NOTE(ndipanov): This can be removed in version 3.0 of the RPC API
    def block_device_mapping_update_or_create(self, context, values, create):
        """Create, update, or upsert a BDM row depending on ``create``
        (True/False/None) and propagate the change to the cells top level.
        """
        if create is None:
            bdm = self.db.block_device_mapping_update_or_create(context,
                                                                values)
        elif create is True:
            bdm = self.db.block_device_mapping_create(context, values)
        else:
            bdm = self.db.block_device_mapping_update(context,
                                                      values['id'],
                                                      values)
        bdm_obj = objects.BlockDeviceMapping._from_db_object(
            context, objects.BlockDeviceMapping(), bdm)
        self.cells_rpcapi.bdm_update_or_create_at_top(context, bdm_obj,
                                                      create=create)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def block_device_mapping_get_all_by_instance(self, context, instance,
                                                 legacy):
        # DB proxy: BDMs for one instance, optionally in legacy format.
        bdms = self.db.block_device_mapping_get_all_by_instance(
            context, instance['uuid'])
        if legacy:
            bdms = block_device.legacy_mapping(bdms)
        return jsonutils.to_primitive(bdms)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def instance_get_all_by_filters(self, context, filters, sort_key,
                                    sort_dir, columns_to_join,
                                    use_slave):
        # DB proxy: filtered/sorted instance listing.
        result = self.db.instance_get_all_by_filters(
            context, filters, sort_key, sort_dir,
            columns_to_join=columns_to_join, use_slave=use_slave)
        return jsonutils.to_primitive(result)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def instance_get_active_by_window_joined(self, context, begin, end,
                                             project_id, host):
        # DB proxy: instances active within the [begin, end] window.
        result = self.db.instance_get_active_by_window_joined(
            context, begin, end, project_id, host)
        return jsonutils.to_primitive(result)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def instance_destroy(self, context, instance):
        # Tolerate old-world dict-style instances by converting to an
        # object before destroying.
        if not isinstance(instance, objects.Instance):
            instance = objects.Instance._from_db_object(context,
                                                        objects.Instance(),
                                                        instance)
        instance.destroy()
        return nova_object.obj_to_primitive(instance)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def instance_fault_create(self, context, values):
        # DB proxy: record an instance fault.
        result = self.db.instance_fault_create(context, values)
        return jsonutils.to_primitive(result)
    # NOTE(kerrin): The last_refreshed argument is unused by this method
    # and can be removed in v3.0 of the RPC API.
    def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
                         wr_bytes, instance, last_refreshed, update_totals):
        """Update volume usage counters and emit a 'volume.usage'
        notification reflecting the new values.
        """
        vol_usage = self.db.vol_usage_update(context, vol_id,
                                             rd_req, rd_bytes,
                                             wr_req, wr_bytes,
                                             instance['uuid'],
                                             instance['project_id'],
                                             instance['user_id'],
                                             instance['availability_zone'],
                                             update_totals)
        # We have just updated the database, so send the notification now
        self.notifier.info(context, 'volume.usage',
                           compute_utils.usage_volume_info(vol_usage))
    # NOTE(hanlind): This method can be removed in version 3.0 of the RPC API
    @messaging.expected_exceptions(exception.ComputeHostNotFound,
                                   exception.HostBinaryNotFound)
    def service_get_all_by(self, context, topic, host, binary):
        """Dispatch to the service_get_* DB API that matches whichever
        combination of topic/host/binary filters was supplied.
        """
        if not any((topic, host, binary)):
            result = self.db.service_get_all(context)
        elif all((topic, host)):
            if topic == 'compute':
                result = self.db.service_get_by_compute_host(context, host)
                # NOTE(sbauza): Only Juno computes are still calling this
                # conductor method for getting service_get_by_compute_node,
                # but expect a compute_node field so we can safely add it.
                result['compute_node'
                       ] = objects.ComputeNodeList.get_all_by_host(
                           context, result['host'])
                # FIXME(comstud) Potentially remove this on bump to v3.0
                result = [result]
            else:
                result = self.db.service_get_by_host_and_topic(context,
                                                               host, topic)
        elif all((host, binary)):
            result = self.db.service_get_by_host_and_binary(
                context, host, binary)
        elif topic:
            result = self.db.service_get_all_by_topic(context, topic)
        elif host:
            result = self.db.service_get_all_by_host(context, host)
        return jsonutils.to_primitive(result)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    @messaging.expected_exceptions(exception.InstanceActionNotFound)
    def action_event_start(self, context, values):
        # DB proxy: open an instance-action event record.
        evt = self.db.action_event_start(context, values)
        return jsonutils.to_primitive(evt)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    @messaging.expected_exceptions(exception.InstanceActionNotFound,
                                   exception.InstanceActionEventNotFound)
    def action_event_finish(self, context, values):
        # DB proxy: close an instance-action event record.
        evt = self.db.action_event_finish(context, values)
        return jsonutils.to_primitive(evt)
    # NOTE(hanlind): This method can be removed in version 3.0 of the RPC API
    def service_create(self, context, values):
        # DB proxy: create a service record.
        svc = self.db.service_create(context, values)
        return jsonutils.to_primitive(svc)
    # NOTE(hanlind): This method can be removed in version 3.0 of the RPC API
    @messaging.expected_exceptions(exception.ServiceNotFound)
    def service_destroy(self, context, service_id):
        # DB proxy: delete a service record.
        self.db.service_destroy(context, service_id)
    def compute_node_create(self, context, values):
        # DB proxy: create a compute node record.
        result = self.db.compute_node_create(context, values)
        return jsonutils.to_primitive(result)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def compute_node_update(self, context, node, values):
        # DB proxy: update a compute node record by id.
        result = self.db.compute_node_update(context, node['id'], values)
        return jsonutils.to_primitive(result)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def compute_node_delete(self, context, node):
        # DB proxy: delete a compute node record by id.
        result = self.db.compute_node_delete(context, node['id'])
        return jsonutils.to_primitive(result)
    # NOTE(hanlind): This method can be removed in version 3.0 of the RPC API
    @messaging.expected_exceptions(exception.ServiceNotFound)
    def service_update(self, context, service, values):
        # DB proxy: update a service record by id.
        svc = self.db.service_update(context, service['id'], values)
        return jsonutils.to_primitive(svc)
    def task_log_get(self, context, task_name, begin, end, host, state):
        # DB proxy: read a periodic-task log entry.
        result = self.db.task_log_get(context, task_name, begin, end, host,
                                      state)
        return jsonutils.to_primitive(result)
    def task_log_begin_task(self, context, task_name, begin, end, host,
                            task_items, message):
        # DB proxy: mark the beginning of a periodic task run.
        result = self.db.task_log_begin_task(context.elevated(), task_name,
                                             begin, end, host, task_items,
                                             message)
        return jsonutils.to_primitive(result)
    def task_log_end_task(self, context, task_name, begin, end, host,
                          errors, message):
        # DB proxy: mark the end of a periodic task run.
        result = self.db.task_log_end_task(context.elevated(), task_name,
                                           begin, end, host, errors, message)
        return jsonutils.to_primitive(result)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def notify_usage_exists(self, context, instance, current_period,
                            ignore_missing_network_data,
                            system_metadata, extra_usage_info):
        # Convert old-world instances before emitting the usage
        # notification; metadata attrs are needed by the notifier.
        if not isinstance(instance, objects.Instance):
            attrs = ['metadata', 'system_metadata']
            instance = objects.Instance._from_db_object(context,
                                                        objects.Instance(),
                                                        instance,
                                                        expected_attrs=attrs)
        compute_utils.notify_usage_exists(self.notifier, context, instance,
                                          current_period,
                                          ignore_missing_network_data,
                                          system_metadata, extra_usage_info)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def security_groups_trigger_handler(self, context, event, args):
        # Proxy to the configured security group driver.
        self.security_group_api.trigger_handler(event, context, *args)
    def security_groups_trigger_members_refresh(self, context, group_ids):
        # Proxy: refresh membership for the given security groups.
        self.security_group_api.trigger_members_refresh(context, group_ids)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def network_migrate_instance_start(self, context, instance, migration):
        # Proxy to the network API (lazily constructed property).
        self.network_api.migrate_instance_start(context, instance, migration)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def network_migrate_instance_finish(self, context, instance, migration):
        # Proxy to the network API (lazily constructed property).
        self.network_api.migrate_instance_finish(context, instance, migration)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def quota_commit(self, context, reservations, project_id=None,
                     user_id=None):
        # Proxy: commit quota reservations on behalf of a compute node.
        quota.QUOTAS.commit(context, reservations, project_id=project_id,
                            user_id=user_id)
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def quota_rollback(self, context, reservations, project_id=None,
                       user_id=None):
        # Proxy: roll back quota reservations on behalf of a compute node.
        quota.QUOTAS.rollback(context, reservations, project_id=project_id,
                              user_id=user_id)
    # NOTE(hanlind): This method can be removed in version 3.0 of the RPC API
    def get_ec2_ids(self, context, instance):
        """Build the dict of EC2-style ids (instance, AMI, and optional
        kernel/ramdisk) for the given instance.
        """
        ec2_ids = {}
        ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(instance['uuid'])
        ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(context,
                                                         instance['image_ref'])
        for image_type in ['kernel', 'ramdisk']:
            image_id = instance.get('%s_id' % image_type)
            if image_id is not None:
                ec2_image_type = ec2utils.image_type(image_type)
                ec2_id = ec2utils.glance_id_to_ec2_id(context, image_id,
                                                      ec2_image_type)
                ec2_ids['%s-id' % image_type] = ec2_id
        return ec2_ids
    # NOTE(hanlind): This can be removed in version 3.0 of the RPC API
    def compute_unrescue(self, context, instance):
        # Proxy to the compute API (lazily constructed property).
        self.compute_api.unrescue(context, instance)
    def _object_dispatch(self, target, method, args, kwargs):
        """Dispatch a call to an object method.
        This ensures that object methods get called and any exception
        that is raised gets wrapped in an ExpectedException for forwarding
        back to the caller (without spamming the conductor logs).
        """
        try:
            # NOTE(danms): Keep the getattr inside the try block since
            # a missing method is really a client problem
            return getattr(target, method)(*args, **kwargs)
        except Exception:
            raise messaging.ExpectedException()
    def object_class_action(self, context, objname, objmethod,
                            objver, args, kwargs):
        """Perform a classmethod action on an object."""
        objclass = nova_object.NovaObject.obj_class_from_name(objname,
                                                              objver)
        args = tuple([context] + list(args))
        result = self._object_dispatch(objclass, objmethod, args, kwargs)
        # NOTE(danms): The RPC layer will convert to primitives for us,
        # but in this case, we need to honor the version the client is
        # asking for, so we do it before returning here.
        return (result.obj_to_primitive(target_version=objver)
                if isinstance(result, nova_object.NovaObject) else result)
    def object_action(self, context, objinst, objmethod, args, kwargs):
        """Perform an action on an object."""
        # Clone first so we can diff the object after the call and send
        # only the changed fields back to the remote caller.
        oldobj = objinst.obj_clone()
        result = self._object_dispatch(objinst, objmethod, args, kwargs)
        updates = dict()
        # NOTE(danms): Diff the object with the one passed to us and
        # generate a list of changes to forward back
        for name, field in objinst.fields.items():
            if not objinst.obj_attr_is_set(name):
                # Avoid demand-loading anything
                continue
            if (not oldobj.obj_attr_is_set(name) or
                    getattr(oldobj, name) != getattr(objinst, name)):
                updates[name] = field.to_primitive(objinst, name,
                                                   getattr(objinst, name))
        # This is safe since a field named this would conflict with the
        # method anyway
        updates['obj_what_changed'] = objinst.obj_what_changed()
        return updates, result
    def object_backport(self, context, objinst, target_version):
        # Downgrade an object to the version an older client understands.
        return objinst.obj_to_primitive(target_version=target_version)
class ComputeTaskManager(base.Base):
"""Namespace for compute methods.
This class presents an rpc API for nova-conductor under the 'compute_task'
namespace. The methods here are compute operations that are invoked
by the API service. These methods see the operation to completion, which
may involve coordinating activities on multiple compute nodes.
"""
target = messaging.Target(namespace='compute_task', version='1.11')
def __init__(self):
super(ComputeTaskManager, self).__init__()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.image_api = image.API()
self.scheduler_client = scheduler_client.SchedulerClient()
@messaging.expected_exceptions(exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceInvalidState,
exception.MigrationPreCheckError,
exception.LiveMigrationWithOldNovaNotSafe,
exception.UnsupportedPolicyException)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None,
clean_shutdown=True):
if instance and not isinstance(instance, nova_object.NovaObject):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=attrs)
# NOTE(melwitt): Remove this in version 2.0 of the RPC API
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit)
elif not live and not rebuild and flavor:
instance_uuid = instance.uuid
with compute_utils.EventReporter(context, 'cold_migrate',
instance_uuid):
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
reservations, clean_shutdown)
else:
raise NotImplementedError()
def _cold_migrate(self, context, instance, flavor, filter_properties,
reservations, clean_shutdown):
image = utils.get_image_from_system_metadata(
instance.system_metadata)
request_spec = scheduler_utils.build_request_spec(
context, image, [instance], instance_type=flavor)
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
try:
scheduler_utils.setup_instance_group(context, request_spec,
filter_properties)
scheduler_utils.populate_retry(filter_properties, instance.uuid)
hosts = self.scheduler_client.select_destinations(
context, request_spec, filter_properties)
host_state = hosts[0]
except exception.NoValidHost as ex:
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
quotas.rollback()
# if the flavor IDs match, it's migrate; otherwise resize
if flavor['id'] == instance.instance_type_id:
msg = _("No valid host found for cold migrate")
else:
msg = _("No valid host found for resize")
raise exception.NoValidHost(reason=msg)
except exception.UnsupportedPolicyException as ex:
with excutils.save_and_reraise_exception():
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
quotas.rollback()
try:
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
# context is not serializable
filter_properties.pop('context', None)
(host, node) = (host_state['host'], host_state['nodename'])
self.compute_rpcapi.prep_resize(
context, image, instance,
flavor, host,
reservations, request_spec=request_spec,
filter_properties=filter_properties, node=node,
clean_shutdown=clean_shutdown)
except Exception as ex:
with excutils.save_and_reraise_exception():
updates = {'vm_state': instance.vm_state,
'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
quotas.rollback()
def _set_vm_state_and_notify(self, context, instance_uuid, method, updates,
ex, request_spec):
scheduler_utils.set_vm_state_and_notify(
context, instance_uuid, 'compute_task', method, updates,
ex, request_spec, self.db)
def _live_migrate(self, context, instance, scheduler_hint,
block_migration, disk_over_commit):
destination = scheduler_hint.get("host")
def _set_vm_state(context, instance, ex, vm_state=None,
task_state=None):
request_spec = {'instance_properties': {
'uuid': instance.uuid, },
}
scheduler_utils.set_vm_state_and_notify(context,
instance.uuid,
'compute_task', 'migrate_server',
dict(vm_state=vm_state,
task_state=task_state,
expected_task_state=task_states.MIGRATING,),
ex, request_spec, self.db)
migration = objects.Migration(context=context.elevated())
migration.dest_compute = destination
migration.status = 'pre-migrating'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.migration_type = 'live-migration'
if instance.obj_attr_is_set('flavor'):
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = instance.flavor.id
else:
migration.old_instance_type_id = instance.instance_type_id
migration.new_instance_type_id = instance.instance_type_id
migration.create()
try:
live_migrate.execute(context, instance, destination,
block_migration, disk_over_commit,
migration)
except (exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceInvalidState,
exception.MigrationPreCheckError,
exception.LiveMigrationWithOldNovaNotSafe) as ex:
with excutils.save_and_reraise_exception():
# TODO(johngarbutt) - eventually need instance actions here
_set_vm_state(context, instance, ex, instance.vm_state)
migration.status = 'error'
migration.save()
except Exception as ex:
LOG.error(_LE('Migration of instance %(instance_id)s to host'
' %(dest)s unexpectedly failed.'),
{'instance_id': instance.uuid, 'dest': destination},
exc_info=True)
_set_vm_state(context, instance, ex, vm_states.ERROR,
instance.task_state)
migration.status = 'failed'
migration.save()
raise exception.MigrationError(reason=six.text_type(ex))
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping=None, legacy_bdm=True):
# TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
# 2.0 of the RPC API.
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
# TODO(danms): Remove this in version 2.0 of the RPC API
if (requested_networks and
not isinstance(requested_networks,
objects.NetworkRequestList)):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest.from_tuple(t)
for t in requested_networks])
# TODO(melwitt): Remove this in version 2.0 of the RPC API
flavor = filter_properties.get('instance_type')
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
filter_properties = dict(filter_properties, instance_type=flavor)
try:
scheduler_utils.setup_instance_group(context, request_spec,
filter_properties)
# check retry policy. Rather ugly use of instances[0]...
# but if we've exceeded max retries... then we really only
# have a single instance.
scheduler_utils.populate_retry(filter_properties,
instances[0].uuid)
hosts = self.scheduler_client.select_destinations(context,
request_spec, filter_properties)
except Exception as exc:
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
for instance in instances:
self._set_vm_state_and_notify(
context, instance.uuid, 'build_instances', updates,
exc, request_spec)
return
for (instance, host) in itertools.izip(instances, hosts):
try:
instance.refresh()
except (exception.InstanceNotFound,
exception.InstanceInfoCacheNotFound):
LOG.debug('Instance deleted during build', instance=instance)
continue
local_filter_props = copy.deepcopy(filter_properties)
scheduler_utils.populate_filter_properties(local_filter_props,
host)
# The block_device_mapping passed from the api doesn't contain
# instance specific information
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self.compute_rpcapi.build_and_run_instance(context,
instance=instance, host=host['host'], image=image,
request_spec=request_spec,
filter_properties=local_filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=bdms, node=host['nodename'],
limits=host['limits'])
def _schedule_instances(self, context, image, filter_properties,
*instances):
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
scheduler_utils.setup_instance_group(context, request_spec,
filter_properties)
hosts = self.scheduler_client.select_destinations(context,
request_spec, filter_properties)
return hosts
def unshelve_instance(self, context, instance):
    """Bring a shelved instance back into service.

    A SHELVED instance still resides on its host, so it is simply powered
    back on.  A SHELVED_OFFLOADED instance has been removed from its host,
    so it is rescheduled and rebuilt via the compute RPC API.  Any other
    vm_state is treated as an error.

    Args:
        context: nova request context.
        instance: Instance object to unshelve.

    Raises:
        exception.UnshelveException: if the shelved image can no longer
            be found.
    """
    sys_meta = instance.system_metadata

    def safe_image_show(ctx, image_id):
        # Look up the image, treating a falsy image_id as "not found" so
        # callers only have to handle a single error path.
        if image_id:
            return self.image_api.get(ctx, image_id, show_deleted=False)
        else:
            raise exception.ImageNotFound(image_id='')

    if instance.vm_state == vm_states.SHELVED:
        instance.task_state = task_states.POWERING_ON
        instance.save(expected_task_state=task_states.UNSHELVING)
        self.compute_rpcapi.start_instance(context, instance)
    elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
        image = None
        image_id = sys_meta.get('shelved_image_id')
        # No need to check for image if image_id is None as
        # "shelved_image_id" key is not set for volume backed
        # instance during the shelve process
        if image_id:
            with compute_utils.EventReporter(
                    context, 'get_image_info', instance.uuid):
                try:
                    image = safe_image_show(context, image_id)
                except exception.ImageNotFound:
                    # Without the image the instance cannot be rebuilt;
                    # flag it as errored before raising.
                    instance.vm_state = vm_states.ERROR
                    instance.save()
                    reason = _('Unshelve attempted but the image %s '
                               'cannot be found.') % image_id
                    LOG.error(reason, instance=instance)
                    raise exception.UnshelveException(
                        instance_id=instance.uuid, reason=reason)
        try:
            with compute_utils.EventReporter(context, 'schedule_instances',
                                             instance.uuid):
                filter_properties = {}
                scheduler_utils.populate_retry(filter_properties,
                                               instance.uuid)
                hosts = self._schedule_instances(context, image,
                                                 filter_properties,
                                                 instance)
                host_state = hosts[0]
                scheduler_utils.populate_filter_properties(
                    filter_properties, host_state)
                (host, node) = (host_state['host'], host_state['nodename'])
                self.compute_rpcapi.unshelve_instance(
                    context, instance, host, image=image,
                    filter_properties=filter_properties, node=node)
        except (exception.NoValidHost,
                exception.UnsupportedPolicyException):
            # Expected scheduling failure: clear the task state and leave
            # the instance shelved.
            instance.task_state = None
            instance.save()
            LOG.warning(_LW("No valid host found for unshelve instance"),
                        instance=instance)
            return
        except Exception:
            # Unexpected failure: clear the task state but re-raise so the
            # caller still sees the original error.
            with excutils.save_and_reraise_exception():
                instance.task_state = None
                instance.save()
                LOG.error(_LE("Unshelve attempted but an error "
                              "has occurred"), instance=instance)
    else:
        LOG.error(_LE('Unshelve attempted but vm_state not SHELVED or '
                      'SHELVED_OFFLOADED'), instance=instance)
        instance.vm_state = vm_states.ERROR
        instance.save()
        return
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                     injected_files, new_pass, orig_sys_metadata,
                     bdms, recreate, on_shared_storage,
                     preserve_ephemeral=False, host=None):
    """Rebuild (or evacuate) an instance, scheduling a host if none given.

    When no explicit host is supplied, the scheduler picks one while
    excluding the instance's current host.  Scheduling failures are
    reported through the fault/notify machinery and then re-raised.  The
    actual rebuild is delegated to the compute RPC API.
    """
    with compute_utils.EventReporter(context, 'rebuild_server',
                                     instance.uuid):
        if not host:
            # NOTE(lcostantino): Retrieve scheduler filters for the
            # instance when the feature is available
            filter_properties = {'ignore_hosts': [instance.host]}
            request_spec = scheduler_utils.build_request_spec(
                context, image_ref, [instance])
            try:
                scheduler_utils.setup_instance_group(context, request_spec,
                                                     filter_properties)
                hosts = self.scheduler_client.select_destinations(
                    context, request_spec, filter_properties)
                host = hosts.pop(0)['host']
            except exception.NoValidHost as ex:
                with excutils.save_and_reraise_exception():
                    # Record the fault and notify before re-raising.
                    self._set_vm_state_and_notify(
                        context, instance.uuid, 'rebuild_server',
                        {'vm_state': instance.vm_state,
                         'task_state': None}, ex, request_spec)
                    LOG.warning(_LW("No valid host found for rebuild"),
                                instance=instance)
            except exception.UnsupportedPolicyException as ex:
                with excutils.save_and_reraise_exception():
                    self._set_vm_state_and_notify(
                        context, instance.uuid, 'rebuild_server',
                        {'vm_state': instance.vm_state,
                         'task_state': None}, ex, request_spec)
                    LOG.warning(_LW("Server with unsupported policy "
                                    "cannot be rebuilt"),
                                instance=instance)
        self.compute_rpcapi.rebuild_instance(
            context,
            instance=instance,
            new_pass=new_pass,
            injected_files=injected_files,
            image_ref=image_ref,
            orig_image_ref=orig_image_ref,
            orig_sys_metadata=orig_sys_metadata,
            bdms=bdms,
            recreate=recreate,
            on_shared_storage=on_shared_storage,
            preserve_ephemeral=preserve_ephemeral,
            host=host)
| {
"content_hash": "8e6ce757d964c240cfd20bc5167ce0f1",
"timestamp": "",
"source": "github",
"line_count": 853,
"max_line_length": 79,
"avg_line_length": 49.89800703399766,
"alnum_prop": 0.5623193853816695,
"repo_name": "kimjaejoong/nova",
"id": "c1327b716b2ac03dbe54e689f6406a5d50dbb711",
"size": "43168",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/conductor/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16290670"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "282020"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from koica.views import QuestionListView, QuestionCreateView, QuestionDetailView, AnswerCreateView, CommentCreateView, CommentAnswerCreateView, updateQuestionRatingView, QuestionRatingView, AnswerRatingView, updateAnswerRatingView, approveAnswerView, AnswerApprovedView, QuestionTagView, QuestionEditView, AnswerEditView, QuestionReportDuplicatedView
urlpatterns = [
    # Question list filtered by tag slug.
    url(r'^tag/(?P<slug>[-_\w]+)/$', QuestionTagView.as_view(), name='question-tag'),
    # App index: full question list.
    url(r'^$', QuestionListView.as_view(), name='question-list'),
    # Authenticated creation/edit views ("repondre" = answer,
    # "editer" = edit, "commenter" = comment).
    url(r'^add/$', login_required(QuestionCreateView.as_view()), name="question-add"),
    url(r'^repondre/(?P<slug>[-_\w]+)/$', login_required(AnswerCreateView.as_view()), name='answer-form'),
    url(r'^editer/(?P<slug>[-_\w]+)/$', login_required(QuestionEditView.as_view()), name='edit-form'),
    url(r'^reponse/editer/(?P<pk>[0-9]+)/$', login_required(AnswerEditView.as_view()), name='answer-edit-form'),
    url(r'^commenter/(?P<slug>[-_\w]+)/(?P<answer_pk>\d+)/$', login_required(CommentAnswerCreateView.as_view()), name='comment-answer-form'),
    url(r'^commenter/(?P<slug>[-_\w]+)/$', login_required(CommentCreateView.as_view()), name='comment-form'),
    # Rating display and update endpoints for questions and answers.
    url(r'^(?P<slug>[-_\w]+)/rating/$', QuestionRatingView.as_view(), name='question-rating-view'),
    url(r'^answer/(?P<pk>[0-9]+)/rating/$', AnswerRatingView.as_view(), name='answer-rating'),
    url(r'^(?P<slug>[-_\w]+)/rating/(?P<operator>[\w]+)/$', updateQuestionRatingView, name='question-rating-rate'),
    # NOTE(review): 'anwser-rating-rate' looks like a typo for
    # 'answer-rating-rate', but renaming would break reverse() lookups in
    # templates/views -- confirm all callers before fixing.
    url(r'^answer/(?P<pk>[0-9]+)/rating/(?P<operator>[\w]+)/$', updateAnswerRatingView, name='anwser-rating-rate'),
    url(r'^(?P<answer_id>[0-9]+)/approve/$', login_required(approveAnswerView), name='approve-answer'),
    url(r'^(?P<pk>[0-9]+)/approved/$', login_required(AnswerApprovedView.as_view()), name='answer-approved'),
    url(r'^(?P<slug>[-_\w]+)/duplicat/signaler/$', QuestionReportDuplicatedView.as_view(), name='question-report-duplicated'),
    # Catch-all slug route: must stay last so it does not shadow the more
    # specific patterns above.
    url(r'^(?P<slug>[-_\w]+)/$', QuestionDetailView.as_view(), name='question-detail'),
] | {
"content_hash": "e5afcf8b69861235273f4ea5a480e63e",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 350,
"avg_line_length": 96.81818181818181,
"alnum_prop": 0.6882629107981221,
"repo_name": "synw/django-koica",
"id": "8f53c7e8f36435113683c10bf999020c08554e75",
"size": "2130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "koica/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1544"
},
{
"name": "HTML",
"bytes": "20549"
},
{
"name": "JavaScript",
"bytes": "3959"
},
{
"name": "Python",
"bytes": "34132"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django import dispatch
# Signal emitted after a user has been authenticated through CAS.
# NOTE(review): `providing_args` is purely documentary; it was deprecated in
# Django 3.0 and removed in 4.0 -- drop the argument when upgrading Django.
cas_user_authenticated = dispatch.Signal(
    providing_args=['user', 'created', 'attributes', 'ticket', 'service'],
)
| {
"content_hash": "51e6506a49639281b3592685cb071837",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 74,
"avg_line_length": 31.166666666666668,
"alnum_prop": 0.7219251336898396,
"repo_name": "11h42/django-cas-ng",
"id": "0a01144169f7544220b9d1d72bf254258ede3e43",
"size": "187",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django_cas_ng/signals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "322"
},
{
"name": "Makefile",
"bytes": "231"
},
{
"name": "Python",
"bytes": "44672"
}
],
"symlink_target": ""
} |
from bayesdb.client import Client
from cmd2 import Cmd
import sys
class BayesDBApp(Cmd):
    """Provides "interactive mode" features."""
    Cmd.redirector = '>>>'

    # BQL keywords exposed as first-class shell commands.  Each keyword
    # gets a generated do_<keyword> handler (below) so the cmd framework
    # offers it for dispatch/completion; every handler simply forwards
    # "<keyword> <rest of line>" to the BayesDB client, exactly as the
    # previous hand-written methods did.
    _COMMANDS = (
        'show', 'list', 'analyze', 'execute', 'drop', 'initialize',
        'create', 'infer', 'select', 'simulate', 'save', 'load',
        'estimate', 'update', 'help')

    def __init__(self, client):
        """Args:
            client: a bayesdb.client.Client used to execute all commands.
        """
        self.client = client
        self.prompt = 'bql> '
        Cmd.__init__(self, 'tab')

    def default(self, line):
        # Unrecognized input is still sent to the client untouched.
        self.client(str(line))


def _make_command_handler(keyword):
    """Build a do_<keyword> handler forwarding the line to the client."""
    def handler(self, line):
        self.client(keyword + ' ' + str(line))
    return handler

# Attach the generated handlers; replaces 15 duplicated two-line methods.
for _keyword in BayesDBApp._COMMANDS:
    setattr(BayesDBApp, 'do_' + _keyword, _make_command_handler(_keyword))
def run_command_line():
# Get command line arguments to specify hostname and port
hostname = None
port = None
if len(sys.argv) > 1:
# Treat the first argument as hostname[:port]
input = sys.argv[1].split(':')
hostname = input[0]
if len(input) == 1:
client = Client(hostname)
print "Using hostname %s." % hostname
if len(input) == 2:
port = int(input[1])
client = Client(hostname, port)
print "Using hostname %s, port %d" % (hostname, port)
elif len(input) > 2:
print "Run with 'python bql [hostname[:port]]'"
else:
client = Client()
print """Welcome to BayesDB. You may enter BQL commands directly into this prompt. Type 'help' for help, and 'quit' to quit."""
app = BayesDBApp(client)
app.cmdloop()
# Allow running this module directly as the interactive BQL shell.
if __name__ == "__main__":
    run_command_line()
| {
"content_hash": "221b07e94474dddd21d5bd734608e78d",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 129,
"avg_line_length": 24.96590909090909,
"alnum_prop": 0.6144742831133364,
"repo_name": "poppingtonic/BayesDB",
"id": "579a32f6936fdf805f5e7033de3cfe424ab6b78b",
"size": "3002",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bayesdb/bql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "515"
},
{
"name": "HTML",
"bytes": "941719"
},
{
"name": "JavaScript",
"bytes": "18"
},
{
"name": "Python",
"bytes": "698491"
},
{
"name": "Ruby",
"bytes": "1866"
},
{
"name": "Shell",
"bytes": "817"
}
],
"symlink_target": ""
} |
from learntools.core import *
from learntools.core.asserts import assert_equal
from learntools.time_series.checking_utils import load_average_sales, load_holidays_events
class Q1(ThoughtExperiment):  # Determine seasonality
    # Free-response question: only the model answer below is displayed.
    _solution = """Both the seasonal plot and the periodogram suggest a strong weekly seasonality. From the periodogram, it appears there may be some monthly and biweekly components as well. In fact, the notes to the *Store Sales* dataset say wages in the public sector are paid out biweekly, on the 15th and last day of the month -- a possible origin for these seasons.
"""
class Q2(CodingProblem):  # Create seasonal features
    _vars = ['dp', 'X']

    def check(self, dp, X):
        """Validate the student's DeterministicProcess `dp` and matrix `X`.

        Bug fix: the reference process is now built under a separate name
        (`dp_true`).  Previously it was assigned to `dp`, shadowing the
        student's submission, so every assertion about `dp` inspected the
        reference object and passed vacuously.
        """
        from statsmodels.tsa.deterministic import (CalendarFourier,
                                                   DeterministicProcess)
        y = load_average_sales()['2017']
        # Build the reference answer.
        fourier = CalendarFourier(freq='M', order=4)
        dp_true = DeterministicProcess(
            index=y.index,
            constant=True,
            order=1,
            seasonal=True,
            additional_terms=[fourier],
            drop=True,
        )
        X_true = dp_true.in_sample()

        import pandas as pd
        # Inspect the student's process via statsmodels' private attributes
        # so feedback can name the specific argument that is wrong.
        assert all(
            dp._index == y.index
        ), f"`index` argument to `DeterministicProcess` should be `y.index`. You gave {dp._index}."
        assert dp._constant, f"`constant` argument to `DeterministicProcess` should be `True`. You gave {dp._constant}."
        assert dp._order == 1, f"`order` argument to `DeterministicProcess` should be `1`. You gave {dp._order}."
        assert dp._seasonal, f"`seasonal` argument to `DeterministicProcess` should be `True`. You gave {dp._seasonal}."
        assert len(
            dp._additional_terms
        ) == 1, f"`additional_terms` argument to `DeterministicProcess` should be `[fourier]`. You gave {dp._additional_terms}."
        assert isinstance(
            dp._additional_terms[0], CalendarFourier
        ), f"`additional_terms` argument to `DeterministicProcess` should be `[fourier]`. You gave {dp._additional_terms}."
        assert dp._additional_terms[
            0]._order == 4, f"`order` argument to `CalendarFourier` should be `4`. You gave {dp._additional_terms[0]._order}."
        assert isinstance(
            dp._additional_terms[0]._freq, pd.offsets.MonthEnd
        ), f"`freq` argument to `CalendarFourier` should be `'M'`."
        assert dp._drop, f"`additional_terms` argument to `DeterministicProcess` should be `True`. You gave {dp._drop}."
        assert_equal(X, X_true, 'X')

    _hint = """Your answer should look like:
```python
y = average_sales.copy()
fourier = CalendarFourier(____)
dp = DeterministicProcess(
    index=y.index,
    constant=True,
    order=1,
    seasonal=____,
    additional_terms=[____],
    drop=True,
)
X = dp.in_sample()
```
"""
    _solution = CS("""
y = average_sales.copy()
fourier = CalendarFourier(freq='M', order=4)
dp = DeterministicProcess(
    index=y.index,
    constant=True,
    order=1,
    seasonal=True,
    additional_terms=[fourier],
    drop=True,
)
X = dp.in_sample()
""")
class Q3(ThoughtExperiment):  # Check for remaining seasonality
    # Free-response question: only the model answer below is displayed.
    _solution = """The periodogram for the deseasonalized series lacks any large values. By comparing it to the periodogram for the original series, we can see that our model was able to capture the seasonal variation in *Average Sales*.
"""
class Q4(EqualityCheckProblem):  # Create holiday features
    # NOTE: the statements below run once at import time to precompute the
    # expected one-hot holiday matrix used by the equality check.
    import pandas as pd

    holidays_events = load_holidays_events()

    # National and regional holidays in the training set
    holidays = (  #
        holidays_events  #
        .query("locale in ['National', 'Regional']")  #
        .loc['2017':'2017-08-15', ['description']]  #
        .assign(
            description=lambda x: x.description.cat.remove_unused_categories()
        )  #
    )  #
    # Reference answer: one-hot encode the holiday descriptions.
    X_holidays = pd.get_dummies(holidays).to_numpy()

    _vars = ['X_holidays']
    _expected = [X_holidays]

    _hints = [
        """With Pandas, you could use `pd.get_dummies`. With scikit-learn, you could use `sklearn.preprocessing.OneHotEncoder`. Using Pandas makes it easier to join `X_holidays` to `X2` since it returns a `DataFrame` retaining the date of each holiday.""",
        """In Pandas, your solution would look like:
```python
X_holidays = pd.get_dummies(____)
X2 = X.join(X_holidays, on='date').fillna(0.0)
```
<p>
In scikit-learn, your solution would look like:
```python
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(sparse=False)
X_holidays = pd.DataFrame(
    ____,
    index=____,
    columns=holidays.description.unique(),  # optional, but nice to have
)
X2 = X.join(X_holidays, on='date').fillna(0.0)
```
"""
    ]
    _solution = CS("""
# Scikit-learn solution
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(sparse=False)
X_holidays = pd.DataFrame(
    ohe.fit_transform(holidays),
    index=holidays.index,
    columns=holidays.description.unique(),
)

# Pandas solution
X_holidays = pd.get_dummies(holidays)

# Join to training data
X2 = X.join(X_holidays, on='date').fillna(0.0)
""")
# Bind Q1-Q4 to module-level names q_1..q_4 and export only those names.
qvars = bind_exercises(globals(), [Q1, Q2, Q3, Q4], var_format="q_{n}")
__all__ = list(qvars)
| {
"content_hash": "0cde348f34ddc343b88a20b20630ba3c",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 370,
"avg_line_length": 33.81410256410256,
"alnum_prop": 0.6470142180094787,
"repo_name": "Kaggle/learntools",
"id": "05022dae40ca95cdfa5df84d2cf6aedc02518ab1",
"size": "5275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "learntools/time_series/ex3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2923820"
},
{
"name": "Python",
"bytes": "733115"
},
{
"name": "Shell",
"bytes": "25940"
}
],
"symlink_target": ""
} |
"""Runs both the Python and Java tests."""
import optparse
import os
import sys
import time
from pylib import buildbot_report
from pylib import constants
from pylib import ports
from pylib.base.test_result import TestResults
from pylib.host_driven import run_python_tests
from pylib.instrumentation import apk_info
from pylib.instrumentation import run_java_tests
from pylib.utils import run_tests_helper
from pylib.utils import test_options_parser
def DispatchInstrumentationTests(options):
  """Dispatches the Java and Python instrumentation tests, sharding if possible.

  Runs the Java suite, the Python suite, or both, depending on the
  run_java_tests/run_python_tests options, and logs the combined results.

  Args:
    options: command-line options for running the Java and Python tests.

  Returns:
    An integer representing the number of broken tests.
  """
  if not options.keep_test_server_ports:
    # Test server ports must be reset before any test is dispatched.
    if not ports.ResetTestServerPortAllocation():
      raise Exception('Failed to reset test server port.')

  start_date = int(time.time() * 1000)

  java_results = TestResults()
  python_results = TestResults()

  if options.run_java_tests:
    apks = [apk_info.ApkInfo(options.test_apk_path, options.test_apk_jar_path)]
    java_results = run_java_tests.DispatchJavaTests(options, apks)
  if options.run_python_tests:
    python_results = run_python_tests.DispatchPythonTests(options)

  combined = TestResults.FromTestResults([java_results, python_results])
  combined.LogFull(
      test_type='Instrumentation',
      test_package=options.test_apk,
      annotation=options.annotation,
      build_type=options.build_type,
      flakiness_server=options.flakiness_dashboard_server)
  return len(combined.GetAllBroken())
def main(argv):
  """Parses options, runs the tests, and reports results to the buildbot."""
  option_parser = optparse.OptionParser()
  test_options_parser.AddInstrumentationOptions(option_parser)
  options, args = option_parser.parse_args(argv)
  test_options_parser.ValidateInstrumentationOptions(
      option_parser, options, args)

  run_tests_helper.SetLogLevel(options.verbose_count)

  exit_code = 1
  try:
    exit_code = DispatchInstrumentationTests(options)
  finally:
    # Always report the step result, even if dispatching raised.
    buildbot_report.PrintStepResultIfNeeded(options, exit_code)
  return exit_code
# Script entry point: process exit code is the number of broken tests
# (0 means success), or 1 on an unexpected failure.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| {
"content_hash": "46f78b4275a1ce9e80d849fdfffb5b71",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 80,
"avg_line_length": 32.46835443037975,
"alnum_prop": 0.7360623781676413,
"repo_name": "nacl-webkit/chrome_deps",
"id": "43925ae78a304150678b7d310f663162dc430e0f",
"size": "2756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/android/run_instrumentation_tests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1173441"
},
{
"name": "Awk",
"bytes": "9519"
},
{
"name": "C",
"bytes": "74568368"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "156174457"
},
{
"name": "DOT",
"bytes": "1559"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Java",
"bytes": "3088381"
},
{
"name": "JavaScript",
"bytes": "18179048"
},
{
"name": "Logos",
"bytes": "4517"
},
{
"name": "M",
"bytes": "2190"
},
{
"name": "Matlab",
"bytes": "3044"
},
{
"name": "Objective-C",
"bytes": "6965520"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "932725"
},
{
"name": "Python",
"bytes": "8458718"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3621"
},
{
"name": "Shell",
"bytes": "1526176"
},
{
"name": "Tcl",
"bytes": "277077"
},
{
"name": "XSLT",
"bytes": "13493"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import atexit
import shutil
import signal
import tempfile
import threading
import grpc
from apache_beam.options import pipeline_options
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.runners.portability import local_job_service
from apache_beam.utils import subprocess_server
from apache_beam.version import __version__ as beam_version
class JobServer(object):
  """Interface for services that accept Beam job submissions."""

  def start(self):
    """Bring this job server up.

    Returns:
      A grpc job service (stub) to which jobs may be submitted.
    """
    raise NotImplementedError(type(self))

  def stop(self):
    """Tear this job server down."""
    raise NotImplementedError(type(self))
class ExternalJobServer(JobServer):
  """A JobServer reached at a known, already-running endpoint."""

  def __init__(self, endpoint, timeout=None):
    # endpoint: host:port string of the running job service.
    # timeout: seconds to wait for the channel to become ready in start();
    #     None waits indefinitely.
    self._endpoint = endpoint
    self._timeout = timeout

  def start(self):
    # type: () -> beam_job_api_pb2_grpc.JobServiceStub
    """Connects to the endpoint, blocking until the channel is ready."""
    channel = grpc.insecure_channel(self._endpoint)
    grpc.channel_ready_future(channel).result(timeout=self._timeout)
    return beam_job_api_pb2_grpc.JobServiceStub(channel)

  def stop(self):
    # The server's lifecycle is managed externally; nothing to stop here.
    pass
class EmbeddedJobServer(JobServer):
  """A JobServer hosted in-process by a LocalJobServicer."""

  def start(self):
    # type: () -> local_job_service.LocalJobServicer
    return local_job_service.LocalJobServicer()

  def stop(self):
    # The servicer returned by start() is not tracked here, so there is
    # nothing for this wrapper to shut down.
    pass
class StopOnExitJobServer(JobServer):
  """Wraps a JobServer such that its stop will automatically be called on exit.

  stop() is registered both with atexit (normal interpreter shutdown) and
  as a SIGINT handler (Ctrl-C).  Both registration and teardown are
  guarded by a lock and a started flag so they happen at most once.
  """
  def __init__(self, job_server):
    self._lock = threading.Lock()
    self._job_server = job_server
    self._started = False

  def start(self):
    with self._lock:
      if not self._started:
        self._endpoint = self._job_server.start()
        self._started = True
        atexit.register(self.stop)
        # Bug fix: signal handlers are invoked as handler(signum, frame).
        # Registering the bound method directly made Ctrl-C raise
        # TypeError (stop() takes no extra arguments) instead of
        # stopping the server, so wrap it in a lambda that drops the
        # handler arguments.
        signal.signal(signal.SIGINT, lambda *unused_args: self.stop())
      return self._endpoint

  def stop(self):
    with self._lock:
      if self._started:
        self._job_server.stop()
        self._started = False
class SubprocessJobServer(JobServer):
  """An abstract base class for JobServers run as an external process."""

  def __init__(self):
    # Root for temp files: created lazily in start(), removed in stop().
    self._local_temp_root = None
    self._server = None

  def subprocess_cmd_and_endpoint(self):
    """Returns (command argv, endpoint) used to launch the subprocess."""
    raise NotImplementedError(type(self))

  def start(self):
    if self._server is None:
      self._local_temp_root = tempfile.mkdtemp(prefix='beam-temp')
      cmd, endpoint = self.subprocess_cmd_and_endpoint()
      # The port is the last component of the host:port endpoint string.
      port = int(endpoint.split(':')[-1])
      self._server = subprocess_server.SubprocessServer(
          beam_job_api_pb2_grpc.JobServiceStub, cmd, port=port)
    return self._server.start()

  def stop(self):
    if self._local_temp_root:
      shutil.rmtree(self._local_temp_root)
      self._local_temp_root = None
    # NOTE(review): raises AttributeError if stop() is called before
    # start() (self._server is still None) -- presumably callers never do
    # that; confirm before relying on it.
    return self._server.stop()

  def local_temp_dir(self, **kwargs):
    """Creates and returns a temp dir under this server's temp root."""
    return tempfile.mkdtemp(dir=self._local_temp_root, **kwargs)
class JavaJarJobServer(SubprocessJobServer):
  """A job server run as a ``java -jar`` subprocess."""

  def __init__(self, options):
    super(JavaJarJobServer, self).__init__()
    # Ports and artifact staging dir come from pipeline JobServerOptions.
    options = options.view_as(pipeline_options.JobServerOptions)
    self._job_port = options.job_port
    self._artifact_port = options.artifact_port
    self._expansion_port = options.expansion_port
    self._artifacts_dir = options.artifacts_dir

  def java_arguments(
      self, job_port, artifact_port, expansion_port, artifacts_dir):
    """Returns the argv appended after the jar path; subclass hook."""
    raise NotImplementedError(type(self))

  def path_to_jar(self):
    """Returns a path or URL for the job server jar; subclass hook."""
    raise NotImplementedError(type(self))

  @staticmethod
  def path_to_beam_jar(gradle_target):
    return subprocess_server.JavaJarServer.path_to_beam_jar(gradle_target)

  @staticmethod
  def local_jar(url):
    return subprocess_server.JavaJarServer.local_jar(url)

  def subprocess_cmd_and_endpoint(self):
    jar_path = self.local_jar(self.path_to_jar())
    # Use the configured artifacts dir, or a fresh temp dir under this
    # server's temp root when none was given.
    artifacts_dir = (
        self._artifacts_dir if self._artifacts_dir else self.local_temp_dir(
            prefix='artifacts'))
    # NOTE(review): pick_port presumably resolves an unset/0 port to a
    # concrete free one -- confirm in subprocess_server.pick_port.
    job_port, = subprocess_server.pick_port(self._job_port)
    return (['java', '-jar', jar_path] + list(
        self.java_arguments(
            job_port, self._artifact_port, self._expansion_port,
            artifacts_dir)),
        'localhost:%s' % job_port)
| {
"content_hash": "ae2677e337258086c61f3a1d20e185a3",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 79,
"avg_line_length": 29.697841726618705,
"alnum_prop": 0.6877422480620154,
"repo_name": "iemejia/incubator-beam",
"id": "ec40ae843b648b3599b7ff23035a722717905be7",
"size": "4934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/portability/job_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22216"
},
{
"name": "Java",
"bytes": "9687045"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
} |
import abc
from typing import Awaitable, Callable, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v12.services.types import payments_account_service
try:
    # Advertise the installed google-ads package version in client info
    # (used for the user-agent of outgoing API requests).
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-ads",).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. running from source): fall back
    # to a version-less ClientInfo.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class PaymentsAccountServiceTransport(abc.ABC):
    """Abstract transport class for PaymentsAccountService."""

    # OAuth scope required by all Google Ads API calls.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)

    DEFAULT_HOST: str = "googleads.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id,
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        # (hasattr guards against older google-auth versions that lack the
        # with_always_use_jwt_access method.)
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(
                service_account.Credentials, "with_always_use_jwt_access"
            )
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods (retry/timeout/user-agent applied
        # once per transport instead of per call).
        self._wrapped_methods = {
            self.list_payments_accounts: gapic_v1.method.wrap_method(
                self.list_payments_accounts,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def list_payments_accounts(
        self,
    ) -> Callable[
        [payments_account_service.ListPaymentsAccountsRequest],
        Union[
            payments_account_service.ListPaymentsAccountsResponse,
            Awaitable[payments_account_service.ListPaymentsAccountsResponse],
        ],
    ]:
        # Concrete (sync/async) transports provide the actual RPC callable.
        raise NotImplementedError()
__all__ = ("PaymentsAccountServiceTransport",)
| {
"content_hash": "d116ea6e9741b96dc6f56f6057bc1a5e",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 101,
"avg_line_length": 38.776978417266186,
"alnum_prop": 0.6263450834879406,
"repo_name": "googleads/google-ads-python",
"id": "cfcc426b1743b8a85286474edc1dc753632d98ca",
"size": "5990",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v12/services/services/payments_account_service/transports/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
"""Module containing classes to connect to Simian as a client."""
import datetime
import httplib
import logging
import mimetools
import os
import platform
import subprocess
import sys
import tempfile
import time
import urllib
import urlparse
import warnings
from M2Crypto import SSL
from M2Crypto.SSL import Checker
from simian.auth import x509
from simian import auth
from simian import settings
from simian.auth import client as auth_client
from simian.auth import util
# Silence the DeprecationWarning emitted for the md5 module on older
# Python versions.
warnings.filterwarnings(
    'ignore', '.* md5 module .*', DeprecationWarning, '.*', 0)

# seek constants moved from posixfile(2.4) to os(2.5+)
if sys.version_info[0] <= 2 and sys.version_info[1] <= 4:
  warnings.filterwarnings(
      'ignore', '', DeprecationWarning, 'posixfile', 0)
  import posixfile as _stdio  # pylint: disable=g-import-not-at-top
else:
  import os as _stdio  # pylint: disable=g-import-not-at-top,reimported

# Number of HTTP attempts before giving up, and the status codes that
# trigger a retry.
DEFAULT_HTTP_ATTEMPTS = 4
DEFAULT_RETRY_HTTP_STATUS_CODES = frozenset([500, 502, 503, 504])

# Server connection parameters sourced from Simian settings.
SERVER_HOSTNAME = settings.SERVER_HOSTNAME
SERVER_PORT = settings.SERVER_PORT
AUTH_DOMAIN = settings.AUTH_DOMAIN
CLIENT_SSL_PATH = settings.CLIENT_SSL_PATH

# Re-export seek whence constants from whichever module provides them.
SEEK_SET = _stdio.SEEK_SET
SEEK_CUR = _stdio.SEEK_CUR
SEEK_END = _stdio.SEEK_END

DEBUG = False
if DEBUG:
  logging.getLogger().setLevel(logging.DEBUG)

# Server endpoint for package uploads.
URL_UPLOADPKG = '/uploadpkg'

# SSL protocol selection and cipher list (None = M2Crypto default).
_SSL_VERSION = 'sslv23'
_CIPHER_LIST = None
class Error(Exception):
  """Base class for all errors raised by this module."""


class HTTPError(Error):
  """HTTP error."""


class SimianServerError(Error):
  """Simian server error."""


class SimianClientError(Error):
  """Simian client error."""


class SudoExecError(Error):
  """Error in SudoExec."""


class PuppetSslCertError(Error):
  """Error in _GetPuppetSslDetailsForCert."""


class FacterError(Error):
  """Error when using facter."""
class Response(object):
  """Response from https server.

  Attributes (also class-level defaults):
    status: int HTTP status, like 200, 400, etc.
    reason: str, like 'Bad Request' or 'OK'.
    headers: dict of response headers.
    body: str, response body.
    body_len: int, length of the response body.
  """

  status = None
  reason = None
  headers = None
  body = None
  body_len = None

  def __init__(
      self, status, reason=None, body=None, headers=None, body_len=None):
    """Init the instance.

    Args:
      status: int, like 200, 400, etc
      reason: str, like 'Bad Request' or 'OK'
      body: str, optional, body of the response
      headers: dict or list, like
          {'Content-type': 'foo'} or
          [('Content-type', 'foo')]
      body_len: int, optional, length of the response body
    """
    self.status = status
    self.reason = reason
    self.body = body
    self.body_len = body_len
    # isinstance (rather than exact type checks) also accepts dict/list
    # subclasses; dict() of (key, value) pairs replicates the previous
    # manual loop, with later duplicate keys winning.
    if isinstance(headers, dict):
      self.headers = headers
    elif isinstance(headers, list):
      self.headers = dict(headers)

  def IsSuccess(self):
    """Returns True on success (2xx status), False otherwise."""
    return 200 <= self.status <= 299

  def IsRedirect(self):
    """Returns True on redirect (3xx status), False otherwise."""
    return 300 <= self.status <= 399

  def IsClientError(self):
    """Returns True on client error (4xx status), False otherwise."""
    return 400 <= self.status <= 499

  def IsServerError(self):
    """Returns True on server error (5xx status), False otherwise."""
    return 500 <= self.status <= 599

  def IsError(self):
    """Returns True on client or server error (4xx/5xx), False otherwise."""
    return 400 <= self.status <= 599
class MultiBodyConnection:  # pylint: disable=g-old-style-class,no-init
  """Connection which can send multiple items as request body.

  Mixin for httplib connection classes: request() accepts a list of body
  parts (strings and/or file-like objects) and streams them out itself
  instead of letting httplib buffer the whole body.
  """

  # types we are willing to send in one block
  DIRECT_SEND_TYPES = [str]

  def SetProgressCallback(self, fn):
    """Set function to callback to with transfer progress.

    Args:
      fn: function which will receive (bytes sent, bytes total to send)
          arguments
    Raises:
      Error: if non callable item is passed as fn
    """
    if not callable(fn):
      raise Error('SetProgressCallback argument fn must be callable')
    self._progress_callback = fn

  def _ProgressCallback(self, bytes_sent, bytes_total):
    """Call the progress callback with current transfer data.

    No-op if no callback was set via SetProgressCallback().

    Args:
      bytes_sent: int, bytes sent
      bytes_total: int, total bytes that will be sent
    """
    if hasattr(self, '_progress_callback'):
      self._progress_callback(bytes_sent, bytes_total)

  def request(self, method, url, body=None, headers=None):
    """Send HTTP/HTTPS request.

    The arguments are the same as httplib.HTTPConnection.request(),
    except for the argument "body".

    Args:
      method: str, like 'GET'
      url: str, url like '/path', not like 'http://server/path'
      body: list or str or file-like obj.
          If a list, multiple items are sent in list order.
          For each item, if a str is passed, it is sent directly. If a file
          or file-like object (implementing tell(), seek(), read()) is
          supplied, it is read from in blocks and output directly into
          the HTTP stream. This facilitates sending large files, etc,
          without buffering the entire body in memory.
      headers: dict, headers to supply
    """
    # NOTE(user): if you need extreme amounts of http debugging uncomment
    # the following line:
    # self.debuglevel = 9
    if headers is None:
      headers = {}
    if body is not None:
      if type(body) is list:
        multibody = body
      else:
        multibody = [body]
      # Pre-compute total Content-Length across all body parts.
      content_length = 0
      for body in multibody:
        if type(body) in self.DIRECT_SEND_TYPES:
          content_length += len(body)
        elif hasattr(body, 'tell') and hasattr(
            body, 'seek') and hasattr(body, 'read'):
          # File-like part: measure bytes remaining from the current
          # position without consuming the stream.
          orig_pos = body.tell()
          body.seek(0, SEEK_END)
          content_length += body.tell() - orig_pos
          body.seek(orig_pos, SEEK_SET)
        else:
          raise NotImplementedError('multibody for type %s' % type(body))
      # supply this pre-calculated value to stop the parent class
      # from trying to figure it out with len().
      headers['Content-Length'] = content_length
    else:
      multibody = []
      content_length = 0
    # IMPORTANT WORKAROUND for AppEngine /_ah/upload/ service.
    #
    # Python's HTTPConnection adds a ':port' suffix to the Host header
    # when the port is not 80. In this case we might be using
    # https so the Host header becomes "host:443".
    #
    # AppEngine 500s when it receives a Host header like
    # "APPID.appspot.com:443".
    #
    # So, we self-assign the Host header and HTTPConnection won't
    # auto-calculate it for us.
    #
    # TODO(user): This is the most conservative fix. We know that
    # AppEngine will tolerate Host: foo(noportspecified) for a https
    # tcp/443 connection. Upon further investigation with AppEngine
    # Upload Service we could possibly refine this logic further.
    if self._is_https and self.port == 443:
      headers['Host'] = self.host
    # don't pass a body here -- let's manage sending it ourselves.
    # the connection is ready for it after this request() completes.
    # note python >=2.7 httplib now offers this functionality for us,
    # but we are continuing to do it ourselves.
    httplib.HTTPConnection.request(
        self, method, url, headers=headers)
    bytes_sent = 0
    self._ProgressCallback(bytes_sent, content_length)
    # now, send the body sections, the connection is ready.
    for body in multibody:
      if type(body) in self.DIRECT_SEND_TYPES:
        if body != '':  # sending '' blows up M2Crypto write() sometimes.
          self.send(body)
        bytes_sent += len(body)
        self._ProgressCallback(bytes_sent, content_length)
      else:
        # Stream file-like parts in 8 KiB chunks.
        buf = body.read(8192)
        while buf != '':
          self.send(buf)
          bytes_sent += len(buf)
          self._ProgressCallback(bytes_sent, content_length)
          buf = body.read(8192)
    # Final callback so listeners always see the completed total.
    self._ProgressCallback(bytes_sent, content_length)
class HTTPMultiBodyConnection(MultiBodyConnection, httplib.HTTPConnection):
  """HTTP multi-body connection implemented over HTTP."""

  # Plain HTTP: request() skips the https-only Host header workaround.
  _is_https = False
class HTTPSMultiBodyConnection(MultiBodyConnection, httplib.HTTPSConnection):
  """HTTP multi-body connection implemented over HTTPS (via M2Crypto)."""

  # https: request() applies the AppEngine Host header workaround on :443.
  _is_https = True

  def __init__(self, *args, **kwargs):
    # Note: MultiBodyConnection has no __init__. Change this if it ever does.
    # MultiBodyConnection.__init__(*args, **kwargs)
    httplib.HTTPSConnection.__init__(self, *args, **kwargs)

  @classmethod
  def SetCACertChain(cls, certs):
    """Set the CA certificate chain to verify SSL peer (server) certs.

    NOTE: Without having called this method to set a CA chain to verify
    against, calling connect() in the future will fail out of paranoia.

    Args:
      certs: str, one or more X509 certificates concatenated after
          another. the only required delimiter between certs is that
          each cert start on a new line. (but an empty line is not required)
    """
    # Stored at class level on purpose, shared by all instances.
    cls._ca_cert_chain = certs

  def _IsValidCert(self, ok, store):
    """Determine whether a cert is valid.

    This method is called from M2Crypto set_verify as a hook. It is called
    once for each cert in the chain that is used to validate the SSL
    connection.

    Args:
      ok: int, always 1 or 0
      store: M2Crypto.X509.X509_Store_Context
    Returns:
      1 if valid, 0 if not
    """
    # if openssl has verified this cert ok==1, otherwise 0.
    if ok != 1:
      subject = str(store.get_current_cert().get_subject())
      logging.debug(
          'IsValidCert() ok=%s cert=%s, returning 0', str(ok), subject)
    # Normalize to int 1/0 as the verify hook expects.
    return (ok == 1) * 1

  def _LoadCACertChain(self, ctx):
    """Load a CA certificate chain into a SSL context.

    This includes setting the context verify modes to require certificate
    validation on the peer's cert.

    Args:
      ctx: M2Crypto.SSL.Context, to load certificate chain into
    Returns:
      None if successful
    Raises:
      SimianClientError: if any errors occur in finding a chain of
          certs to load (e.g. none supplied), or in parsing and loading them
    """
    if not hasattr(self, '_ca_cert_chain'):
      raise SimianClientError('Missing CA certificate chain')
    # load_verify_locations() reads from a file, so stage the in-memory
    # chain in a named temp file for the duration of the load.
    tf = tempfile.NamedTemporaryFile()
    tf.write(self._ca_cert_chain)
    tf.flush()
    if ctx.load_verify_locations(cafile=tf.name) != 1:
      tf.close()
      raise SimianClientError('Could not load CA certificate chain')
    ctx.set_verify(
        SSL.verify_peer | SSL.verify_fail_if_no_peer_cert,
        depth=9,
        callback=self._IsValidCert)
    tf.close()
    logging.debug(
        'Loaded %d bytes of CA cert chain and configured ctx',
        len(self._ca_cert_chain))

  def connect(self):
    """Connect to the host and port specified in __init__."""
    server_address = ((self.host, self.port))
    ctx = SSL.Context(_SSL_VERSION)
    if _CIPHER_LIST:
      ctx.set_cipher_list(_CIPHER_LIST)
    if hasattr(self, '_ca_cert_chain'):
      self._LoadCACertChain(ctx)
    else:
      # Deliberately refuse to connect without a CA chain to verify against.
      raise SimianClientError('Missing CA certificate chain')
    logging.debug('SSL configuring with context')
    sock = SSL.Connection(ctx)
    logging.debug('SSL connect(%s)', server_address)
    try:
      sock.connect(server_address)
    except SSL.SSLError, e:
      raise SimianClientError('SSL error: %s' % str(e))
    except Checker.SSLVerificationError, e:
      raise SimianClientError('SSLVerificationError: %s' % str(e))
    logging.debug('SSL connected %s', server_address)
    self.sock = sock
    # Note we are dropping HTTP CONNECT tunnel support by not handling
    # _tunnel_* options here, see original HTTPConnection.connect().
class HttpsClient(object):
  """Connect to a http or https service.

  Defaults to https unless if overridden with a URL-style hostname,
  e.g. "http://...."
  """

  def __init__(self, hostname, port=None, proxy=None):
    """Init the instance.

    Args:
      hostname: str, hostname or URL-style string, see _LoadHost().
      port: int, optional, port to connect to.
      proxy: str, optional, "host:port" formatted HTTP proxy.
    """
    self._LoadHost(hostname, port, proxy)
    self._progress_callback = None
    self._ca_cert_chain = None

  def SetProgressCallback(self, fn):
    """Set progress callback; handed to the connection in _Connect()."""
    self._progress_callback = fn

  def SetCACertChain(self, certs):
    """Set the CA certificate chain to verify SSL server certs.

    Args:
      certs: str, one or more X509 certificates concatenated after
          another
    """
    self._ca_cert_chain = certs

  def _LoadHost(self, hostname, port=None, proxy=None):
    """Load hostname and port to connect to.

    Args:
      hostname: str, like a URL or a hostname string. Examples:
          'http://foo' 'http://foo:port' 'https://foo' 'foo:port' 'foo'
      port: int, optional, port to connect to, which will be overridden by
          any port specified in the hostname str.
      proxy: str, optional, "host:port" formatted HTTP proxy
    Raises:
      Error: if args are malformed
    """
    logging.debug('LoadHost(%s, %s)', hostname, port)
    # unicode causes problems later on the socket level. rid ourselves of it.
    hostname = str(hostname)
    if proxy is not None:
      proxy = str(proxy)
    elif proxy is None:
      # No explicit proxy given: fall back to the proxy environment vars.
      if os.environ.get('HTTPS_PROXY'):
        proxy = str(os.environ['HTTPS_PROXY'])
      elif os.environ.get('http_proxy'):
        proxy = str(os.environ['http_proxy'])
    # note: defaulting to https when no scheme is given.
    if not hostname.startswith('http'):
      hostname = 'https://%s' % hostname
    (scheme, netloc, unused_path, unused_query, unused_frag
    ) = urlparse.urlsplit(hostname)
    (hostname, tmp_port) = urllib.splitport(netloc)
    if tmp_port:
      # A port embedded in the hostname wins over the port argument.
      port = tmp_port
    use_https = False
    if scheme == 'https':
      use_https = True
    if port:
      try:
        port = int(port)
      except TypeError:
        # NOTE(review): int() on a malformed string raises ValueError, not
        # TypeError, so a bad port like 'abc' would escape this handler --
        # confirm whether ValueError should be caught here too.
        raise Error('invalid port value %s' % str(port))
      # Drop scheme-default ports so netloc stays canonical below.
      if port == 80 and not use_https:
        port = None
      if port == 443 and use_https:
        port = None
    self.hostname = hostname
    self.port = port
    self.netloc = self.hostname
    self.use_https = use_https
    if self.port and self.port != 80 and self.port != 443:
      self.netloc = '%s:%d' % (self.netloc, self.port)
    logging.debug('LoadHost(): hostname = %s, port = %s, use_https = %s',
                  self.hostname, self.port, self.use_https)
    self.proxy_hostname = None
    self.proxy_port = None
    self.proxy_use_https = False
    if proxy:
      u = urlparse.urlparse(proxy)
      if u.scheme in ['https', 'http']:
        self.proxy_use_https = u.scheme == 'https'
        (self.proxy_hostname, self.proxy_port) = urllib.splitport(u.netloc)
      else:
        # Bare "host:port" proxy string without a scheme.
        (self.proxy_hostname, self.proxy_port) = urllib.splitport(proxy)
      if not self.proxy_port:
        raise Error('proxy does not specify port: %s', proxy)
      self.proxy_port = int(self.proxy_port)
      logging.debug('LoadHost(): proxy host = %s, proxy port = %s',
                    self.proxy_hostname, self.proxy_port)

  def _AdjustHeaders(self, unused_headers):
    """Adjust headers before a request.

    Intended for override in subclasses to inject headers.
    """
    return

  def _Connect(self):
    """Return a HTTPSConnection object.

    Returns:
      HTTPConnection object
    Raises:
      SimianClientError: on a socket-level error during connect.
    """
    conn_args = (self.hostname, self.port)
    if self.proxy_hostname:
      # With a proxy configured, the TCP connection goes to the proxy.
      conn_args = (self.proxy_hostname, self.proxy_port)
      use_https = self.proxy_use_https
    else:
      use_https = self.use_https
    if use_https:
      conn = HTTPSMultiBodyConnection(*conn_args)
    else:
      conn = HTTPMultiBodyConnection(*conn_args)
    # NOTE(user): at this point it would be nice to copy our debug level
    # into the http connection instance with set_debuglevel(). however the
    # debug is printed to stdout, which will foul up our clients.
    if self._progress_callback is not None:
      conn.SetProgressCallback(self._progress_callback)
    if use_https:
      if self._ca_cert_chain is not None:
        conn.SetCACertChain(self._ca_cert_chain)
    try:
      conn.connect()
    except httplib.socket.error, e:
      raise SimianClientError('_Connect() httplib.socket.error: %s' % str(e))
    return conn

  def _GetResponse(self, conn, output_file=None):
    """Obtain a response from the connection and interpret it.

    Args:
      conn: HTTP{,S}Connection
      output_file: file, optional, file to write response body to
    Returns:
      Response instance
    """
    response = conn.getresponse()
    headers = response.getheaders()
    status = response.status
    reason = response.reason
    body_len = 0
    read_len = 8192  # some arbitrary block size
    if output_file:
      # Stream the body to the file in chunks; Response.body stays None.
      buf = response.read(read_len)
      while buf:
        body_len += len(buf)
        output_file.write(buf)
        buf = response.read(read_len)
      body = None
    else:
      body = response.read()
      body_len = len(body)
    return Response(
        status=status, reason=reason,
        headers=headers, body=body, body_len=body_len)

  def _Request(self, method, conn, url, body=None, headers=None):
    """Make a https request on the supplied connection.

    Args:
      method: str, like 'GET' or 'POST'
      conn: HTTP{,S}Connection
      url: str, url to connect to, starting with the first /
      body: str or dict or file, optional, body to send with request
      headers: dict, dictionary of headers to supply
    """
    # Dict bodies are form-encoded; other types pass through to request().
    if body is not None and type(body) is dict:
      body = urllib.urlencode(body)
    if headers is None:
      headers = {}
    if 'User-Agent' not in headers:
      headers['User-Agent'] = 'gzip'
    self._AdjustHeaders(headers)
    # smash url to str(), in case unicode has slipped in, which never
    # sends properly.
    conn.request(method, str(url), body=body, headers=headers)

  def _DoRequestResponse(
      self, method, url, body=None, headers=None, output_file=None):
    """Connect to hostname, make a request, obtain response.

    Args:
      method: str, like 'GET' or 'POST'
      url: str, url like '/foo.html', not 'http://host/foo.html'
      body: str or dict or file, optional, body to send with request
      headers: dict, optional, headers to send with request
      output_file: file, optional, file to write response body to
    Returns:
      Response instance
    Raises:
      HTTPError: if a connection level error occured
    """
    try:
      suffix = self.use_https * 's'
      logging.debug('Connecting to http%s://%s:%s',
                    suffix, self.hostname, self.port)
      conn = self._Connect()
      # if proxy is in use, request the full URL including host.
      if self.proxy_hostname:
        url = 'http%s://%s%s' % (self.use_https * 's', self.netloc, url)
      logging.debug('Requesting %s %s', method, url)
      self._Request(method, conn, url, body=body, headers=headers)
      logging.debug('Waiting for response')
      response = self._GetResponse(conn, output_file=output_file)
      logging.debug('Response status %d', response.status)
      return response
    except httplib.HTTPException, e:
      raise HTTPError(str(e))
    except IOError as e:
      raise HTTPError(str(e))

  def Do(
      self, method, url,
      body=None, headers=None, output_filename=None,
      retry_on_status=DEFAULT_RETRY_HTTP_STATUS_CODES,
      attempt_times=DEFAULT_HTTP_ATTEMPTS, _open=open):
    """Make a request and return the response.

    Args:
      method: str, like 'GET' or 'POST'
      url: str, url like '/foo.html', not 'http://host/foo.html'
      body: str or dict or file, optional, body to send with request
      headers: dict, optional, headers to send with request
      output_filename: str, optional, filename to write response body to
      retry_on_status: list, default (500, 502, etc.), int status codes to
          retry upon receiving.
      attempt_times: int, default 4, how many times to attempt the request
      _open: func, optional, default builtin open, to open output_filename
    Returns:
      Response object
    Raises:
      NotImplementedError: if an unknown method is supplied
      HTTPError: if a connection level error occured
    """
    if method not in ['GET', 'POST', 'PUT', 'DELETE']:
      raise NotImplementedError('HTTP method %s' % method)
    if headers is None:
      headers = {}
    if method == 'POST' and body is None:
      body = ''  # 10.9 workaround
    if output_filename:
      output_file = _open(output_filename, 'w')
    else:
      output_file = None
    n = 0
    while n < attempt_times:
      # Linear backoff: sleeps 0s, 5s, 10s, ... before successive attempts.
      time.sleep(n * 5)
      n += 1
      logging.debug('Do(%s, %s) try #%d', method, url, n)
      try:
        response = self._DoRequestResponse(
            method, url, body=body, headers=headers, output_file=output_file)
      except HTTPError:
        logging.warning('HTTPError in Do(%s, %s)', method, url)
        if n == attempt_times:
          # Out of attempts: let the final connection error propagate.
          raise
      else:
        if response.status not in retry_on_status:
          break
        logging.warning('Retry status hit for Do(%s, %s)', method, url)
    if output_filename:
      output_file.close()
    return response

  def DoMultipart(
      self, url, params, filename, input_filename=None, input_file=None):
    """Make a form/multipart POST request and return the response.

    Args:
      url: str, url like '/foo.html', not 'http://host/foo.html'
      params: dict, text parameters to send as text/plain form elements
      filename: str, filename to be supplied in headers, it is NOT read from
      input_filename: str, optional, filename to read from
      input_file: file, optional, file object to read from
    Returns:
      Response object
    Raises:
      Error: if input is invalid
      HTTPError: if a connection level error occured
    """
    if not input_filename and not input_file:
      raise Error('must supply input_filename or input_file')
    boundary = mimetools.choose_boundary()
    content_type = 'application/octet-stream'
    headers = {
        'Content-Type': 'multipart/form-data; boundary=%s' % boundary,
    }
    close_input_file = False
    if input_file is None:
      # We opened the file, so we are responsible for closing it.
      close_input_file = True
      input_file = open(input_filename, 'r')
    crlf = '\r\n'
    body = []
    # TODO(user): This method should support sending multiple files,
    # not just one.
    tmp_body = []
    tmp_body.append('--%s' % boundary)
    tmp_body.append(
        ('Content-Disposition: form-data; name="file"; '
         'filename="%s"' % filename))
    tmp_body.append('Content-Type: %s' % content_type)
    tmp_body.append('')
    body.append(crlf.join(tmp_body))
    body.append(crlf)
    # The file object itself goes into the multibody list; the connection
    # streams it in chunks instead of loading it into memory.
    body.append(input_file)
    body.append(crlf)
    tmp_body = []
    for k, v in params.iteritems():
      tmp_body.append('--%s' % boundary)
      tmp_body.append(
          'Content-Disposition: form-data; name="%s"' % k)
      tmp_body.append('Content-type: text/plain; charset=utf-8')
      tmp_body.append('')
      tmp_body.append(v)
    body.append(crlf.join(tmp_body))
    # Closing multipart boundary: "--boundary--".
    body.append('%s--%s--%s' % (crlf, boundary, crlf))
    body.append(crlf)
    try:
      response = self.Do('POST', url, body=body, headers=headers)
    except:
      # Re-raise anything, but close the file first if we opened it.
      if close_input_file:
        input_file.close()
      raise
    if close_input_file:
      input_file.close()
    return response
class HttpsAuthClient(HttpsClient):
  """Https client with support for authentication."""

  # Filesystem layout of client-side Puppet SSL material.
  CLIENT_SSL_PATH = CLIENT_SSL_PATH
  PUPPET_CERTS = 'certs'
  PUPPET_PRIVATE_KEYS = 'private_keys'
  PUPPET_CA_CERT = 'ca.pem'
  # Facter cache location per platform; None disables the cache.
  FACTER_CACHE_OSX_PATH = '/Library/Managed Installs/facter.cache'
  FACTER_CACHE_DEFAULT_PATH = None  # disabled

  def __init__(self, *args, **kwargs):
    """Init the instance and load certs, platform and CA settings."""
    super(HttpsAuthClient, self).__init__(*args, **kwargs)
    self._auth1 = None
    self._cookie_token = None
    self._LoadRootCertChain()
    self._PlatformSetup()
    self._LoadCaParameters()

  def _LoadRootCertChain(self):
    """Load CA certificates."""
    logging.debug('_LoadRootCertChain()')
    certs = self.GetSystemRootCACertChain()
    self.SetCACertChain(certs)

  def _PlatformSetup(self):
    """Platform specific instance setup."""
    if platform.system() == 'Darwin':
      self.facter_cache_path = self.FACTER_CACHE_OSX_PATH
    else:
      self.facter_cache_path = self.FACTER_CACHE_DEFAULT_PATH

  def _LoadCaParameters(self):
    """Load CA parameters from settings."""
    logging.debug('LoadCaParameters')
    self._ca_params = util.GetCaParameters(
        settings, omit_server_private_key=True)
    logging.debug('Loaded ca_params')

  def _AdjustHeaders(self, headers):
    """Adjust headers before a request.

    Injects the auth cookie token once DoSimianAuth() has obtained one.

    Args:
      headers: dict, headers that will be passed to a http request.
    """
    if self._cookie_token and headers is not None:
      headers['Cookie'] = self._cookie_token

  def GetSystemRootCACertChain(self):
    """Load system supplied root CA certs.

    Returns:
      str, all x509 root ca certs, or '' if none can be found
    """
    try:
      f = open(settings.ROOT_CA_CERT_CHAIN_PEM_PATH, 'r')
      contents = f.read()
    except (AttributeError, IOError):
      contents = None  # root CA cert chain is optional
    if contents:
      logging.debug('Got Root CA Cert Chain: %s', contents)
      return contents
    else:
      logging.warning('Root CA Cert Chain was EMPTY!')
      return ''

  def _SudoExec(self, argv, expect_rc=None):
    """Run an argv list with sudo.

    Args:
      argv: list, arguments to exec, argv[0] is binary
      expect_rc: int, optional, expected return code from exec
    Returns:
      (str stdout output, str stderr output)
    Raises:
      SudoExecError: if an expect_* condition was not met
    """
    # NOTE(user): sudo 1.6.8p12 on OS X 10.5.8 doesn't understand the '--'
    # argument to stop parsing args. Instead we do this evilness to enforce
    # a fully qualified command as first arg, which will clue sudo in to
    # pass the rest of args to the called program.
    if not argv[0].startswith('/'):
      raise SudoExecError(
          'First argument must have absolute path to run: %s' % argv[0])
    _argv = ['/usr/bin/sudo']  # better would be [sudo, '--']
    _argv.extend(argv)
    logging.info('Executing sudo: %s', ' '.join(_argv))
    p = subprocess.Popen(
        _argv, shell=False,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    rc = p.wait()
    if expect_rc is not None:
      if rc != expect_rc:
        raise SudoExecError(
            'Sudo exec %s: rc %d != %d' % (_argv, rc, expect_rc))
    return (stdout, stderr)

  def _LoadFile(self, filename, requires_root=False, sudo_ok=False):
    """Load a filename's contents into a string.

    Args:
      filename: str, file to load
      requires_root: bool, optional, whether root is required to read
      sudo_ok: bool, optional, whether sudo may be used for root file access
    Returns:
      string file contents
    """
    # Only escalate via sudo when needed, permitted, and not already root.
    if requires_root and os.getuid() != 0 and sudo_ok:
      (s, unused_stderr) = self._SudoExec(['/bin/cat', filename], expect_rc=0)
    else:
      f = open(filename, 'r')
      s = f.read()
      f.close()
    return s

  def _IsFile(self, filename, requires_root=False, sudo_ok=False):
    """Check if a file exists and is a file.

    Args:
      filename: str, filename to check
      requires_root: bool, optional, whether root is required to read
      sudo_ok: bool, optional, whether sudo may be used for root file access
    Returns:
      bool, True if the file exists
    """
    if requires_root and os.getuid() != 0 and sudo_ok:
      try:
        # the path location of bash is more standard than that of test(1)
        self._SudoExec(
            ['/bin/bash', '-c', '[ -f %s ]' % filename],
            expect_rc=0)
        return True
      except SudoExecError:
        return False
    else:
      return os.path.isfile(filename)

  def GetFacter(self, open_fn=open):
    """Return facter contents.

    Args:
      open_fn: func, optional, supply an open() function
    Returns:
      dict, facter contents
    """
    if self.facter_cache_path is None:
      return {}
    if not os.path.isfile(self.facter_cache_path):
      logging.info('GetFacter: facter cache file does not exist.')
      return {}
    facter = {}
    use_facter_cache = False
    try:
      st = os.stat(self.facter_cache_path)
      # if we are root, and the writer of the cache was not root, OR
      # if we are not root, the cache was not written by root, and
      # the cache was not written by ourselves
      if (os.geteuid() == 0 and st.st_uid != 0) or (
          os.geteuid() != 0 and st.st_uid != 0 and os.geteuid() != st.st_uid):
        # don't trust this file. be paranoid.
        logging.info('GetFacter: Untrusted facter cache, ignoring')
        use_facter_cache = False
      else:
        use_facter_cache = True
        cache_mtime = datetime.datetime.fromtimestamp(st.st_mtime)
        logging.debug('GetFacter: facter cache mtime is %s', cache_mtime)
    except OSError, e:
      logging.info('GetFacter: OSError from os.stat(): %s', str(e))
      use_facter_cache = False
    if use_facter_cache:
      try:
        logging.debug('GetFacter: reading recent facter cache')
        f = open_fn(self.facter_cache_path, 'r')
        facter = {}
        line = f.readline()
        while line:
          try:
            # Cache lines split into (key, separator, value); lines that do
            # not split into three parts raise ValueError and are skipped.
            (key, unused_sep, value) = line.split(' ', 2)
            value = value.strip()
            facter[key] = value
          except ValueError:
            logging.info('GetFacter: ignoring facter cache line: %s', line)
          line = f.readline()
        f.close()
        logging.debug('GetFacter: read %d entities', len(facter))
      except (EOFError, IOError), e:
        logging.warning('GetFacter: error %s', str(e))
        facter = {}
    return facter

  def _GetPuppetSslDetails(self, cert_fname=None, interactive_user=False):
    """Get Puppet SSL details.

    Args:
      cert_fname: str, optional, certification filename.
      interactive_user: bool, optional, default False,
          True if the client user an interactive user who can be prompted
          for auth.
    Returns:
      dict = {
          'cert': str, X509 format client certificate in PEM format,
          'ca_cert': str, X509 format CA certificate in PEM format,
          'priv_key': str, X509 format private key in PEM format,
          'cn': str, commonName of this client's certificate
      }
      or {} if the details cannot be read.
    """
    # TODO(user): unit test the puppet ssl cert harvesting functions.
    logging.debug('_GetPuppetSslDetails(%s)', cert_fname)
    certs_path = os.path.join(self.CLIENT_SSL_PATH, self.PUPPET_CERTS)
    best_cert = None
    priv_key = False
    output = {}
    cert_name = None
    if not cert_fname:  # if cert filename is not passed, check facter.
      try:
        facts = self.GetFacter()
      except FacterError:
        # don't give up, facter fails from time to time.
        facts = {}
      cert_name = facts.get('certname', None)
      if cert_name:
        cert_name = cert_name.strip()
        logging.debug('Certname from facter: "%s"', cert_name)
        cert_name = cert_name.lower()
        cert_fname = '%s.pem' % cert_name
      else:
        logging.debug('Error obtaining certname from facter')
        cert_fname = None
    if cert_fname:
      try:
        # attempt to get the cert passed as cert_fname,
        # or returned from facter.
        self._ValidatePuppetSslCert(certs_path, cert_fname)
        best_cert = cert_fname
      except PuppetSslCertError:
        # could not harvest the cert facter tells us to use, look for others.
        logging.error('Failed to harvest Puppet SSL cert facter specified.')
    if not best_cert:
      best_cert = self._GetNewestPuppetSslCert()
    # found, using the same filename look for the private cert.
    if best_cert:
      cn = best_cert.rsplit('.', 1)[0]
      priv_key = os.path.join(
          self.CLIENT_SSL_PATH, self.PUPPET_PRIVATE_KEYS, best_cert)
      logging.debug('_GetPuppetSslDetails priv should be %s', priv_key)
      # is it there?
      if self._IsFile(priv_key, requires_root=True, sudo_ok=interactive_user):
        output['cn'] = cn
        output['cert'] = self._LoadFile(os.path.join(
            self.CLIENT_SSL_PATH, self.PUPPET_CERTS, best_cert))
        output['priv_key'] = self._LoadFile(
            priv_key, requires_root=True, sudo_ok=interactive_user)
      else:
        logging.debug('_GetPuppetSslDetails not IsFile %s', priv_key)
    # NOTE(user): There is a maximum size of a single syslog message
    # under OS X on Python (the exact value of which seems to depend on OS
    # X version)
    if 'headers' in output:
      logging.info('Output headers = %s', output['headers'])
    return output

  def _ValidatePuppetSslCert(self, cert_dir_path, cert_fname):
    """Validates and returns true if a given Puppet SSL cert is valid.

    Args:
      cert_dir_path: str path to cert dir.
      cert_fname: str filename of the cert.
    Returns:
      Boolean, True if the cert is validated.
    Raises:
      PuppetSslCertError: there was an error reading the cert.
    """
    required_issuer = self._ca_params.required_issuer
    logging.debug(
        '_ValidatePuppetSslCert: required_issuer %s', required_issuer)
    try:
      cert_path = os.path.join(cert_dir_path, cert_fname)
      logging.debug('_ValidatePuppetSslCert: %s', cert_path)
      f = open(cert_path, 'r')
      s = f.read()
      f.close()
      x = x509.LoadCertificateFromPEM(s)
      issuer = x.GetIssuer()
      logging.debug('Looking at issuer %s', issuer)
      # Check issuer match.
      if issuer != required_issuer:
        # no match at all.
        msg = 'Skipping cert %s, unknown issuer' % cert_fname
        logging.warning(msg)
        logging.warning(
            'Expected: "%s" Received: "%s"', required_issuer, issuer)
        raise PuppetSslCertError(msg)
    except IOError, e:
      logging.debug('Skipped cert %s, IO Error %s', cert_fname, str(e))
      raise PuppetSslCertError(str(e))
    except OSError, e:
      logging.debug('Skipped cert %s, OS Error %s', cert_fname, str(e))
      raise PuppetSslCertError(str(e))
    except x509.Error, e:
      logging.debug('Skipped cert %s, x509 error %s', cert_fname, str(e))
      raise PuppetSslCertError(str(e))
    return True

  def _GetNewestPuppetSslCert(self):
    """Harvests the newest Puppet SSL cert in the public certs directory.

    This directory is world readable so no increased privileges will be
    required.

    Returns:
      str newest cert filename, or None if none were found.
    """
    newest_cert = None
    newest_cert_timestamp = None
    certs_path = os.path.join(self.CLIENT_SSL_PATH, self.PUPPET_CERTS)
    certs = os.listdir(certs_path)
    logging.debug(
        '_GetNewestPuppetSslCert found certs %s', ' '.join(certs))
    for cert_fname in certs:
      # Only consider .pem files, excluding the CA cert itself.
      if cert_fname != self.PUPPET_CA_CERT and cert_fname.endswith('.pem'):
        try:
          self._ValidatePuppetSslCert(certs_path, cert_fname)
        except PuppetSslCertError:
          continue
        cert_timestamp = os.path.getmtime(os.path.join(certs_path, cert_fname))
        if not newest_cert_timestamp or cert_timestamp > newest_cert_timestamp:
          logging.debug(
              '_GetPuppetSslDetails found cert %s with timestamp %s',
              cert_fname, cert_timestamp)
          newest_cert_timestamp = cert_timestamp
          newest_cert = cert_fname
        else:
          logging.debug(
              '_GetPuppetSslDetails skipping cert %s with older timestamp %s',
              cert_fname, cert_timestamp)
        # don't break here; need to exhaustively check the dir for newest cert.
    return newest_cert

  def _InitializeAuthClass(self, interactive_user=False, puppet_ssl=True):
    """Instantiate and configure an Auth1Client class.

    Args:
      interactive_user: bool, optional, default False,
          True if the client user an interactive user who can be prompted
          for auth.
      puppet_ssl: bool, optional, default True,
          True if the client should obtain SSL certs from Puppet
    Raises:
      SimianClientError: If SSL details from Puppet could not be harvested
    """
    # Idempotent: only build the auth class once per instance.
    if self._auth1 is not None:
      return
    auth1 = auth_client.AuthSimianClient()
    if puppet_ssl:
      o = self._GetPuppetSslDetails(interactive_user=interactive_user)
      if not o:
        raise SimianClientError('Could not obtain SSL details')
      # Load the CA parameters after GetPuppetSslDetails figured out
      # which CA settings are optimal to use on this client.
      auth1.LoadCaParameters(settings)
      auth1.LoadSelfKey(o['priv_key'])
      auth1.LoadSelfCert(o['cert'])
    else:
      auth1.LoadCaParameters(settings)
    self._auth1 = auth1

  def DoSimianAuth(self, interactive_user=None):
    """Do Simian authentication.

    Args:
      interactive_user: bool, optional, default based on current tty,
          True if the client user an interactive user who can be prompted
          for auth.
    Raises:
      SimianServerError: an error occurs on the server
      SimianClientError: an error occurs on the client
    """
    if interactive_user is None:
      # Default: treat a tty-attached stdin as an interactive user.
      interactive_user = os.isatty(sys.stdin.fileno())
    self._InitializeAuthClass(interactive_user)
    # Step 0 - acquire a client nonce
    self._auth1.Input()
    cn = self._auth1.Output()
    # Generate /auth URL
    auth_url = '/auth'
    if self._ca_params.ca_id:
      auth_url = '%s?ca_id=%s' % (auth_url, self._ca_params.ca_id)
    # Step 1 - send client nonce to server
    response = self.Do('POST', auth_url, {'n': cn})
    # Step 1 return - look at server message output
    if response.status != 200:
      raise SimianServerError(
          'Auth step 1: %d %s' % (response.status, response.body))
    self._auth1.Input(m=response.body)
    o = self._auth1.Output()
    if not o:
      raise SimianClientError('Auth error: %s' % (
          ' '.join(self._auth1.ErrorOutput())))
    # Step 2 - send signed message to server
    response = self.Do('POST', auth_url, {'s': o['s'], 'm': o['m']})
    # Step 2 return - verify
    if response.status != 200:
      raise SimianServerError('Auth step 2')
    # Step 3 - load response
    self._auth1.Input(t=response.body)
    if not self._auth1.AuthStateOK():
      raise SimianClientError('Auth failed: %s' % (
          ' '.join(self._auth1.ErrorOutput())))
    # Success
    self._cookie_token = self._GetAuthTokenFromHeaders(response.headers)

  def _GetAuthTokenFromHeaders(self, headers):
    """Parses headers dict to return string auth token.

    Args:
      headers: HTTP response headers in dict-like object.
    Returns:
      string Simian Auth Token.
    Raises:
      SimianClientError: no token was found.
    """
    # Copy so the cookie value is not written to the logs below.
    sanitized_headers = headers.copy()
    # NOTE(review): this del raises KeyError when no 'set-cookie' header is
    # present, preempting the SimianClientError raised below -- confirm
    # whether that is intended.
    del sanitized_headers['set-cookie']
    logging.info('headers = %s', sanitized_headers)
    tokens = headers.get('set-cookie', None)
    if tokens is None:
      raise SimianClientError('No token supplied on cookie')
    tokens = tokens.split(',')  # split multiple cookies
    for token in tokens:
      if token.startswith(auth.AUTH_TOKEN_COOKIE):
        logging.debug('Found cookie token.')
        return token
    raise SimianClientError('No recognizable token found in cookies')
class SimianClient(HttpsAuthClient):
"""Client to connect to Simian server."""
def __init__(self, hostname=None, port=None, root_ok=False):
  """Init the instance.

  Args:
    hostname: str, optional, server hostname; defaults to SERVER_HOSTNAME.
    port: int, optional, server port; defaults to SERVER_PORT.
    root_ok: bool, optional, default False, True to allow running as root.
  Raises:
    SimianClientError: if running as root and root_ok is False.
  """
  if hostname is None:
    hostname = SERVER_HOSTNAME
    # Remember that the default was used; see IsDefaultHostClient().
    self._default_hostname = True
  else:
    self._default_hostname = False
  if port is None:
    port = SERVER_PORT
  logging.debug(
      'SimianClient.__init__(%s [default=%s], %s, %s)',
      hostname, self._default_hostname, port, root_ok)
  self._user = self._GetLoggedOnUser()
  if self._user == 'root' and not root_ok:
    raise SimianClientError('Simian client must not be run as root!')
  super(SimianClient, self).__init__(hostname, port)
def IsDefaultHostClient(self):
  """Report whether this client was created with the default hostname.

  Returns:
    bool, True when __init__ fell back to SERVER_HOSTNAME.
  """
  return self._default_hostname
def _SimianRequest(
self, method, url, body=None, headers=None, output_filename=None,
full_response=False):
"""Make a request and return the body if successful.
Args:
method: str, HTTP method to use, like GET or POST.
url: str, url to connect to, like '/foo/1'
body: str or file or dict, optional, body of request
headers: optional dict headers to send with the request.
output_filename: str, optional, filename to write response body to
full_response: bool, default False, return response object
Returns:
if output_filename is not supplied:
if full_response is True:
Response instance
else:
str, body received over http
otherwise:
None
Raises
SimianServerError: if the Simian server returned an error (status != 200)
"""
try:
response = self.Do(
method, url, body=body, headers=headers,
output_filename=output_filename)
except HTTPError, e:
raise SimianServerError(str(e))
if response.IsSuccess():
if not full_response:
return response.body
else:
return response
else:
raise SimianServerError(response.status, response.reason, response.body)
def _GetLoggedOnUser(self):
"""Returns the username of the logged on user."""
if sys.platform == 'win32':
return os.getenv('USERNAME')
else:
return os.getenv('LOGNAME')
def GetCatalog(self, name):
"""Get a catalog."""
return self._SimianRequest('GET', '/catalog/%s' % name)
def GetManifest(self, name):
"""Get a manifest."""
return self._SimianRequest('GET', '/manifest/%s' % name)
def GetPackage(self, name, output_filename=None):
"""Get a package.
Args:
name: str, package name
output_filename: str, optional, filename to write response body to
Returns:
See _SimianRequest
"""
return self._SimianRequest(
'GET', '/pkgs/%s' % urllib.quote(name),
output_filename=output_filename)
def GetPackageInfo(self, filename, get_hash=False):
"""Get package info.
Args:
filename: str, name of packageinfo
get_hash: bool, default False, request that the server include
a X-Pkgsinfo-Hash with the response, a sha256 hash of the pkginfo.
Returns:
if not request_hash, str pkginfo XML
if request_hash, tuple of (str sha256 hash, str pkginfo XML)
Raises:
SimianServerError: if an error occured on the Simian server
"""
url = '/pkgsinfo/%s' % urllib.quote(filename)
if get_hash:
url = '%s?hash=1' % url
response = self._SimianRequest('GET', url, full_response=True)
if get_hash:
if 'x-pkgsinfo-hash' not in response.headers:
logging.debug(
'GET %s returned headers = %s', url, str(response.headers))
raise SimianServerError('No hash was supplied with pkginfo')
return response.headers['x-pkgsinfo-hash'], response.body
else:
return response.body
def DownloadPackage(self, filename):
"""Downloads a package.
Writes the package with the same filename into the current directory.
Args:
filename: str filename of the package to download.
Returns:
None
Raises:
SimianServerError: if the Simian server returned an error (status != 200)
"""
return self._SimianRequest(
'GET', '/pkgs/%s' % urllib.quote(filename),
output_filename=filename)
def GetPackageMetadata(
self, install_types=None, catalogs=None, filename=None):
"""Gets a list of all packages of given install_types and catalogs.
Args:
install_types: list of string install types.
catalogs: list of string catalogs.
filename: str filename of the package.
Returns:
str body from response.
Raises:
SimianServerError: if the Simian server returned an error (status != 200)
"""
query = []
if install_types:
query.append('install_types=%s' % install_types)
if catalogs:
query.append('catalogs=%s' % catalogs)
if filename:
query.append('filename=%s' % filename)
query = '&'.join(query)
return self._SimianRequest('GET', '/pkgsinfo/?%s' % query)
def PostReport(self, report_type, params, feedback=False):
"""Post a report to the server.
Args:
report_type: str, like 'install_report'
params: dict, parameters to pass
feedback: bool, default False, request feedback response from server
Returns:
str body from response
Raises:
SimianServerError: if the Simian server returned an error (status != 200)
"""
body = urllib.urlencode(params, doseq=True)
body = '_report_type=%s&%s' % (report_type, body)
if feedback:
body = '%s&_feedback=1' % (body)
return self._SimianRequest('POST', '/reports', str(body))
def PostReportBody(self, body, feedback=False):
"""Post a pre-encoded report to the server.
Args:
body: str, the report body, urlencoded, it should contain
a _report_type value!
feedback: bool, default False, request feedback response from server
Returns:
str body from response
Raises:
SimianServerError: if the Simian server returned an error (status != 200)
"""
url = '/reports'
if feedback:
body = '%s&_feedback=1' % str(body)
else:
body = str(body)
return self._SimianRequest('POST', url, body)
def UploadFile(self, file_path, file_type, _open=open):
"""Uploads a given log file to the server.
Args:
file_path: str, path of log file to upload.
file_type: str, type of file being uploaded, like 'log'.
_open: func, optional, default builtin open, to open file_path.
"""
if os.path.isfile(file_path):
logging.debug('UploadFile uploading file: %s', file_path)
file_handle = _open(file_path, 'r')
file_name = os.path.basename(file_path)
url = '/uploadfile/%s/%s' % (file_type, file_name)
try:
self.Do('PUT', url, file_handle)
finally:
file_handle.close()
else:
logging.error('UploadFile file not found: %s', file_path)
class SimianAuthClient(SimianClient):
  """Client perform authentication steps with Simian server."""

  def __init__(self, hostname=None, port=None, root_ok=None):
    # Auth clients are permitted to run as root, so always force
    # root_ok=True regardless of the argument supplied.
    super(SimianAuthClient, self).__init__(hostname, port, root_ok=True)

  def GetAuthToken(self):
    """Obtain a token from the server.

    Returns:
      token str
    """
    self.DoSimianAuth()
    return self._cookie_token

  def SetAuthToken(self, token):
    """Set the token.

    Args:
      token: str, token
    """
    self._cookie_token = str('%s=%s' % (auth.AUTH_TOKEN_COOKIE, token))

  def LogoutAuthToken(self):
    """Given a token, make logout request to end that token.

    Returns:
      True if logout success, False if not
    """
    try:
      self._SimianRequest('GET', '/auth?logout=True')
    except SimianServerError:
      return False
    return True
| {
"content_hash": "a4f626bd9caf9038431a021d6621ca72",
"timestamp": "",
"source": "github",
"line_count": 1482,
"max_line_length": 79,
"avg_line_length": 31.83940620782726,
"alnum_prop": 0.6401898868308397,
"repo_name": "googlearchive/simian",
"id": "8572dfde6055cc5a729d3df929da6b8f3da04d0f",
"size": "47808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/simian/client/client.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "38259"
},
{
"name": "HTML",
"bytes": "97532"
},
{
"name": "JavaScript",
"bytes": "34498"
},
{
"name": "Makefile",
"bytes": "8173"
},
{
"name": "Python",
"bytes": "1422429"
},
{
"name": "Shell",
"bytes": "13277"
}
],
"symlink_target": ""
} |
"""
Run some syntax checks.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import print_function
import os
import sys
sys.path.insert(0, "../src")
sys.path.insert(1, "PyroTests")
def Pyflakes(path, modules):
    """Run the pyflakes checker over the given modules and print a summary.

    Silently skips (with a notice) when pyflakes is not installed.
    """
    try:
        from pyflakes.scripts.pyflakes import checkPath
    except ImportError:
        print("PYFLAKES not installed. Skipping.")
        return
    total = sum(checkPath(os.path.join(path, name)) for name in modules)
    print("%d warnings occurred in pyflakes check" % total)
def main(args):
    """Run the requested checkers (currently only pyflakes) over Pyro4 sources."""
    source_dir = "../src/Pyro4"
    modules = [name for name in os.listdir(source_dir) if name.endswith(".py")]
    selected = args if args else ["flakes"]
    if "flakes" in selected:
        print("-" * 20 + "PYFLAKES")
        Pyflakes(source_dir, modules)
if __name__ == "__main__":
    # Checker names may be passed on the command line; default is pyflakes.
    main(sys.argv[1:])
| {
"content_hash": "543a70f6f2b7fab89d8e200d0d51c2d4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 87,
"avg_line_length": 23.842105263157894,
"alnum_prop": 0.6412803532008831,
"repo_name": "irmen/Pyro4",
"id": "bb5ea9bc3fdee2ea5e8b4c9217964c4604d98dea",
"size": "906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/run_syntaxcheck.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1283"
},
{
"name": "Python",
"bytes": "618799"
},
{
"name": "Shell",
"bytes": "2394"
}
],
"symlink_target": ""
} |
""" A neural chatbot using sequence to sequence model with
attentional decoder.
This is based on Google Translate Tensorflow model
https://github.com/tensorflow/models/blob/master/tutorials/rnn/translate/
Sequence to sequence model by Cho et al.(2014)
Created by Chip Huyen as the starter code for assignment 3,
class CS 20SI: "TensorFlow for Deep Learning Research"
cs20si.stanford.edu
This file contains the code to run the model.
See readme.md for instruction on how to run the starter code.
"""
from __future__ import division
from __future__ import print_function
import argparse
import os
import random
import sys
import time
import numpy as np
import tensorflow as tf
from model import ChatBotModel
import config
import data
def _get_random_bucket(train_buckets_scale):
""" Get a random bucket from which to choose a training sample """
rand = random.random()
return min([i for i in range(len(train_buckets_scale))
if train_buckets_scale[i] > rand])
def _assert_lengths(encoder_size, decoder_size, encoder_inputs, decoder_inputs, decoder_masks):
""" Assert that the encoder inputs, decoder inputs, and decoder masks are
of the expected lengths """
if len(encoder_inputs) != encoder_size:
raise ValueError("Encoder length must be equal to the one in bucket,"
" %d != %d." % (len(encoder_inputs), encoder_size))
if len(decoder_inputs) != decoder_size:
raise ValueError("Decoder length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_inputs), decoder_size))
if len(decoder_masks) != decoder_size:
raise ValueError("Weights length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_masks), decoder_size))
def run_step(sess, model, encoder_inputs, decoder_inputs, decoder_masks, bucket_id, forward_only):
    """ Run one step in training.
    @forward_only: boolean value to decide whether a backward path should be created
    forward_only is set to True when you just want to evaluate on the test set,
    or when you want to the bot to be in chat mode. """
    encoder_size, decoder_size = config.BUCKETS[bucket_id]
    _assert_lengths(encoder_size, decoder_size, encoder_inputs, decoder_inputs, decoder_masks)

    # Feed every encoder/decoder time step and the decoder masks by tensor name.
    feed = {}
    for idx in range(encoder_size):
        feed[model.encoder_inputs[idx].name] = encoder_inputs[idx]
    for idx in range(decoder_size):
        feed[model.decoder_inputs[idx].name] = decoder_inputs[idx]
        feed[model.decoder_masks[idx].name] = decoder_masks[idx]

    # The target past the last decoder step is always padding (zeros).
    feed[model.decoder_inputs[decoder_size].name] = np.zeros(
        [model.batch_size], dtype=np.int32)

    if forward_only:
        # Evaluation/chat: fetch the loss and every step's output logits.
        fetches = [model.losses[bucket_id]]
        for idx in range(decoder_size):
            fetches.append(model.outputs[bucket_id][idx])
    else:
        # Training: fetch the SGD update op, the gradient norm, and the loss.
        fetches = [model.train_ops[bucket_id],
                   model.gradient_norms[bucket_id],
                   model.losses[bucket_id]]

    results = sess.run(fetches, feed)
    if forward_only:
        return None, results[0], results[1:]  # No gradient norm, loss, outputs.
    return results[1], results[2], None      # Gradient norm, loss, no outputs.
def _get_buckets():
    """ Load the dataset into buckets based on their lengths.
    train_buckets_scale is the inverval that'll help us
    choose a random bucket later on.
    """
    test_buckets = data.load_data('test_ids.enc', 'test_ids.dec')
    data_buckets = data.load_data('train_ids.enc', 'train_ids.dec')
    sizes = [len(data_buckets[b]) for b in range(len(config.BUCKETS))]
    print("Number of samples in each bucket:\n", sizes)
    total = sum(sizes)
    # Cumulative fractions in (0, 1]; used later to sample a bucket.
    cumulative = []
    running = 0
    for size in sizes:
        running += size
        cumulative.append(running / total)
    print("Bucket scale:\n", cumulative)
    return test_buckets, data_buckets, cumulative
def _get_skip_step(iteration):
""" How many steps should the model train before it saves all the weights. """
if iteration < 100:
return 30
return 100
def _check_restore_parameters(sess, saver):
    """ Restore the previously trained parameters if there are any. """
    ckpt = tf.train.get_checkpoint_state(os.path.dirname(config.CPT_PATH + '/checkpoint'))
    if not (ckpt and ckpt.model_checkpoint_path):
        print("Initializing fresh parameters for the Chatbot")
        return
    print("Loading parameters for the Chatbot")
    saver.restore(sess, ckpt.model_checkpoint_path)
def _eval_test_set(sess, model, test_buckets):
    """ Evaluate on the test set. """
    for bucket_id in range(len(config.BUCKETS)):
        if not test_buckets[bucket_id]:
            print(" Test: empty bucket %d" % (bucket_id))
            continue
        started = time.time()
        enc_batch, dec_batch, mask_batch = data.get_batch(
            test_buckets[bucket_id], bucket_id, batch_size=config.BATCH_SIZE)
        # forward_only=True: no backward pass during evaluation.
        _, step_loss, _ = run_step(sess, model, enc_batch, dec_batch,
                                   mask_batch, bucket_id, True)
        print('Test bucket {}: loss {}, time {}'.format(
            bucket_id, step_loss, time.time() - started))
def train():
    """ Train the bot """
    test_buckets, data_buckets, train_buckets_scale = _get_buckets()
    # Training needs the backward pass, hence forward_only=False.
    model = ChatBotModel(False, config.BATCH_SIZE)
    model.build_graph()

    saver = tf.train.Saver()
    with tf.Session() as sess:
        print('Running session')
        sess.run(tf.global_variables_initializer())
        _check_restore_parameters(sess, saver)

        iteration = model.global_step.eval()
        total_loss = 0
        while True:
            skip_step = _get_skip_step(iteration)
            bucket_id = _get_random_bucket(train_buckets_scale)
            enc_batch, dec_batch, mask_batch = data.get_batch(
                data_buckets[bucket_id], bucket_id,
                batch_size=config.BATCH_SIZE)
            start = time.time()
            _, step_loss, _ = run_step(
                sess, model, enc_batch, dec_batch, mask_batch, bucket_id, False)
            total_loss += step_loss
            iteration += 1

            if iteration % skip_step == 0:
                # Report average loss since the last report, then checkpoint.
                print('Iter {}: loss {}, time {}'.format(
                    iteration, total_loss / skip_step, time.time() - start))
                start = time.time()
                total_loss = 0
                saver.save(sess, os.path.join(config.CPT_PATH, 'chatbot'),
                           global_step=model.global_step)
                if iteration % (10 * skip_step) == 0:
                    # Every 10 checkpoints, evaluate on the test buckets.
                    _eval_test_set(sess, model, test_buckets)
                    start = time.time()
                sys.stdout.flush()
def _get_user_input():
    """ Get user's input, which will be transformed into encoder input later """
    sys.stdout.write("> ")
    sys.stdout.flush()
    return sys.stdin.readline()
def _find_right_bucket(length):
    """ Find the proper bucket for an encoder input based on its length """
    # Smallest bucket whose encoder size can hold the input.
    fitting = [b for b in range(len(config.BUCKETS))
               if config.BUCKETS[b][0] >= length]
    return min(fitting)
def _construct_response(output_logits, inv_dec_vocab):
    """ Construct a response to the user's encoder input.
    @output_logits: the outputs from sequence to sequence wrapper.
    output_logits is decoder_size np array, each of dim 1 x DEC_VOCAB

    This is a greedy decoder - outputs are just argmaxes of output_logits.
    """
    print(output_logits[0])
    token_ids = [int(np.argmax(logit, axis=1)) for logit in output_logits]
    # Truncate at the first EOS symbol, if any.
    if config.EOS_ID in token_ids:
        token_ids = token_ids[:token_ids.index(config.EOS_ID)]
    # Map token ids back to vocabulary words.
    words = [tf.compat.as_str(inv_dec_vocab[tid]) for tid in token_ids]
    return " ".join(words)
def chat():
    """Interactive chat loop: read a line, run the model, print the reply.

    In chat mode no backward path is created (forward_only=True,
    batch_size=1). Both sides of the conversation are appended to
    config.OUTPUT_FILE.
    """
    _, enc_vocab = data.load_vocab(os.path.join(config.PROCESSED_PATH, 'vocab.enc'))
    inv_dec_vocab, _ = data.load_vocab(os.path.join(config.PROCESSED_PATH, 'vocab.dec'))

    model = ChatBotModel(True, batch_size=1)
    model.build_graph()

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        _check_restore_parameters(sess, saver)
        # 'a+' so transcripts accumulate across chat sessions.
        output_file = open(os.path.join(config.PROCESSED_PATH, config.OUTPUT_FILE), 'a+')
        # Decode from standard input.
        # The largest bucket bounds how long an input we can accept.
        max_length = config.BUCKETS[-1][0]
        print('Welcome to TensorBro. Say something. Enter to exit. Max length is', max_length)
        while True:
            line = _get_user_input()
            # Strip a trailing newline; an empty line ends the session.
            if len(line) > 0 and line[-1] == '\n':
                line = line[:-1]
            if line == '':
                break
            output_file.write('HUMAN ++++ ' + line + '\n')
            # Get token-ids for the input sentence.
            token_ids = data.sentence2id(enc_vocab, str(line))
            if (len(token_ids) > max_length):
                # Too long for any bucket: tell the user and prompt again.
                print('Max length I can handle is:', max_length)
                line = _get_user_input()
                continue
            # Which bucket does it belong to?
            bucket_id = _find_right_bucket(len(token_ids))
            # Get a 1-element batch to feed the sentence to the model.
            encoder_inputs, decoder_inputs, decoder_masks = data.get_batch([(token_ids, [])],
                                                                           bucket_id,
                                                                           batch_size=1)
            # Get output logits for the sentence (forward pass only).
            _, _, output_logits = run_step(sess, model, encoder_inputs, decoder_inputs,
                                           decoder_masks, bucket_id, True)
            response = _construct_response(output_logits, inv_dec_vocab)
            print(response)
            output_file.write('BOT ++++ ' + response + '\n')
        output_file.write('=============================================\n')
        output_file.close()
def main():
    """Parse --mode, prepare data/checkpoint dirs, then train or chat."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', choices={'train', 'chat'},
                        default='train', help="mode. if not specified, it's in the train mode")
    args = parser.parse_args()

    if not os.path.isdir(config.PROCESSED_PATH):
        data.prepare_raw_data()
        data.process_data()
    print('Data ready!')
    # create checkpoints folder if there isn't one already
    data.make_dir(config.CPT_PATH)

    # argparse restricts --mode to these two values, so dispatch is total.
    dispatch = {'train': train, 'chat': chat}
    dispatch[args.mode]()
if __name__ == '__main__':
    # Entry point: parse --mode and run training or interactive chat.
    main()
| {
"content_hash": "e896f03c4886e54262eef1a80809f29a",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 116,
"avg_line_length": 44.40996168582375,
"alnum_prop": 0.603312915192822,
"repo_name": "swirlingsand/deep-learning-foundations",
"id": "0521bbf6f0f43d10acbdfd1e3cb7d6c455d1db45",
"size": "11591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chatbot/chatbot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1564242"
},
{
"name": "Jupyter Notebook",
"bytes": "110006555"
},
{
"name": "Python",
"bytes": "194266"
},
{
"name": "Shell",
"bytes": "195"
}
],
"symlink_target": ""
} |
from flask_wtf import Form
from wtforms import TextField, BooleanField, IntegerField
from wtforms.validators import DataRequired, Length
from .models import Warehouse, Item
from mytrade.form.fields import Select2Field
from mytrade.utils import _
class WarehouseForm(Form):
    """Create/edit form for a warehouse; enforces a unique warehouse name."""

    id = IntegerField()
    warehouse_name = TextField(_('Warehouse Name'),
            validators=[DataRequired(), Length(max=25)])
    company_id = Select2Field(_('Company'), default=0, coerce=int)
    address1 = TextField(_('Address1'))
    address2 = TextField(_('Address2'))
    disabled = BooleanField(_('Disabled'))

    # NOTE: a former no-op __init__ (it only called super) was removed;
    # behavior is unchanged because Form.__init__ is inherited directly.

    def validate(self):
        """Run the standard field validators, then check name uniqueness.

        Returns:
            bool: True when all field validators pass and no *other*
            warehouse (records with the same id are excluded, so editing
            a warehouse does not collide with itself) uses this name.
        """
        if not super(WarehouseForm, self).validate():
            return False
        duplicate = Warehouse.query.filter(
            Warehouse.warehouse_name == self.warehouse_name.data,
            Warehouse.id != self.id.data).first()
        if duplicate:
            self.warehouse_name.errors.append(_("Warehouse Name already existed"))
            return False
        return True
class ItemForm(Form):
    """Create/edit form for an item; enforces unique item code and name."""

    id = IntegerField()
    item_code = TextField(_('Item Code'),
            validators=[DataRequired(), Length(min=3, max=25)])
    item_name = TextField(_('Item Name'),
            validators=[DataRequired(), Length(max=25)])
    item_group_id = Select2Field(_('Item Group'), default=0, coerce=int)
    default_unit_id = Select2Field(_('Default Unit'), default=0, coerce=int)
    default_warehouse_id = Select2Field(_('Default Warehouse'), default=0, coerce=int)
    description = TextField(_('Description'))

    # NOTE: a former no-op __init__ (it only called super) was removed;
    # behavior is unchanged because Form.__init__ is inherited directly.

    def validate(self):
        """Run field validators, then uniqueness checks on code and name.

        Records with the same id are excluded from both queries so that
        editing an item does not collide with itself.

        Returns:
            bool: True when the form is valid and both code and name are
            unused by any other item.
        """
        if not super(ItemForm, self).validate():
            return False
        duplicate_code = Item.query.filter(
            Item.item_code == self.item_code.data,
            Item.id != self.id.data).first()
        if duplicate_code:
            self.item_code.errors.append(_("Item Code already existed"))
            return False
        duplicate_name = Item.query.filter(
            Item.item_name == self.item_name.data,
            Item.id != self.id.data).first()
        if duplicate_name:
            self.item_name.errors.append(_("Item Name already existed"))
            return False
        return True
| {
"content_hash": "b27917eeafacc972ededa2c73801427b",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 132,
"avg_line_length": 33.816901408450704,
"alnum_prop": 0.6384839650145773,
"repo_name": "hellwen/mytrade",
"id": "a1d0b5dbfacb7141442cb3682a960d1bf5006d5a",
"size": "2425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mytrade/stock/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "20627"
},
{
"name": "HTML",
"bytes": "32445"
},
{
"name": "JavaScript",
"bytes": "260413"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "208132"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from twisted.python.components import proxyForInterface
from twisted.web.iweb import IResponse
from requests.cookies import cookiejar_from_dict
from treq.content import content, json_content, text_content
class _Response(proxyForInterface(IResponse)):
    """IResponse proxy adding treq content helpers and cookie access."""

    def __init__(self, original, cookiejar):
        self.original = original
        self._cookiejar = cookiejar

    def content(self):
        """Deferred firing with the raw response body as bytes."""
        return content(self.original)

    def json(self, *args, **kwargs):
        """Deferred firing with the body decoded as JSON."""
        return json_content(self.original, *args, **kwargs)

    def text(self, *args, **kwargs):
        """Deferred firing with the body decoded as text."""
        return text_content(self.original, *args, **kwargs)

    def history(self):
        """List of prior _Response objects (redirects), oldest first."""
        if not hasattr(self, "previousResponse"):
            raise NotImplementedError(
                "Twisted < 13.1.0 does not support response history.")
        chain = []
        cursor = self
        while cursor.previousResponse is not None:
            chain.append(_Response(cursor.previousResponse, self._cookiejar))
            cursor = cursor.previousResponse
        chain.reverse()
        return chain

    def cookies(self):
        """A requests-style CookieJar copied from this response's jar."""
        jar = cookiejar_from_dict({})
        if self._cookiejar is not None:
            for cookie in self._cookiejar:
                jar.set_cookie(cookie)
        return jar
| {
"content_hash": "b1c18cf0c9eedde8ad0bc5fd6c4a0ba8",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 70,
"avg_line_length": 29.1875,
"alnum_prop": 0.6331192005710207,
"repo_name": "glyph/treq",
"id": "c71423b9ed4eb4139a083f3b621b64676f1d2a9c",
"size": "1401",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "treq/response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131188"
}
],
"symlink_target": ""
} |
from .client import SaltClient

# __all__ entries must be strings naming the exported attributes;
# putting the class object itself in the list makes
# "from saltclient import *" raise TypeError.
__all__ = ["SaltClient"]
| {
"content_hash": "84a486c7b0ab9ea9b8d4c69e3693422e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 30,
"avg_line_length": 18.333333333333332,
"alnum_prop": 0.7090909090909091,
"repo_name": "cizixs/saltclient",
"id": "26d1f216ea540430774cc980d8325538a977bfab",
"size": "55",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saltclient/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5160"
}
],
"symlink_target": ""
} |
import pipeline_dp
from pipeline_dp import partition_selection
import unittest
from unittest.mock import patch
class PartitionSelectionTest(unittest.TestCase):
    """Checks each strategy enum dispatches to the right pydp factory.

    Bug fix: all three methods were previously named
    ``test_truncated_gemetric`` (also a typo), so the later definitions
    shadowed the earlier ones and only the last test ever ran. Each method
    now has a distinct, correctly spelled name.
    """

    @patch(
        "pydp.algorithms.partition_selection.create_truncated_geometric_partition_strategy"
    )
    def test_truncated_geometric(self, mock_method):
        eps, delta, max_partitions = 2, 1e-3, 10
        partition_selection.create_partition_selection_strategy(
            pipeline_dp.PartitionSelectionStrategy.TRUNCATED_GEOMETRIC, eps,
            delta, max_partitions)
        mock_method.assert_called_once()
        mock_method.assert_called_with(eps, delta, max_partitions)

    @patch(
        "pydp.algorithms.partition_selection.create_laplace_partition_strategy")
    def test_laplace_thresholding(self, mock_method):
        eps, delta, max_partitions = 5, 1e-2, 12
        partition_selection.create_partition_selection_strategy(
            pipeline_dp.PartitionSelectionStrategy.LAPLACE_THRESHOLDING, eps,
            delta, max_partitions)
        mock_method.assert_called_once()
        mock_method.assert_called_with(eps, delta, max_partitions)

    @patch(
        "pydp.algorithms.partition_selection.create_gaussian_partition_strategy"
    )
    def test_gaussian_thresholding(self, mock_method):
        eps, delta, max_partitions = 1, 1e-5, 20
        partition_selection.create_partition_selection_strategy(
            pipeline_dp.PartitionSelectionStrategy.GAUSSIAN_THRESHOLDING, eps,
            delta, max_partitions)
        mock_method.assert_called_once()
        mock_method.assert_called_with(eps, delta, max_partitions)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "c292ec0dd487571aecdd2fcfec8f65d4",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 91,
"avg_line_length": 38.13636363636363,
"alnum_prop": 0.6942789034564958,
"repo_name": "OpenMined/PipelineDP",
"id": "c7b94f8d23fc4ab43474bb46a23f505bd1f6d740",
"size": "2253",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/partition_selection_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "42478"
},
{
"name": "Makefile",
"bytes": "573"
},
{
"name": "Python",
"bytes": "671843"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.db import models
from embed_video.fields import EmbedVideoField
from polymorphic.models import PolymorphicModel
from taggit.managers import TaggableManager
from rlp.projects.models import Project
class Document(PolymorphicModel):
    """Base model for project artifacts (files, images, videos, links)."""

    title = models.CharField(max_length=255)
    description = models.TextField()
    project = models.ForeignKey(Project)
    date_added = models.DateTimeField(auto_now_add=True, db_index=True)
    date_updated = models.DateTimeField(auto_now=True)
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    tags = TaggableManager()

    # Ordered (suffixes, label) pairs consulted by display_type. Checked in
    # order with str.endswith (case-sensitive), exactly matching the
    # behavior of the former if/elif chain.
    _DISPLAY_TYPES = (
        (('.doc', '.docx'), 'Document (Word)'),
        (('.pdf',), 'Document (pdf)'),
        (('.ppt', '.pptx'), 'Slideshow (ppt)'),
        (('.key',), 'Slideshow (Keynote)'),
        (('.txt',), 'Document (txt)'),
        (('.rtf',), 'Document (rtf)'),
        (('.xls', '.xlsx'), 'Spreadsheet (xls)'),
        (('.csv',), 'Spreadsheet (csv)'),
        (('.jpg', '.jpeg'), 'Image (jpg)'),
        (('.png',), 'Image (png)'),
        (('.gif',), 'Image (gif)'),
        (('.avi',), 'Video (avi)'),
        (('.mov',), 'Video (mov)'),
        (('.zip',), 'Compressed File (zip)'),
    )

    class Meta:
        ordering = ['-date_added']

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        """URL of the document detail page within its project."""
        from django.core.urlresolvers import reverse
        return reverse('projects:document_detail', kwargs={
            'pk': self.project.pk,
            'slug': self.project.slug,
            'doc_pk': self.pk
        })

    def get_edit_url(self):
        """URL of the edit page; includes the concrete subclass type."""
        from django.core.urlresolvers import reverse
        return reverse('projects:document_edit', kwargs={
            'pk': self.project.pk,
            'slug': self.project.slug,
            'doc_pk': self.pk,
            'doc_type': self.polymorphic_ctype.model,
        })

    def get_delete_url(self):
        """URL of the delete confirmation page."""
        from django.core.urlresolvers import reverse
        return reverse('projects:document_delete', kwargs={
            'pk': self.project.pk,
            'slug': self.project.slug,
            'doc_pk': self.pk
        })

    @property
    def display_type(self):
        """Human-readable type label derived from the upload's file suffix.

        Falls back to 'Document' for subclasses without an upload or for
        unrecognized suffixes. Video/Link subclasses override this.
        """
        if hasattr(self, 'upload'):
            path = self.upload.path
            for suffixes, label in self._DISPLAY_TYPES:
                if path.endswith(suffixes):
                    return label
        return 'Document'
class File(Document):
    """An uploaded file attached to a project."""

    # Uploads are stored under docs/<year>/<month>/<day>.
    upload = models.FileField(upload_to="docs/%Y/%m/%d")
    # When True, the document is pinned to the top of the project listing.
    working_document = models.BooleanField(default=False,
        verbose_name="Core Project Document (will appear as top-listed document)")
class Image(Document):
    """An uploaded image; dimensions are captured automatically."""

    # height_field/width_field make Django populate the two fields below
    # from the uploaded image on save.
    upload = models.ImageField(upload_to="docs/images/%Y/%m/%d", max_length=255,
        height_field='height', width_field='width')
    height = models.PositiveIntegerField()
    width = models.PositiveIntegerField()
class Video(Document):
    """An embedded video referenced by a YouTube/Vimeo share link."""

    share_link = EmbedVideoField(help_text='Should be a Youtube or Vimeo share link e.g. https://youtu.be/xyz123')

    @property
    def display_type(self):
        # Overrides Document.display_type: videos are always labelled 'Video'.
        return 'Video'
class Link(Document):
    """An external URL attached to a project."""

    url = models.URLField()

    @property
    def display_type(self):
        # Overrides Document.display_type: links are always labelled 'Link'.
        return 'Link'
| {
"content_hash": "5d2022e4c3e15ba227232a45b1c4c72f",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 117,
"avg_line_length": 34.50892857142857,
"alnum_prop": 0.581888745148771,
"repo_name": "HMSBeagle1831/rapidscience",
"id": "f6021158e4d73032258c7e14a3d98c26189f4430",
"size": "3865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rlp/documents/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "115769"
},
{
"name": "HTML",
"bytes": "118911"
},
{
"name": "JavaScript",
"bytes": "13496"
},
{
"name": "Python",
"bytes": "270256"
}
],
"symlink_target": ""
} |
"""
Support for TPLink HS100/HS110/HS200 smart switch.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.tplink/
"""
import logging
import time
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.const import (CONF_HOST, CONF_NAME)
import homeassistant.helpers.config_validation as cv
# Library used to talk to the plug; installed on demand by Home Assistant.
REQUIREMENTS = ['pyHS100==0.2.3']

_LOGGER = logging.getLogger(__name__)

# Keys for the extra state attributes exposed by plugs with energy metering.
ATTR_CURRENT_CONSUMPTION = 'Current consumption'
ATTR_TOTAL_CONSUMPTION = 'Total consumption'
ATTR_DAILY_CONSUMPTION = 'Daily consumption'
ATTR_VOLTAGE = 'Voltage'
ATTR_CURRENT = 'Current'

# host is required; name is optional and falls back to the device's alias.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_NAME): cv.string,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the TPLink switch platform."""
    from pyHS100 import SmartPlug

    plug_host = config.get(CONF_HOST)
    plug_name = config.get(CONF_NAME)
    # Second argument True requests an immediate state update after adding.
    add_devices([SmartPlugSwitch(SmartPlug(plug_host), plug_name)], True)
class SmartPlugSwitch(SwitchDevice):
    """Representation of a TPLink Smart Plug switch."""

    def __init__(self, smartplug, name):
        """Initialize the switch.

        Args:
            smartplug: pyHS100.SmartPlug instance used to control the device.
            name: optional friendly name; falls back to the device's alias.
        """
        self.smartplug = smartplug

        # Use the name set on the device if not set
        if name is None:
            self._name = self.smartplug.alias
        else:
            self._name = name

        self._state = None
        _LOGGER.debug("Setting up TP-Link Smart Plug")
        # Set up emeter cache
        self._emeter_params = {}

    @property
    def name(self):
        """Return the name of the Smart Plug, if any."""
        return self._name

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._state

    def turn_on(self, **kwargs):
        """Turn the switch on."""
        self.smartplug.turn_on()

    def turn_off(self, **kwargs):
        """Turn the switch off.

        Now accepts **kwargs like turn_on does, matching the Home Assistant
        switch entity interface; the previous signature was asymmetric.
        """
        self.smartplug.turn_off()

    @property
    def device_state_attributes(self):
        """Return the state attributes of the device."""
        return self._emeter_params

    def update(self):
        """Update the TP-Link switch's state and energy-meter readings."""
        try:
            self._state = self.smartplug.state == \
                self.smartplug.SWITCH_STATE_ON

            if self.smartplug.has_emeter:
                emeter_readings = self.smartplug.get_emeter_realtime()

                self._emeter_params[ATTR_CURRENT_CONSUMPTION] \
                    = "%.1f W" % emeter_readings["power"]
                self._emeter_params[ATTR_TOTAL_CONSUMPTION] \
                    = "%.2f kW" % emeter_readings["total"]
                self._emeter_params[ATTR_VOLTAGE] \
                    = "%.2f V" % emeter_readings["voltage"]
                self._emeter_params[ATTR_CURRENT] \
                    = "%.1f A" % emeter_readings["current"]

                emeter_statics = self.smartplug.get_emeter_daily()
                try:
                    # "%e" is the space-padded day of month; int() tolerates
                    # the leading space.
                    self._emeter_params[ATTR_DAILY_CONSUMPTION] \
                        = "%.2f kW" % emeter_statics[int(time.strftime("%e"))]
                except KeyError:
                    # device returned no daily history
                    pass

        except OSError:
            _LOGGER.warning('Could not update status for %s', self.name)
| {
"content_hash": "b116a50d60b9f44a71b5c4ce473635fe",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 78,
"avg_line_length": 30.89189189189189,
"alnum_prop": 0.6036745406824147,
"repo_name": "keerts/home-assistant",
"id": "961ee72496e99f0e72dbe2c19e600de39c75a17e",
"size": "3429",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switch/tplink.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1546272"
},
{
"name": "Python",
"bytes": "5270263"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14079"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
	"""Build the Tangible template for the shared Cygnus Mk2 reactor."""
	result = Tangible()
	result.template = "object/tangible/ship/components/reactor/shared_rct_cygnus_mk2.iff"
	result.attribute_template_id = 8
	result.stfName("space/space_item","rct_cygnus_mk2_n")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
"content_hash": "f54491a0cf64b4474b6a471754d0748e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 86,
"avg_line_length": 25.307692307692307,
"alnum_prop": 0.7051671732522796,
"repo_name": "anhstudios/swganh",
"id": "0a6f1e6482bf5324732d8a021f48cc056faf73d1",
"size": "474",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/ship/components/reactor/shared_rct_cygnus_mk2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
from webob import exc
from nova.api.openstack import wsgi
class Controller(object):
    """The Shared IP Groups controller for the OpenStack API.

    Every action is intentionally unimplemented and answers HTTP 501.
    """
    def index(self, req, **kwargs):
        """Return a list of Shared IP Groups for the user."""
        raise exc.HTTPNotImplemented()
    def show(self, req, id, **kwargs):
        """Show in-depth information on a specific Shared IP Group."""
        raise exc.HTTPNotImplemented()
    def update(self, req, id, **kwargs):
        """Reject updates: a Shared IP Group cannot be updated."""
        raise exc.HTTPNotImplemented()
    def delete(self, req, id, **kwargs):
        """Delete a Shared IP Group."""
        raise exc.HTTPNotImplemented()
    def detail(self, req, **kwargs):
        """Return a complete list of Shared IP Groups."""
        raise exc.HTTPNotImplemented()
    def create(self, req, **kwargs):
        """Create a new Shared IP group."""
        raise exc.HTTPNotImplemented()
def create_resource():
    """Build the WSGI resource wrapping the shared-IP-groups controller."""
    controller = Controller()
    return wsgi.Resource(controller)
| {
"content_hash": "5481c2d27e65d20364557e2108a185a0",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 72,
"avg_line_length": 29.257142857142856,
"alnum_prop": 0.6279296875,
"repo_name": "30loops/nova",
"id": "54d0a8334137ec23dfec7d672cb508870346e2a4",
"size": "1699",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/api/openstack/shared_ip_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
My Submissions Question Solution
Rotate an array of n elements to the right by k steps.
For example, with n = 7 and k = 3, the array [1,2,3,4,5,6,7] is rotated to [5,6,7,1,2,3,4].
Note:
Try to come up with as many solutions as you can; there are at least 3 different ways to solve this problem.
[show hint]
Related problem: Reverse Words in a String II
class Solution:
    # @param nums, a list of integer
    # @param k, num of steps
    # @return nothing, please modify the nums list in-place.
    def rotate_1(self, nums, k):
        """Rotate nums right by k steps in-place via slicing (O(n))."""
        n = len(nums)
        if n == 0:
            # Nothing to rotate; also avoids `k % 0` (ZeroDivisionError).
            return
        k = k % n
        nums[:] = nums[n-k:] + nums[:n-k]  # Important!!
    def rotate(self, nums, k):
        """Rotate nums right by k steps in-place via repeated pop/insert."""
        if not nums:
            return
        # Rotating by len(nums) is a no-op, so reduce k first; otherwise a
        # large k does O(k*n) wasted work.
        k %= len(nums)
        while k > 0:
            nums.insert(0, nums.pop())
            k -= 1
# Reference:
# https://leetcode.com/discuss/28613/my-solution-by-using-python
# https://leetcode.com/discuss/27387/summary-of-c-solutions
| {
"content_hash": "4ca35e0727d36b760daf66fbcca01f1e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 103,
"avg_line_length": 31.17241379310345,
"alnum_prop": 0.6161504424778761,
"repo_name": "UmassJin/Leetcode",
"id": "8e8b2057fd549f41cc53c86398ab6a343d1ea4bd",
"size": "904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Array/Rotate_Array.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "717672"
}
],
"symlink_target": ""
} |
"""Experiment class collecting information needed for a single training run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import math
import os
import time
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import export_strategy
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.tpu.python.tpu import tpu_estimator
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
__all__ = ["Experiment"]
class _EvalAndExportListener(basic_session_run_hooks.CheckpointSaverListener):
  """Listener that evaluates and exports a model after creating a checkpoint.

  Waits for the associated `CheckpointSaverHook` to write a checkpoint, then
  evaluates the model from that checkpoint via `eval_fn` and exports it via
  `export_fn` according to the `Experiment`'s export strategies.

  This listener is experimental and may be changed or removed in the future.
  """
  def __init__(self, eval_fn, export_fn, model_dir):
    """Initializes an `EvalAndExportListener`.

    Args:
      eval_fn: callable `(name, checkpoint_path) -> eval_result` evaluating
        the model.
      export_fn: callable `(eval_result, checkpoint_path) -> export_results`
        exporting the model per the configured export strategies.
      model_dir: directory which contains estimator parameters and checkpoints.
    """
    self._eval_fn = eval_fn
    self._export_fn = export_fn
    self._model_dir = model_dir
    # Most recently seen checkpoint path, plus the cached outcome of the last
    # evaluate/export cycle.
    self._latest_path = None
    self._eval_result = None
    self._export_results = None
  def after_save(self, session, global_step_value):
    """Evaluates and exports the model after a checkpoint is created."""
    # Look up the newest checkpoint once and cache its path to avoid
    # duplicate (potentially slow, e.g. GCS) searches.
    logging.info("Checking for checkpoint in %s", self._model_dir)
    latest_path = saver.latest_checkpoint(self._model_dir)
    if not latest_path:
      logging.warning("Skipping evaluation and export since model has not been "
                      "saved yet.")
      return
    if latest_path == self._latest_path:
      logging.warning("Skipping evaluation due to same latest checkpoint %s.",
                      latest_path)
      return
    self._latest_path = latest_path
    self._eval_result = self._eval_fn(
        name="intermediate_export", checkpoint_path=latest_path)
    self._export_results = self._export_fn(
        self._eval_result, checkpoint_path=latest_path)
  @property
  def eval_result(self):
    """Result of the most recent intermediate evaluation, if any."""
    return self._eval_result
  @property
  def export_results(self):
    """Results of the most recent intermediate export, if any."""
    return self._export_results
class Experiment(object):
"""Experiment is a class containing all information needed to train a model.
After an experiment is created (by passing an Estimator and inputs for
training and evaluation), an Experiment instance knows how to invoke training
and eval loops in a sensible fashion for distributed training.
"""
  # TODO(ispir): remove delay_workers_by_global_step and make global step based
  # waiting as only behavior.
  @deprecated_args(
      "2016-10-23",
      "local_eval_frequency is deprecated as local_run will be renamed to "
      "train_and_evaluate. Use min_eval_frequency and call train_and_evaluate "
      "instead. Note, however, that the default for min_eval_frequency is 1, "
      "meaning models will be evaluated every time a new checkpoint is "
      "available. In contrast, the default for local_eval_frequency is None, "
      "resulting in evaluation occurring only after training has completed. "
      "min_eval_frequency is ignored when calling the deprecated local_run.",
      "local_eval_frequency")
  def __init__(self,
               estimator,
               train_input_fn,
               eval_input_fn,
               eval_metrics=None,
               train_steps=None,
               eval_steps=100,
               train_monitors=None,
               eval_hooks=None,
               local_eval_frequency=None,
               eval_delay_secs=120,
               continuous_eval_throttle_secs=60,
               min_eval_frequency=None,
               delay_workers_by_global_step=False,
               export_strategies=None,
               train_steps_per_iteration=None,
               checkpoint_and_export=False,
               saving_listeners=None):
    """Constructor for `Experiment`.
    Creates an Experiment instance. None of the functions passed to this
    constructor are executed at construction time. They are stored and used
    when a method is executed which requires it.
    Args:
      estimator: Object implementing Estimator interface, which could be a
        combination of @{tf.contrib.learn.Trainable} and
        @{tf.contrib.learn.Evaluable} (deprecated), or
        @{tf.estimator.Estimator}.
      train_input_fn: function, returns features and labels for training.
      eval_input_fn: function, returns features and labels for evaluation. If
        `eval_steps` is `None`, this should be configured only to produce for a
        finite number of batches (generally, 1 epoch over the evaluation data).
      eval_metrics: `dict` of string, metric function. If `None`, default set
        is used. This should be `None` if the `estimator` is
        @{tf.estimator.Estimator}. If metrics are provided they will be
        *appended* to the default set.
      train_steps: Perform this many steps of training. `None`, the default,
        means train forever.
      eval_steps: `evaluate` runs until input is exhausted (or another exception
        is raised), or for `eval_steps` steps, if specified.
      train_monitors: A list of monitors to pass to the `Estimator`'s `fit`
        function.
      eval_hooks: A list of `SessionRunHook` hooks to pass to the
        `Estimator`'s `evaluate` function.
      local_eval_frequency: (applies only to local_run) Frequency of running
        eval in steps. If `None`, runs evaluation only at the end of training.
      eval_delay_secs: Start evaluating after waiting for this many seconds.
      continuous_eval_throttle_secs: Do not re-evaluate unless the last
        evaluation was started at least this many seconds ago for
        continuous_eval().
      min_eval_frequency: (applies only to train_and_evaluate). the minimum
        number of steps between evaluations. Of course, evaluation does not
        occur if no new snapshot is available, hence, this is the minimum.
        If 0, the evaluation will only happen after training.
        If None, defaults to 1, unless model_dir is on GCS, in which case the
        default is 1000.
      delay_workers_by_global_step: if `True` delays training workers
        based on global step instead of time.
      export_strategies: Iterable of `ExportStrategy`s, or a single one, or
        `None`.
      train_steps_per_iteration: (applies only to continuous_train_and_eval).
        Perform this many (integer) number of train steps for each
        training-evaluation iteration. With a small value, the model will be
        evaluated more frequently with more checkpoints saved. If `None`, will
        use a default value (which is smaller than `train_steps` if provided).
      checkpoint_and_export: (applies only to train_and_evaluate). If `True`,
        performs intermediate model checkpoints and exports during the training
        process, rather than only once model training is complete. This
        parameter is experimental and may be changed or removed in the future.
        Setting this parameter leads to the following: the value of
        `min_eval_frequency` will be ignored, and the number of steps between
        evaluations and exports will instead be determined by the Estimator
        configuration parameters `save_checkpoints_secs` and
        `save_checkpoints_steps`. Also, this parameter leads to the creation of
        a default `CheckpointSaverHook` instead of a `ValidationMonitor`, so the
        provided `train_monitors` will need to be adjusted accordingly.
      saving_listeners: list of `CheckpointSaverListener` objects. Used by
        tf.estimator.Estimator for callbacks that run immediately before or
        after checkpoint savings.
    Raises:
      ValueError: if `estimator` does not implement Estimator interface,
        or if export_strategies has the wrong type.
    """
    # Core tf.estimator.Estimator and the deprecated contrib interface accept
    # different argument combinations, so validate per interface.
    if isinstance(estimator, core_estimator.Estimator):
      self._core_estimator_used = True
      if eval_metrics is not None:
        raise ValueError(
            "`eval_metrics` must be `None` with `tf.estimator.Estimator`. "
            "Use `eval_metric_ops` in `tf.estimator.EstimatorSpec` instead.")
    else:
      self._core_estimator_used = False
      if not isinstance(estimator, evaluable.Evaluable):
        raise ValueError(
            "`estimator` must implement `tf.contrib.learn.Evaluable` "
            "or `tf.estimator.Estimator`.")
      if not isinstance(estimator, trainable.Trainable):
        raise ValueError(
            "`estimator` must implement `tf.contrib.learn.Trainable`"
            "or `tf.estimator.`Estimator`.")
      if saving_listeners is not None:
        raise ValueError("`saving_listeners` must be `None` with "
                         "`tf.contrib.learn.Estimator`.")
    # TPUEstimator runs in-graph replication, which this class's between-graph
    # scheduling does not support; warn but do not fail.
    if isinstance(estimator, tpu_estimator.TPUEstimator):
      logging.warn(
          "`Experiment` class cannot work with `tf.contrib.tpu.TPUEstimator`. "
          "Please call `TPUEstimator` train/evaluate directly. \n"
          "Details: `Experiment` class is designed for between-graph "
          "distributed training, while `TPUEstimator` is working in in-graph "
          "distributed mode. Use with care.")
    super(Experiment, self).__init__()
    # Immutable fields.
    self._estimator = estimator
    self._train_input_fn = train_input_fn
    self._eval_input_fn = eval_input_fn
    self._eval_metrics = eval_metrics
    self._train_steps = train_steps
    self._eval_steps = eval_steps
    self._local_eval_frequency = local_eval_frequency
    self._eval_delay_secs = eval_delay_secs
    self._continuous_eval_throttle_secs = continuous_eval_throttle_secs
    self._checkpoint_and_export = checkpoint_and_export
    self._saving_listeners = saving_listeners
    # Using 1 on a non-cached file system requires a lot of overhead to
    # read the checkpoint state file. This is particular bad on GCS, so
    # we use a different default. This is a temporary band-aid, to be
    # fixed holistically later (b/36498507).
    default_min_eval_frequency = 1000 if _is_gcs(estimator.model_dir) else 1
    self._min_eval_frequency = min_eval_frequency if (
        min_eval_frequency is not None) else default_min_eval_frequency
    self._delay_workers_by_global_step = delay_workers_by_global_step
    # Copy the hook lists so later extend_train_hooks() calls cannot mutate
    # the caller's lists.
    self._train_monitors = train_monitors[:] if train_monitors else []
    self._eval_hooks = eval_hooks[:] if eval_hooks else []
    self._set_export_strategies(export_strategies)
    self._train_steps_per_iteration = train_steps_per_iteration
    if (self._train_steps_per_iteration is not None and
        not isinstance(self._train_steps_per_iteration, int)):
      raise ValueError(
          "`train_steps_per_iteration` must be an integer.")
  @property
  def estimator(self):
    """Returns the wrapped estimator object."""
    return self._estimator
  @property
  def eval_metrics(self):
    """Returns the `eval_metrics` dict passed at construction, or `None`."""
    return self._eval_metrics
  @property
  def train_steps(self):
    """Returns the number of training steps; `None` means train forever."""
    return self._train_steps
  @property
  def eval_steps(self):
    """Returns the number of steps per evaluation run, or `None`."""
    return self._eval_steps
def _set_export_strategies(self, values): # pylint: disable=missing-docstring
export_strategies = []
if values:
if isinstance(values, export_strategy.ExportStrategy):
export_strategies.append(values)
else:
for value in values:
if not isinstance(value, export_strategy.ExportStrategy):
raise ValueError("`export_strategies` must be an ExportStrategy,"
" an iterable of ExportStrategy, or `None`,"
" found %s." % value)
export_strategies.append(value)
self._export_strategies = tuple(export_strategies)
  def extend_train_hooks(self, additional_hooks):
    """Extends the hooks for training.

    Args:
      additional_hooks: iterable of hooks/monitors appended to the training
        monitors supplied at construction time.
    """
    self._train_monitors.extend(additional_hooks)
def reset_export_strategies(self, new_export_strategies=None):
"""Resets the export strategies with the `new_export_strategies`.
Args:
new_export_strategies: A new list of `ExportStrategy`s, or a single one,
or None.
Returns:
The old export strategies.
"""
old_export_strategies = self._export_strategies
self._set_export_strategies(new_export_strategies)
return old_export_strategies
def train(self, delay_secs=None):
"""Fit the estimator using the training data.
Train the estimator for `self._train_steps` steps, after waiting for
`delay_secs` seconds. If `self._train_steps` is `None`, train forever.
Args:
delay_secs: Start training after this many seconds.
Returns:
The trained estimator.
"""
start = time.time()
# Start the server, if needed. It's important to start the server before
# we (optionally) sleep for the case where no device_filters are set.
# Otherwise, the servers will wait to connect to each other before starting
# to train. We might as well start as soon as we can.
config = self._estimator.config
if isinstance(config, run_config.RunConfig):
if (config.cluster_spec and config.master and
config.environment == run_config.Environment.LOCAL):
logging.warn("ClusterSpec and master are provided, but environment is "
"set to 'local'. Set environment to 'cloud' if you intend "
"to use the distributed runtime.")
if (config.environment != run_config.Environment.LOCAL and
config.environment != run_config.Environment.GOOGLE and
config.cluster_spec and config.master):
self._start_server()
elif config.cluster_spec and config.master:
raise ValueError('For distributed runtime, Experiment class only works with'
'tf.contrib.learn.RunConfig for now, but provided {}'
.format(type(config)))
extra_hooks = []
if delay_secs is None:
task_id = self._estimator.config.task_id or 0
if self._delay_workers_by_global_step:
# Wait 5500 global steps for the second worker. Each worker waits more
# then previous one but with a diminishing number of steps.
extra_hooks.append(
basic_session_run_hooks.GlobalStepWaiterHook(
int(8000.0 * math.log(task_id + 1))))
delay_secs = 0
else:
# Wait 5 secs more for each new worker up to 60 secs.
delay_secs = min(60, task_id * 5)
if delay_secs > 0:
elapsed_secs = time.time() - start
remaining = delay_secs - elapsed_secs
logging.info("Waiting %d secs before starting training.", remaining)
time.sleep(delay_secs)
return self._call_train(
input_fn=self._train_input_fn,
max_steps=self._train_steps,
hooks=self._train_monitors + extra_hooks,
saving_listeners=self._saving_listeners)
  def evaluate(self, delay_secs=None, name=None):
    """Evaluate on the evaluation data.

    Runs evaluation on the evaluation data and returns the result. Runs for
    `self._eval_steps` steps, or if it's `None`, then run until input is
    exhausted or another exception is raised. Start the evaluation after
    `delay_secs` seconds, or if it's `None`, defaults to using
    `self._eval_delay_secs` seconds.

    Args:
      delay_secs: Start evaluating after this many seconds. If `None`, defaults
        to using `self._eval_delay_secs`.
      name: Gives the name to the evaluation for the case multiple evaluation
        is run for the same experiment.

    Returns:
      The result of the `evaluate` call to the `Estimator`.
    """
    if delay_secs is None:
      delay_secs = self._eval_delay_secs
    if delay_secs:
      logging.info("Waiting %d secs before starting eval.", delay_secs)
      time.sleep(delay_secs)
    return self._call_evaluate(input_fn=self._eval_input_fn,
                               steps=self._eval_steps,
                               metrics=self._eval_metrics,
                               name=(name or "one_pass"),
                               hooks=self._eval_hooks)
  @deprecated(
      "2016-10-23",
      "local_run will be renamed to train_and_evaluate and the new default "
      "behavior will be to run evaluation every time there is a new "
      "checkpoint.")
  def local_run(self):
    """Trains and evaluates, substituting `local_eval_frequency`.

    Deprecated alias of `train_and_evaluate` that temporarily swaps
    `_min_eval_frequency` for the constructor's `local_eval_frequency`
    while the run is in progress.

    Returns:
      The result of `train_and_evaluate`.
    """
    with _new_attr_context(self, "_min_eval_frequency"):
      self._min_eval_frequency = self._local_eval_frequency
      return self.train_and_evaluate()
  # TODO(xiejw): Allow continuous_eval_predicate_fn to be passed via constructor
  # once stopping all jobs is implemented.
  def _continuous_eval(self,
                       input_fn,
                       name,
                       delay_secs,
                       throttle_delay_secs,
                       evaluate_checkpoint_only_once=True,
                       continuous_eval_predicate_fn=None,
                       export=True):
    """Run continuous eval.

    Runs infinite eval on the evaluation data set. This function starts
    evaluating after `delay_secs` seconds and then runs no more than one
    evaluation (with `self._eval_steps` steps each time) per
    `throttle_delay_secs`. If `train_steps` is not None, will return after
    global_step reaches `train_steps`.

    Args:
      input_fn: The input to use for this eval.
      name: A string appended to the folder name of evaluation results.
      delay_secs: Start evaluating after this many seconds. If None, defaults to
        self._eval_delay_secs.
      throttle_delay_secs: Do not re-evaluate unless the last evaluation was
        started at least this many seconds ago. If None, defaults to
        self._continuous_eval_throttle_secs.
      evaluate_checkpoint_only_once: Whether to skip evaluation of checkpoints
        that have already been evaluated. Default is `True`.
      continuous_eval_predicate_fn: A predicate function determining whether to
        continue eval after each iteration. `predicate_fn` takes the evaluation
        results as arguments. At the beginning of evaluation, the passed eval
        results will be None so it's expected that the predicate function
        handles that gracefully. When `predicate_fn` is not specified,
        continuous eval will run in an infinite loop (if `train_steps` is None)
        or exit once global step reaches `train_steps`.
      export: Whether to export from this step. Default is 'True'.

    Raises:
      ValueError: if `continuous_eval_predicate_fn` is neither None nor
        callable.
    """
    if (continuous_eval_predicate_fn is not None and
        not callable(continuous_eval_predicate_fn)):
      raise ValueError(
          "`continuous_eval_predicate_fn` must be a callable, or None.")
    if delay_secs is None:
      delay_secs = self._eval_delay_secs
    if throttle_delay_secs is None:
      throttle_delay_secs = self._continuous_eval_throttle_secs
    if delay_secs:
      logging.info("Waiting %f secs before starting eval.", delay_secs)
      time.sleep(delay_secs)
    previous_path = None
    eval_result = None
    # Timestamp of the last "not ready" warning, used to rate-limit warnings.
    last_warning_time = 0
    # Loop until the predicate (if any) says stop, or training is known to
    # have reached `train_steps`.
    while (not continuous_eval_predicate_fn or
           continuous_eval_predicate_fn(eval_result)):
      # Exit if we have already reached number of steps to train.
      if self._has_training_stopped(eval_result):
        logging.info("Exiting continuous eval, global_step=%s >= "
                     "train_step=%s",
                     eval_result[ops.GraphKeys.GLOBAL_STEP],
                     self._train_steps)
        return
      start = time.time()
      error_msg = None
      latest_path = saver.latest_checkpoint(self._estimator.model_dir)
      if not latest_path:
        error_msg = ("Estimator is not fitted yet. "
                     "Will start an evaluation when a checkpoint is ready.")
      elif evaluate_checkpoint_only_once and latest_path == previous_path:
        error_msg = "No new checkpoint ready for evaluation."
      if error_msg:
        # Print warning message every 10 mins.
        eval_result = {}
        if time.time() - last_warning_time > 600:
          logging.warning(error_msg)
          last_warning_time = time.time()
      else:
        eval_result = self._call_evaluate(input_fn=input_fn,
                                          steps=self._eval_steps,
                                          metrics=self._eval_metrics,
                                          name=name,
                                          checkpoint_path=latest_path,
                                          hooks=self._eval_hooks)
        # Ensure eval result is not None for next round of evaluation.
        if not eval_result:
          eval_result = {}
        if export:
          self._maybe_export(eval_result, checkpoint_path=latest_path)
        # Clear warning timer and update last evaluated checkpoint
        last_warning_time = 0
        previous_path = latest_path
      # Throttle: keep at least `throttle_delay_secs` between eval starts.
      duration = time.time() - start
      if duration < throttle_delay_secs:
        difference = throttle_delay_secs - duration
        logging.info("Waiting %f secs before starting next eval run.",
                     difference)
        time.sleep(difference)
def _has_training_stopped(self, eval_result):
"""Determines whether the training has stopped."""
if not eval_result:
return False
global_step = eval_result.get(ops.GraphKeys.GLOBAL_STEP)
return global_step and self._train_steps and (
global_step >= self._train_steps)
  def continuous_eval(self,
                      delay_secs=None,
                      throttle_delay_secs=None,
                      evaluate_checkpoint_only_once=True,
                      continuous_eval_predicate_fn=None,
                      name="continuous"):
    """Runs continuous eval on the evaluation data.

    Thin wrapper over `_continuous_eval` using `self._eval_input_fn`;
    exporting after each evaluation is left at its default (enabled).

    Args:
      delay_secs: Start evaluating after this many seconds; `None` defaults
        to `self._eval_delay_secs`.
      throttle_delay_secs: Minimum seconds between evaluation starts; `None`
        defaults to `self._continuous_eval_throttle_secs`.
      evaluate_checkpoint_only_once: Whether to skip checkpoints that were
        already evaluated. Default is `True`.
      continuous_eval_predicate_fn: Optional predicate over the latest eval
        result deciding whether to continue; see `_continuous_eval`.
      name: A string appended to the folder name of evaluation results.
    """
    self._continuous_eval(
        self._eval_input_fn,
        name=name,
        delay_secs=delay_secs,
        throttle_delay_secs=throttle_delay_secs,
        evaluate_checkpoint_only_once=evaluate_checkpoint_only_once,
        continuous_eval_predicate_fn=continuous_eval_predicate_fn)
  def continuous_eval_on_train_data(self,
                                    delay_secs=None,
                                    throttle_delay_secs=None,
                                    continuous_eval_predicate_fn=None,
                                    name="continuous_on_train_data"):
    """Runs continuous eval on the *training* data.

    Like `continuous_eval`, but evaluates `self._train_input_fn` and disables
    exporting (`export=False`).

    Args:
      delay_secs: Start evaluating after this many seconds; `None` defaults
        to `self._eval_delay_secs`.
      throttle_delay_secs: Minimum seconds between evaluation starts; `None`
        defaults to `self._continuous_eval_throttle_secs`.
      continuous_eval_predicate_fn: Optional predicate over the latest eval
        result deciding whether to continue; see `_continuous_eval`.
      name: A string appended to the folder name of evaluation results.
    """
    self._continuous_eval(
        self._train_input_fn,
        name=name,
        delay_secs=delay_secs,
        throttle_delay_secs=throttle_delay_secs,
        continuous_eval_predicate_fn=continuous_eval_predicate_fn,
        export=False)
  def train_and_evaluate(self):
    """Interleaves training and evaluation.

    The frequency of evaluation is controlled by the constructor arg
    `min_eval_frequency`. When this parameter is 0, evaluation happens
    only after training has completed. Note that evaluation cannot happen
    more frequently than checkpoints are taken. If no new snapshots are
    available when evaluation is supposed to occur, then evaluation doesn't
    happen for another `min_eval_frequency` steps (assuming a checkpoint is
    available at that point). Thus, settings `min_eval_frequency` to 1 means
    that the model will be evaluated everytime there is a new checkpoint.

    This is particular useful for a "Master" task in the cloud, whose
    responsibility it is to take checkpoints, evaluate those checkpoints,
    and write out summaries. Participating in training as the supervisor
    allows such a task to accomplish the first and last items, while
    performing evaluation allows for the second.

    Returns:
      The result of the `evaluate` call to the `Estimator` as well as the
      export results using the specified `ExportStrategy`.
    """
    # The directory to which evaluation summaries are written are determined
    # by adding a suffix to 'eval'; that suffix is the 'name' parameter to
    # the various evaluate(...) methods. By setting it to None, we force
    # the directory name to simply be 'eval'.
    eval_dir_suffix = None
    # We set every_n_steps to 1, but evaluation only occurs when a new
    # snapshot is available. If, by the time we finish evaluation
    # there is a new snapshot, then we just evaluate again. Otherwise,
    # we keep training until one becomes available.
    # NOTE(review): `_new_attr_context` appears to save and restore
    # `_train_monitors` when this block exits — confirm in its definition.
    with _new_attr_context(self, "_train_monitors"):
      self._train_monitors = self._train_monitors or []
      config = self._estimator.config
      intermediate_export = self._checkpoint_and_export and (
          config.save_checkpoints_secs or config.save_checkpoints_steps)
      if intermediate_export:
        # Create a partially specified evaluate function with the desired
        # arguments. This will be executed by the _EvalAndExportListener,
        # which will specify the latest checkpoint path.
        eval_fn = functools.partial(
            self._call_evaluate,
            input_fn=self._eval_input_fn,
            steps=self._eval_steps,
            metrics=self._eval_metrics,
            hooks=self._eval_hooks)
        export_listener = _EvalAndExportListener(
            eval_fn=eval_fn,
            export_fn=self._maybe_export,
            model_dir=self._estimator.model_dir)
        saver_hook = basic_session_run_hooks.CheckpointSaverHook(
            checkpoint_dir=self._estimator.model_dir,
            save_secs=config.save_checkpoints_secs,
            save_steps=config.save_checkpoints_steps,
            listeners=[export_listener])
        self._train_monitors += [saver_hook]
      else:
        if self._min_eval_frequency:
          self._train_monitors += [
              monitors.ValidationMonitor(
                  input_fn=self._eval_input_fn,
                  eval_steps=self._eval_steps,
                  metrics=self._eval_metrics,
                  every_n_steps=self._min_eval_frequency,
                  name=eval_dir_suffix,
                  hooks=self._eval_hooks)
          ]
      self.train(delay_secs=0)
    # If the checkpoint_and_export flag and appropriate estimator configuration
    # parameters are set, then model evaluations and exports are done during the
    # training process. In particular, this will always occur at the end of
    # training, so we return the most recent results to avoid performing a
    # duplicate evaluation and model export.
    if intermediate_export:
      return export_listener.eval_result, export_listener.export_results
    else:
      eval_result = self._call_evaluate(
          input_fn=self._eval_input_fn,
          steps=self._eval_steps,
          metrics=self._eval_metrics,
          name=eval_dir_suffix,
          hooks=self._eval_hooks)
      export_results = self._maybe_export(eval_result)
      return eval_result, export_results
  @experimental
  def continuous_train_and_eval(self,
                                continuous_eval_predicate_fn=None):
    """Interleaves training and evaluation.

    The frequency of evaluation is controlled by the `train_steps_per_iteration`
    (via constructor). The model will be first trained for
    `train_steps_per_iteration`, and then be evaluated in turns.

    This method is intended for single machine usage.

    This differs from `train_and_evaluate` as follows:
      1. The procedure will have train and evaluation in turns. The model
      will be trained for a number of steps (usually smaller than `train_steps`
      if provided) and then be evaluated. `train_and_evaluate` will train the
      model for `train_steps` (no small training iterations).
      2. Due to the different approach this schedule takes, it leads to two
      differences in resource control. First, the resources (e.g., memory) used
      by training will be released before evaluation (`train_and_evaluate` takes
      double resources). Second, more checkpoints will be saved as a checkpoint
      is generated at the end of each training iteration.
      3. As the estimator.train starts from scratch (new graph, new states for
      input, etc) at each iteration, it is recommended to have the
      `train_steps_per_iteration` larger. It is also recommended to shuffle your
      input.

    Args:
      continuous_eval_predicate_fn: A predicate function determining whether to
        continue after each iteration. `predicate_fn` takes the evaluation
        results as its arguments. At the beginning of evaluation, the passed
        eval results will be None so it's expected that the predicate function
        handles that gracefully. When `predicate_fn` is not specified, this will
        run in an infinite loop or exit when global_step reaches `train_steps`.

    Returns:
      A tuple of the result of the `evaluate` call to the `Estimator` and the
      export results using the specified `ExportStrategy`.

    Raises:
      ValueError: if `continuous_eval_predicate_fn` is neither None nor
        callable.
    """
    if (continuous_eval_predicate_fn is not None and
        not callable(continuous_eval_predicate_fn)):
      raise ValueError(
          "`continuous_eval_predicate_fn` must be a callable, or None.")
    eval_result = None
    export_results = None
    # Set the default value for train_steps_per_iteration, which will be
    # overridden by other settings.
    train_steps_per_iteration = 1000
    if self._train_steps_per_iteration is not None:
      train_steps_per_iteration = self._train_steps_per_iteration
    elif self._train_steps is not None:
      train_steps_per_iteration = int(self._train_steps / 10)
    while (not continuous_eval_predicate_fn or
           continuous_eval_predicate_fn(eval_result)):
      if self._has_training_stopped(eval_result):
        # Exits once max steps of training is satisfied.
        logging.info("Stop training model as max steps reached")
        break
      logging.info("Training model for %s steps", train_steps_per_iteration)
      # `steps` (not `max_steps`) so each iteration trains incrementally on
      # top of the previous checkpoint.
      self._call_train(
          input_fn=self._train_input_fn,
          steps=train_steps_per_iteration,
          hooks=self._train_monitors,
          saving_listeners=self._saving_listeners)
      logging.info("Evaluating model now.")
      eval_result = self._call_evaluate(input_fn=self._eval_input_fn,
                                        steps=self._eval_steps,
                                        metrics=self._eval_metrics,
                                        name="one_pass",
                                        hooks=self._eval_hooks)
      export_results = self._maybe_export(eval_result)
    return eval_result, export_results
def _maybe_export(self, eval_result, checkpoint_path=None):
    """Runs every configured `ExportStrategy`, if any are defined.

    Args:
        eval_result: Evaluation results forwarded to each strategy.
        checkpoint_path: Optional checkpoint to export from.

    Returns:
        A list with one export result per configured strategy (empty when no
        strategies are configured).
    """
    base_dir = os.path.join(
        compat.as_bytes(self._estimator.model_dir),
        compat.as_bytes("export"))
    return [
        strategy.export(
            self._estimator,
            os.path.join(
                compat.as_bytes(base_dir),
                compat.as_bytes(strategy.name)),
            checkpoint_path=checkpoint_path,
            eval_result=eval_result)
        for strategy in self._export_strategies
    ]
def run_std_server(self):
    """Starts a TensorFlow server and joins the serving thread.

    Typically used for parameter servers.

    Raises:
        ValueError: if not enough information is available in the estimator's
            config to create a server.
    """
    server = self._start_server()
    server.join()
def test(self):
    """Tests training, evaluating and exporting the estimator for a single step.

    Returns:
        The result of the `evaluate` call to the `Estimator`.
    """
    # One training step, through the same dispatch path as real training.
    self._call_train(
        input_fn=self._train_input_fn,
        steps=1,
        hooks=self._train_monitors,
        saving_listeners=self._saving_listeners)
    # One evaluation step; export is attempted for its side effects only and
    # its result is deliberately discarded.
    result = self._call_evaluate(
        input_fn=self._eval_input_fn,
        steps=1,
        metrics=self._eval_metrics,
        name="one_pass")
    self._maybe_export(result)
    return result
def _start_server(self):
    """Creates, starts, and returns a server_lib.Server."""
    config = self._estimator.config
    # All four pieces of cluster information must be present to run a server.
    have_cluster_info = (
        config.cluster_spec and config.task_type and config.master and
        config.task_id is not None)
    if not have_cluster_info:
        raise ValueError("Could not start server; be sure to specify "
                         "cluster_spec, task_type, master, and task in "
                         "RunConfig or set the TF_CONFIG environment variable.")
    server = server_lib.Server(
        config.cluster_spec,
        job_name=config.task_type,
        task_index=config.task_id,
        config=config.tf_config,
        start=False)
    server.start()
    return server
def _call_train(self, _sentinel=None,  # pylint: disable=invalid-name,
                input_fn=None, steps=None, hooks=None, max_steps=None,
                saving_listeners=None):
    """Dispatches a training call to the wrapped estimator (core or contrib)."""
    if _sentinel is not None:
        raise ValueError("_call_train should be called with keyword args only")

    # Estimator in core cannot work with monitors; convert them to hooks.
    # The contrib Estimator converts internally, so converting here is safe
    # for both cases.
    hooks = monitors.replace_monitors_with_hooks(hooks, self._estimator)
    if not self._core_estimator_used:
        return self._estimator.fit(input_fn=input_fn,
                                   steps=steps,
                                   max_steps=max_steps,
                                   monitors=hooks)
    return self._estimator.train(
        input_fn=input_fn,
        steps=steps,
        max_steps=max_steps,
        hooks=hooks,
        saving_listeners=saving_listeners)
def _call_evaluate(self, _sentinel=None,  # pylint: disable=invalid-name,
                   input_fn=None, steps=None, metrics=None, name=None,
                   checkpoint_path=None, hooks=None):
    """Dispatches an evaluate call to the wrapped estimator (core or contrib)."""
    if _sentinel is not None:
        raise ValueError("_call_evaluate should be called with keyword args only")

    # Arguments shared by both the core and the contrib code path.
    shared_kwargs = dict(input_fn=input_fn,
                         steps=steps,
                         name=name,
                         checkpoint_path=checkpoint_path,
                         hooks=hooks)
    if self._core_estimator_used:
        # Core estimators compute metrics from the model_fn; an explicit
        # metrics argument is a contrib-only concept.
        if metrics is not None:
            raise ValueError(
                "`eval_metrics` must be `None` with `tf.estimator.Estimator`")
        return self._estimator.evaluate(**shared_kwargs)
    return self._estimator.evaluate(metrics=metrics, **shared_kwargs)
@contextlib.contextmanager
def _new_attr_context(obj, attr):
"""Creates a new context in which an object's attribute can be changed.
This creates a context in which an object's attribute can be changed.
Once the context is exited, the attribute reverts to its original value.
Args:
obj: An object whose attribute to restore at the end of the context.
attr: An attribute to remember and restore at the end of the context.
Yields:
Context.
Example:
my_obj.x = 1
with _new_attr_context(my_obj, "x"):
my_obj.x = 2
print(my_obj.x)
print(my_obj.x)
"""
saved = getattr(obj, attr)
try:
yield
finally:
setattr(obj, attr, saved)
def _is_gcs(model_dir):
return model_dir and model_dir.startswith("gs://")
| {
"content_hash": "c7e8caff9093004e674c1b89df360963",
"timestamp": "",
"source": "github",
"line_count": 869,
"max_line_length": 82,
"avg_line_length": 42.601841196777904,
"alnum_prop": 0.6496042786526566,
"repo_name": "Kongsea/tensorflow",
"id": "fc4bd1f461d7bfbfcfb78201d527959055342f0a",
"size": "37711",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/experiment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "198923"
},
{
"name": "C++",
"bytes": "29494349"
},
{
"name": "CMake",
"bytes": "644855"
},
{
"name": "Go",
"bytes": "976410"
},
{
"name": "Java",
"bytes": "409984"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38189"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "270658"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26227666"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373711"
}
],
"symlink_target": ""
} |
from django.core.exceptions import ValidationError
from rest_framework import exceptions
from rest_framework import serializers as ser
from rest_framework.fields import empty
from rest_framework.exceptions import ValidationError as DRFValidationError
from website import settings
from api.base.exceptions import Conflict, JSONAPIException
from api.base.serializers import (
JSONAPISerializer,
IDField,
TypeField,
HideIfNotWithdrawal,
NoneIfWithdrawal,
LinksField,
RelationshipField,
VersionedDateTimeField,
JSONAPIListField,
NodeFileHyperLinkField,
WaterbutlerLink,
HideIfPreprint,
LinkedNodesRelationshipSerializer,
)
from api.base.utils import absolute_reverse, get_user_auth
from api.base.parsers import NO_DATA_ERROR
from api.nodes.serializers import (
NodeCitationSerializer,
NodeLicenseSerializer,
NodeContributorsSerializer,
NodeStorageProviderSerializer,
NodeContributorsCreateSerializer,
NodeContributorDetailSerializer,
get_license_details,
NodeTagField,
)
from api.base.metrics import MetricsSerializerMixin
from api.taxonomies.serializers import TaxonomizableSerializerMixin
from framework.exceptions import PermissionsError
from website.project import signals as project_signals
from osf.exceptions import NodeStateError
from osf.models import (
BaseFileNode,
Preprint,
PreprintProvider,
Node,
NodeLicense,
)
from osf.utils import permissions as osf_permissions
from osf.exceptions import PreprintStateError
class PrimaryFileRelationshipField(RelationshipField):
    """Relationship field that resolves an id to a `BaseFileNode` instance."""

    def get_object(self, file_id):
        return BaseFileNode.load(file_id)

    def to_internal_value(self, data):
        return {'primary_file': self.get_object(data)}
class NodeRelationshipField(RelationshipField):
    """Relationship field that resolves an id to a `Node` instance."""

    def get_object(self, node_id):
        try:
            return Node.load(node_id)
        except AttributeError:
            raise exceptions.ValidationError(detail='Node not correctly specified.')

    def to_internal_value(self, data):
        return {'node': self.get_object(data)}
class PreprintProviderRelationshipField(RelationshipField):
    """Relationship field that resolves an id to a `PreprintProvider` instance."""

    def get_object(self, node_id):
        return PreprintProvider.load(node_id)

    def to_internal_value(self, data):
        return {'provider': self.get_object(data)}
class PreprintLicenseRelationshipField(RelationshipField):
    """Relationship field that resolves an id to a `NodeLicense` instance."""

    def to_internal_value(self, license_id):
        license = NodeLicense.load(license_id)
        # Guard clause: unknown license ids surface as a 404.
        if not license:
            raise exceptions.NotFound('Unable to find specified license.')
        return {'license_type': license}
class PreprintSerializer(TaxonomizableSerializerMixin, MetricsSerializerMixin, JSONAPISerializer):
    """JSON:API serializer for `Preprint` models.

    Reads most attributes directly; writes are routed through the model's
    permission-checked setters in `update`, so a plain `serializer.save()`
    enforces the same rules as the web UI.
    """

    filterable_fields = frozenset([
        'id',
        'date_created',
        'date_modified',
        'date_published',
        'original_publication_date',
        'provider',
        'is_published',
        'subjects',
        'reviews_state',
        'node_is_public',
    ])
    available_metrics = frozenset([
        'downloads',
        'views',
    ])

    id = IDField(source='_id', read_only=True)
    type = TypeField()

    date_created = VersionedDateTimeField(source='created', read_only=True)
    date_modified = VersionedDateTimeField(source='modified', read_only=True)
    date_published = VersionedDateTimeField(read_only=True)
    original_publication_date = VersionedDateTimeField(required=False, allow_null=True)
    doi = ser.CharField(source='article_doi', required=False, allow_null=True)
    title = ser.CharField(required=True, max_length=512)
    description = ser.CharField(required=False, allow_blank=True, allow_null=True)
    is_published = NoneIfWithdrawal(ser.BooleanField(required=False))
    is_preprint_orphan = NoneIfWithdrawal(ser.BooleanField(read_only=True))
    license_record = NodeLicenseSerializer(required=False, source='license')
    tags = JSONAPIListField(child=NodeTagField(), required=False)
    node_is_public = ser.BooleanField(read_only=True, source='node__is_public', help_text='Is supplementary project public?')
    preprint_doi_created = NoneIfWithdrawal(VersionedDateTimeField(read_only=True))
    date_withdrawn = VersionedDateTimeField(read_only=True, allow_null=True)
    withdrawal_justification = HideIfNotWithdrawal(ser.CharField(required=False, read_only=True, allow_blank=True))

    current_user_permissions = ser.SerializerMethodField(
        help_text='List of strings representing the permissions '
                  'for the current user on this preprint.',
    )
    public = ser.BooleanField(source='is_public', required=False, read_only=True)
    contributors = RelationshipField(
        related_view='preprints:preprint-contributors',
        related_view_kwargs={'preprint_id': '<_id>'},
    )
    bibliographic_contributors = RelationshipField(
        related_view='preprints:preprint-bibliographic-contributors',
        related_view_kwargs={'preprint_id': '<_id>'},
    )
    reviews_state = ser.CharField(source='machine_state', read_only=True, max_length=15)
    date_last_transitioned = NoneIfWithdrawal(VersionedDateTimeField(read_only=True))

    citation = NoneIfWithdrawal(RelationshipField(
        related_view='preprints:preprint-citation',
        related_view_kwargs={'preprint_id': '<_id>'},
    ))
    identifiers = NoneIfWithdrawal(RelationshipField(
        related_view='preprints:identifier-list',
        related_view_kwargs={'preprint_id': '<_id>'},
    ))
    node = NoneIfWithdrawal(NodeRelationshipField(
        related_view='nodes:node-detail',
        related_view_kwargs={'node_id': '<node._id>'},
        read_only=False,
        many=False,
        self_view='preprints:preprint-node-relationship',
        self_view_kwargs={'preprint_id': '<_id>'},
    ))
    license = PreprintLicenseRelationshipField(
        related_view='licenses:license-detail',
        related_view_kwargs={'license_id': '<license.node_license._id>'},
        read_only=False,
    )
    provider = PreprintProviderRelationshipField(
        related_view='providers:preprint-providers:preprint-provider-detail',
        related_view_kwargs={'provider_id': '<provider._id>'},
        read_only=False,
    )
    files = NoneIfWithdrawal(RelationshipField(
        related_view='preprints:preprint-storage-providers',
        related_view_kwargs={'preprint_id': '<_id>'},
    ))
    primary_file = NoneIfWithdrawal(PrimaryFileRelationshipField(
        related_view='files:file-detail',
        related_view_kwargs={'file_id': '<primary_file._id>'},
        read_only=False,
    ))
    review_actions = RelationshipField(
        related_view='preprints:preprint-review-action-list',
        related_view_kwargs={'preprint_id': '<_id>'},
    )
    requests = NoneIfWithdrawal(RelationshipField(
        related_view='preprints:preprint-request-list',
        related_view_kwargs={'preprint_id': '<_id>'},
    ))

    links = LinksField(
        {
            'self': 'get_preprint_url',
            'html': 'get_absolute_html_url',
            'doi': 'get_article_doi_url',
            'preprint_doi': 'get_preprint_doi_url',
        },
    )

    # Author-assertion fields; each one writes through the model's
    # matching `update_<field>` method (see _ASSERTION_FIELDS in update()).
    has_coi = ser.NullBooleanField(required=False)
    conflict_of_interest_statement = ser.CharField(required=False, allow_blank=True, allow_null=True)
    has_data_links = ser.ChoiceField(Preprint.HAS_LINKS_CHOICES, required=False)
    why_no_data = ser.CharField(required=False, allow_blank=True, allow_null=True)
    data_links = ser.ListField(child=ser.URLField(), required=False)
    has_prereg_links = ser.ChoiceField(Preprint.HAS_LINKS_CHOICES, required=False)
    why_no_prereg = ser.CharField(required=False, allow_blank=True, allow_null=True)
    prereg_links = ser.ListField(child=ser.URLField(), required=False)
    prereg_link_info = ser.ChoiceField(Preprint.PREREG_LINK_INFO_CHOICES, required=False, allow_blank=True)

    class Meta:
        type_ = 'preprints'

    @property
    def subjects_related_view(self):
        # Overrides TaxonomizableSerializerMixin
        return 'preprints:preprint-subjects'

    @property
    def subjects_view_kwargs(self):
        # Overrides TaxonomizableSerializerMixin
        return {'preprint_id': '<_id>'}

    def get_preprint_url(self, obj):
        return absolute_reverse('preprints:preprint-detail', kwargs={'preprint_id': obj._id, 'version': self.context['request'].parser_context['kwargs']['version']})

    def get_absolute_url(self, obj):
        return self.get_preprint_url(obj)

    def get_article_doi_url(self, obj):
        return 'https://doi.org/{}'.format(obj.article_doi) if obj.article_doi else None

    def get_current_user_permissions(self, obj):
        user = self.context['request'].user
        # Reversed so the highest permission appears first.
        return obj.get_permissions(user)[::-1]

    def get_preprint_doi_url(self, obj):
        doi = None
        doi_identifier = obj.get_identifier('doi')
        if doi_identifier:
            doi = doi_identifier.value
        # if a preprint hasn't been published yet, don't show the DOI prematurely
        elif obj.is_published:
            client = obj.get_doi_client()
            doi = client.build_doi(preprint=obj) if client else None
        return 'https://doi.org/{}'.format(doi) if doi else None

    def update(self, preprint, validated_data):
        """Apply `validated_data` to `preprint` via its permission-checked setters.

        Raises:
            exceptions.PermissionDenied: if the user lacks write permission.
            Conflict: when attempting to publish directly on a moderated provider.
            exceptions.ValidationError: for invalid field values or state errors.
        """
        assert isinstance(preprint, Preprint), 'You must specify a valid preprint to be updated'

        auth = get_user_auth(self.context['request'])
        if not preprint.has_permission(auth.user, osf_permissions.WRITE):
            raise exceptions.PermissionDenied(detail='User must have admin or write permissions to update a preprint.')

        published = validated_data.pop('is_published', None)
        if published and preprint.provider.is_reviewed:
            raise Conflict('{} uses a moderation workflow, so preprints must be submitted for review instead of published directly. Submit a preprint by creating a `submit` Action at {}'.format(
                preprint.provider.name,
                absolute_reverse(
                    'preprints:preprint-review-action-list', kwargs={
                        'version': self.context['request'].parser_context['kwargs']['version'],
                        'preprint_id': preprint._id,
                    },
                ),
            ))

        save_preprint = False
        recently_published = False

        primary_file = validated_data.pop('primary_file', None)
        if primary_file:
            self.set_field(preprint.set_primary_file, primary_file, auth)
            save_preprint = True

        old_tags = set(preprint.tags.values_list('name', flat=True))
        if 'tags' in validated_data:
            current_tags = set(validated_data.pop('tags', []))
        elif self.partial:
            # PATCH without tags: keep the existing set untouched.
            current_tags = set(old_tags)
        else:
            # PUT without tags: treat as clearing all tags.
            current_tags = set()

        for new_tag in (current_tags - old_tags):
            preprint.add_tag(new_tag, auth=auth)
        for deleted_tag in (old_tags - current_tags):
            preprint.remove_tag(deleted_tag, auth=auth)

        if 'node' in validated_data:
            node = validated_data.pop('node', None)
            self.set_field(preprint.set_supplemental_node, node, auth)
            save_preprint = True

        if 'subjects' in validated_data:
            subjects = validated_data.pop('subjects', None)
            self.update_subjects(preprint, subjects, auth)
            save_preprint = True

        if 'title' in validated_data:
            title = validated_data['title']
            self.set_field(preprint.set_title, title, auth)
            save_preprint = True

        if 'description' in validated_data:
            description = validated_data['description']
            self.set_field(preprint.set_description, description, auth)
            save_preprint = True

        if 'article_doi' in validated_data:
            # Reject the preprint's own minted DOI; this field is meant for a
            # peer-reviewed publication's DOI.
            doi = settings.DOI_FORMAT.format(prefix=preprint.provider.doi_prefix, guid=preprint._id)
            if validated_data['article_doi'] == doi:
                raise exceptions.ValidationError(
                    detail=f'The `article_doi` "{doi}" is already associated with this'
                    f' preprint please enter a peer-reviewed publication\'s DOI',
                )
            preprint.article_doi = validated_data['article_doi']
            save_preprint = True

        if 'license_type' in validated_data or 'license' in validated_data:
            license_details = get_license_details(preprint, validated_data)
            self.set_field(preprint.set_preprint_license, license_details, auth)
            save_preprint = True

        if 'original_publication_date' in validated_data:
            preprint.original_publication_date = validated_data['original_publication_date'] or None
            save_preprint = True

        # Author-assertion fields all share the same write pattern: delegate to
        # `preprint.update_<field>(auth, value)` and surface model state errors
        # as serializer validation errors. (Previously nine copy-pasted
        # try/except blocks.)
        for assertion_field in (
            'has_coi',
            'conflict_of_interest_statement',
            'has_data_links',
            'why_no_data',
            'data_links',
            'has_prereg_links',
            'why_no_prereg',
            'prereg_links',
            'prereg_link_info',
        ):
            if assertion_field in validated_data:
                try:
                    getattr(preprint, f'update_{assertion_field}')(auth, validated_data[assertion_field])
                except PreprintStateError as e:
                    raise exceptions.ValidationError(detail=str(e))

        if published is not None:
            if not preprint.primary_file:
                raise exceptions.ValidationError(detail='A valid primary_file must be set before publishing a preprint.')
            self.set_field(preprint.set_published, published, auth)
            save_preprint = True
            recently_published = published
            preprint.set_privacy('public', log=False, save=True)

        if save_preprint:
            preprint.save()

        if recently_published:
            # Notify every co-author (not the actor) that the preprint went live.
            for author in preprint.contributors:
                if author != auth.user:
                    project_signals.contributor_added.send(preprint, contributor=author, auth=auth, email_template='preprint')

        return preprint

    def set_field(self, func, val, auth, save=False):
        """Call a model setter `func(val, auth)`, translating domain errors to API errors."""
        try:
            func(val, auth)
        except PermissionsError as e:
            raise exceptions.PermissionDenied(detail=str(e))
        except (ValueError, ValidationError, NodeStateError) as e:
            raise exceptions.ValidationError(detail=str(e))
class PreprintCreateSerializer(PreprintSerializer):
    """Create-variant of PreprintSerializer: nullable id plus a `create` hook."""

    id = IDField(source='_id', required=False, allow_null=True)

    def create(self, validated_data):
        provider = validated_data.pop('provider', None)
        if not provider:
            raise exceptions.ValidationError(detail='You must specify a valid provider to create a preprint.')
        preprint = Preprint(
            provider=provider,
            title=validated_data.pop('title'),
            creator=self.context['request'].user,
            description=validated_data.pop('description', ''),
        )
        preprint.save()
        # Everything else (tags, subjects, primary file, ...) goes through the
        # normal update path so permission checks apply.
        return self.update(preprint, validated_data)
class PreprintCitationSerializer(NodeCitationSerializer):
    """Citation serializer for preprints; only the JSON:API resource type differs."""
    class Meta:
        type_ = 'preprint-citation'
class PreprintContributorsSerializer(NodeContributorsSerializer):
    """ Separate from UserSerializer due to necessity to override almost every field as read only
    """
    preprint = RelationshipField(
        related_view='preprints:preprint-detail',
        related_view_kwargs={'preprint_id': '<preprint._id>'},
    )
    # Preprint contributor payloads hide the node relationship.
    node = HideIfPreprint(RelationshipField(
        related_view='nodes:node-detail',
        related_view_kwargs={'node_id': '<node._id>'},
    ))

    class Meta:
        type_ = 'contributors'

    def get_absolute_url(self, obj):
        url_kwargs = self.context['request'].parser_context['kwargs']
        return absolute_reverse(
            'preprints:preprint-contributor-detail',
            kwargs={
                'user_id': obj.user._id,
                'preprint_id': url_kwargs['preprint_id'],
                'version': url_kwargs['version'],
            },
        )
class PreprintContributorsCreateSerializer(NodeContributorsCreateSerializer, PreprintContributorsSerializer):
    """
    Overrides PreprintContributorsSerializer to add email, full_name, send_email, and non-required index and users field.
    id and index redefined because of the two serializers we've inherited
    """
    id = IDField(source='_id', required=False, allow_null=True)
    index = ser.IntegerField(required=False, source='_order')

    # Presumably the accepted `send_email` template options for this endpoint
    # — confirm against NodeContributorsCreateSerializer's handling.
    email_preferences = ['preprint', 'false']
class PreprintContributorDetailSerializer(NodeContributorDetailSerializer, PreprintContributorsSerializer):
    """
    Overrides NodeContributorDetailSerializer to set the preprint instead of the node
    id and index redefined because of the two serializers we've inherited
    """
    # Unlike the create serializer, the detail endpoint requires an id.
    id = IDField(required=True, source='_id')
    index = ser.IntegerField(required=False, read_only=False, source='_order')
class PreprintStorageProviderSerializer(NodeStorageProviderSerializer):
    """Storage-provider serializer scoped to preprint file routes."""
    # `node` is hidden for preprints; `preprint` exposes the same underlying id.
    node = HideIfPreprint(ser.CharField(source='node_id', read_only=True))
    preprint = ser.CharField(source='node_id', read_only=True)
    files = NodeFileHyperLinkField(
        related_view='preprints:preprint-files',
        related_view_kwargs={'preprint_id': '<node._id>'},
        kind='folder',
        never_embed=True,
    )
    links = LinksField({
        'upload': WaterbutlerLink(),
    })
class PreprintNodeRelationshipSerializer(LinkedNodesRelationshipSerializer):
    """Unset-only relationship serializer for a preprint's supplemental node."""
    # Raw payload dict; validated by run_validation below, not by field types.
    data = ser.DictField()

    def run_validation(self, data=empty):
        """
        Overwrites run_validation.
        JSONAPIOnetoOneRelationshipParser parses data into {id: None, type: None} if data is null,
        which is what this endpoint expects.
        """
        if data == {}:
            raise JSONAPIException(source={'pointer': '/data'}, detail=NO_DATA_ERROR)

        # A concrete id/type pair means the client tried to *set* a node, which
        # this unset-only endpoint rejects.
        if data.get('type', None) is not None and data.get('id', None) is not None:
            raise DRFValidationError({'data': 'Data must be null. This endpoint can only be used to unset the supplemental project.'}, 400)
        return data

    def make_instance_obj(self, obj):
        # Convenience method to format instance based on view's get_object
        return {
            'data': None,
            'self': obj,
        }

    def update(self, instance, validated_data):
        # `instance` is the dict built by make_instance_obj; 'self' holds the preprint.
        auth = get_user_auth(self.context['request'])
        preprint = instance['self']
        preprint.unset_supplemental_node(auth=auth)
        preprint.save()
        return self.make_instance_obj(preprint)

    links = LinksField({
        'self': 'get_self_url',
    })
| {
"content_hash": "f255afaaa22878b009e07a058b42f2a0",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 194,
"avg_line_length": 38.750936329588015,
"alnum_prop": 0.6563088967283622,
"repo_name": "Johnetordoff/osf.io",
"id": "2a379d93bed266342b34285dcd4b735583f8fb19",
"size": "20693",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "api/preprints/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93635"
},
{
"name": "Dockerfile",
"bytes": "5876"
},
{
"name": "HTML",
"bytes": "373738"
},
{
"name": "JavaScript",
"bytes": "1596130"
},
{
"name": "Mako",
"bytes": "679193"
},
{
"name": "Python",
"bytes": "11587197"
},
{
"name": "Shell",
"bytes": "2841"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alters Channel.kind's choice list (adds the newer notification kinds)."""

    dependencies = [
        ('api', '0023_auto_20160131_1919'),
    ]

    operations = [
        migrations.AlterField(
            model_name='channel',
            name='kind',
            # bytes literals because the file enables unicode_literals under Python 2.
            field=models.CharField(choices=[(b'email', b'Email'), (b'webhook', b'Webhook'), (b'hipchat', b'HipChat'), (b'slack', b'Slack'), (b'pd', b'PagerDuty'), (b'po', b'Pushover'), (b'victorops', b'VictorOps')], max_length=20),
        ),
    ]
| {
"content_hash": "1fa19eacdcaf38217526cb1739468b8f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 231,
"avg_line_length": 30.77777777777778,
"alnum_prop": 0.5884476534296029,
"repo_name": "BetterWorks/healthchecks",
"id": "236a11dd887798a5fee4b10459624ec3a859a8e2",
"size": "624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hc/api/migrations/0024_auto_20160203_2227.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16692"
},
{
"name": "Dockerfile",
"bytes": "322"
},
{
"name": "HTML",
"bytes": "166324"
},
{
"name": "JavaScript",
"bytes": "20198"
},
{
"name": "Less",
"bytes": "202764"
},
{
"name": "Procfile",
"bytes": "220"
},
{
"name": "Python",
"bytes": "167802"
},
{
"name": "Shell",
"bytes": "377"
}
],
"symlink_target": ""
} |
import csv
import time
import tempfile
import os
#this code requires matplotlib to be installed
#please refer to http://matplotlib.org/downloads.html
import matplotlib.pyplot as plt
class TabularData:
    """Minimal container pairing a list of rows with an optional header row."""

    def __init__(self, data, header=None):
        self.data = data
        self.header = header
###############################################################
def simulation(dataA, dataB):
    """Pretend to run a simulation over two datasets.

    Concatenates the rows of `dataA` and `dataB` (objects exposing a `.data`
    list of rows), writes them colon-delimited to a fresh temporary file, and
    returns that file's path.

    Args:
        dataA: first dataset (object with a `.data` list of rows).
        dataB: second dataset (same shape as dataA).

    Returns:
        Path of the temporary file containing all rows.
    """
    time.sleep(2)  # stand-in for real computation time
    all_data = dataA.data + dataB.data
    (f, name) = tempfile.mkstemp(prefix='vtweather')
    os.close(f)
    # Context manager ensures the file is flushed and closed before the path
    # is returned (the original leaked the open file object).
    with open(name, 'w') as out:
        csv.writer(out, delimiter=':').writerows(all_data)
    return name
def csvRead(f):
    """Read a colon-delimited CSV file into a TabularData with an empty header.

    Args:
        f: path of the file to read.

    Returns:
        TabularData whose `data` is the list of parsed rows and whose
        `header` is an empty list (this demo's data files have no header row).
    """
    # 'r' replaces the deprecated 'rU' mode (removed in Python 3.11; universal
    # newlines are the default anyway), and the context manager closes the
    # handle the original left open.
    with open(f, 'r') as handle:
        data = list(csv.reader(handle, delimiter=':'))
    return TabularData(data, [])
def extractColumn(d, column_name, column):
    """Extract one column from a TabularData-like object.

    Args:
        d: object with `.data` (list of rows) and `.header` (list or None).
        column_name: if non-empty, locate the column by this header name.
        column: numeric index used when `column_name` is empty.

    Returns:
        A list containing the selected value from every row.

    Raises:
        ValueError: if a header name is requested but the data has no header,
            or the name is not present. (The original printed a message and
            then crashed — AttributeError on a None header, or NameError from
            an undefined `idx`; its error message also concatenated the int
            index onto a str, raising TypeError.)
    """
    if column_name:
        if d.header is None:
            raise ValueError("Data does not contain header")
        try:
            idx = d.header.index(column_name)
        except ValueError:
            raise ValueError("Data does not contain column %s" % column_name)
    else:
        idx = column
    # Per-row debug print removed; it flooded stdout on large datasets.
    return [row[idx] for row in d.data]
def mplScatter(x, y):
    """Scatter-plot temperature (x) against precipitation (y) into output.png."""
    # The original built an empty kwargs dict and splatted it — a no-op,
    # dropped here.
    plt.scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,
                vmin=None, vmax=None, alpha=None, linewidths=None, verts=None)
    plt.xlabel('Temperature')
    plt.ylabel('Precipitation')
    plt.savefig("output.png")
######################################################################################
#Main Program
# Demo pipeline: read two weather datasets, run a mock simulation over the
# combined rows, then scatter-plot temperature vs. precipitation.
print('Reading data data1.dat...')
dataA = csvRead('data1.dat')
print('Reading data data2.dat...')
dataB = csvRead('data2.dat')
#Simulation
print('Executing simulation...')
tempFile = simulation(dataA, dataB)
#tempFile was generated by Simulation
print('Reading temporary file...')
d = csvRead(tempFile)
#GetPrecipitation (column 1 of the combined file)
print('Extracting precipitation...')
columnY = extractColumn(d, '', 1)
#GetTemperature (column 0 of the combined file)
print('Extracting temperature...')
columnX = extractColumn(d, '', 0)
#Transform column values in a list of float
print('Converting values...')
out1 = [float(i) for i in columnX]
out2 = [float(i) for i in columnY]
print('Generating result...')
mplScatter(out1, out2)
| {
"content_hash": "c1ee98c87d682a3a7fccd20394658ad7",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 92,
"avg_line_length": 28.408163265306122,
"alnum_prop": 0.5926724137931034,
"repo_name": "gems-uff/noworkflow",
"id": "a6f0b35fcad23d09c9c3049f303d9bfed76c645b",
"size": "2784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "capture/noworkflow/resources/demo/1/step3/simulation_complete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "176047"
},
{
"name": "HTML",
"bytes": "238"
},
{
"name": "JavaScript",
"bytes": "787748"
},
{
"name": "Jupyter Notebook",
"bytes": "5241520"
},
{
"name": "Prolog",
"bytes": "18527"
},
{
"name": "Python",
"bytes": "656680"
},
{
"name": "TypeScript",
"bytes": "122003"
}
],
"symlink_target": ""
} |
"""
The ASL module of niftyfit, which wraps the fitting methods in NiftyFit.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/\
data'))
>>> os.chdir(datadir)
"""
from ..base import TraitedSpec, traits, CommandLineInputSpec
from .base import NiftyFitCommand
from ..niftyreg.base import get_custom_path
class FitAslInputSpec(CommandLineInputSpec):
    """ Input Spec for FitAsl.

    Note the file's idiom: each bare `desc = ...` assignment feeds the trait
    defined immediately after it.
    """
    desc = 'Filename of the 4D ASL (control/label) source image (mandatory).'
    source_file = traits.File(position=1,
                              exists=True,
                              argstr='-source %s',
                              mandatory=True,
                              desc=desc)
    pasl = traits.Bool(desc='Fit PASL ASL data [default]', argstr='-pasl')
    pcasl = traits.Bool(desc='Fit PCASL ASL data', argstr='-pcasl')

    # *** Output options:
    desc = 'Filename of the Cerebral Blood Flow map (in ml/100g/min).'
    cbf_file = traits.File(name_source=['source_file'],
                           name_template='%s_cbf.nii.gz',
                           argstr='-cbf %s', desc=desc)
    error_file = traits.File(name_source=['source_file'],
                             name_template='%s_error.nii.gz',
                             argstr='-error %s',
                             desc='Filename of the CBF error map.')
    syn_file = traits.File(name_source=['source_file'],
                           name_template='%s_syn.nii.gz',
                           argstr='-syn %s',
                           desc='Filename of the synthetic ASL data.')

    # *** Input options (see also fit_qt1 for generic T1 fitting):
    desc = 'Filename of the estimated input T1 map (in ms).'
    t1map = traits.File(exists=True, argstr='-t1map %s', desc=desc)
    desc = 'Filename of the estimated input M0 map.'
    m0map = traits.File(exists=True, argstr='-m0map %s', desc=desc)
    desc = 'Filename of the estimated input M0 map error.'
    m0mape = traits.File(exists=True, argstr='-m0mape %s', desc=desc)
    desc = 'Filename of a [1,2,5]s Inversion Recovery volume (T1/M0 fitting \
carried out internally).'
    ir_volume = traits.File(exists=True, argstr='-IRvolume %s', desc=desc)
    desc = 'Output of [1,2,5]s Inversion Recovery fitting.'
    ir_output = traits.File(exists=True, argstr='-IRoutput %s', desc=desc)

    # *** Experimental options (Choose those suitable for the model!):
    mask = traits.File(position=2,
                       exists=True,
                       desc='Filename of image mask.',
                       argstr='-mask %s')
    t1_art_cmp = traits.Float(desc='T1 of arterial component [1650ms].',
                              argstr='-T1a %f')
    desc = 'Single plasma/tissue partition coefficient [0.9ml/g].'
    plasma_coeff = traits.Float(desc=desc, argstr='-L %f')
    desc = 'Labelling efficiency [0.99 (pasl), 0.85 (pcasl)], ensure any \
background suppression pulses are included in -eff'
    eff = traits.Float(desc=desc, argstr='-eff %f')
    desc = 'Outlier rejection for multi CL volumes (enter z-score threshold \
(e.g. 2.5)) [off].'
    out = traits.Float(desc=desc, argstr='-out %f')

    # *** PCASL options (Choose those suitable for the model!):
    pld = traits.Float(desc='Post Labelling Delay [2000ms].', argstr='-PLD %f')
    ldd = traits.Float(desc='Labelling Duration [1800ms].', argstr='-LDD %f')
    # Fixed help text: the closing bracket was missing.
    desc = 'Difference in labelling delay per slice [0.0 ms/slice].'
    dpld = traits.Float(desc=desc, argstr='-dPLD %f')

    # *** PASL options (Choose those suitable for the model!):
    t_inv1 = traits.Float(desc='Saturation pulse time [800ms].',
                          argstr='-Tinv1 %f')
    t_inv2 = traits.Float(desc='Inversion time [2000ms].', argstr='-Tinv2 %f')
    desc = 'Difference in inversion time per slice [0ms/slice].'
    dt_inv2 = traits.Float(desc=desc, argstr='-dTinv2 %f')

    # *** Other experimental assumptions:
    # Not programmed yet
    # desc = 'Slope and intercept for Arterial Transit Time.'
    # ATT = traits.Float(desc=desc, argstr='-ATT %f')
    gm_t1 = traits.Float(desc='T1 of GM [1150ms].', argstr='-gmT1 %f')
    gm_plasma = traits.Float(desc='Plasma/GM water partition [0.95ml/g].',
                             argstr='-gmL %f')
    gm_ttt = traits.Float(desc='Time to GM [ATT+0ms].', argstr='-gmTTT %f')
    wm_t1 = traits.Float(desc='T1 of WM [800ms].', argstr='-wmT1 %f')
    wm_plasma = traits.Float(desc='Plasma/WM water partition [0.82ml/g].',
                             argstr='-wmL %f')
    wm_ttt = traits.Float(desc='Time to WM [ATT+0ms].', argstr='-wmTTT %f')

    # *** Segmentation options:
    desc = 'Filename of the 4D segmentation (in ASL space) for L/T1 \
estimation and PV correction {WM,GM,CSF}.'
    seg = traits.File(exists=True, argstr='-seg %s', desc=desc)
    desc = 'Use sigmoid to estimate L from T1: L(T1|gmL,wmL) [Off].'
    sig = traits.Bool(desc=desc, argstr='-sig')
    desc = 'Simple PV correction (CBF=vg*CBFg + vw*CBFw, with CBFw=f*CBFg) \
[0.25].'
    pv0 = traits.Int(desc=desc, argstr='-pv0 %d')
    pv2 = traits.Int(desc='In plane PV kernel size [3x3].', argstr='-pv2 %d')
    pv3 = traits.Tuple(traits.Int, traits.Int, traits.Int,
                       desc='3D kernel size [3x3x1].',
                       argstr='-pv3 %d %d %d')
    desc = 'Multiply CBF by this value (e.g. if CL are mislabelled use -1.0).'
    mul = traits.Float(desc=desc, argstr='-mul %f')
    # BUG FIX: argstr was '-sig' (copy-paste from `sig` above), so setting
    # mulgm silently toggled sigmoid fitting instead of -mulgm.
    mulgm = traits.Bool(desc='Multiply CBF by segmentation [Off].',
                        argstr='-mulgm')
    # Fixed help text typo: 'O.05' -> '0.05'.
    desc = 'Set PV threshold for switching off LSQR [0.05].'
    pv_threshold = traits.Bool(desc=desc, argstr='-pvthreshold')
    segstyle = traits.Bool(desc='Set CBF as [gm,wm] not [wm,gm].',
                           argstr='-segstyle')
class FitAslOutputSpec(TraitedSpec):
    """ Output Spec for FitAsl. """
    # ``desc`` is rebound before each trait purely to keep the lines short.
    desc = 'Filename of the Cerebral Blood Flow map (in ml/100g/min).'
    cbf_file = traits.File(exists=True, desc=desc)
    desc = 'Filename of the CBF error map.'
    error_file = traits.File(exists=True, desc=desc)
    desc = 'Filename of the synthetic ASL data.'
    syn_file = traits.File(exists=True, desc=desc)
class FitAsl(NiftyFitCommand):
    """Interface for executable fit_asl from Niftyfit platform.

    Use NiftyFit to perform ASL fitting.

    ASL fitting routines (following EU Cost Action White Paper recommendations)
    Fits Cerebral Blood Flow maps in the first instance.

    `Source code <https://cmiclab.cs.ucl.ac.uk/CMIC/NiftyFit-Release>`_

    Examples
    --------
    >>> from nipype.interfaces import niftyfit
    >>> node = niftyfit.FitAsl()
    >>> node.inputs.source_file = 'asl.nii.gz'
    >>> node.cmdline # doctest: +ALLOW_UNICODE
    'fit_asl -source asl.nii.gz -cbf asl_cbf.nii.gz -error asl_error.nii.gz \
-syn asl_syn.nii.gz'
    """
    # Resolve the fit_asl binary, honouring the NIFTYFITDIR env override.
    _cmd = get_custom_path('fit_asl', env_dir='NIFTYFITDIR')
    input_spec = FitAslInputSpec
    output_spec = FitAslOutputSpec
    # Suffix appended to generated output filenames.
    _suffix = '_fit_asl'
| {
"content_hash": "82a3f2af79b785e378e2c20ed5161f70",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 79,
"avg_line_length": 46.10322580645161,
"alnum_prop": 0.6055135740274279,
"repo_name": "mick-d/nipype",
"id": "f0cc8bc19bfdcbfa6e295d89762fd905347e640c",
"size": "7261",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nipype/interfaces/niftyfit/asl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "1854"
},
{
"name": "Matlab",
"bytes": "1999"
},
{
"name": "Python",
"bytes": "4607773"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
import sys
import time
import random
import zmq
def main(myself, others):
    """Broker peering prototype: gossip worker availability over PUB/SUB.

    ``myself`` is this broker's name; ``others`` are the peer broker names.
    (Python 2 code: note the print statements.)
    """
    print "Hello, I am", myself
    context = zmq.Context()
    # State Back-End: we PUBlish our own state here.
    statebe = context.socket(zmq.PUB)
    # State Front-End: we SUBscribe to every peer's state.
    statefe = context.socket(zmq.SUB)
    statefe.setsockopt(zmq.SUBSCRIBE, '')
    bind_address = "ipc://%s-state.ipc" % myself
    statebe.bind(bind_address)
    for other in others:
        statefe.connect("ipc://%s-state.ipc" % other)
        time.sleep(1.0)
    poller = zmq.Poller()
    poller.register(statefe, zmq.POLLIN)
    while True:
        ########## Solution with poll() ##########
        socks = dict(poller.poll(1000))
        try:
            # Handle incoming status message
            if socks[statefe] == zmq.POLLIN:
                msg = statefe.recv_multipart()
                print 'Received:', msg
        except KeyError:
            # Poll timed out with no peer traffic: advertise ourselves.
            # Send our address and a random value
            # for worker availability
            msg = [bind_address, str(random.randrange(1, 10))]
            statebe.send_multipart(msg)
        ##################################
        ######### Solution with select() #########
        # pollin, pollout, pollerr = zmq.select([statefe], [], [], 1)
        #
        # if pollin and pollin[0] == statefe:
        #     # Handle incoming status message
        #     msg = statefe.recv_multipart()
        #     print 'Received:', msg
        #
        # else:
        #     # Send our address and a random value
        #     # for worker availability
        #     msg = [bind_address, str(random.randrange(1, 10))]
        #     statebe.send_multipart(msg)
        ##################################
        # NOTE(review): unregistering the socket on every pass looks wrong —
        # after the first iteration poll() can never report statefe, and a
        # second unregister would raise KeyError. Placement is ambiguous in
        # the original formatting; verify against upstream before relying
        # on this loop.
        poller.unregister(statefe)
        time.sleep(1.0)
if __name__ == '__main__':
    # Usage: peering.py <myself> <peer_1> ... <peer_N>
    # Peers are optional; only this broker's own name is required.
    if len(sys.argv) >= 2:
        main(myself=sys.argv[1], others=sys.argv[2:])
    else:
        print "Usage: peering.py <myself> <peer_1> ... <peer_N>"
        sys.exit(1)
| {
"content_hash": "c4b371b79e9d0c446a4c4718e46313e1",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 68,
"avg_line_length": 26.208333333333332,
"alnum_prop": 0.5357710651828299,
"repo_name": "krattai/noo-ebs",
"id": "044bbef4cc9f054f9d89682ac23f2facc5397e4f",
"size": "2038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/zeroMQ-guide2/examples/Python/peering1.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ActionScript",
"bytes": "2384"
},
{
"name": "Assembly",
"bytes": "4590201"
},
{
"name": "Awk",
"bytes": "396"
},
{
"name": "Batchfile",
"bytes": "19241"
},
{
"name": "C",
"bytes": "15563482"
},
{
"name": "C#",
"bytes": "265955"
},
{
"name": "C++",
"bytes": "691846"
},
{
"name": "CMake",
"bytes": "104078"
},
{
"name": "CSS",
"bytes": "72772"
},
{
"name": "DTrace",
"bytes": "1258"
},
{
"name": "Erlang",
"bytes": "4424888"
},
{
"name": "GAP",
"bytes": "1517"
},
{
"name": "HTML",
"bytes": "65461"
},
{
"name": "Haxe",
"bytes": "6282"
},
{
"name": "Java",
"bytes": "6899"
},
{
"name": "JavaScript",
"bytes": "494026"
},
{
"name": "Lua",
"bytes": "274783"
},
{
"name": "M4",
"bytes": "107581"
},
{
"name": "Makefile",
"bytes": "143161"
},
{
"name": "NSIS",
"bytes": "27658"
},
{
"name": "Objective-C",
"bytes": "13321"
},
{
"name": "PHP",
"bytes": "43263"
},
{
"name": "PLpgSQL",
"bytes": "80625"
},
{
"name": "Perl",
"bytes": "344546"
},
{
"name": "Python",
"bytes": "500718"
},
{
"name": "QML",
"bytes": "150"
},
{
"name": "QMake",
"bytes": "3028"
},
{
"name": "Ragel",
"bytes": "46210"
},
{
"name": "Roff",
"bytes": "120721"
},
{
"name": "Ruby",
"bytes": "121530"
},
{
"name": "Shell",
"bytes": "293349"
},
{
"name": "TeX",
"bytes": "788237"
},
{
"name": "XSLT",
"bytes": "1459"
},
{
"name": "Yacc",
"bytes": "5139"
}
],
"symlink_target": ""
} |
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
.. spelling::
ip
"""
from typing import Optional
from proxy.http import httpStatusCodes
from proxy.http.proxy import HttpProxyBasePlugin
from proxy.http.parser import HttpParser
from proxy.http.exception import HttpRequestRejected
class MyProxyPlugin(HttpProxyBasePlugin):
    """Drop traffic by inspecting incoming client IP address."""

    def before_upstream_connection(
            self, request: HttpParser,
    ) -> Optional[HttpParser]:
        # This plugin only makes sense for TCP clients with a peer address.
        assert not self.flags.unix_socket_path and self.client.addr
        # Reject loopback clients (IPv4 and IPv6) with a 418.
        rejected_ips = ('127.0.0.1', '::1')
        if self.client.addr[0] in rejected_ips:
            raise HttpRequestRejected(
                status_code=httpStatusCodes.I_AM_A_TEAPOT,
                reason=b'I\'m a tea pot',
            )
        # Anything else is allowed through unchanged.
        return request
| {
"content_hash": "8d5874028ed32dc631b4fccecab7c2ed",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 86,
"avg_line_length": 31.852941176470587,
"alnum_prop": 0.6740535549399815,
"repo_name": "abhinavsingh/proxy.py",
"id": "26566757c9fb543226c1658e65dcd5379a2fff4f",
"size": "1113",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "skeleton/app/plugins/my_proxy_plugin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "891"
},
{
"name": "Dockerfile",
"bytes": "1222"
},
{
"name": "HTML",
"bytes": "3454"
},
{
"name": "JavaScript",
"bytes": "2260"
},
{
"name": "Jupyter Notebook",
"bytes": "29773"
},
{
"name": "Makefile",
"bytes": "6399"
},
{
"name": "Procfile",
"bytes": "387"
},
{
"name": "Python",
"bytes": "680280"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "Shell",
"bytes": "19211"
},
{
"name": "TypeScript",
"bytes": "23642"
}
],
"symlink_target": ""
} |
import webob
from nova.compute import vm_states
import nova.context
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_instance
class CommonMixin(object):
    """Shared plumbing for the admin-action API tests.

    Subclasses are expected to provide ``self.app`` and a mox-mocked
    ``self.compute_api`` before using the helpers below.
    """

    def setUp(self):
        super(CommonMixin, self).setUp()
        self.compute_api = None
        self.context = nova.context.RequestContext('fake', 'fake')

    def _make_request(self, url, body):
        """POST ``body`` as JSON to the v3 API ``url``; return the response."""
        req = webob.Request.blank('/v3' + url)
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.content_type = 'application/json'
        return req.get_response(self.app)

    def _stub_instance_get(self, uuid=None):
        """Expect one compute_api.get() returning an ACTIVE fake instance."""
        if uuid is None:
            uuid = uuidutils.generate_uuid()
        instance = fake_instance.fake_instance_obj(
            self.context, id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
            task_state=None, launched_at=timeutils.utcnow())
        self.compute_api.get(self.context, uuid, expected_attrs=None,
                             want_objects=True).AndReturn(instance)
        return instance

    def _stub_instance_get_failure(self, exc_info, uuid=None):
        """Expect one compute_api.get() that raises ``exc_info``."""
        if uuid is None:
            uuid = uuidutils.generate_uuid()
        self.compute_api.get(self.context, uuid, expected_attrs=None,
                             want_objects=True).AndRaise(exc_info)
        return uuid

    def _test_non_existing_instance(self, action, body_map=None):
        """Assert ``action`` on an unknown instance returns 404."""
        # BUG FIX: body_map defaulted to None but was dereferenced
        # unconditionally below, raising AttributeError when omitted.
        if body_map is None:
            body_map = {}
        uuid = uuidutils.generate_uuid()
        self._stub_instance_get_failure(
            exception.InstanceNotFound(instance_id=uuid), uuid=uuid)
        self.mox.ReplayAll()
        res = self._make_request('/servers/%s/action' % uuid,
                                 {action: body_map.get(action)})
        self.assertEqual(404, res.status_int)
        # Do these here instead of tearDown because this method is called
        # more than once for the same test case
        self.mox.VerifyAll()
        self.mox.UnsetStubs()

    def _test_action(self, action, body=None, method=None,
                     compute_api_args_map=None):
        """Assert ``action`` succeeds (202) against an ACTIVE instance."""
        # BUG FIX: mutable default argument ({}) replaced with None.
        if compute_api_args_map is None:
            compute_api_args_map = {}
        if method is None:
            method = action
        instance = self._stub_instance_get()
        args, kwargs = compute_api_args_map.get(action, ((), {}))
        getattr(self.compute_api, method)(self.context, instance, *args,
                                          **kwargs)
        self.mox.ReplayAll()
        res = self._make_request('/servers/%s/action' % instance.uuid,
                                 {action: body})
        self.assertEqual(202, res.status_int)
        # Do these here instead of tearDown because this method is called
        # more than once for the same test case
        self.mox.VerifyAll()
        self.mox.UnsetStubs()

    def _test_invalid_state(self, action, method=None, body_map=None,
                            compute_api_args_map=None):
        """Assert ``action`` returns 409 when the instance state is invalid."""
        if method is None:
            method = action
        if body_map is None:
            body_map = {}
        if compute_api_args_map is None:
            compute_api_args_map = {}
        instance = self._stub_instance_get()
        args, kwargs = compute_api_args_map.get(action, ((), {}))
        getattr(self.compute_api, method)(self.context, instance,
                                          *args, **kwargs).AndRaise(
            exception.InstanceInvalidState(
                attr='vm_state', instance_uuid=instance.uuid,
                state='foo', method=method))
        self.mox.ReplayAll()
        res = self._make_request('/servers/%s/action' % instance.uuid,
                                 {action: body_map.get(action)})
        self.assertEqual(409, res.status_int)
        self.assertIn("Cannot \'%s\' while instance" % action, res.body)
        # Do these here instead of tearDown because this method is called
        # more than once for the same test case
        self.mox.VerifyAll()
        self.mox.UnsetStubs()

    def _test_locked_instance(self, action, method=None, body=None,
                              compute_api_args_map=None):
        """Assert ``action`` returns 409 when the instance is locked."""
        # BUG FIX: mutable default argument ({}) replaced with None.
        if compute_api_args_map is None:
            compute_api_args_map = {}
        if method is None:
            method = action
        instance = self._stub_instance_get()
        args, kwargs = compute_api_args_map.get(action, ((), {}))
        getattr(self.compute_api, method)(self.context, instance, *args,
                                          **kwargs).AndRaise(
            exception.InstanceIsLocked(instance_uuid=instance.uuid))
        self.mox.ReplayAll()
        res = self._make_request('/servers/%s/action' % instance.uuid,
                                 {action: body})
        self.assertEqual(409, res.status_int)
        # Do these here instead of tearDown because this method is called
        # more than once for the same test case
        self.mox.VerifyAll()
        self.mox.UnsetStubs()
class CommonTests(CommonMixin, test.NoDBTestCase):
    """Drives the CommonMixin helpers over collections of server actions.

    BUG FIX: all map parameters previously used mutable ``{}`` defaults,
    which are shared between calls; they now default to None and are
    normalised inside each method.
    """

    def _test_actions(self, actions, method_translations=None, body_map=None,
                      args_map=None):
        method_translations = method_translations or {}
        body_map = body_map or {}
        args_map = args_map or {}
        for action in actions:
            method = method_translations.get(action)
            body = body_map.get(action)
            self.mox.StubOutWithMock(self.compute_api, method or action)
            self._test_action(action, method=method, body=body,
                              compute_api_args_map=args_map)
            # Re-mock this.
            self.mox.StubOutWithMock(self.compute_api, 'get')

    def _test_actions_with_non_existed_instance(self, actions, body_map=None):
        body_map = body_map or {}
        for action in actions:
            self._test_non_existing_instance(action,
                                             body_map=body_map)
            # Re-mock this.
            self.mox.StubOutWithMock(self.compute_api, 'get')

    def _test_actions_raise_conflict_on_invalid_state(
            self, actions, method_translations=None, body_map=None,
            args_map=None):
        method_translations = method_translations or {}
        body_map = body_map or {}
        args_map = args_map or {}
        for action in actions:
            method = method_translations.get(action)
            self.mox.StubOutWithMock(self.compute_api, method or action)
            self._test_invalid_state(action, method=method,
                                     body_map=body_map,
                                     compute_api_args_map=args_map)
            # Re-mock this.
            self.mox.StubOutWithMock(self.compute_api, 'get')

    def _test_actions_with_locked_instance(self, actions,
                                           method_translations=None,
                                           body_map=None, args_map=None):
        method_translations = method_translations or {}
        body_map = body_map or {}
        args_map = args_map or {}
        for action in actions:
            method = method_translations.get(action)
            body = body_map.get(action)
            self.mox.StubOutWithMock(self.compute_api, method or action)
            self._test_locked_instance(action, method=method, body=body,
                                       compute_api_args_map=args_map)
            # Re-mock this.
            self.mox.StubOutWithMock(self.compute_api, 'get')
| {
"content_hash": "342b8ee34cbc61a63cfb653b42916bbc",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 77,
"avg_line_length": 40.825581395348834,
"alnum_prop": 0.5693534605525491,
"repo_name": "ewindisch/nova",
"id": "39c12b639322a9304f911b65e00f2826774d240b",
"size": "7614",
"binary": false,
"copies": "10",
"ref": "refs/heads/docker-ci",
"path": "nova/tests/api/openstack/compute/plugins/v3/admin_only_action_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13736252"
},
{
"name": "Shell",
"bytes": "17451"
}
],
"symlink_target": ""
} |
"""
The FormattedDecimal class allows locale aware formatting of currency
amounts as decimals. This is intended to be used internally to format
total values and extra values automatically when printed.
"""
from django.conf import settings
from decimal import Decimal
from django.utils.safestring import mark_safe
from django.utils import numberformat
from django.utils.encoding import smart_str
# babel has more comprehensive localisation, if it's available, we'll use that.
try:
    import babel.numbers
except ImportError:
    # Fall back to Django's own locale data (see get_format below).
    babel = None

# Default markup for a money amount; each %(...)s key is filled from
# FormattedDecimal.elements.
DEFAULT_DECIMAL_HTML = ('<span class="money">'
                        '<span class="currency">%(curr_sym)s</span>%(major)s'
                        '<span class="cents">%(decimal_sym)s%(minor)s</span>'
                        '</span>')
class FormattedDecimal(Decimal):
    """ A formatted decimal according to the given locale and currency. """

    def __new__(cls, value=0, context=None, summary_instance=None):
        """ Create a new immutable Decimal object, adding our custom
            attributes.
        """
        # Decimal is immutable, so customisation happens in __new__,
        # not __init__.
        obj = Decimal.__new__(cls, value, context)
        obj.initialise_context(summary_instance)
        return obj

    def initialise_context(self, summary_instance):
        # Pull locale/currency/markup preferences from the summary's _meta.
        self.locale = summary_instance._meta.locale or settings.LANGUAGE_CODE
        self.currency = summary_instance._meta.currency
        self.HTML = summary_instance._meta.decimal_html or DEFAULT_DECIMAL_HTML
        if babel:
            # Babel wants a Locale object; "-" is the separator in our codes.
            self.locale = babel.core.Locale.parse(self.locale, sep="-")

    @property
    def html(self):
        """ Provides a marked up version of the figure which can be easily
            styled. eg 123.45 will be marked up as:
            <span class="money">
            <span class="currency">$</span>123<span class="cents">.45</span>
            </span>
        """
        return mark_safe(self.HTML % self.elements)

    @property
    def elements(self):
        """ Returns a dict of the various elements for localised display.
            eg en_AU:
                value "1,234.56"
                curr_sym "$"
                decimal_sym "."
                major "1,234"
                minor "56"
            Additional items may be present if available.
        """
        # If babel is available, use its comprehensive locale skills
        if babel:
            value = self
            curr_sym = self.locale.currency_symbols.get(self.currency,
                                                        self.currency)
            decimal_sym = self.locale.number_symbols.get('decimal', ".")
            value = babel.numbers.format_decimal(value, "#,##0.00",
                                                 locale=self.locale)
        # If no babel, use Django's built-in locale data
        else:
            value = "%.02f" % self
            curr_sym = self.currency
            # get_format is the module-level copy defined later in this file.
            decimal_sym = get_format('DECIMAL_SEPARATOR', self.locale)
            group_sym = get_format('THOUSAND_SEPARATOR', self.locale)
            num_group = get_format('NUMBER_GROUPING', self.locale)
            value = numberformat.format(value, decimal_sym, None,
                                        num_group, group_sym)
        major, minor = value.rsplit(decimal_sym, 1)
        # NOTE: deliberately returns *all* locals (including self); the HTML
        # template picks only the keys it needs.
        return locals().copy()

    @property
    def raw(self):
        """ Return the decimal unformatted, as the Decimal class would have it.
        """
        return super(FormattedDecimal, self).__unicode__()

    def __unicode__(self):
        """ Return a formatted version of the Decimal, using the preset locale
            and currency.
        """
        if babel and self.currency:
            return babel.numbers.format_currency(self, self.currency,
                                                 locale=self.locale)
        else:
            return self.elements['value']
#
# DJANGO REPRODUCTIONS
# The following two functions are an edited copy of the Django locale
# handling functions, which hardcodes system locale.
# These can be replaced if/when Django makes its version more flexible.
#
from django.conf import settings
from django.utils.translation import get_language, to_locale, check_for_language
from django.utils.importlib import import_module
def get_format_modules(reverse=False, locale=None):
    """
    Returns a list of the format modules found in the project and Django
    for the given locale (the original docstring said "iterator", but a
    list has always been returned).
    """
    modules = []
    # Bail out early when no locale was given, the active language is
    # unsupported, or localisation is disabled entirely.
    if not locale or not check_for_language(get_language()) \
            or not settings.USE_L10N:
        return modules
    # DEAD CODE REMOVED: an `if not locale: locale = get_language()`
    # fallback used to sit here, but the guard above already returns for a
    # falsy locale, so it could never run.
    locale = to_locale(locale)
    if settings.FORMAT_MODULE_PATH:
        format_locations = [settings.FORMAT_MODULE_PATH + '.%s']
    else:
        format_locations = []
    format_locations.append('django.conf.locale.%s')
    # Try the fully-qualified locale first, then its bare language prefix.
    for location in format_locations:
        for l in (locale, locale.split('_')[0]):
            try:
                mod = import_module('.formats', location % l)
            except ImportError:
                pass
            else:
                # Don't return duplicates
                if mod not in modules:
                    modules.append(mod)
    if reverse:
        modules.reverse()
    return modules
def get_format(format_type, locale=None):
    """
    For a specific format type, returns the format for the current
    language (locale), defaults to the format in the settings.
    format_type is the name of the format, e.g. 'DATE_FORMAT'
    """
    format_type = smart_str(format_type)
    if settings.USE_L10N:
        # Probe each candidate format module; the first one defining the
        # attribute wins.
        missing = object()
        for module in get_format_modules(locale=locale):
            value = getattr(module, format_type, missing)
            if value is not missing:
                return value
    # Fall back to the project-wide setting of the same name.
    return getattr(settings, format_type)
| {
"content_hash": "d7503d851fb0a819e4d585c0330d9dba",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 80,
"avg_line_length": 36.11656441717791,
"alnum_prop": 0.5846781042976049,
"repo_name": "willhardy/Roll-Your-Own",
"id": "d2e34f4824a5d7a60896181d42381055edc947b7",
"size": "5933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rollyourown/commerce/utils/formatting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "113029"
}
],
"symlink_target": ""
} |
from .file_utils import fetch_or_upload_file_links, fetch_or_upload_html_links
import time as _time
from installed_clients.baseclient import ServerError as _DFUError
from uuid import uuid4
""" Utilities for creating reports using DataFileUtil """
def create_report(params, dfu):
    """
    Create a simple report
    :param params: see the KIDL spec for the create() parameters
    :param dfu: instance of DataFileUtil
    :return: report data
    """
    name = "report_" + str(uuid4())
    ws_id = _get_workspace_id(dfu, params)
    # Merge the caller's report over empty defaults.
    data = {
        'objects_created': [],
        'text_message': '',
    }
    data.update(params['report'])
    saved = _save_object(dfu, {
        'id': ws_id,
        'objects': [{
            'type': 'KBaseReport.Report',
            'data': data,
            'name': name,
            'meta': {},
            'hidden': 1,
        }],
    })
    return {'ref': _get_object_ref(saved), 'name': name}
def create_extended(params, dfu, templater):
    """
    Create an extended report.
    Scratch paths given instead of shock_ids are uploaded to shock.
    :param params: see the KIDL spec for create_extended_report() parameters
    :param dfu: instance of DataFileUtil
    :return: uploaded report data - {'ref': r, 'name': n}
    """
    # Resolve/upload attachments first (see ./file_utils.py).
    files = fetch_or_upload_file_links(
        dfu, params.get('file_links', []), templater)
    html_files = fetch_or_upload_html_links(
        dfu, params.get('html_links', []), templater)
    data = {
        'text_message': params.get('message'),
        'file_links': files,
        'html_links': html_files,
        'warnings': params.get('warnings', []),
        'direct_html': params.get('direct_html'),
        'direct_html_link_index': params.get('direct_html_link_index'),
        'objects_created': params.get('objects_created', []),
        'html_window_height': params.get('html_window_height'),
        'summary_window_height': params.get('summary_window_height'),
    }
    name = params.get('report_object_name', 'report_' + str(uuid4()))
    ws_id = _get_workspace_id(dfu, params)
    saved = _save_object(dfu, {
        'id': ws_id,
        'objects': [{
            'type': 'KBaseReport.Report',
            'data': data,
            'name': name,
            'meta': {},
            'hidden': 1,
        }],
    })
    return {'ref': _get_object_ref(saved), 'name': name}
def _get_workspace_id(dfu, params):
"""
Get the workspace ID from the params, which may either have 'workspace_id'
or 'workspace_name'. Workspace ID is immutable so should take precedence.
"""
if 'workspace_id' in params:
return params.get('workspace_id')
return dfu.ws_name_to_id(params['workspace_name'])
def _get_object_ref(obj):
""" Get the reference string from an uploaded dfu object """
return str(obj[6]) + '/' + str(obj[0]) + '/' + str(obj[4])
def _save_object(dfu, params):
""" Save an object with DFU using error handling """
try:
return dfu.save_objects(params)[0]
except _DFUError as err:
print(f'{_time.time()} DataFileUtil exception: {err}')
raise err
except Exception as err:
print(f'{_time.time()} Unexpected DataFileUtil exception: {err}')
raise err
| {
"content_hash": "b02730dceb66e41243ea297436003543",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 89,
"avg_line_length": 34.26923076923077,
"alnum_prop": 0.6066217732884399,
"repo_name": "msneddon/KBaseReport",
"id": "fed98b4114b1065107473eff6a9f85291b73d49f",
"size": "3588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/KBaseReport/utils/report_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3020"
},
{
"name": "Java",
"bytes": "16444"
},
{
"name": "JavaScript",
"bytes": "3825"
},
{
"name": "Makefile",
"bytes": "2718"
},
{
"name": "Perl",
"bytes": "12359"
},
{
"name": "Python",
"bytes": "40360"
},
{
"name": "Ruby",
"bytes": "1974"
},
{
"name": "Shell",
"bytes": "1209"
}
],
"symlink_target": ""
} |
"""Leetcode 131. Palindrome Partitioning
Medium
URL: https://leetcode.com/problems/palindrome-partitioning/
Given a string s, partition s such that
every substring of the partition is a palindrome.
Return all possible palindrome partitioning of s.
Example:
Input: "aab"
Output:
[
["aa","b"],
["a","a","b"]
]
"""
class SolutionBacktrack(object):
    """Enumerate all palindrome partitionings of a string via DFS."""

    def _backtrack(self, result, temp, s, start):
        """Extend the partial partition ``temp``, which covers s[:start]."""
        n = len(s)
        if start == n:
            # Whole string consumed: record a copy of the current partition.
            result.append(list(temp))
            return None
        # Try every prefix s[start:end] of the remaining suffix.
        for end in range(start + 1, n + 1):
            piece = s[start:end]
            if piece != piece[::-1]:
                # Not a palindrome: this cut cannot start a valid partition.
                continue
            temp.append(piece)
            self._backtrack(result, temp, s, end)
            # Undo the choice before trying a longer prefix.
            temp.pop()

    def partition(self, s):
        """
        :type s: str
        :rtype: List[List[str]]

        Time complexity: O(n*2^n), where n is the length of s.
        Space complexity: O(n).
        """
        partitions = []
        self._backtrack(partitions, [], s, 0)
        return partitions
def main():
    # Smoke test for the solution (Python 2: print statement).
    s = "aab"
    print SolutionBacktrack().partition(s)


if __name__ == '__main__':
    main()
| {
"content_hash": "f9e6a7dc98889271b9a901190921328f",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 74,
"avg_line_length": 23.35483870967742,
"alnum_prop": 0.5497237569060773,
"repo_name": "bowen0701/algorithms_data_structures",
"id": "661a80ea0dfab002e2aa4d5ca99c81a1a1d17931",
"size": "1448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lc0131_palindrome_partitioning.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "108750"
}
],
"symlink_target": ""
} |
"""Base classes for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import warnings
from collections import defaultdict
import platform
import inspect
import re
import numpy as np
from . import __version__
from ._config import get_config
from .utils import _IS_32BIT
from .utils._tags import (
_DEFAULT_TAGS,
_safe_tags,
)
from .utils.validation import check_X_y
from .utils.validation import check_array
from .utils._estimator_html_repr import estimator_html_repr
from .utils.validation import _deprecate_positional_args
@_deprecate_positional_args
def clone(estimator, *, safe=True):
    """Constructs a new unfitted estimator with the same parameters.

    A deep copy of the model's parameters is made without copying any
    attached data, yielding a new, never-fitted estimator. When the
    estimator's `random_state` is an integer (or when it has no
    `random_state` at all) the clone is exact; otherwise it is a
    statistical clone and may yield different results. See
    :ref:`randomness` for details.

    Parameters
    ----------
    estimator : {list, tuple, set} of estimator instance or a single \
            estimator instance
        The estimator or group of estimators to be cloned.

    safe : bool, default=True
        If safe is False, clone will fall back to a deep copy on objects
        that are not estimators.
    """
    container_type = type(estimator)
    # Collections of estimators are cloned element-wise into the same
    # container type.  XXX: dictionaries are intentionally not handled.
    if container_type in (list, tuple, set, frozenset):
        return container_type(clone(item, safe=safe) for item in estimator)
    if not hasattr(estimator, 'get_params') or isinstance(estimator, type):
        if not safe:
            return copy.deepcopy(estimator)
        if isinstance(estimator, type):
            raise TypeError("Cannot clone object. "
                            "You should provide an instance of "
                            "scikit-learn estimator instead of a class.")
        raise TypeError("Cannot clone object '%s' (type %s): "
                        "it does not seem to be a scikit-learn "
                        "estimator as it does not implement a "
                        "'get_params' method."
                        % (repr(estimator), type(estimator)))
    cls = estimator.__class__
    # Recursively clone each constructor parameter, then rebuild.
    fresh_params = {key: clone(value, safe=False)
                    for key, value in
                    estimator.get_params(deep=False).items()}
    new_object = cls(**fresh_params)
    # Quick sanity check: the constructor must store every parameter as-is.
    stored_params = new_object.get_params(deep=False)
    for key, passed in fresh_params.items():
        if passed is not stored_params[key]:
            raise RuntimeError('Cannot clone object %s, as the constructor '
                               'either does not set or modifies parameter %s' %
                               (estimator, key))
    return new_object
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params : dict
The dictionary to pretty print
offset : int, default=0
The offset in characters to add at the begin of each line.
printer : callable, default=repr
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(params.items())):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
class BaseEstimator:
"""Base class for all estimators in scikit-learn.
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
    """Get parameter names for the estimator"""
    # fetch the constructor or the original constructor before
    # deprecation wrapping if any
    init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
    if init is object.__init__:
        # No explicit constructor to introspect
        return []
    # introspect the constructor arguments to find the model parameters
    # to represent
    init_signature = inspect.signature(init)
    # Consider the constructor parameters excluding 'self'
    parameters = [p for p in init_signature.parameters.values()
                  if p.name != 'self' and p.kind != p.VAR_KEYWORD]
    for p in parameters:
        # *args makes parameters unidentifiable, so it is forbidden.
        if p.kind == p.VAR_POSITIONAL:
            raise RuntimeError("scikit-learn estimators should always "
                               "specify their parameters in the signature"
                               " of their __init__ (no varargs)."
                               " %s with constructor %s doesn't "
                               " follow this convention."
                               % (cls, init_signature))
    # Extract and sort argument names excluding 'self'
    return sorted([p.name for p in parameters])
def get_params(self, deep=True):
    """
    Get parameters for this estimator.

    Parameters
    ----------
    deep : bool, default=True
        If True, will return the parameters for this estimator and
        contained subobjects that are estimators.

    Returns
    -------
    params : dict
        Parameter names mapped to their values.
    """
    out = dict()
    for key in self._get_param_names():
        value = getattr(self, key)
        # Recurse into sub-estimators, exposing their parameters under the
        # '<component>__<parameter>' naming convention.
        if deep and hasattr(value, 'get_params'):
            deep_items = value.get_params().items()
            out.update((key + '__' + k, val) for k, val in deep_items)
        out[key] = value
    return out
def set_params(self, **params):
    """
    Set the parameters of this estimator.

    The method works on simple estimators as well as on nested objects
    (such as :class:`~sklearn.pipeline.Pipeline`). The latter have
    parameters of the form ``<component>__<parameter>`` so that it's
    possible to update each component of a nested object.

    Parameters
    ----------
    **params : dict
        Estimator parameters.

    Returns
    -------
    self : estimator instance
        Estimator instance.
    """
    if not params:
        # Simple optimization to gain speed (inspect is slow)
        return self
    valid_params = self.get_params(deep=True)
    nested_params = defaultdict(dict)  # grouped by prefix
    for key, value in params.items():
        # Split off a '<component>__' prefix, if any.
        key, delim, sub_key = key.partition('__')
        if key not in valid_params:
            raise ValueError('Invalid parameter %s for estimator %s. '
                             'Check the list of available parameters '
                             'with `estimator.get_params().keys()`.' %
                             (key, self))
        if delim:
            # Defer nested assignment to the sub-estimator's set_params.
            nested_params[key][sub_key] = value
        else:
            setattr(self, key, value)
            valid_params[key] = value
    for key, sub_params in nested_params.items():
        valid_params[key].set_params(**sub_params)
    return self
def __repr__(self, N_CHAR_MAX=700):
    """Return a pretty-printed, possibly ellipsis-truncated repr."""
    # N_CHAR_MAX is the (approximate) maximum number of non-blank
    # characters to render. We pass it as an optional parameter to ease
    # the tests.
    from .utils._pprint import _EstimatorPrettyPrinter
    N_MAX_ELEMENTS_TO_SHOW = 30  # number of elements to show in sequences
    # use ellipsis for sequences with a lot of elements
    pp = _EstimatorPrettyPrinter(
        compact=True, indent=1, indent_at_name=True,
        n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW)
    repr_ = pp.pformat(self)
    # Use bruteforce ellipsis when there are a lot of non-blank characters
    n_nonblank = len(''.join(repr_.split()))
    if n_nonblank > N_CHAR_MAX:
        lim = N_CHAR_MAX // 2  # apprx number of chars to keep on both ends
        regex = r'^(\s*\S){%d}' % lim
        # The regex '^(\s*\S){%d}' % n
        # matches from the start of the string until the nth non-blank
        # character:
        # - ^ matches the start of string
        # - (pattern){n} matches n repetitions of pattern
        # - \s*\S matches a non-blank char following zero or more blanks
        left_lim = re.match(regex, repr_).end()
        right_lim = re.match(regex, repr_[::-1]).end()
        if '\n' in repr_[left_lim:-right_lim]:
            # The left side and right side aren't on the same line.
            # To avoid weird cuts, e.g.:
            # categoric...ore',
            # we need to start the right side with an appropriate newline
            # character so that it renders properly as:
            # categoric...
            # handle_unknown='ignore',
            # so we add [^\n]*\n which matches until the next \n
            regex += r'[^\n]*\n'
            right_lim = re.match(regex, repr_[::-1]).end()
        ellipsis = '...'
        if left_lim + len(ellipsis) < len(repr_) - right_lim:
            # Only add ellipsis if it results in a shorter repr
            repr_ = repr_[:left_lim] + '...' + repr_[-right_lim:]
    return repr_
def __getstate__(self):
    """Return the instance state for pickling.

    For estimators defined inside scikit-learn, the running library
    version is recorded so that ``__setstate__`` can warn when the
    pickle is later loaded under a different version.
    """
    try:
        state = super().__getstate__()
    except AttributeError:
        # No parent __getstate__; snapshot the instance dict instead.
        state = self.__dict__.copy()
    if type(self).__module__.startswith('sklearn.'):
        # Stamp the state with the current version for __setstate__.
        return {**state, '_sklearn_version': __version__}
    return state
def __setstate__(self, state):
    """Restore pickled *state*, warning on sklearn version mismatch."""
    is_sklearn_estimator = type(self).__module__.startswith('sklearn.')
    if is_sklearn_estimator:
        pickle_version = state.pop("_sklearn_version", "pre-0.18")
        if pickle_version != __version__:
            # The pickle was produced by a different scikit-learn release.
            warnings.warn(
                "Trying to unpickle estimator {0} from version {1} when "
                "using version {2}. This might lead to breaking code or "
                "invalid results. Use at your own risk.".format(
                    self.__class__.__name__, pickle_version, __version__),
                UserWarning)
    try:
        super().__setstate__(state)
    except AttributeError:
        # No parent __setstate__; restore the instance dict directly.
        self.__dict__.update(state)
def _more_tags(self):
    # Base set of estimator tags; subclasses override _more_tags to
    # amend these (collected across the MRO by _get_tags).
    return _DEFAULT_TAGS
def _get_tags(self):
collected_tags = {}
for base_class in reversed(inspect.getmro(self.__class__)):
if hasattr(base_class, '_more_tags'):
# need the if because mixins might not have _more_tags
# but might do redundant work in estimators
# (i.e. calling more tags on BaseEstimator multiple times)
more_tags = base_class._more_tags(self)
collected_tags.update(more_tags)
return collected_tags
def _check_n_features(self, X, reset):
"""Set the `n_features_in_` attribute, or check against it.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
reset : bool
If True, the `n_features_in_` attribute is set to `X.shape[1]`.
If False and the attribute exists, then check that it is equal to
`X.shape[1]`. If False and the attribute does *not* exist, then
the check is skipped.
.. note::
It is recommended to call reset=True in `fit` and in the first
call to `partial_fit`. All other methods that validate `X`
should set `reset=False`.
"""
n_features = X.shape[1]
if reset:
self.n_features_in_ = n_features
return
if not hasattr(self, "n_features_in_"):
# Skip this check if the expected number of expected input features
# was not recorded by calling fit first. This is typically the case
# for stateless transformers.
return
if n_features != self.n_features_in_:
raise ValueError(
f"X has {n_features} features, but {self.__class__.__name__} "
f"is expecting {self.n_features_in_} features as input.")
def _validate_data(self, X, y='no_validation', reset=True,
                   validate_separately=False, **check_params):
    """Validate input data and set or check the `n_features_in_` attribute.

    Parameters
    ----------
    X : {array-like, sparse matrix, dataframe} of shape \
            (n_samples, n_features)
        The input samples.
    y : array-like of shape (n_samples,), default='no_validation'
        The targets.

        - If `None`, `check_array` is called on `X`. If the estimator's
          requires_y tag is True, then an error will be raised.
        - If `'no_validation'`, `check_array` is called on `X` and the
          estimator's requires_y tag is ignored. This is a default
          placeholder and is never meant to be explicitly set.
        - Otherwise, both `X` and `y` are checked with either `check_array`
          or `check_X_y` depending on `validate_separately`.

    reset : bool, default=True
        Whether to reset the `n_features_in_` attribute.
        If False, the input will be checked for consistency with data
        provided when reset was last True.

        .. note::
           It is recommended to call reset=True in `fit` and in the first
           call to `partial_fit`. All other methods that validate `X`
           should set `reset=False`.

    validate_separately : False or tuple of dicts, default=False
        Only used if y is not None.
        If False, call validate_X_y(). Else, it must be a tuple of kwargs
        to be used for calling check_array() on X and y respectively.

    **check_params : kwargs
        Parameters passed to :func:`sklearn.utils.check_array` or
        :func:`sklearn.utils.check_X_y`. Ignored if validate_separately
        is not False.

    Returns
    -------
    out : {ndarray, sparse matrix} or tuple of these
        The validated input. A tuple is returned if `y` is not None.
    """
    if y is None:
        # Unsupervised call: only X is validated, unless the estimator
        # declares (via tags) that it cannot work without a target.
        if self._get_tags()['requires_y']:
            raise ValueError(
                f"This {self.__class__.__name__} estimator "
                f"requires y to be passed, but the target y is None."
            )
        X = check_array(X, **check_params)
        out = X
    elif isinstance(y, str) and y == 'no_validation':
        # Default placeholder: caller did not pass y at all.
        X = check_array(X, **check_params)
        out = X
    else:
        if validate_separately:
            # We need this because some estimators validate X and y
            # separately, and in general, separately calling check_array()
            # on X and y isn't equivalent to just calling check_X_y()
            # :(
            check_X_params, check_y_params = validate_separately
            X = check_array(X, **check_X_params)
            y = check_array(y, **check_y_params)
        else:
            X, y = check_X_y(X, y, **check_params)
        out = X, y

    # Only track/verify the feature count when X is guaranteed 2-D.
    if check_params.get('ensure_2d', True):
        self._check_n_features(X, reset=reset)

    return out
@property
def _repr_html_(self):
    """HTML representation of estimator.

    This is redundant with the logic of `_repr_mimebundle_`. The latter
    should be favored in the long term; `_repr_html_` is only implemented
    for consumers who do not interpret `_repr_mimebundle_`.
    """
    if get_config()["display"] == 'diagram':
        # Return the bound method so Jupyter can call it lazily.
        return self._repr_html_inner
    raise AttributeError("_repr_html_ is only defined when the "
                         "'display' configuration option is set to "
                         "'diagram'")
def _repr_html_inner(self):
    """Indirection target returned by the `_repr_html_` property.

    Exists so that ``hasattr(estimator, "_repr_html_")`` reflects the
    ``get_config()["display"]`` setting.
    """
    html = estimator_html_repr(self)
    return html
def _repr_mimebundle_(self, **kwargs):
    """Mime bundle used by jupyter kernels to display estimator."""
    bundle = {"text/plain": repr(self)}
    # Only attach the HTML diagram when the user opted in via config.
    if get_config()["display"] == 'diagram':
        bundle["text/html"] = estimator_html_repr(self)
    return bundle
class ClassifierMixin:
    """Mixin class for all classifiers in scikit-learn."""

    _estimator_type = "classifier"

    def score(self, X, y, sample_weight=None):
        """
        Return the mean accuracy on the given test data and labels.

        In multi-label classification, this is the subset accuracy
        which is a harsh metric since you require for each sample that
        each label set be correctly predicted.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Test samples.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            True labels for `X`.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Returns
        -------
        score : float
            Mean accuracy of ``self.predict(X)`` wrt. `y`.
        """
        # Imported lazily to avoid a circular import at module load time.
        from .metrics import accuracy_score
        y_pred = self.predict(X)
        return accuracy_score(y, y_pred, sample_weight=sample_weight)

    def _more_tags(self):
        # Classifiers need a target vector during fit.
        return {'requires_y': True}
class RegressorMixin:
    """Mixin class for all regression estimators in scikit-learn."""

    _estimator_type = "regressor"

    def score(self, X, y, sample_weight=None):
        """Return the coefficient of determination :math:`R^2` of the
        prediction.

        The coefficient :math:`R^2` is defined as :math:`(1 - \\frac{u}{v})`,
        where :math:`u` is the residual sum of squares ``((y_true - y_pred)
        ** 2).sum()`` and :math:`v` is the total sum of squares ``((y_true -
        y_true.mean()) ** 2).sum()``. The best possible score is 1.0 and it
        can be negative (because the model can be arbitrarily worse). A
        constant model that always predicts the expected value of `y`,
        disregarding the input features, would get a :math:`R^2` score of
        0.0.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Test samples. For some estimators this may be a precomputed
            kernel matrix or a list of generic objects instead with shape
            ``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``
            is the number of samples used in the fitting for the estimator.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            True values for `X`.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Returns
        -------
        score : float
            :math:`R^2` of ``self.predict(X)`` wrt. `y`.

        Notes
        -----
        The :math:`R^2` score used when calling ``score`` on a regressor uses
        ``multioutput='uniform_average'`` from version 0.23 to keep consistent
        with default value of :func:`~sklearn.metrics.r2_score`.
        This influences the ``score`` method of all the multioutput
        regressors (except for
        :class:`~sklearn.multioutput.MultiOutputRegressor`).
        """
        # Imported lazily to avoid a circular import at module load time.
        from .metrics import r2_score
        return r2_score(y, self.predict(X), sample_weight=sample_weight)

    def _more_tags(self):
        # Regressors need a target vector during fit.
        return {'requires_y': True}
class ClusterMixin:
    """Mixin class for all cluster estimators in scikit-learn."""

    _estimator_type = "clusterer"

    def fit_predict(self, X, y=None):
        """
        Perform clustering on `X` and returns cluster labels.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Input data.
        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        labels : ndarray of shape (n_samples,), dtype=np.int64
            Cluster labels.
        """
        # Default path: fit, then read the labels_ attribute populated by
        # fit. Algorithms with a cheaper combined path override this.
        self.fit(X)
        return self.labels_

    def _more_tags(self):
        # Clusterers make no promise about preserving the input dtype.
        return {"preserves_dtype": []}
class BiclusterMixin:
    """Mixin class for all bicluster estimators in scikit-learn."""

    @property
    def biclusters_(self):
        """Convenient way to get row and column indicators together.

        Returns the ``rows_`` and ``columns_`` members.
        """
        return self.rows_, self.columns_

    def get_indices(self, i):
        """Row and column indices of the `i`'th bicluster.

        Only works if ``rows_`` and ``columns_`` attributes exist.

        Parameters
        ----------
        i : int
            The index of the cluster.

        Returns
        -------
        row_ind : ndarray, dtype=np.intp
            Indices of rows in the dataset that belong to the bicluster.
        col_ind : ndarray, dtype=np.intp
            Indices of columns in the dataset that belong to the bicluster.
        """
        # rows_[i] / columns_[i] are boolean indicator vectors.
        return np.nonzero(self.rows_[i])[0], np.nonzero(self.columns_[i])[0]

    def get_shape(self, i):
        """Shape of the `i`'th bicluster.

        Parameters
        ----------
        i : int
            The index of the cluster.

        Returns
        -------
        n_rows : int
            Number of rows in the bicluster.
        n_cols : int
            Number of columns in the bicluster.
        """
        # Use a distinct loop name so the bicluster index is not shadowed.
        return tuple(len(ind) for ind in self.get_indices(i))

    def get_submatrix(self, i, data):
        """Return the submatrix corresponding to bicluster `i`.

        Parameters
        ----------
        i : int
            The index of the cluster.
        data : array-like of shape (n_samples, n_features)
            The data.

        Returns
        -------
        submatrix : ndarray of shape (n_rows, n_cols)
            The submatrix corresponding to bicluster `i`.

        Notes
        -----
        Works with sparse matrices. Only works if ``rows_`` and
        ``columns_`` attributes exist.
        """
        from .utils.validation import check_array
        data = check_array(data, accept_sparse='csr')
        row_ind, col_ind = self.get_indices(i)
        # Outer-product indexing selects the bicluster's sub-grid.
        return data[row_ind[:, np.newaxis], col_ind]
class TransformerMixin:
    """Mixin class for all transformers in scikit-learn."""

    def fit_transform(self, X, y=None, **fit_params):
        """
        Fit to data, then transform it.

        Fits transformer to `X` and `y` with optional parameters `fit_params`
        and returns a transformed version of `X`.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Input samples.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
                default=None
            Target values (None for unsupervised transformations).
        **fit_params : dict
            Additional fit parameters.

        Returns
        -------
        X_new : ndarray array of shape (n_samples, n_features_new)
            Transformed array.
        """
        # Default implementation chains fit and transform; estimators with
        # a cheaper combined path override this method.
        if y is None:
            fitted = self.fit(X, **fit_params)      # arity-1 (unsupervised)
        else:
            fitted = self.fit(X, y, **fit_params)   # arity-2 (supervised)
        return fitted.transform(X)
class DensityMixin:
    """Mixin class for all density estimators in scikit-learn."""

    _estimator_type = "DensityEstimator"

    def score(self, X, y=None):
        """Return the score of the model on the data `X`.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Test samples.
        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        score : float
        """
        # Placeholder default; concrete density estimators override this.
        return None
class OutlierMixin:
    """Mixin class for all outlier detection estimators in scikit-learn."""

    _estimator_type = "outlier_detector"

    def fit_predict(self, X, y=None):
        """Perform fit on X and returns labels for X.

        Returns -1 for outliers and 1 for inliers.

        Parameters
        ----------
        X : {array-like, sparse matrix, dataframe} of shape \
                (n_samples, n_features)
        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        y : ndarray of shape (n_samples,)
            1 for inliers, -1 for outliers.
        """
        # Transductive detectors (e.g. LocalOutlierFactor) override this.
        fitted = self.fit(X)
        return fitted.predict(X)
class MetaEstimatorMixin:
    """Mixin class for all meta estimators in scikit-learn."""
    # The docstring previously appeared AFTER _required_parameters, making it
    # a dead string expression (cls.__doc__ stayed None). It must be the
    # first statement in the class body to be recognized.

    # Constructor parameters that have no default and must be supplied.
    _required_parameters = ["estimator"]
class MultiOutputMixin:
    """Mixin to mark estimators that support multioutput."""

    def _more_tags(self):
        # Advertise multioutput support via the estimator-tags system.
        return {'multioutput': True}
class _UnstableArchMixin:
    """Mark estimators that are non-deterministic on 32bit or PowerPC."""

    def _more_tags(self):
        # PowerPC machine strings start with 'ppc' or 'powerpc'.
        is_ppc = platform.machine().startswith(('ppc', 'powerpc'))
        return {'non_deterministic': (_IS_32BIT or is_ppc)}
def is_classifier(estimator):
    """Return True if the given estimator is (probably) a classifier.

    Parameters
    ----------
    estimator : object
        Estimator object to test.

    Returns
    -------
    out : bool
        True if estimator is a classifier and False otherwise.
    """
    # Duck-typed check based on the conventional _estimator_type marker.
    estimator_type = getattr(estimator, "_estimator_type", None)
    return estimator_type == "classifier"
def is_regressor(estimator):
    """Return True if the given estimator is (probably) a regressor.

    Parameters
    ----------
    estimator : estimator instance
        Estimator object to test.

    Returns
    -------
    out : bool
        True if estimator is a regressor and False otherwise.
    """
    # Duck-typed check based on the conventional _estimator_type marker.
    estimator_type = getattr(estimator, "_estimator_type", None)
    return estimator_type == "regressor"
def is_outlier_detector(estimator):
    """Return True if the given estimator is (probably) an outlier detector.

    Parameters
    ----------
    estimator : estimator instance
        Estimator object to test.

    Returns
    -------
    out : bool
        True if estimator is an outlier detector and False otherwise.
    """
    # Duck-typed check based on the conventional _estimator_type marker.
    estimator_type = getattr(estimator, "_estimator_type", None)
    return estimator_type == "outlier_detector"
def _is_pairwise(estimator):
    """Returns True if estimator is pairwise.

    - If the `_pairwise` attribute and the tag are present and consistent,
      then use the value and not issue a warning.
    - If the `_pairwise` attribute and the tag are present and not
      consistent, use the `_pairwise` value and issue a deprecation
      warning.
    - If only the `_pairwise` attribute is present and it is not False,
      issue a deprecation warning and use the `_pairwise` value.

    Parameters
    ----------
    estimator : object
        Estimator object to test.

    Returns
    -------
    out : bool
        True if the estimator is pairwise and False otherwise.
    """
    # Probe attribute and tag with deprecation warnings suppressed; the
    # _pairwise attribute itself may be a deprecated property.
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=FutureWarning)
        has_attr = hasattr(estimator, '_pairwise')
        attr_value = getattr(estimator, '_pairwise', False)
        tag_value = _safe_tags(estimator, key="pairwise")

    if not has_attr:
        # use pairwise tag when the attribute is not present
        return tag_value

    if attr_value != tag_value:
        warnings.warn("_pairwise was deprecated in 0.24 and will be "
                      "removed in 0.26. Set the estimator tags of your "
                      "estimator instead", FutureWarning)
    return attr_value
| {
"content_hash": "b102678e36387a8e5f00d8815c01c5df",
"timestamp": "",
"source": "github",
"line_count": 854,
"max_line_length": 79,
"avg_line_length": 35.441451990632316,
"alnum_prop": 0.5720421581260118,
"repo_name": "ndingwall/scikit-learn",
"id": "3d49ec4fe96f69d57508307f39fb31e87246989e",
"size": "30267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "416843"
},
{
"name": "C++",
"bytes": "140261"
},
{
"name": "Makefile",
"bytes": "1630"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6794973"
},
{
"name": "Shell",
"bytes": "13442"
}
],
"symlink_target": ""
} |
import os
import time
import Queue
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler, DirModifiedEvent
# Alias this as this can be called during interpreter shutdown
_Empty = Queue.Empty
class EventHandler(FileSystemEventHandler):
    """Translate watchdog events into ``(timestamp, type, path)`` items.

    Items are either pushed onto an internal queue (default) or handed to
    a user-supplied callback; directory-modified events are ignored.
    """

    def __init__(self, callback=None):
        if callback is None:
            self.queue = Queue.Queue()
            self.callback = self.queue.put
        else:
            self.queue = None
            self.callback = callback

    def on_any_event(self, event):
        if isinstance(event, DirModifiedEvent):
            # Directory-level noise; skip.
            return
        item = (time.time(), event.event_type, event.src_path)
        if self.queue is None:
            # Callback mode: the tuple is unpacked into three arguments.
            self.callback(*item)
        else:
            self.queue.put(item)
class BasicWatcher(object):
    """Watch a set of paths recursively and iterate file-system events."""

    def __init__(self, paths, callback=None):
        self.event_handler = EventHandler(callback=callback)
        self.observer = Observer()
        for watched_path in paths:
            self.observer.schedule(self.event_handler, watched_path,
                                   recursive=True)
        # Daemonize so the observer thread never blocks interpreter exit.
        self.observer.setDaemon(True)

    def is_interesting(self, time, event_type, path):
        # Subclasses override this to filter events; accept everything.
        return True

    def __iter__(self):
        if self.event_handler.queue is None:
            raise RuntimeError('watcher used with callback')
        while True:
            try:
                item = self.event_handler.queue.get(timeout=1)
            except _Empty:
                # Timed out waiting for an event; poll again.
                continue
            if self.is_interesting(*item):
                yield item
class Watcher(BasicWatcher):
    """BasicWatcher that skips uninteresting sources and build output."""

    def __init__(self, env, output_path=None):
        BasicWatcher.__init__(self, paths=[env.root_path])
        self.env = env
        self.output_path = output_path

    def is_interesting(self, time, event_type, path):
        # Ignore files the environment deems uninteresting (e.g. temp files).
        if self.env.is_uninteresting_source_name(os.path.basename(path)):
            return False
        # Ignore anything inside the build output directory.
        out = self.output_path
        if out is not None and os.path.abspath(path).startswith(out):
            return False
        return True
def watch(env):
    """Returns a generator of file system events in the environment."""
    watcher = Watcher(env)
    watcher.observer.start()
    try:
        for event in watcher:
            yield event
    except KeyboardInterrupt:
        # Ctrl-C: stop the (daemonized) observer thread and end iteration.
        watcher.observer.stop()
| {
"content_hash": "d6225ea435b3125db37fc50e82a60f9b",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 76,
"avg_line_length": 29.379746835443036,
"alnum_prop": 0.6092201637225334,
"repo_name": "lektor/lektor-archive",
"id": "ad0bdec210cff34768b01269c2b8097c07e57865",
"size": "2321",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lektor/watcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "49"
},
{
"name": "CSS",
"bytes": "12727"
},
{
"name": "HTML",
"bytes": "4512"
},
{
"name": "JavaScript",
"bytes": "135255"
},
{
"name": "Makefile",
"bytes": "5009"
},
{
"name": "Python",
"bytes": "345731"
},
{
"name": "Shell",
"bytes": "8114"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from .models import Stop, Agency
from . import forms
from traffic_stops import base_views
class Home(base_views.Home):
    # Maryland landing page: agency search plus a "find a stop" form.
    form_class = forms.AgencySearchForm
    template_name = 'md.html'
    success_url = 'md:agency-detail'

    def get_context_data(self, **kwargs):
        # Expose the stop-search form alongside the agency search form.
        context = super(Home, self).get_context_data(**kwargs)
        context['find_a_stop_form'] = forms.SearchForm()
        return context
def search(request):
    """Render the stop-search page, filtering stops when the form is valid.

    A GET with query parameters is treated as a submitted search; an empty
    GET renders a blank form with no results.
    """
    query = None
    if request.method == 'GET' and request.GET:
        form = forms.SearchForm(request.GET)
        if form.is_valid():
            query = form.get_query()
    else:
        form = forms.SearchForm()
    # No valid query -> empty queryset rather than all stops.
    stops = Stop.objects.filter(query) if query else Stop.objects.none()
    context = {
        'form': form,
        'stops': stops,
    }
    return render(request, 'md/search.html', context)
class AgencyList(base_views.AgencyList):
    # Maryland agency listing backed by the MD-specific Agency model.
    model = Agency
    form_class = forms.AgencySearchForm
    success_url = 'md:agency-detail'
class AgencyDetail(base_views.AgencyDetail):
    # Detail view for one MD agency; stop_model drives the stop statistics.
    model = Agency
    stop_model = Stop
| {
"content_hash": "7e2cacd6e8f4dfe8cd12e2f6a82151b3",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 62,
"avg_line_length": 24.382978723404257,
"alnum_prop": 0.6387434554973822,
"repo_name": "OpenDataPolicingNC/Traffic-Stops",
"id": "8b293d06725fa83b541fec5ae83b0d901daf8aa9",
"size": "1146",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "md/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14249"
},
{
"name": "Dockerfile",
"bytes": "1114"
},
{
"name": "Elixir",
"bytes": "40"
},
{
"name": "HCL",
"bytes": "2989"
},
{
"name": "HTML",
"bytes": "112505"
},
{
"name": "JavaScript",
"bytes": "99858"
},
{
"name": "Jupyter Notebook",
"bytes": "130974"
},
{
"name": "Makefile",
"bytes": "2662"
},
{
"name": "PLpgSQL",
"bytes": "11003"
},
{
"name": "Python",
"bytes": "261956"
},
{
"name": "SaltStack",
"bytes": "10013"
},
{
"name": "Scheme",
"bytes": "20526"
},
{
"name": "Shell",
"bytes": "250814"
}
],
"symlink_target": ""
} |
from usched import Sched, wait
from pushbutton import Pushbutton, descriptor
def stop(fTim, objSch):  # Stop the scheduler after fTim seconds
    # Coroutine run on the scheduler: sleep fTim seconds, then halt it.
    yield from wait(fTim)
    objSch.stop()
def x5print(*args):
    """Callback fired when pushbutton X5 opens (demo of argument passing)."""
    label = args[0]
    print("X5 released " + label)
def x6print(*args):
    """Callback fired when pushbutton X6 closes."""
    label = args[0]
    print("X6 pressed " + label)
def yellowlong(*args):
    """Callback fired on a long press of the yellow button."""
    prefix = args[0]
    print(prefix + " yellow")
def yellowdbl(*args):
    """Callback fired on a double click of the yellow button."""
    prefix = args[0]
    print(prefix + " yellow")
def test(duration=0):  # responds to switches
    """Run the pushbutton demo; duration=0 runs until interrupted."""
    if duration:
        print("Tests pushbuttons for {:5d} seconds".format(duration))
    else:
        print("Tests pushbuttons")
    sched = Sched()
    # X5 fires its callback when the contact opens (button released).
    Pushbutton(sched, 'X5', descriptor,
               false_func=x5print, false_func_args=("Red",))
    # X6 fires on close, with extra long-press and double-click handlers.
    Pushbutton(sched, 'X6', descriptor,
               true_func=x6print, true_func_args=("Yellow",),
               long_func=yellowlong, long_func_args=("Long press",),
               double_func=yellowdbl, double_func_args=("Double click",))
    if duration:
        # Schedule a coroutine that halts the scheduler after `duration` s.
        sched.add_thread(stop(duration, sched))
    sched.run()
test(20)
| {
"content_hash": "877a1c7847cc2a9916a39bbfbf87ff83",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 99,
"avg_line_length": 32.972972972972975,
"alnum_prop": 0.5967213114754099,
"repo_name": "peterhinch/Micropython-scheduler",
"id": "d9af9455fb748920495b6c01024a31ad63e467ec",
"size": "1359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pushbuttontest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61878"
}
],
"symlink_target": ""
} |
from flask import Flask, g
from flask_wtf.csrf import CsrfProtect
from flask_login import current_user, LoginManager
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from werkzeug.security import generate_password_hash
from server.database.models import Base, Entry, Feed, User
from .main.views import main
# Application setup: instance-relative config lets deployment-specific
# settings in the instance folder override the packaged defaults.
app = Flask(__name__, instance_relative_config=True)
app.config.from_object("config")
app.config.from_pyfile("config.py")
app.register_blueprint(main)
# Resolve the SQLite database path relative to this package directory.
app.config["DATABASE_PATH"] = "sqlite:///{}".format(
    os.path.join(os.path.dirname(__file__), app.config["DATABASE_PATH"]))
csrf = CsrfProtect()
csrf.init_app(app)
login_manager = LoginManager()
login_manager.login_view = "main.login"
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: map a session user id to a User row via the
    # request-scoped session opened by init_db.
    return g.db.query(User).get(int(user_id))

# Deferred: the login manager is attached on the first request (init_app).
app.login_manager = None
@app.before_first_request
def init_app():
    # One-time setup on the first request: ensure the schema exists and
    # provision the single configured user account.
    init_db(create=False)
    # NOTE(review): every process start wipes all users and recreates the
    # one from config — confirm this single-user reset is intentional.
    g.db.query(User).delete()
    password = generate_password_hash(app.config["PASSWORD"])
    user = User(app.config["USER_NAME"], password)
    g.db.add(user)
    g.db.commit()
    g.user = current_user
    if app.login_manager is None:
        # Attach the login manager exactly once.
        app.login_manager = login_manager
        app.login_manager.init_app(app)
@app.before_request
def init_db(create=False):
    # Open a fresh SQLAlchemy session for each request and stash it on g.
    engine = create_engine(app.config["DATABASE_PATH"])
    Session = sessionmaker(bind=engine)
    g.db = Session()
    # First run (or explicit create=True): rebuild the schema from scratch,
    # using the presence of the "feed" table as the sentinel.
    if create or not engine.dialect.has_table(engine.connect(), "feed"):
        Entry.__table__.drop(engine, checkfirst=True)
        Feed.__table__.drop(engine, checkfirst=True)
        Base.metadata.create_all(engine, checkfirst=True)
@app.teardown_appcontext
def shutdown_session(exception=None):
    """Close the request-scoped SQLAlchemy session, if one was opened.

    Registered as an app-context teardown handler, so it must tolerate
    contexts where ``init_db`` never ran — hence the defensive getattr.
    """
    db = getattr(g, 'db', None)
    if db is not None:
        # Close via the local reference; the previous code re-read g.db
        # here, which defeated the purpose of the defensive lookup above.
        db.close()
| {
"content_hash": "f2043b38d5dd23c1a95ae9ac78f0a5c9",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 73,
"avg_line_length": 28.967741935483872,
"alnum_prop": 0.7071269487750557,
"repo_name": "flacerdk/smoke-signal",
"id": "ee42921edce5c025d5016a07bde94c770b28b671",
"size": "1796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3638"
},
{
"name": "HTML",
"bytes": "1680"
},
{
"name": "JavaScript",
"bytes": "36667"
},
{
"name": "Python",
"bytes": "36540"
}
],
"symlink_target": ""
} |
from organizations.models import Organization
from organizations.views.base import ViewFactory
from organizations.views.mixins import AdminRequiredMixin
from organizations.views.mixins import MembershipRequiredMixin
from organizations.views.mixins import OwnerRequiredMixin
# Factory producing the concrete base views bound to the Organization model.
bases = ViewFactory(Organization)


class OrganizationList(bases.OrganizationList):
    pass


class OrganizationCreate(bases.OrganizationCreate):
    """
    Allows any user to create a new organization.
    """
    pass


class OrganizationDetail(MembershipRequiredMixin, bases.OrganizationDetail):
    # Visible only to members of the organization.
    pass


class OrganizationUpdate(AdminRequiredMixin, bases.OrganizationUpdate):
    # Editable only by organization admins.
    pass


class OrganizationDelete(OwnerRequiredMixin, bases.OrganizationDelete):
    # Deletion restricted to the organization owner.
    pass


class OrganizationUserList(MembershipRequiredMixin, bases.OrganizationUserList):
    pass


class OrganizationUserDetail(AdminRequiredMixin, bases.OrganizationUserDetail):
    pass


class OrganizationUserUpdate(AdminRequiredMixin, bases.OrganizationUserUpdate):
    pass


class OrganizationUserCreate(AdminRequiredMixin, bases.OrganizationUserCreate):
    pass


class OrganizationUserRemind(AdminRequiredMixin, bases.OrganizationUserRemind):
    pass


class OrganizationUserDelete(AdminRequiredMixin, bases.OrganizationUserDelete):
    pass
| {
"content_hash": "5319b69c85a0f1d09c38b32d3fd79396",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 80,
"avg_line_length": 23.618181818181817,
"alnum_prop": 0.8252501924557352,
"repo_name": "bennylope/django-organizations",
"id": "06d5cc0e811901d0aa093da24cd8b0991a34c80f",
"size": "1324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/organizations/views/default.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "5416"
},
{
"name": "Makefile",
"bytes": "2171"
},
{
"name": "Python",
"bytes": "204918"
}
],
"symlink_target": ""
} |
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class PolicyClientConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for PolicyClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The ID of the target subscription. Required.
    :type subscription_id: str
    :keyword api_version: Api Version. Default value is "2015-10-01-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
        super(PolicyClientConfiguration, self).__init__(**kwargs)
        api_version = kwargs.pop("api_version", "2015-10-01-preview")  # type: str

        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = api_version
        # Default AAD scope for ARM; callers may override via kwargs.
        self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
        kwargs.setdefault("sdk_moniker", "mgmt-resource/{}".format(VERSION))
        self._configure(**kwargs)

    def _configure(self, **kwargs: Any) -> None:
        # Build the HTTP pipeline policies; caller-supplied policies in
        # kwargs take precedence over the defaults constructed here.
        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get("authentication_policy")
        # Only synthesize an auth policy when credentials exist and none
        # was explicitly provided by the caller.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
                self.credential, *self.credential_scopes, **kwargs
            )
| {
"content_hash": "b7da9af579b862b1500ebea652be3bfa",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 107,
"avg_line_length": 52.775862068965516,
"alnum_prop": 0.7174126102580856,
"repo_name": "Azure/azure-sdk-for-python",
"id": "418d621007352ededa0887314ac6eba2e40c5e92",
"size": "3529",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2015_10_01_preview/aio/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: f231d82b9b26
Revises: e68c4473c581
Create Date: 2018-03-20 19:47:54.991259
"""
from alembic import op
import sqlalchemy as sa
from superset.utils import generic_find_uq_constraint_name
# revision identifiers, used by Alembic.
revision = 'f231d82b9b26'
down_revision = 'e68c4473c581'

# Naming convention so the unique constraints get predictable names
# across database backends (used by batch_alter_table below).
conv = {
    'uq': 'uq_%(table_name)s_%(column_0_name)s',
}

# Table -> column that must be unique per datasource_id.
names = {
    'columns': 'column_name',
    'metrics': 'metric_name',
}
def upgrade():
    # Reduce the size of the metric_name column for constraint viability.
    with op.batch_alter_table('metrics', naming_convention=conv) as batch_op:
        batch_op.alter_column(
            'metric_name',
            existing_type=sa.String(length=512),
            type_=sa.String(length=255),
            existing_nullable=True,
        )

    # Add the missing uniqueness constraints: each column/metric name must
    # be unique within its datasource.
    for table, column in names.items():
        with op.batch_alter_table(table, naming_convention=conv) as batch_op:
            batch_op.create_unique_constraint(
                'uq_{}_{}'.format(table, column),
                [column, 'datasource_id'],
            )
def downgrade():
    bind = op.get_bind()
    insp = sa.engine.reflection.Inspector.from_engine(bind)

    # Restore the size of the metric_name column.
    with op.batch_alter_table('metrics', naming_convention=conv) as batch_op:
        batch_op.alter_column(
            'metric_name',
            existing_type=sa.String(length=255),
            type_=sa.String(length=512),
            existing_nullable=True,
        )

    # Remove the previous missing uniqueness constraints.
    for table, column in names.items():
        with op.batch_alter_table(table, naming_convention=conv) as batch_op:
            batch_op.drop_constraint(
                # Look up the real constraint name via reflection; fall back
                # to the conventional name if reflection finds nothing.
                generic_find_uq_constraint_name(
                    table,
                    {column, 'datasource_id'},
                    insp,
                ) or 'uq_{}_{}'.format(table, column),
                type_='unique',
            )
| {
"content_hash": "491fd45bead98f3ad93cc4fbc2772e2e",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 77,
"avg_line_length": 28.549295774647888,
"alnum_prop": 0.5974346324617662,
"repo_name": "timifasubaa/incubator-superset",
"id": "5d3acbef1d8130206d3c251cb7e4812c8ca5e048",
"size": "2027",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "superset/migrations/versions/f231d82b9b26_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "99374"
},
{
"name": "HTML",
"bytes": "100560"
},
{
"name": "JavaScript",
"bytes": "1563519"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1083714"
},
{
"name": "Shell",
"bytes": "1557"
},
{
"name": "Smarty",
"bytes": "1048"
}
],
"symlink_target": ""
} |
from flask import Flask, Response
from twilio.util import TwilioCapability
app = Flask(__name__)


@app.route('/token', methods=['GET'])
def get_capability_token():
    """Respond to incoming requests."""
    # Find these values at twilio.com/console
    # NOTE(review): these are placeholder credentials — in production, load
    # real values from the environment or a secrets store; never commit them.
    account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
    auth_token = 'your_auth_token'
    capability = TwilioCapability(account_sid, auth_token)

    # Twilio Application Sid
    application_sid = 'APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
    capability.allow_client_outgoing(application_sid)
    # Accept incoming calls addressed to the client identity "jenny".
    capability.allow_client_incoming('jenny')
    token = capability.generate()

    # Tokens are JWTs; serve with the matching mime type.
    return Response(token, mimetype='application/jwt')


if __name__ == "__main__":
    app.run(debug=True)
| {
"content_hash": "e5cd493c58438fb9f528ac5132a1aa55",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 58,
"avg_line_length": 27.22222222222222,
"alnum_prop": 0.7115646258503401,
"repo_name": "teoreteetik/api-snippets",
"id": "756bf932f6d23876e5fc4989e32712be52233cce",
"size": "735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/capability-token/capability-token.5.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.