text stringlengths 4 1.02M | meta dict |
|---|---|
import datetime as dt
import pytest
import pytz
import stix2
from .constants import CAMPAIGN_ID, CAMPAIGN_MORE_KWARGS, IDENTITY_ID
EXPECTED = """{
"type": "campaign",
"spec_version": "2.1",
"id": "campaign--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
"created_by_ref": "identity--311b2d2d-f010-4473-83ec-1edf84858f4c",
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"name": "Green Group Attacks Against Finance",
"description": "Campaign by Green Group against a series of targets in the financial services sector."
}"""
def test_campaign_example():
    """A Campaign built from kwargs serializes to the expected pretty JSON."""
    built = stix2.v21.Campaign(**CAMPAIGN_MORE_KWARGS)
    serialized = built.serialize(pretty=True)
    assert serialized == EXPECTED
@pytest.mark.parametrize(
    "data", [
        EXPECTED,
        {
            "type": "campaign",
            "spec_version": "2.1",
            "id": CAMPAIGN_ID,
            "created": "2016-04-06T20:03:00Z",
            "modified": "2016-04-06T20:03:00Z",
            "created_by_ref": IDENTITY_ID,
            "description": "Campaign by Green Group against a series of targets in the financial services sector.",
            "name": "Green Group Attacks Against Finance",
        },
    ],
)
def test_parse_campaign(data):
    """Parsing the JSON string or an equivalent dict yields the same Campaign."""
    campaign = stix2.parse(data, version="2.1")
    expected_time = dt.datetime(2016, 4, 6, 20, 3, 0, tzinfo=pytz.utc)
    assert campaign.type == 'campaign'
    assert campaign.spec_version == '2.1'
    assert campaign.id == CAMPAIGN_ID
    assert campaign.created == expected_time
    assert campaign.modified == expected_time
    assert campaign.created_by_ref == IDENTITY_ID
    assert campaign.description == "Campaign by Green Group against a series of targets in the financial services sector."
    assert campaign.name == "Green Group Attacks Against Finance"
# TODO: Add other examples
| {
"content_hash": "e076726749b1053e7e63385bfc10230c",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 118,
"avg_line_length": 32.26315789473684,
"alnum_prop": 0.6383904295812942,
"repo_name": "oasis-open/cti-python-stix2",
"id": "edc7d7779bed1d7c91d8d2d8916716f2148fc837",
"size": "1839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stix2/test/v21/test_campaign.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1737742"
}
],
"symlink_target": ""
} |
"""Forseti Server utilities."""
import logging
# pylint: disable=protected-access
def autoclose_stream(f):
    """Decorator that stops iterating a gRPC stream once the client closes.

    Args:
        f (func): generator function whose last positional argument is the
            gRPC context.

    Returns:
        func: wrapped generator function.
    """
    def wrapper(*args):
        """Yield from the wrapped generator, stopping on a closed client.

        Args:
            *args (list): arguments for the wrapped function; the gRPC
                context is expected to be the last one.

        Yields:
            object: items produced by the wrapped function.
        """
        grpc_context = args[-1]
        for item in f(*args):
            yield item
            # Stop producing as soon as the client side reports 'closed'.
            if grpc_context._state.client == 'closed':
                return
    return wrapper
def logcall(f, level=logging.CRITICAL):
    """Call logging decorator: logs entry and exit of the wrapped function.

    Args:
        f (func): The function to decorate
        level (int): the logging level for the enter/exit records
            (a ``logging`` constant, not a string)

    Returns:
        wrapper: wrapper of the decorator
    """
    import functools

    # functools.wraps preserves f's __name__/__doc__ on the wrapper so the
    # decorated function remains introspectable.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        """Implements the log wrapper including parameters and result.

        Args:
            *args: All args provided to the wrapped function.
            **kwargs: All kwargs provided to the wrapped function.

        Returns:
            object: the f execution result
        """
        logging.log(level, 'enter %s(%s)', f.__name__, args)
        result = f(*args, **kwargs)
        logging.log(level, 'exit %s(%s) -> %s', f.__name__, args, result)
        return result
    return wrapper
def mutual_exclusive(lock):
    """Mutex decorator factory: serializes calls to the wrapped function.

    Args:
        lock (object): lock guarding the wrapped function (anything with
            acquire()/release(), e.g. threading.Lock)

    Returns:
        object: decorator generator
    """
    def wrap(f):
        """Decorator generator.

        Args:
            f (func): the function to decorate

        Returns:
            func: the decorated function
        """
        def guarded(*args, **kw):
            """Run the wrapped function while holding the lock.

            Args:
                *args: All args provided to the wrapped function
                **kw: All kw provided to the wrapped function

            Returns:
                object: the execution results of f
            """
            lock.acquire()
            # Release in a finally so the lock is freed even if f raises.
            try:
                return f(*args, **kw)
            finally:
                lock.release()
        return guarded
    return wrap
def oneof(*args):
    """Return True iff exactly one of the arguments is truthy.

    Args:
        *args: arguments to check

    Returns:
        bool: True iff exactly one argument is truthy.
    """
    truthy_count = sum(1 for value in args if value)
    return truthy_count == 1
def full_to_type_name(full_resource_name):
    """Reduce a full resource name to its trailing type/name pair.

    Args:
        full_resource_name (str): the full_resource_name of the resource

    Returns:
        str: type_name of that resource
    """
    last_two = full_resource_name.split('/')[-2:]
    return '/'.join(last_two)
def to_full_resource_name(full_parent_name, resource_type_name):
    """Build a child's full resource name from its parent's and its type/name.

    Args:
        full_parent_name (str): the full_resource_name of the parent
        resource_type_name (str): the type/name of the child

    Returns:
        str: full_resource_name of the child (always with a trailing '/')
    """
    # The fake composite root parent is stripped from the child's full name.
    if full_parent_name == 'composite_root/root/':
        return '{}/'.format(resource_type_name)

    # Names with embedded '/'s collapse to their first and last components.
    name_parts = resource_type_name.split('/')
    if len(name_parts) > 2:
        resource_type_name = '/'.join((name_parts[0], name_parts[-1]))
    return '{}{}/'.format(full_parent_name, resource_type_name)
def to_type_name(resource_type, resource_name):
    """Join a resource type and name into the 'type/name' form.

    Args:
        resource_type (str): the resource type
        resource_name (str): the resource name

    Returns:
        str: type_name of the resource
    """
    return '%s/%s' % (resource_type, resource_name)
def split_type_name(resource_type_name):
    """Split a 'type/name' identifier on '/'.

    Args:
        resource_type_name (str): the type_name of the resource

    Returns:
        list: the '/'-separated components (type and name for well-formed
        input).
    """
    parts = resource_type_name.split('/')
    return parts
def resource_to_type_name(resource):
    """Return the type/name identifier stored on a resource dbo.

    Args:
        resource (object): the resource whose type_name is wanted

    Returns:
        str: type_name of the resource
    """
    type_name = resource.type_name
    return type_name
def get_sql_dialect(session):
    """Return the name of the SqlAlchemy dialect bound to a session.

    Args:
        session (object): the session to inspect

    Returns:
        str: name of the SqlAlchemy dialect
    """
    dialect = session.bind.dialect
    return dialect.name
def get_resources_from_full_name(full_name):
    """Parse resource info from full name.

    Args:
        full_name (str): Full name of the resource in hierarchical format.
            Example of a full_name:
            organization/88888/project/myproject/firewall/99999/
            full_name has a trailing / that needs to be removed.

    Yields:
        iterator: (resource_type, resource_id) string pairs, innermost
        resource first.
    """
    # Drop the empty element produced by the trailing '/'.
    parts = full_name.split('/')[:-1]
    # Walk (type, id) pairs from the innermost resource outwards; a
    # dangling unpaired component at the front is ignored.
    for idx in range(len(parts) - 2, -1, -2):
        yield parts[idx], parts[idx + 1]
def get_resource_id_from_type_name(type_name):
    """Extract the resource id from a 'type/name' identifier.

    Args:
        type_name (str): Type name.

    Returns:
        str: Resource id (the whole string when no '/' is present).
    """
    # rsplit handles both 'type/name' and bare names uniformly: with no
    # separator it returns the original string.
    return type_name.rsplit('/', 1)[-1]
| {
"content_hash": "8fec345ddd11f8b83319a34357421a7f",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 75,
"avg_line_length": 24.140625,
"alnum_prop": 0.5904530744336569,
"repo_name": "forseti-security/forseti-security",
"id": "716df67c64b1cf906cb821e06cb4fb147b2d7b55",
"size": "6794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google/cloud/forseti/services/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3652"
},
{
"name": "HCL",
"bytes": "37409"
},
{
"name": "JavaScript",
"bytes": "1833"
},
{
"name": "Jinja",
"bytes": "6379"
},
{
"name": "Makefile",
"bytes": "5427"
},
{
"name": "Open Policy Agent",
"bytes": "3600"
},
{
"name": "Python",
"bytes": "4140122"
},
{
"name": "Ruby",
"bytes": "37434"
},
{
"name": "Shell",
"bytes": "17062"
}
],
"symlink_target": ""
} |
"""
Test quoting of arguments to lldb commands
"""
from __future__ import print_function
import os
import re
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SettingsCommandTestCase(TestBase):
    """Verify lldb's quoting of launch arguments.

    Each test passes a differently-quoted argument string to
    ``process launch`` and compares the inferior's NUL-separated dump of
    its argv against the expected parse.
    """

    mydir = TestBase.compute_mydir(__file__)

    @classmethod
    def classCleanup(cls):
        """Cleanup the test byproducts."""
        cls.RemoveTempFile("stdout.txt")

    @no_debug_info_test
    def test_no_quote(self):
        # Unquoted words split on whitespace into three arguments.
        self.do_test_args("a b c", "a\0b\0c\0")

    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24557")
    @no_debug_info_test
    def test_single_quote(self):
        # Single quotes group the words into one argument.
        self.do_test_args("'a b c'", "a b c\0")

    @no_debug_info_test
    def test_double_quote(self):
        # Double quotes group the words into one argument.
        self.do_test_args('"a b c"', "a b c\0")

    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24557")
    @no_debug_info_test
    def test_single_quote_escape(self):
        # Backslash inside single quotes is taken literally.
        self.do_test_args("'a b\\' c", "a b\\\0c\0")

    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24557")
    @no_debug_info_test
    def test_double_quote_escape(self):
        # Backslash escapes a double quote inside double quotes.
        self.do_test_args('"a b\\" c"', 'a b" c\0')

    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24557")
    @no_debug_info_test
    def test_double_quote_escape2(self):
        # Escaped backslash ends the quoted region before the quote.
        self.do_test_args('"a b\\\\" c', 'a b\\\0c\0')

    @no_debug_info_test
    def test_single_in_double(self):
        # A single quote inside double quotes is literal.
        self.do_test_args('"a\'b"', "a'b\0")

    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24557")
    @no_debug_info_test
    def test_double_in_single(self):
        # A double quote inside single quotes is literal.
        self.do_test_args("'a\"b'", 'a"b\0')

    @no_debug_info_test
    def test_combined(self):
        # Adjacent quoted fragments concatenate into one argument.
        self.do_test_args('"a b"c\'d e\'', 'a bcd e\0')

    @no_debug_info_test
    def test_bare_single(self):
        # A backslash-escaped single quote outside quotes is literal.
        self.do_test_args("a\\'b", "a'b\0")

    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24557")
    @no_debug_info_test
    def test_bare_double(self):
        # A backslash-escaped double quote outside quotes is literal.
        self.do_test_args('a\\"b', 'a"b\0')

    def do_test_args(self, args_in, args_out):
        """Test argument parsing. Run the program with args_in. The program dumps its arguments
        to stdout. Compare the stdout with args_out."""
        self.buildDefault()
        exe = self.getBuildArtifact("a.out")
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
        # First launch argument tells the inferior where to write its dump.
        local_outfile = self.getBuildArtifact("output.txt")
        if lldb.remote_platform:
            remote_outfile = "output.txt"  # Relative to platform's PWD
        else:
            remote_outfile = local_outfile
        self.runCmd("process launch -- %s %s" %(remote_outfile, args_in))
        # When running remotely, pull the dump back to the host first.
        if lldb.remote_platform:
            src_file_spec = lldb.SBFileSpec(remote_outfile, False)
            dst_file_spec = lldb.SBFileSpec(local_outfile, True)
            lldb.remote_platform.Get(src_file_spec, dst_file_spec)
        with open(local_outfile, 'r') as f:
            output = f.read()
        self.RemoveTempFile(local_outfile)
        self.assertEqual(output, args_out)
| {
"content_hash": "3237411f468ffc7ede5bb4a343a79752",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 95,
"avg_line_length": 30.436893203883496,
"alnum_prop": 0.6220095693779905,
"repo_name": "youtube/cobalt",
"id": "9265b47962b1bcfc71a20c549759c7901312ecb3",
"size": "3135",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/llvm-project/lldb/packages/Python/lldbsuite/test/settings/quoting/TestQuoting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import mock
from nose.tools import * # noqa
from framework.auth.core import Auth
from framework.exceptions import PermissionsError
from tests.base import OsfTestCase
from tests.factories import UserFactory, ProjectFactory
from website.addons.zotero.tests.factories import ZoteroAccountFactory
from website.addons.zotero.provider import ZoteroCitationsProvider
from pyzotero.zotero_errors import UserNotAuthorised
from framework.exceptions import HTTPError
from website.addons.zotero import model
class ZoteroProviderTestCase(OsfTestCase):
    """Unit tests for the Zotero external-account provider."""

    def setUp(self):
        super(ZoteroProviderTestCase, self).setUp()
        self.provider = model.Zotero()

    def test_handle_callback(self):
        """OAuth callback data maps onto display_name / provider_id."""
        callback_data = {
            'userID': 'Fake User ID',
            'username': 'Fake User Name',
        }
        parsed = self.provider.handle_callback(callback_data)
        assert_equal(parsed.get('display_name'), 'Fake User Name')
        assert_equal(parsed.get('provider_id'), 'Fake User ID')

    def test_citation_lists(self):
        """Folders returned by the client appear as citation lists."""
        fake_folders = [
            {
                'data': {
                    'name': 'Fake Folder',
                    'key': 'Fake Key',
                }
            }
        ]
        fake_client = mock.Mock()
        fake_client.collections.return_value = fake_folders
        self.provider._client = fake_client
        self.provider.account = mock.Mock()
        extract = ZoteroCitationsProvider()._extract_folder
        folders = self.provider.citation_lists(extract)
        assert_equal(
            folders[1]['name'],
            'Fake Folder'
        )
        assert_equal(
            folders[1]['id'],
            'Fake Key'
        )
class ZoteroNodeSettingsTestCase(OsfTestCase):
    """Tests for ZoteroNodeSettings: auth wiring, folder selection, logging."""

    def setUp(self):
        # Fresh project with node settings; the project creator owns the
        # zotero user-settings addon.
        super(ZoteroNodeSettingsTestCase, self).setUp()
        self.node = ProjectFactory()
        self.node_settings = model.ZoteroNodeSettings(owner=self.node)
        self.node_settings.save()
        self.user = self.node.creator
        self.user_settings = self.user.get_or_add_addon('zotero')

    def tearDown(self):
        # Remove fixtures in dependency order so no dangling refs remain.
        super(ZoteroNodeSettingsTestCase, self).tearDown()
        self.user_settings.remove()
        self.node_settings.remove()
        self.node.remove()
        self.user.remove()

    @mock.patch('website.addons.zotero.model.Zotero')
    def test_api_not_cached(self, mock_zotero):
        # The first call to .api returns a new object
        api = self.node_settings.api
        mock_zotero.assert_called_once()
        assert_equal(api, mock_zotero())

    @mock.patch('website.addons.zotero.model.Zotero')
    def test_api_cached(self, mock_zotero):
        # Repeated calls to .api returns the same object
        self.node_settings._api = 'testapi'
        api = self.node_settings.api
        assert_false(mock_zotero.called)
        assert_equal(api, 'testapi')

    def test_set_auth(self):
        """set_auth links account + user settings and resets the folder id."""
        external_account = ZoteroAccountFactory()
        self.user.external_accounts.append(external_account)
        self.user.save()
        # this should be reset after the call
        self.node_settings.zotero_list_id = 'anything'
        self.node_settings.set_auth(
            external_account=external_account,
            user=self.user
        )
        # this instance is updated
        assert_equal(
            self.node_settings.external_account,
            external_account
        )
        assert_equal(
            self.node_settings.user_settings,
            self.user_settings
        )
        assert_is_none(
            self.node_settings.zotero_list_id
        )
        # user_settings was updated
        # TODO: The call to grant_oauth_access in set_auth should be mocked
        assert_true(
            self.user_settings.verify_oauth_access(
                node=self.node,
                external_account=external_account,
            )
        )

    def test_set_auth_wrong_user(self):
        # Only the account owner may authorize the node.
        external_account = ZoteroAccountFactory()
        self.user.external_accounts.append(external_account)
        self.user.save()
        with assert_raises(PermissionsError):
            self.node_settings.set_auth(
                external_account=external_account,
                user=UserFactory()
            )

    def test_deauthorize(self):
        """deauthorize clears auth fields and records a node log entry."""
        self.node_settings.external_account = ZoteroAccountFactory()
        self.node_settings.zotero_list_id = 'something'
        self.node_settings.user_settings = self.user_settings
        self.node_settings.save()
        assert_true(self.node_settings.zotero_list_id)
        self.node_settings.deauthorize(auth=Auth(self.user))
        self.node_settings.save()
        assert_is(self.node_settings.user_settings, None)
        assert_is(self.node_settings.zotero_list_id, None)
        last_log = self.node.logs[-1]
        assert_equal(last_log.action, 'zotero_node_deauthorized')
        params = last_log.params
        assert_in('node', params)
        assert_in('project', params)

    def test_clear_auth(self):
        # clear_auth wipes account, folder id, and user settings.
        self.node_settings.external_account = ZoteroAccountFactory()
        self.node_settings.zotero_list_id = 'something'
        self.node_settings.user_settings = self.user_settings
        self.node_settings.save()
        self.node_settings.clear_auth()
        assert_is_none(self.node_settings.external_account)
        assert_is_none(self.node_settings.zotero_list_id)
        assert_is_none(self.node_settings.user_settings)

    def test_set_target_folder(self):
        """set_target_folder stores the folder, grants access, and logs it."""
        folder_id = 'fake-folder-id'
        folder_name = 'fake-folder-name'
        external_account = ZoteroAccountFactory()
        self.user.external_accounts.append(external_account)
        self.user.save()
        self.node_settings.set_auth(
            external_account=external_account,
            user=self.user
        )
        assert_is_none(self.node_settings.zotero_list_id)
        self.node_settings.set_target_folder(
            folder_id,
            folder_name,
            auth=Auth(user=self.user),
        )
        # instance was updated
        assert_equal(
            self.node_settings.zotero_list_id,
            'fake-folder-id',
        )
        # user_settings was updated
        # TODO: the call to grant_oauth_access should be mocked
        assert_true(
            self.user_settings.verify_oauth_access(
                node=self.node,
                external_account=external_account,
                metadata={'folder': 'fake-folder-id'}
            )
        )
        log = self.node.logs[-1]
        assert_equal(log.action, 'zotero_folder_selected')
        assert_equal(log.params['folder_id'], folder_id)
        assert_equal(log.params['folder_name'], folder_name)

    def test_has_auth_false(self):
        external_account = ZoteroAccountFactory()
        assert_false(self.node_settings.has_auth)
        # both external_account and user_settings must be set to have auth
        self.node_settings.external_account = external_account
        assert_false(self.node_settings.has_auth)
        self.node_settings.external_account = None
        self.node_settings.user_settings = self.user_settings
        assert_false(self.node_settings.has_auth)
        # set_auth must be called to have auth
        self.node_settings.external_account = external_account
        self.node_settings.user_settings = self.user_settings
        assert_false(self.node_settings.has_auth)

    def test_has_auth_true(self):
        external_account = ZoteroAccountFactory()
        self.user.external_accounts.append(external_account)
        self.node_settings.set_auth(external_account, self.user)
        # zotero_list_id should have no effect
        self.node_settings.zotero_list_id = None
        assert_true(self.node_settings.has_auth)
        # zotero_list_id should have no effect
        self.node_settings.zotero_list_id = 'totally fake ID'
        assert_true(self.node_settings.has_auth)

    def test_selected_folder_name_root(self):
        # The sentinel 'ROOT' folder displays as "All Documents".
        self.node_settings.zotero_list_id = 'ROOT'
        assert_equal(
            self.node_settings.selected_folder_name,
            "All Documents"
        )

    def test_selected_folder_name_empty(self):
        # No folder selected yields an empty display name.
        self.node_settings.zotero_list_id = None
        assert_equal(
            self.node_settings.selected_folder_name,
            ''
        )

    @mock.patch('website.addons.zotero.model.Zotero._folder_metadata')
    def test_selected_folder_name(self, mock_folder_metadata):
        # Mock the return from api call to get the folder's name
        mock_folder = {'data': {'name': 'Fake Folder'}}
        # Add the mocked return object to the mocked api client
        mock_folder_metadata.return_value = mock_folder
        self.node_settings.zotero_list_id = 'fake-list-id'
        assert_equal(
            self.node_settings.selected_folder_name,
            'Fake Folder'
        )
class ZoteroUserSettingsTestCase(OsfTestCase):
    """Tests for the user-level OAuth grant/verify bookkeeping."""

    def _prep_oauth_case(self):
        # Shared fixture: a project, its creator, a linked Zotero account,
        # and the creator's zotero user-settings addon.
        self.node = ProjectFactory()
        self.user = self.node.creator
        self.external_account = ZoteroAccountFactory()
        self.user.external_accounts.append(self.external_account)
        self.user.save()
        self.user_settings = self.user.get_or_add_addon('zotero')

    def test_grant_oauth_access_no_metadata(self):
        # Granting without metadata records an empty metadata dict.
        self._prep_oauth_case()
        self.user_settings.grant_oauth_access(
            node=self.node,
            external_account=self.external_account,
        )
        self.user_settings.save()
        assert_equal(
            self.user_settings.oauth_grants,
            {self.node._id: {self.external_account._id: {}}},
        )

    def test_grant_oauth_access_metadata(self):
        # Granting with metadata stores it under node id -> account id.
        self._prep_oauth_case()
        self.user_settings.grant_oauth_access(
            node=self.node,
            external_account=self.external_account,
            metadata={'folder': 'fake_folder_id'}
        )
        self.user_settings.save()
        assert_equal(
            self.user_settings.oauth_grants,
            {
                self.node._id: {
                    self.external_account._id: {'folder': 'fake_folder_id'}
                },
            }
        )

    def test_verify_oauth_access_no_metadata(self):
        # Verification succeeds for the granted account, fails for others.
        self._prep_oauth_case()
        self.user_settings.grant_oauth_access(
            node=self.node,
            external_account=self.external_account,
        )
        self.user_settings.save()
        assert_true(
            self.user_settings.verify_oauth_access(
                node=self.node,
                external_account=self.external_account
            )
        )
        assert_false(
            self.user_settings.verify_oauth_access(
                node=self.node,
                external_account=ZoteroAccountFactory()
            )
        )

    def test_verify_oauth_access_metadata(self):
        # Verification is metadata-sensitive: the stored folder must match.
        self._prep_oauth_case()
        self.user_settings.grant_oauth_access(
            node=self.node,
            external_account=self.external_account,
            metadata={'folder': 'fake_folder_id'}
        )
        self.user_settings.save()
        assert_true(
            self.user_settings.verify_oauth_access(
                node=self.node,
                external_account=self.external_account,
                metadata={'folder': 'fake_folder_id'}
            )
        )
        assert_false(
            self.user_settings.verify_oauth_access(
                node=self.node,
                external_account=self.external_account,
                metadata={'folder': 'another_folder_id'}
            )
        )
| {
"content_hash": "08bdf66ed37241a96dd2c3bf1d22a386",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 85,
"avg_line_length": 31.60326086956522,
"alnum_prop": 0.6029234737747206,
"repo_name": "Ghalko/osf.io",
"id": "52db8c8b90069cfa7409c7822f07fcd770f92506",
"size": "11655",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "website/addons/zotero/tests/test_models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "131914"
},
{
"name": "HTML",
"bytes": "49734"
},
{
"name": "JavaScript",
"bytes": "1365776"
},
{
"name": "Mako",
"bytes": "615025"
},
{
"name": "Python",
"bytes": "4647431"
},
{
"name": "Shell",
"bytes": "2118"
}
],
"symlink_target": ""
} |
"""
These are internal helpers. Do not rely on their presence.
"""
from __future__ import absolute_import, unicode_literals
from distutils.version import LooseVersion
from django import get_version
from django.template.loader import render_to_string
if LooseVersion(get_version()) < LooseVersion('1.10'):
    def ct_render_to_string(template, ctx, **kwargs):
        """Render for Django < 1.10, where render_to_string takes a
        context_instance; one is built from the request when available."""
        from django.template import RequestContext
        ctx_instance = kwargs.get('context')
        request = kwargs.get('request')
        if ctx_instance is None and request:
            ctx_instance = RequestContext(request)
        return render_to_string(template, ctx, context_instance=ctx_instance)
else:
    def ct_render_to_string(template, ctx, **kwargs):
        """Render for Django >= 1.10, which accepts the request directly."""
        return render_to_string(template, ctx, request=kwargs.get('request'))
| {
"content_hash": "bba8c0e1c06091a689e5f3c6bcd43e45",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 64,
"avg_line_length": 30.586206896551722,
"alnum_prop": 0.657271702367531,
"repo_name": "matthiask/feincms-elephantblog",
"id": "74c6309f2d6dc195da29a61c444403622c377e6a",
"size": "887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elephantblog/_internal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8141"
},
{
"name": "Python",
"bytes": "83985"
},
{
"name": "Shell",
"bytes": "134"
}
],
"symlink_target": ""
} |
import re, os, platform, urllib, cgi

# Plugin identity/version used in every log line.
PLUGIN_LOG_TITLE='Staxus' # Log Title
VERSION_NO = '2017.07.26.0'
# Delay used when requesting HTML, may be good to have to prevent being
# banned from the site
REQUEST_DELAY = 0
# URLS
BASE_URL='http://staxus.com%s'
# Example Video Details URL
# http://staxus.com/trial/gallery.php?id=4044
BASE_VIDEO_DETAILS_URL='http://staxus.com/trial/%s'
# Example Search URL:
# http://staxus.com/trial/search.php?query=Staxus+Classic%3A+BB+Skate+Rave+-+Scene+1+-+Remastered+in+HD
BASE_SEARCH_URL='http://staxus.com/trial/search.php?st=advanced&qall=%s'
# File names to match for this agent.
# NOTE(review): Prefs is a global injected by the Plex plugin framework at
# module load time; this line cannot run outside that sandbox.
file_name_pattern = re.compile(Prefs['regex'])
def Start():
    """Plex agent entry point: configure the HTTP cache and user agent."""
    HTTP.CacheTime = CACHE_1WEEK
    HTTP.Headers['User-agent'] = (
        'Mozilla/4.0 (compatible; MSIE 8.0; '
        'Windows NT 6.2; Trident/4.0; SLCC2; .NET CLR 2.0.50727; '
        '.NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0)'
    )
class Staxus(Agent.Movies):
    """Plex metadata agent for staxus.com: matches media files by name and
    scrapes scene metadata from the site."""

    name = 'Staxus'
    languages = [Locale.Language.NoLanguage, Locale.Language.English]
    primary_provider = False
    fallback_agent = ['com.plexapp.agents.gayporncollector']
    contributes_to = ['com.plexapp.agents.cockporn']

    def Log(self, message, *args):
        """Log *message* under the plugin title when the 'debug' pref is on."""
        if Prefs['debug']:
            Log(PLUGIN_LOG_TITLE + ' - ' + message, *args)

    def search(self, results, media, lang, manual):
        """Search staxus.com for a metadata match for the media file.

        Appends MetadataSearchResult entries to *results*: an exact title
        match scores 100; otherwise a single low-score fallback entry is
        appended.
        """
        self.Log('-----------------------------------------------------------------------')
        self.Log('SEARCH CALLED v.%s', VERSION_NO)
        self.Log('SEARCH - Platform: %s %s', platform.system(), platform.release())
        self.Log('SEARCH - media.title - %s', media.title)
        self.Log('SEARCH - media.items[0].parts[0].file - %s', media.items[0].parts[0].file)
        self.Log('SEARCH - media.primary_metadata.title - %s', media.primary_metadata.title)
        self.Log('SEARCH - media.items - %s', media.items)
        self.Log('SEARCH - media.filename - %s', media.filename)
        self.Log('SEARCH - lang - %s', lang)
        self.Log('SEARCH - manual - %s', manual)
        self.Log('SEARCH - Prefs->cover - %s', Prefs['cover'])
        self.Log('SEARCH - Prefs->folders - %s', Prefs['folders'])
        self.Log('SEARCH - Prefs->regex - %s', Prefs['regex'])
        if not media.items[0].parts[0].file:
            return
        path_and_file = media.items[0].parts[0].file.lower()
        self.Log('SEARCH - File Path: %s', path_and_file)
        enclosing_directory, file_name = os.path.split(os.path.splitext(path_and_file)[0])
        enclosing_directory, enclosing_folder = os.path.split(enclosing_directory)
        self.Log('SEARCH - Enclosing Folder: %s', enclosing_folder)
        if Prefs['folders'] != "*":
            folder_list = re.split(',\s*', Prefs['folders'].lower())
            # BUGFIX: the original referenced the undefined names 'final_dir'
            # and 'basename' here (NameError); the values actually computed
            # above are enclosing_folder and file_name.
            if enclosing_folder not in folder_list:
                self.Log('SEARCH - Skipping %s because the folder %s is not in the acceptable folders list: %s', file_name, enclosing_folder, ','.join(folder_list))
                return
        m = file_name_pattern.search(file_name)
        if not m:
            self.Log('SEARCH - Skipping %s because the file name is not in the expected format.', file_name)
            return
        self.Log('SEARCH - File Name: %s', file_name)
        self.Log('SEARCH - Split File Name: %s', file_name.split(' '))
        # Normalize the file name into a search query: drop the studio name,
        # parenthesized fragments, and surrounding whitespace.
        remove_words = file_name.lower()
        remove_words = remove_words.replace('staxus', '')
        remove_words = re.sub('\(([^\)]+)\)', '', remove_words)
        remove_words = remove_words.lstrip(' ')
        remove_words = remove_words.rstrip(' ')
        # The site search mishandles non-alphanumeric characters, so each
        # word is escaped individually and joined with '%2C+'.
        search_query_raw = list()
        for piece in remove_words.split(' '):
            search_query_raw.append(cgi.escape(piece))
        search_query = "%2C+".join(search_query_raw)
        self.Log('SEARCH - Search Query: %s', search_query)
        html = HTML.ElementFromURL(BASE_SEARCH_URL % search_query, sleep=REQUEST_DELAY)
        search_results = html.xpath('//*[@class="item"]')
        score = 10
        self.Log('SEARCH - results size: %s', len(search_results))
        # Look for an exact title match (ignoring ':'); non-matches lower the
        # fallback score so a poor match ranks below other agents.
        for result in search_results:
            video_title = result.findall("div/a/img")[0].get("alt")
            video_title = video_title.lstrip(' ')
            video_title = video_title.rstrip(' ')
            self.Log('SEARCH - video title: %s', video_title)
            if video_title.lower().replace(':', '') == file_name.lower():
                video_url = result.findall("div/a")[0].get('href')
                self.Log('SEARCH - video url: %s', video_url)
                image_url = result.findall("div/a/img")[0].get("src")
                self.Log('SEARCH - image url: %s', image_url)
                self.Log('SEARCH - Exact Match "' + file_name.lower() + '" == "%s"' % video_title.lower())
                results.Append(MetadataSearchResult(id = video_url, name = video_title, score = 100, lang = lang))
            else:
                self.Log('SEARCH - Title not found "' + file_name.lower() + '" != "%s"' % video_title.lower())
                score = score - 1
        # Fallback entry so the item is still identifiable by file name.
        results.Append(MetadataSearchResult(id = '', name = media.filename, score = score, lang = lang))

    def update(self, metadata, media, lang, force=False):
        """Populate *metadata* by scraping the matched scene page.

        Each scrape section is individually best-effort: a failure is
        logged and the remaining sections still run.
        """
        self.Log('UPDATE CALLED')
        if not media.items[0].parts[0].file:
            return
        file_path = media.items[0].parts[0].file
        self.Log('UPDATE - File Path: %s', file_path)
        self.Log('UPDATE - Video URL: %s', metadata.id)
        url = BASE_VIDEO_DETAILS_URL % metadata.id
        # Fetch the scene page and record its URL as the tagline.
        html = HTML.ElementFromURL(url, sleep=REQUEST_DELAY)
        metadata.tagline = url
        video_title = html.xpath('//div[@class="sidebar right sidebar-models"]/h2/text()')[0]
        self.Log('UPDATE - video_title: "%s"' % video_title)
        # Posters: upgrade each thumbnail URL to its 1920w variant.
        valid_image_names = list()
        i = 0
        video_image_list = html.xpath('//*[@class="reset collection-images"]/li/a/img')
        try:
            coverPrefs = Prefs['cover']
            for image in video_image_list:
                # NOTE(review): i is an int while the pref value's type is
                # framework-defined; preserved as-is — confirm pref type.
                if i != coverPrefs or coverPrefs == "all available":
                    thumb_url = image.get('src')
                    poster_url = thumb_url.replace('300h', '1920w')
                    valid_image_names.append(poster_url)
                    if poster_url not in metadata.posters:
                        try:
                            i += 1
                            metadata.posters[poster_url] = Proxy.Preview(HTTP.Request(thumb_url), sort_order = i)
                        except: pass  # best-effort per poster
        except Exception as e:
            self.Log('UPDATE - Error getting posters: %s', e)
            pass
        # Try to get description text.
        try:
            raw_about_text = html.xpath('//div[@class="col-main"]/p')
            self.Log('UPDATE - About Text - RAW %s', raw_about_text)
            about_text = ' '.join(str(x.text_content().strip()) for x in raw_about_text)
            metadata.summary = about_text
        except Exception as e:
            self.Log('UPDATE - Error getting description text: %s', e)
            pass
        # Try to get the release date: site shows DD/MM/YYYY; reorder to
        # "MM DD, YYYY" for Datetime.ParseDate.
        try:
            rd = html.xpath('//div[@class="sidebar right sidebar-models"]/p[1]/span/text()')[0]
            rd = rd.split('/')
            rd = [rd[idx] for idx in [1, 0, 2]]
            rd[1] = rd[1] + ', '
            rd[0] = rd[0] + " "
            rd = ''.join(rd)
            self.Log('UPDATE - Release Date: %s', rd)
            metadata.originally_available_at = Datetime.ParseDate(rd).date()
            metadata.year = metadata.originally_available_at.year
        except Exception as e:
            self.Log('UPDATE - Error getting release date: %s', e)
            pass
        # Try to get and process the video cast.
        try:
            metadata.roles.clear()
            htmlcast = html.xpath('//div[@class="sidebar right sidebar-models"]/p[4]/a/text()')
            self.Log('UPDATE - cast: "%s"' % htmlcast)
            for cast in htmlcast:
                cname = cast.strip()
                if (len(cname) > 0):
                    role = metadata.roles.new()
                    role.name = cname
        except Exception as e:
            self.Log('UPDATE - Error getting video cast: %s', e)
            pass
        # Try to get and process the video genres.
        try:
            metadata.genres.clear()
            genres = html.xpath('//div[@class="sidebar right sidebar-models"]/p[3]/span/a/text()')
            self.Log('UPDATE - video_genres: "%s"' % genres)
            for genre in genres:
                genre = genre.strip()
                if (len(genre) > 0):
                    metadata.genres.add(genre)
        except Exception as e:
            self.Log('UPDATE - Error getting video genres: %s', e)
            pass
        # Try to get and process the ratings (site uses a 5-point scale).
        try:
            rating = html.xpath('//div[@class="col-md-4 col-xs-12 stats-single"]/b/text()')[0].strip()
            rating_count = html.xpath('//div[@class="col-md-4 col-xs-12 stats-single"]//strong/text()')[0]
            rating_count = rating_count.replace('(Total votes: ', '')
            rating_count = rating_count.replace(')', '')
            self.Log('UPDATE - video_rating: "%s"', rating)
            self.Log('UPDATE - video_rating_count: "%s"', rating_count)
            metadata.rating = float(rating)*2
            metadata.rating_count = int(rating_count)
        except Exception as e:
            self.Log('UPDATE - Error getting rating: %s', e)
            pass
        # Drop posters no longer present on the page, then set fixed fields.
        metadata.posters.validate_keys(valid_image_names)
        metadata.content_rating = 'X'
        metadata.title = video_title
        metadata.studio = "Staxus"
| {
"content_hash": "079db8f2f15a0c0da798e9c288396553",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 211,
"avg_line_length": 42.25550660792952,
"alnum_prop": 0.6456422018348624,
"repo_name": "iklier/plex-gay-metadata-agent",
"id": "734c14a721f12d982306b71f4d1fffe2c99ddb5e",
"size": "9602",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Staxus.bundle/Contents/Code/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65492"
}
],
"symlink_target": ""
} |
from ConfigParser import ConfigParser
def get_cfg():
    """Load and return the parsed configuration.ini."""
    parser = ConfigParser()
    parser.read('configuration.ini')
    return parser
def api_info(a):
    """Return all key/value options of config section *a* as a dict."""
    section_items = get_cfg().items(a)
    return dict(section_items)
def get_api_item(a, b):
    """Return option *b* from config section *a*."""
    return get_cfg().get(a, b)
def get_proxies():
    """Return the [proxy] config section as a dict, or None when disabled.

    Returns None when the section is empty, or when its first entry has the
    literal value "none" (the config file's convention for "no proxy").
    """
    cfg = get_cfg()
    proxies = dict(cfg.items('proxy'))
    if not proxies:
        return None
    # dict.keys()[0] only works on Python 2; next(iter(...)) is equivalent
    # there and also correct on Python 3.
    first = next(iter(proxies))
    if proxies[first] == "none":
        return None
    return proxies
| {
"content_hash": "8bd7583d540d6ce6bc0f7c73a3e90740",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 38,
"avg_line_length": 20.384615384615383,
"alnum_prop": 0.5962264150943396,
"repo_name": "nramanan/nellie-bot",
"id": "fe92edbd08a03f24fb7485f7f18c7caf6808077f",
"size": "530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/config_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2232"
},
{
"name": "HTML",
"bytes": "5180"
},
{
"name": "Python",
"bytes": "18692"
}
],
"symlink_target": ""
} |
from unittest.mock import patch, MagicMock
from flask import url_for
from tests.base import SampleFrontendTestBase
class BpFeaturesTestCase(SampleFrontendTestBase):
    """Smoke tests for the features blueprint."""

    def test_bustimes_reachable(self):
        """The bustimes page renders its template and queries the backend."""
        bustimes_mock = MagicMock()
        with patch('sipa.blueprints.features.get_bustimes', bustimes_mock):
            response = self.client.get(url_for('features.bustimes'))
            self.assert200(response)
            self.assertTemplateUsed("bustimes.html")
            self.assertTrue(bustimes_mock.called)
| {
"content_hash": "802d99378a23dfd1eb8a9facd2f990aa",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 66,
"avg_line_length": 30.1875,
"alnum_prop": 0.7122153209109731,
"repo_name": "agdsn/sipa",
"id": "4ecd31751ae404d14b228b33de744e769e2c15d9",
"size": "483",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "tests/blueprints/test_bp_features.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17185"
},
{
"name": "Dockerfile",
"bytes": "840"
},
{
"name": "HTML",
"bytes": "57259"
},
{
"name": "JavaScript",
"bytes": "252921"
},
{
"name": "Makefile",
"bytes": "947"
},
{
"name": "Python",
"bytes": "300232"
},
{
"name": "Shell",
"bytes": "4193"
}
],
"symlink_target": ""
} |
"""django-accounts setup."""
from __future__ import print_function, unicode_literals
from setuptools import setup, find_packages
import accounts
# Trove classifiers describing the package (see PyPI classifier list).
_CLASSIFIERS = [
    'Development Status :: 1 - Planning',
    'Environment :: Web Environment',
    'Framework :: Django',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Natural Language :: English',
    'Natural Language :: Spanish',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Topic :: Internet :: WWW/HTTP',
    'Topic :: Software Development :: Libraries :: Python Modules'
]

setup(
    name='accounts',
    version=accounts.__version__,
    description='A Django app to manage user accounts.',
    long_description=accounts.__doc__,
    author='Julio Vicente Trigo Guijarro',
    author_email='',
    url='http://github.com/juliotrigo/django-accounts/',
    license='BSD',
    platforms='any',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    classifiers=_CLASSIFIERS,
)
| {
"content_hash": "73c9fff4ca427b69ea679fcc215cc565",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 70,
"avg_line_length": 31.405405405405407,
"alnum_prop": 0.6239242685025818,
"repo_name": "juliotrigo/django-accounts",
"id": "9779c7924826cf1dee3b18787fa8ec7901fdc1fc",
"size": "1187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7441"
}
],
"symlink_target": ""
} |
class Credentials(object):
    """Intentionally empty credential holder.

    Relies on default gcloud client credential resolution instead of
    reifying credentials in WAL-E.
    """
| {
"content_hash": "0204d8a9cfd74c712345a153dba973ab",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 68,
"avg_line_length": 35.5,
"alnum_prop": 0.7394366197183099,
"repo_name": "tenstartups/wal-e",
"id": "a48af6b5c5ecde29676e98f5df518d82508a6d6e",
"size": "142",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wal_e/blobstore/gs/credentials.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "986"
},
{
"name": "Python",
"bytes": "359283"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from ..models import Staff
class StaffTests(TestCase):
    """Verify that Staff rows persist their address field."""

    # (name, address) pairs created in setUp and checked in test_address.
    _FIXTURES = (
        ('Casper', 'China'),
        ('Muffin', 'Malasia'),
    )

    def setUp(self):
        for name, address in self._FIXTURES:
            Staff.objects.create(name=name, address=address)

    def test_address(self):
        for name, address in self._FIXTURES:
            self.assertEqual(Staff.objects.get(name=name).address, address)
| {
"content_hash": "391cca84deb7ff2c4a5d4c47343fe05b",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 49,
"avg_line_length": 26,
"alnum_prop": 0.6038461538461538,
"repo_name": "njj6666/forkmelon",
"id": "4acb01b7168aa7a15b5379b84a42408ae578266d",
"size": "520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django-rest-framework/tutorial/snippets/tests/test_models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11126"
},
{
"name": "HTML",
"bytes": "87181"
},
{
"name": "Java",
"bytes": "23426"
},
{
"name": "JavaScript",
"bytes": "244289"
},
{
"name": "Python",
"bytes": "40179"
},
{
"name": "Shell",
"bytes": "388"
},
{
"name": "Swift",
"bytes": "5607"
},
{
"name": "TypeScript",
"bytes": "18190"
}
],
"symlink_target": ""
} |
from .core import StructuredNode, db
from .properties import AliasProperty
from .exception import MultipleNodesReturned
import inspect
import re
# Relationship direction constants used throughout the match machinery.
OUTGOING, INCOMING, EITHER = 1, -1, 0

# basestring python 3.x fallback
try:
    basestring
except NameError:
    basestring = str


def rel_helper(lhs, rhs, ident=None, relation_type=None, direction=None, **kwargs):
    """
    Generate a Cypher relationship matching string.

    Examples:
        direction = OUTGOING: (lhs)-[ident:relation_type]->(rhs)
        direction = INCOMING: (lhs)<-[ident:relation_type]-(rhs)
        direction = EITHER:   (lhs)-[ident:relation_type]-(rhs)

    :param lhs: left hand statement
    :param rhs: right hand statement
    :param ident: identifier naming the relationship, or None
    :param relation_type: None for any direct relationship, '*' for
        relationships of any length, or an explicit relationship type
    :param direction: OUTGOING, INCOMING, or anything else for either
    :rtype: str
    """
    # Pick the arrow template; '{0}' is filled with the bracketed rel spec.
    if direction == OUTGOING:
        arrow = '-{0}->'
    elif direction == INCOMING:
        arrow = '<-{0}-'
    else:
        arrow = '-{0}-'
    # Build the bracketed relationship specification.
    if relation_type is None:
        rel_spec = ''                # untyped direct relationship
    elif relation_type == '*':
        rel_spec = '[*]'             # wildcard, any length
    else:
        rel_spec = '[%s:%s]' % (ident if ident else '', relation_type)
    return "({0}){1}({2})".format(lhs, arrow.format(rel_spec), rhs)
# special operators
# NOTE: 'INSESITIVE' is a long-standing misspelling of 'insensitive',
# kept for consistency across these private names.
_SPECIAL_OPERATOR_IN = 'IN'
_SPECIAL_OPERATOR_INSESITIVE = '(?i)'  # regex inline case-insensitive flag
_SPECIAL_OPERATOR_ISNULL = 'IS NULL'
_SPECIAL_OPERATOR_ISNOTNULL = 'IS NOT NULL'
_SPECIAL_OPERATOR_REGEX = '=~'
_UNARY_OPERATORS = (_SPECIAL_OPERATOR_ISNULL, _SPECIAL_OPERATOR_ISNOTNULL)
# Regex templates; '{}' is filled (via str.format) with the user value.
_REGEX_INSESITIVE = _SPECIAL_OPERATOR_INSESITIVE + '{}'
_REGEX_CONTAINS = '.*{}.*'
_REGEX_STARTSWITH = '{}.*'
_REGEX_ENDSWITH = '.*{}'
# regex operations that require escaping (the value is plain text, so it is
# re.escape()d before being substituted into the template)
_STRING_REGEX_OPERATOR_TABLE = {
    'iexact': _REGEX_INSESITIVE,
    'contains': _REGEX_CONTAINS,
    'icontains': _SPECIAL_OPERATOR_INSESITIVE + _REGEX_CONTAINS,
    'startswith': _REGEX_STARTSWITH,
    'istartswith': _SPECIAL_OPERATOR_INSESITIVE + _REGEX_STARTSWITH,
    'endswith': _REGEX_ENDSWITH,
    'iendswith': _SPECIAL_OPERATOR_INSESITIVE + _REGEX_ENDSWITH,
}
# regex operations that do not require escaping (the value is already a regex)
_REGEX_OPERATOR_TABLE = {
    'iregex': _REGEX_INSESITIVE,
}
# list all regex operations, these will require formatting of the value
_REGEX_OPERATOR_TABLE.update(_STRING_REGEX_OPERATOR_TABLE)
# list all supported operators (maps the Django-style '__suffix' lookups
# used in filter()/exclude() kwargs to their Cypher operator)
OPERATOR_TABLE = {
    'lt': '<',
    'gt': '>',
    'lte': '<=',
    'gte': '>=',
    'ne': '<>',
    'in': _SPECIAL_OPERATOR_IN,
    'isnull': _SPECIAL_OPERATOR_ISNULL,
    'regex': _SPECIAL_OPERATOR_REGEX,
    'exact': '='
}
# add all regex operators
OPERATOR_TABLE.update(_REGEX_OPERATOR_TABLE)
def install_traversals(cls, node_set):
    """
    Install a Traversal attribute on *node_set* for every relationship
    definition declared on the StructuredNode class *cls*.
    """
    rel_definitions = cls.defined_properties(rels=True, aliases=False, properties=False)
    for name in rel_definitions:
        # Refuse to shadow anything already present on the NodeSet.
        if hasattr(node_set, name):
            raise ValueError("Can't install traversal '{}' exists on NodeSet".format(name))
        relationship = getattr(cls, name)
        relationship._lookup_node_class()
        setattr(node_set, name,
                Traversal(source=node_set, key=name, definition=relationship.definition))
def process_filter_args(cls, kwargs):
    """
    loop through properties in filter parameters check they match class definition
    deflate them and convert into something easy to generate cypher from

    Returns a dict mapping the database property name to an
    (operator, deflated_value) tuple.
    """
    output = {}
    for key, value in kwargs.items():
        if '__' in key:
            # Django-style lookup: "<prop>__<operator>", e.g. age__gt=21.
            # NOTE(review): a property name itself containing '__' would
            # make this split raise -- confirm that never occurs.
            prop, operator = key.split('__')
            operator = OPERATOR_TABLE[operator]
        else:
            prop = key
            operator = '='
        if prop not in cls.defined_properties(rels=False):
            raise ValueError("No such property {} on {}".format(prop, cls.__name__))
        property_obj = getattr(cls, prop)
        if isinstance(property_obj, AliasProperty):
            # Resolve the alias and deflate via the aliased property.
            prop = property_obj.aliased_to()
            deflated_value = getattr(cls, prop).deflate(value)
        else:
            # handle special operators
            if operator == _SPECIAL_OPERATOR_IN:
                if not isinstance(value, tuple) and not isinstance(value, list):
                    raise ValueError('Value must be a tuple or list for IN operation {}={}'.format(key, value))
                deflated_value = [property_obj.deflate(v) for v in value]
            elif operator == _SPECIAL_OPERATOR_ISNULL:
                if not isinstance(value, bool):
                    raise ValueError('Value must be a bool for isnull operation on {}'.format(key))
                operator = 'IS NULL' if value else 'IS NOT NULL'
                deflated_value = None
            elif operator in _REGEX_OPERATOR_TABLE.values():
                deflated_value = property_obj.deflate(value)
                if not isinstance(deflated_value, basestring):
                    raise ValueError('Must be a string value for {}'.format(key))
                if operator in _STRING_REGEX_OPERATOR_TABLE.values():
                    # Plain-text lookups: escape the user value so only the
                    # template's wildcards stay regex-significant.
                    deflated_value = re.escape(deflated_value)
                # Substitute the value into the regex template, then emit
                # a plain Cypher regex comparison.
                deflated_value = operator.format(deflated_value)
                operator = _SPECIAL_OPERATOR_REGEX
            else:
                deflated_value = property_obj.deflate(value)
        # map property to correct property name in the database
        db_property = cls.defined_properties(rels=False)[prop].db_property or prop
        output[db_property] = (operator, deflated_value)
    return output
def process_has_args(cls, kwargs):
    """
    Validate has() keyword arguments against the relationships defined on
    *cls* and split them into (must_match, dont_match) definition dicts.
    """
    definitions = cls.defined_properties(properties=False, rels=True, aliases=False)
    match = {}
    dont_match = {}
    for name, value in kwargs.items():
        if name not in definitions:
            raise ValueError("No such relation {} defined on a {}".format(name, cls.__name__))
        definitions[name]._lookup_node_class()
        if value is True:
            match[name] = definitions[name].definition
        elif value is False:
            dont_match[name] = definitions[name].definition
        elif isinstance(value, NodeSet):
            raise NotImplementedError("Not implemented yet")
        else:
            raise ValueError("Expecting True / False / NodeSet got: " + repr(value))
    return match, dont_match
class QueryBuilder(object):
    """Translates a NodeSet / Traversal description into a Cypher query.

    Accumulates an internal AST dict ('match', 'where', 'return', ...) plus
    the matching query-parameter dict, then renders them in build_query().
    """
    def __init__(self, node_set):
        self.node_set = node_set
        self._ast = {'match': [], 'where': []}
        self._query_params = {}
        self._place_holder_registry = {}  # per-name counters for unique params
        self._ident_count = 0  # counter for generated relationship idents
    def build_ast(self):
        """Populate the AST from the node set; returns self for chaining."""
        self.build_source(self.node_set)
        # Copy pagination onto the AST when the node set carries it.
        if hasattr(self.node_set, 'skip'):
            self._ast['skip'] = self.node_set.skip
        if hasattr(self.node_set, 'limit'):
            self._ast['limit'] = self.node_set.limit
        return self
    def build_source(self, source):
        """Dispatch on the source type; returns its Cypher identifier."""
        if isinstance(source, Traversal):
            return self.build_traversal(source)
        elif isinstance(source, NodeSet):
            if inspect.isclass(source.source) and issubclass(source.source, StructuredNode):
                ident = self.build_label(source.source.__label__.lower(), source.source)
            else:
                # Nested source (node instance or traversal): recurse.
                ident = self.build_source(source.source)
            self.build_additional_match(ident, source)
            if hasattr(source, '_order_by'):
                self.build_order_by(ident, source)
            if source.filters:
                self.build_where_stmt(ident, source.filters)
            return ident
        elif isinstance(source, StructuredNode):
            return self.build_node(source)
        else:
            raise ValueError("Unknown source type " + repr(source))
    def create_ident(self):
        # Fresh relationship identifier: r1, r2, ...
        self._ident_count += 1
        return 'r' + str(self._ident_count)
    def build_order_by(self, ident, source):
        # Qualify each ordered property with the source identifier.
        self._ast['order_by'] = ['{}.{}'.format(ident, p)
                                 for p in source._order_by]
    def build_traversal(self, traversal):
        """
        traverse a relationship from a node to a set of nodes
        """
        # label for the right-hand (target) side of the relationship
        rhs_label = ':' + traversal.target_class.__label__
        # build source
        lhs_ident = self.build_source(traversal.source)
        rhs_ident = traversal.name + rhs_label
        self._ast['return'] = traversal.name
        self._ast['result_class'] = traversal.target_class
        rel_ident = self.create_ident()
        stmt = rel_helper(lhs=lhs_ident, rhs=rhs_ident, ident=rel_ident, **traversal.definition)
        self._ast['match'].append(stmt)
        if traversal.filters:
            # Relationship-property filters are applied to the rel ident.
            self.build_where_stmt(rel_ident, traversal.filters)
        return traversal.name
    def build_node(self, node):
        """Match a single saved node by id via a START clause."""
        ident = node.__class__.__name__.lower()
        if 'start' not in self._ast:
            self._ast['start'] = []
        place_holder = self._register_place_holder(ident)
        self._ast['start'].append('{} = node({{{}}})'.format(ident, place_holder))
        self._query_params[place_holder] = node._id
        self._ast['return'] = ident
        self._ast['result_class'] = node.__class__
        return ident
    def build_label(self, ident, cls):
        """
        match nodes by a label
        """
        ident_w_label = ident + ':' + cls.__label__
        self._ast['match'].append('({})'.format(ident_w_label))
        self._ast['return'] = ident
        self._ast['result_class'] = cls
        return ident
    def build_additional_match(self, ident, node_set):
        """
        handle additional matches supplied by 'has()' calls
        """
        # TODO add support for labels
        source_ident = ident
        for key, value in node_set.must_match.items():
            # NOTE(review): value is indexed as a dict before the isinstance
            # check below, so the tuple branch would raise TypeError here,
            # and add_node_set is not defined on this class -- verify.
            label = ':' + value['node_class'].__label__
            if isinstance(value, dict):
                stmt = rel_helper(lhs=source_ident, rhs=label, ident='', **value)
                self._ast['where'].append(stmt)
            elif isinstance(value, tuple):
                rel_manager, ns = value
                self.add_node_set(ns, key)
        for key, val in node_set.dont_match.items():
            label = ':' + val['node_class'].__label__
            if isinstance(val, dict):
                stmt = rel_helper(lhs=source_ident, rhs=label, ident='', **val)
                # Negated: the relationship must NOT exist.
                self._ast['where'].append('NOT ' + stmt)
            else:
                raise ValueError("WTF? " + repr(val))
    def _register_place_holder(self, key):
        # Make parameter names unique with a per-key counter: key_1, key_2, ...
        if key in self._place_holder_registry:
            self._place_holder_registry[key] += 1
        else:
            self._place_holder_registry[key] = 1
        return key + '_' + str(self._place_holder_registry[key])
    def build_where_stmt(self, ident, filters):
        """
        construct a where statement from some filters
        """
        stmts = []
        for row in filters:
            negate = False
            # pre-process NOT cases as they are nested dicts
            if '__NOT__' in row and len(row) == 1:
                negate = True
                row = row['__NOT__']
            for prop, op_and_val in row.items():
                op, val = op_and_val
                if op in _UNARY_OPERATORS:
                    # unary operators do not have a parameter
                    statement = '{} {}.{} {}'.format('NOT' if negate else '', ident, prop, op)
                else:
                    place_holder = self._register_place_holder(ident + '_' + prop)
                    statement = '{} {}.{} {} {{{}}}'.format('NOT' if negate else '', ident, prop, op, place_holder)
                    self._query_params[place_holder] = val
                stmts.append(statement)
        self._ast['where'].append(' AND '.join(stmts))
    def build_query(self):
        """Render the accumulated AST into a Cypher query string."""
        query = ''
        if 'start' in self._ast:
            query += 'START '
            query += ', '.join(self._ast['start'])
        query += ' MATCH '
        query += ', '.join(self._ast['match'])
        if 'where' in self._ast and self._ast['where']:
            query += ' WHERE '
            query += ' AND '.join(self._ast['where'])
        query += ' RETURN ' + self._ast['return']
        if 'order_by' in self._ast and self._ast['order_by']:
            query += ' ORDER BY '
            query += ', '.join(self._ast['order_by'])
        if 'skip' in self._ast:
            query += ' SKIP {0:d}'.format(self._ast['skip'])
        if 'limit' in self._ast:
            query += ' LIMIT {0:d}'.format(self._ast['limit'])
        return query
    def _count(self):
        """Execute the query with the return rewritten to count(); returns int."""
        self._ast['return'] = 'count({})'.format(self._ast['return'])
        # drop order_by, results in an invalid query
        self._ast.pop('order_by', None)
        query = self.build_query()
        results, _ = db.cypher_query(query, self._query_params)
        return int(results[0][0])
    def _contains(self, node_id):
        """True when exactly one matched node has the given id."""
        # inject id = into ast
        ident = self._ast['return']
        place_holder = self._register_place_holder(ident + '_contains')
        self._ast['where'].append('id({}) = {{{}}}'.format(ident, place_holder))
        self._query_params[place_holder] = node_id
        return self._count() == 1
    def _execute(self):
        """Run the query and inflate each result row into result_class."""
        query = self.build_query()
        results, _ = db.cypher_query(query, self._query_params)
        if results:
            return [self._ast['result_class'].inflate(n[0]) for n in results]
        return []
class BaseSet(object):
    """Shared query-execution behaviour for NodeSet and Traversal."""
    query_cls = QueryBuilder
    def all(self):
        """Execute the query and return all matching nodes as a list."""
        return self.query_cls(self).build_ast()._execute()
    def __iter__(self):
        # Executes the query eagerly, then yields the inflated nodes.
        return (i for i in self.query_cls(self).build_ast()._execute())
    def __len__(self):
        return self.query_cls(self).build_ast()._count()
    def __bool__(self):
        # Python 3 truthiness: True when at least one node matches.
        return self.query_cls(self).build_ast()._count() > 0
    def __nonzero__(self):
        # Python 2 spelling of __bool__.
        return self.query_cls(self).build_ast()._count() > 0
    def __contains__(self, obj):
        """Membership test for a *saved* StructuredNode instance."""
        if isinstance(obj, StructuredNode):
            if hasattr(obj, '_id'):
                return self.query_cls(self).build_ast()._contains(int(obj._id))
            raise ValueError("Unsaved node: " + repr(obj))
        else:
            raise ValueError("Expecting StructuredNode instance")
    def __getitem__(self, key):
        # NOTE(review): truthiness tests mean an explicit 0 slice bound
        # (e.g. [0:5]) skips the start branch; harmless for start=0 but
        # worth confirming. Integer indexing returns a one-element LIST,
        # not the node itself -- confirm callers expect this.
        if isinstance(key, slice):
            if key.stop and key.start:
                self.limit = key.stop - key.start
                self.skip = key.start
            elif key.stop:
                self.limit = key.stop
            elif key.start:
                self.skip = key.start
        elif isinstance(key, int):
            self.skip = key
            self.limit = 1
        return self.query_cls(self).build_ast()._execute()
class NodeSet(BaseSet):
    """
    a set of matched nodes of a single type
        source: how to produce the set of nodes
        node_cls: what type of nodes are they
    """
    def __init__(self, source):
        self.source = source  # could be a Traverse object or a node class
        # Work out the node class this set yields, based on the source kind.
        if isinstance(source, Traversal):
            self.source_class = source.target_class
        elif inspect.isclass(source) and issubclass(source, StructuredNode):
            self.source_class = source
        elif isinstance(source, StructuredNode):
            self.source_class = source.__class__
        else:
            raise ValueError("Bad source for nodeset " + repr(source))
        # setup Traversal objects using relationship definitions
        install_traversals(self.source_class, self)
        self.filters = []
        # used by has()
        self.must_match = {}
        self.dont_match = {}
    def get(self, **kwargs):
        """Return exactly one node matching *kwargs*.

        Raises MultipleNodesReturned when more than one node matches and
        <source class>.DoesNotExist when none do.
        """
        output = process_filter_args(self.source_class, kwargs)
        self.filters.append(output)
        self.limit = 2  # two rows is enough to detect a non-unique match
        result = self.query_cls(self).build_ast()._execute()
        if len(result) > 1:
            raise MultipleNodesReturned(repr(kwargs))
        elif not result:
            raise self.source_class.DoesNotExist(repr(kwargs))
        else:
            return result[0]
    def filter(self, **kwargs):
        """Add filter conditions (Django-style prop__operator kwargs); chainable."""
        output = process_filter_args(self.source_class, kwargs)
        self.filters.append(output)
        return self
    def exclude(self, **kwargs):
        """Add negated filter conditions; chainable."""
        output = process_filter_args(self.source_class, kwargs)
        self.filters.append({'__NOT__': output})
        return self
    def has(self, **kwargs):
        """Require (True) or forbid (False) the named relationships; chainable."""
        must_match, dont_match = process_has_args(self.source_class, kwargs)
        self.must_match.update(must_match)
        self.dont_match.update(dont_match)
        return self
    def order_by(self, *props):
        """
        Order by properties. Prepend with minus to do descending. Pass None to
        remove ordering.
        """
        should_remove = len(props) == 1 and props[0] is None
        if not hasattr(self, '_order_by') or should_remove:
            self._order_by = []
            if should_remove:
                return self
        for prop in props:
            prop = prop.strip()
            if prop.startswith('-'):
                prop = prop[1:]
                desc = True
            else:
                desc = False
            if prop not in self.source_class.defined_properties(rels=False):
                raise ValueError("No such property {} on {}".format(
                    prop, self.source_class.__name__))
            property_obj = getattr(self.source_class, prop)
            if isinstance(property_obj, AliasProperty):
                # Order by the underlying (aliased-to) property.
                prop = property_obj.aliased_to()
            self._order_by.append(prop + (' DESC' if desc else ''))
        return self
class Traversal(BaseSet):
    """
    Lazily evaluated set of nodes reached by following one relationship
    definition from a source.

    source: start of the traversal; a StructuredNode instance, a
        StructuredNode subclass, a NodeSet or another Traversal.
    definition: relationship definition dict (must contain 'node_class').
    """
    def __init__(self, source, key, definition):
        self.source = source
        # Resolve the class we traverse *from*; the order of these checks
        # mirrors the supported source kinds.
        if isinstance(source, Traversal):
            origin = source.target_class
        elif inspect.isclass(source) and issubclass(source, StructuredNode):
            origin = source
        elif isinstance(source, StructuredNode):
            origin = source.__class__
        elif isinstance(source, NodeSet):
            origin = source.source_class
        else:
            raise ValueError("Bad source for traversal: {}".format(repr(source)))
        self.source_class = origin
        self.definition = definition
        self.target_class = definition['node_class']
        self.name = key
        self.filters = []
    def match(self, **kwargs):
        """Filter on properties of the relationship itself; chainable."""
        if 'model' not in self.definition:
            raise ValueError("match() only available on relationships with a model")
        if kwargs:
            self.filters.append(process_filter_args(self.definition['model'], kwargs))
        return self
    def _in_node_set(self):
        """Wrap this traversal in a NodeSet."""
        return NodeSet(self)
| {
"content_hash": "40a9eb1b91445c005b89844b6a1272eb",
"timestamp": "",
"source": "github",
"line_count": 564,
"max_line_length": 115,
"avg_line_length": 34.638297872340424,
"alnum_prop": 0.5789312039312039,
"repo_name": "pombredanne/neomodel",
"id": "d8312906b39dd4bd671af7bd853efda9b362e720",
"size": "19536",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neomodel/match.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "125544"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx build configuration for the django-pipeline documentation.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-pipeline'
copyright = u'2011-2014, Timothée Peignier'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE: keep `version` (short X.Y) in sync with `release` (full version)
# when cutting a release.
# The short X.Y version.
version = '1.6'
# The full version, including alpha/beta/rc tags.
release = '1.6.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-pipelinedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'django-pipeline.tex', u'Pipeline Documentation',
     u'Timothée Peignier', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django-pipeline', u'Pipeline Documentation',
     [u'Timothée Peignier'], 1)
]
| {
"content_hash": "5db493446e22009fb054fdb07fae89aa",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 80,
"avg_line_length": 32.44117647058823,
"alnum_prop": 0.7068600785735872,
"repo_name": "lexqt/django-pipeline",
"id": "c77282b2619f4fefbbd636fffd528fa26959f7ca",
"size": "7040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1601"
},
{
"name": "CoffeeScript",
"bytes": "104"
},
{
"name": "HTML",
"bytes": "2525"
},
{
"name": "JavaScript",
"bytes": "1760"
},
{
"name": "LiveScript",
"bytes": "26"
},
{
"name": "Python",
"bytes": "115537"
}
],
"symlink_target": ""
} |
from ..requestlist import RequestList
from ..played import Played
from ..song import Song
from ..artist import Artist
from ..album import Album
from ..siteoptions import SiteOptions
from ..users import Users
from ..suggestions import Suggestions
from ..mistags import Mistags
from ..catalog import Catalog
import sqlalchemy
from sqlalchemy.sql import func, or_
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.ext.serializer import dumps # For backing up tables.
from time import time
from datetime import datetime
import hashlib # Used to verify admin passwords
#import scrypt # Add this
class Queries:
    def __init__(self, db=None):
        """Store the SQLAlchemy session and reset the active model.

        NOTE(review): when db is None this only prints a usage hint and
        carries on with an unusable instance; consider raising instead.
        """
        if db is None:
            print("usage: Queries(db='database session')")
        self.db = db
        self.model = None
@property
def options(self):
try:
return self.__options
except: # Tried to get options before set, so get the options
self.options = self.get_options()
return self.__options
@options.setter
def options(self, value):
self.__options = value
@property
def catalogs(self):
try:
return self.__catalogs
except: # Tried to get the catalogs before set, so get the catalogs
self.catalogs = self.options.catalog.split(",")
return self.__catalogs
@catalogs.setter
def catalogs(self, value):
self.__catalogs = value
    @property
    def model(self):
        # The model class (e.g. Artist or Album) that letter/name queries use.
        return self.__model
    @model.setter
    def model(self, value): # Allows changing the model used in some queries
        self.__model = value
    def verify_user(self, uname, pword):
        """Return True when uname/pword match a row in users, else False.

        SECURITY NOTE(review): passwords are compared as unsalted MD5
        digests, which is not a safe password-hashing scheme. Changing it
        requires migrating the stored hashes (e.g. to scrypt/bcrypt) --
        see the commented-out scrypt import at module level.
        """
        try:
            self.db.query(Users).\
                filter(Users.uname == uname,
                       Users.pword == hashlib.md5(pword.encode()).hexdigest()).one()
            return True
        except NoResultFound:
            return False
    def get_options(self):
        """Fetch the single SiteOptions row."""
        return self.db.query(SiteOptions).one()
    def get_song_stats(self):
        """Aggregate size/time statistics over songs in the active catalogs.

        Returns one row labelled song_size, total_songs, avg_song_size,
        song_time and avg_song_time.
        """
        return self.db.query(func.sum(Song.size).label('song_size'),
                             func.count(Song.id).label('total_songs'),
                             func.avg(Song.size).label('avg_song_size'),
                             func.sum(Song.time).label('song_time'),
                             func.avg(Song.time).label('avg_song_time')).\
            filter(Song.catalog_id.in_(self.catalogs)).one()
    def get_total_artists(self):
        """Count distinct artist fullnames having songs in the active catalogs."""
        return self.db.query(func.count(Artist.fullname.distinct()).label('total')).\
            join(Song).filter(Song.catalog_id.in_(self.catalogs)).one()
    def get_total_albums(self):
        """Count distinct album ids having songs in the active catalogs."""
        return self.db.query(func.count(Album.id.distinct()).label('total')).\
            join(Song).filter(Song.catalog_id.in_(self.catalogs)).one()
    def get_total_played_by_me(self):
        """Count distinct tracks in the active catalogs with played_by_me == 1."""
        return self.db.query(func.count(Played.track_id.distinct()).label('total')).\
            join(Song).\
            filter(Song.catalog_id.in_(self.catalogs), Played.played_by_me == 1).one()
    def get_top_10(self, limit=10):
        """Artists ranked by song count in the active catalogs (default top 10).

        Rows are labelled artist_count, aid and fullname.
        """
        return self.db.query(func.count(Song.artist_id).label('artist_count'),
                             Song.artist_id.label('aid'),
                             Artist.fullname.label('fullname')).\
            join(Artist).\
            filter(Song.catalog_id.in_(self.catalogs)).\
            group_by(Song.artist_id).order_by(func.count(Song.artist_id).desc()).limit(limit)
    def get_letters_counts(self):
        """Per-initial-letter counts of self.model rows (for an A-Z index).

        Uses the SQL LEFT() function via ``func.left``; set ``self.model``
        before calling.
        """
        return self.db.query(func.left(self.model.name, 1).label('letter'),
                             func.count(self.model.id.distinct()).label('count')).\
            join(Song).\
            filter(Song.catalog_id.in_(self.catalogs)).\
            group_by(func.left(self.model.name, 1))
    def get_names_by_letter(self, letter):
        """List self.model rows whose name starts with *letter*, with song counts.

        Albums carry a ``disk`` column and are also ordered by it; artists do
        not, hence the EAFP probe below.
        """
        try:
            self.model.disk # Fails for artist
        except AttributeError: # Don't use disk in order_by
            names = self.db.query(self.model.fullname.label('fullname'),
                                  self.model.id.label('aid'),
                                  func.count(Song.id).label('songcount')).\
                join(Song).\
                filter(self.model.name.startswith(letter), Song.catalog_id.in_(self.catalogs)).\
                order_by(self.model.name).group_by(self.model.id)
        else: # Use disk in order_by
            names = self.db.query(self.model.fullname.label('fullname'),
                                  self.model.id.label('aid'),
                                  self.model.disk.label('disk'),
                                  self.model.year.label('year'),
                                  func.count(Song.id).label('songcount')).\
                join(Song).\
                filter(self.model.name.startswith(letter), Song.catalog_id.in_(self.catalogs)).\
                order_by(self.model.name, self.model.disk).\
                group_by(self.model.id)
        return names
    def get_artist_album_by_id(self, id, days=None):
        # With days: songs of this artist/album added in the last *days* days.
        # Without: the single self.model row itself.
        if days is not None:
            start_time = time() - 60*60*24*days
            return self.db.query(Song).join(self.model).filter(self.model.id == id, Song.addition_time >= start_time).order_by(Song.title)
        else:
            return self.db.query(self.model).filter(self.model.id == id).one()
    def get_song_by_id(self, id):
        # Raises NoResultFound if the id does not exist.
        return self.db.query(Song).filter(Song.id == id).one()
def get_song_by_art_title_alb(self, art=None, title=None, alb=None):
return self.db.query(Song).filter(Song.artist.name == art)
    def get_last_played(self, count=50):
        """Most recent plays (grouped by timestamp), newest first.

        The except branch adds a second group_by on Played.played_id as a
        workaround for stricter ONLY_FULL_GROUP_BY-style SQL modes that make
        the first query raise OperationalError.
        """
        try:
            return self.db.query(func.count(Played.date_played).label('played_count'),\
                                 func.avg(Song.time).label('avg_time'),\
                                 Played).join(Song).\
                filter(Song.catalog_id.in_(self.catalogs)).\
                group_by(Played.date_played).\
                order_by(Played.date_played.desc()).limit(count)
        except sqlalchemy.exc.OperationalError:
            return self.db.query(func.count(Played.date_played).label('played_count'), \
                                 func.avg(Song.time).label('avg_time'), \
                                 Played).join(Song). \
                filter(Song.catalog_id.in_(self.catalogs)). \
                group_by(Played.date_played). \
                group_by(Played.played_id). \
                order_by(Played.date_played.desc()).limit(count)
    def get_requests(self, status='New/Pending'):
        # *status* is a '/'-separated list of statuses, OR-ed together.
        return self.db.query(RequestList).filter(or_(*[RequestList.status == s for s in status.split('/')])).order_by(RequestList.id)
    def get_requests_info(self, status='New/Pending'):
        # Count and total duration of requests in the given statuses.
        return self.db.query(func.count(RequestList.id).label('request_count'),
                             func.sum(Song.time).label('request_length')).\
            join(Song).filter(or_(*[RequestList.status == s for s in status.split('/')])).one()
    def get_all_requests_info(self):
        # Count and total duration per request status.
        return self.db.query(func.count(RequestList.status).label('request_count'),
                             RequestList.status,
                             func.sum(Song.time).label('request_length')).\
            join(Song).group_by(RequestList.status)
    def get_multi_albums(self, artist_name, song_title):
        # All copies of a song (same artist + title) across different albums.
        #ctx.db.default.query(Song).join(Album).join(Artist).filter(Artist.fullname == r.Played.song.artist.fullname, Song.title == r.Played.song.title)
        return self.db.query(Song).join(Album).join(Artist).filter(Artist.fullname == artist_name, Song.title == song_title)
    def get_new_artists(self, days=7):
        """Songs added in the last *days* days, grouped per artist.

        The except branch adds a group_by on Song.catalog as a workaround for
        SQL modes where the first form raises OperationalError.
        """
        start_time = time() - 60*60*24*days
        try:
            return self.db.query(func.count(Song.artist_id).label('new_count'), func.sum(Song.time), func.sum(Song.size), Song).\
                filter(Song.addition_time >= start_time, Song.catalog_id.in_(self.catalogs)).\
                order_by(Song.addition_time.desc()).group_by(Song.artist_id)
        except sqlalchemy.exc.OperationalError:
            return self.db.query(func.count(Song.artist_id).label('new_count'), func.sum(Song.time),
                                 func.sum(Song.size), Song). \
                filter(Song.addition_time >= start_time, Song.catalog_id.in_(self.catalogs)).\
                order_by(Song.addition_time.desc()).group_by(Song.artist_id).group_by(Song.catalog)
    def get_new_counts(self, days=180):
        # Count/duration/size of songs added in the last *days* days.
        start_time = time() - 60*60*24*days
        return self.db.query(func.count(Song.id).label('new_count'), func.sum(Song.time), func.sum(Song.size)).\
            filter(Song.addition_time >= start_time, Song.catalog_id.in_(self.catalogs)).one()
    def get_top_played_by(self, played_by_me=False, limit=10):
        """Most-played songs, optionally restricted by who played them.

        played_by_me: 'all' for every play, otherwise a truthy/falsy value
        matched against Played.played_by_me.
        """
        if played_by_me == 'all':
            p = self.db.query(func.count(Played.date_played).label('played_count'),
                              func.max(Played.date_played).label('date_played'), Played).\
                join(Song).join(Artist).filter(Song.catalog_id.in_(self.catalogs)).\
                group_by(Song.title, Artist.fullname).order_by(func.count(Played.track_id).desc()).limit(limit)
        else:
            p = self.db.query(Played, func.count(Played.date_played).label('played_count'),
                              func.max(Played.date_played).label('date_played')).\
                join(Song).join(Artist).filter(Song.catalog_id.in_(self.catalogs), Played.played_by_me == played_by_me).\
                group_by(Song.title, Artist.fullname).order_by(func.count(Played.track_id).desc()).limit(limit)
        return p
    def get_top_requested(self, limit=10):
        # Songs most often requested and actually played.
        return self.db.query(Song).join(RequestList).\
            filter(Song.catalog_id.in_(self.catalogs), RequestList.status == 'played').\
            group_by(RequestList.song_id).\
            order_by(func.count(RequestList.song_id).desc()).limit(limit)
    def get_top_requestors(self, limit=10):
        # Requestors ranked by number of requests, with their latest request time.
        return self.db.query(func.count(RequestList.name).label('request_count'),
                             RequestList.name.label('requestor'),
                             func.max(RequestList.t_stamp).label('last_request')).\
            join(Song).filter(Song.catalog_id.in_(self.catalogs)).group_by(RequestList.name).\
            order_by(func.count(RequestList.name).desc()).limit(limit)
    def get_song_by_ata(self, artist, title, album):
        # Exact-match lookup on artist fullname + song title + album prename.
        return self.db.query(Song).join(Artist).join(Album).\
            filter(Album.prename == album).\
            filter(Artist.fullname == artist).\
            filter(Song.title == title)
    def add_played_song(self, track_id, played_by, played_by_me):
        # Records a play at the current UTC time and commits immediately.
        np = Played(track_id=track_id, date_played=datetime.utcnow(), played_by=played_by, played_by_me=played_by_me)
        self.db.add(np)
        self.db.commit()
    def full_text_search(self, phrase):
        # MATCH() full-text search across title, artist and album names.
        return self.db.query(Song).join(Artist).join(Album).\
            filter(((Song.title.match(phrase)) | (Artist.name.match(phrase)) | (Album.name.match(phrase))), Song.catalog_id.in_(self.catalogs))
    def advanced_search(self, search_for, phrase):
        # LIKE search on a single field; *phrase* is expected to carry its own wildcards.
        search = {'title': Song.title,
                  'artist': Artist.name,
                  'album': Album.name}
        return self.db.query(Song).join(Artist).join(Album).filter(search[search_for].like(phrase), Song.catalog_id.in_(self.catalogs))
    def get_current_requests(self):
        # Requests still waiting to be played (new or pending).
        return self.db.query(RequestList).\
            filter((RequestList.status == 'new') | (RequestList.status == 'pending')).order_by(RequestList.id)
    def get_suggestions(self):
        return self.db.query(Suggestions)
    def delete_suggestion(self, id):
        # Raises NoResultFound if the id does not exist; commits the delete.
        row = self.db.query(Suggestions).filter(Suggestions.id==id).one()
        self.db.delete(row)
        return self.db.commit()
    def get_suggestions_count(self):
        return self.db.query(func.count(Suggestions.id).label('suggestions_count')).one()
    def get_mistags_count(self):
        return self.db.query(func.count(Mistags.id).label('mistags_count')).one()
    def get_mistags(self):
        return self.db.query(Mistags)
    def delete_mistag(self, id):
        # Raises NoResultFound if the id does not exist; commits the delete.
        row = self.db.query(Mistags).filter(Mistags.id==id).one()
        self.db.delete(row)
        return self.db.commit()
    def change_request_status(self, id, status):
        """Set a request's status, or delete it when status == 'delete'.

        Returns the (possibly deleted) row object after committing.
        """
        row = self.db.query(RequestList).filter(RequestList.id==id).one()
        if status == 'delete':
            self.db.delete(row)
        else:
            row.status = status
        self.db.commit()
        return row
    def get_siteoptions(self):
        return self.db.query(SiteOptions).one()
    def save_siteoptions(self, **args):
        # 'sid' selects the row; 'cat_group' is a list joined into the catalog
        # column; every other keyword is written verbatim to the row.
        row = self.db.query(SiteOptions).filter(SiteOptions.id==args['sid']).one()
        for field in args:
            if field == 'cat_group':
                row.catalog = ','.join(args[field])
            elif field != 'sid':
                row.__setattr__(field, args[field])
        return self.db.commit()
    def get_catalogs(self):
        return self.db.query(Catalog).order_by(Catalog.name)
    def backup_database(self):
        # Serializes every row of the listed tables with the module-level
        # dumps() (imported elsewhere in this file -- presumably a JSON/pickle
        # serializer; TODO confirm) and returns one string per table.
        tables = (RequestList, Played, Song, Artist, Album, Mistags, Catalog)
        s = []
        for t in tables:
            s.append(dumps(self.db.query(t).all()))
        return s
    def is_updating(self):
        # Stub: this backend never reports an update in progress.
        return False
| {
"content_hash": "2b133c50c020e39510f223b87f781e8e",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 152,
"avg_line_length": 45.40453074433657,
"alnum_prop": 0.5636493228795438,
"repo_name": "bmillham/djrq2",
"id": "5dbad4a68ffe56fb5d97bc8982f0fb1571d44ca5",
"size": "14030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/app/djrq/model/ampache/queries/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22622"
},
{
"name": "JavaScript",
"bytes": "59510"
},
{
"name": "Python",
"bytes": "267514"
},
{
"name": "Shell",
"bytes": "1030"
}
],
"symlink_target": ""
} |
'''Run two retrace instances in parallel, comparing generated snapshots.
'''
import math
import optparse
import os.path
import subprocess
import platform
import sys
from PIL import Image
from snapdiff import Comparer
from highlight import AutoHighlighter
import jsondiff
# Null file, to use when we're not interested in subprocesses output.
# Deliberately kept open for the life of the process: it is handed to every
# Popen below as the stderr (and sometimes stdout) sink.
NULL = open(os.path.devnull, 'wb')
class RetraceRun:
    """Handle on a running retrace subprocess that streams PNM snapshots."""

    def __init__(self, process):
        self.process = process

    def nextSnapshot(self):
        """Read the next snapshot from the subprocess.

        Returns an (image, call number) pair, or (None, None) once the
        stream is exhausted.  The call number is carried in the PNM comment.
        """
        snapshot, comment = read_pnm(self.process.stdout)
        if snapshot is None:
            return None, None
        return snapshot, int(comment.strip())

    def terminate(self):
        """Kill the retrace subprocess, ignoring an already-dead process."""
        try:
            self.process.terminate()
        except OSError:
            # Avoid http://bugs.python.org/issue14252
            pass
class Retracer:
    """Launches the retrace executable with a fixed set of trailing arguments."""
    def __init__(self, retraceExe, args, env=None):
        self.retraceExe = retraceExe
        self.args = args
        self.env = env
    def _retrace(self, args, stdout=subprocess.PIPE):
        # Builds and echoes (to stderr) the full command line, then spawns it.
        cmd = [
            self.retraceExe,
        ] + args + self.args
        if self.env:
            for name, value in self.env.iteritems():
                sys.stderr.write('%s=%s ' % (name, value))
        sys.stderr.write(' '.join(cmd) + '\n')
        try:
            return subprocess.Popen(cmd, env=self.env, stdout=stdout, stderr=NULL)
        except OSError, ex:
            sys.stderr.write('error: failed to execute %s: %s\n' % (cmd[0], ex.strerror))
            sys.exit(1)
    def retrace(self, args):
        # Run to completion and return the exit code.
        # NOTE(review): the *args* parameter is ignored -- _retrace is always
        # called with an empty extra-argument list.
        p = self._retrace([])
        p.wait()
        return p.returncode
    def snapshot(self, call_nos):
        # Start a run that emits snapshots ('-s -') at the given callset.
        process = self._retrace([
            '-s', '-',
            '-S', call_nos,
        ])
        return RetraceRun(process)
    def dump_state(self, call_no):
        '''Get the state dump at the specified call no.'''
        p = self._retrace([
            '-D', str(call_no),
        ])
        state = jsondiff.load(p.stdout)
        p.wait()
        return state.get('parameters', {})
    def diff_state(self, ref_call_no, src_call_no, stream):
        '''Compare the state between two calls.'''
        ref_state = self.dump_state(ref_call_no)
        src_state = self.dump_state(src_call_no)
        stream.flush()
        differ = jsondiff.Differ(stream)
        differ.visit(ref_state, src_state)
        stream.write('\n')
def read_pnm(stream):
    '''Read a PNM from the stream, and return the image object, and the comment.

    Returns (None, None) at end of stream.  8-bit formats (P5/P6) come back
    as PIL Image objects; float formats (Pf/PF/PX) come back as numpy arrays
    of shape (height, width, channels).  'PX' is presumably a 4-channel float
    extension emitted by the retracer -- TODO confirm.
    '''
    magic = stream.readline()
    if not magic:
        return None, None
    magic = magic.rstrip()
    if magic == 'P5':
        channels = 1
        bytesPerChannel = 1
        mode = 'L'
    elif magic == 'P6':
        channels = 3
        bytesPerChannel = 1
        mode = 'RGB'
    elif magic == 'Pf':
        channels = 1
        bytesPerChannel = 4
        mode = 'R'
    elif magic == 'PF':
        channels = 3
        bytesPerChannel = 4
        mode = 'RGB'
    elif magic == 'PX':
        channels = 4
        bytesPerChannel = 4
        mode = 'RGB'
    else:
        raise Exception('Unsupported magic `%s`' % magic)
    # Accumulate '#' comment lines (the retracer puts the call number here).
    comment = ''
    line = stream.readline()
    while line.startswith('#'):
        comment += line[1:]
        line = stream.readline()
    width, height = map(int, line.strip().split())
    maximum = int(stream.readline().strip())
    if bytesPerChannel == 1:
        assert maximum == 255
    else:
        assert maximum == 1
    data = stream.read(height * width * channels * bytesPerChannel)
    if bytesPerChannel == 4:
        # Image magic only supports single channel floating point images, so
        # represent the image as numpy arrays
        import numpy
        pixels = numpy.fromstring(data, dtype=numpy.float32)
        pixels.resize((height, width, channels))
        return pixels, comment
    image = Image.frombuffer(mode, (width, height), data, 'raw', mode, 0, 1)
    return image, comment
def dumpNumpyImage(output, pixels, filename):
    """Save a float numpy image of shape (height, width, channels) as a PNG.

    Values are scaled from [0, 1] to 8-bit and clamped.  Images with fewer
    than 3 channels are padded with opaque (255) channels so they can be
    written as RGB.
    """
    height, width, channels = pixels.shape
    import numpy
    pixels = (pixels*255).clip(0, 255).astype('uint8')
    if 0:
        # XXX: Doesn't work somehow
        im = Image.fromarray(pixels)
    else:
        # http://code.activestate.com/recipes/577591-conversion-of-pil-image-and-numpy-array/
        pixels = pixels.reshape(height*width, channels)
        if channels == 4:
            mode = 'RGBA'
        else:
            if channels < 3:
                # Pad up to 3 channels.  The original referenced the
                # undefined names 'arr' and 'heigth' (NameError) and never
                # updated 'channels', so the assert below always fired.
                pixels = numpy.c_[pixels, 255*numpy.ones((height * width, 3 - channels), numpy.uint8)]
                channels = 3
            assert channels == 3
            mode = 'RGB'
        im = Image.frombuffer(mode, (width, height), pixels.tostring(), 'raw', mode, 0, 1)
        im.save(filename)
    if 0:
        # Dump to stdout (debugging aid, disabled)
        for y in range(height):
            output.write('  ')
            for x in range(width):
                for c in range(channels):
                    output.write('%0.9g,' % pixels[y, x, c])
                output.write('  ')
            output.write('\n')
def parse_env(optparser, entries):
    '''Translate a list of NAME=VALUE entries into an environment dictionary.

    Returns None when *entries* is empty or None (meaning "inherit the
    current environment unchanged"); otherwise returns a copy of os.environ
    augmented with the given entries.  Malformed entries (no '=') are
    reported through optparser.error(), which exits the program.
    '''
    if not entries:
        return None
    env = os.environ.copy()
    for entry in entries:
        try:
            name, var = entry.split('=', 1)
        except ValueError:
            # Only the missing-'=' unpacking failure is expected here; the
            # original caught Exception, which could mask real bugs.
            optparser.error('invalid environment entry %r' % entry)
        env[name] = var
    return env
def main():
    '''Main program.

    Runs the reference and source retracers in lockstep, compares each
    snapshot pair, and reports per-call precision (in bits).  Mismatching
    calls optionally get their images and state diffs dumped to disk.
    '''
    global options
    # Parse command line options
    optparser = optparse.OptionParser(
        usage='\n\t%prog [options] -- [glretrace options] <trace>',
        version='%%prog')
    optparser.add_option(
        '-r', '--retrace', metavar='PROGRAM',
        type='string', dest='retrace', default='glretrace',
        help='retrace command [default: %default]')
    optparser.add_option(
        '--ref-driver', metavar='DRIVER',
        type='string', dest='ref_driver', default=None,
        help='force reference driver')
    optparser.add_option(
        '--src-driver', metavar='DRIVER',
        type='string', dest='src_driver', default=None,
        help='force source driver')
    optparser.add_option(
        '--ref-arg', metavar='OPTION',
        type='string', action='append', dest='ref_args', default=[],
        help='pass argument to reference retrace')
    optparser.add_option(
        '--src-arg', metavar='OPTION',
        type='string', action='append', dest='src_args', default=[],
        help='pass argument to source retrace')
    optparser.add_option(
        '--ref-env', metavar='NAME=VALUE',
        type='string', action='append', dest='ref_env', default=[],
        help='add variable to reference environment')
    optparser.add_option(
        '--src-env', metavar='NAME=VALUE',
        type='string', action='append', dest='src_env', default=[],
        help='add variable to source environment')
    optparser.add_option(
        '--diff-prefix', metavar='PATH',
        type='string', dest='diff_prefix', default='.',
        help='prefix for the difference images')
    optparser.add_option(
        '-t', '--threshold', metavar='BITS',
        type="float", dest="threshold", default=12.0,
        help="threshold precision [default: %default]")
    optparser.add_option(
        '-S', '--snapshot-frequency', metavar='CALLSET',
        type="string", dest="snapshot_frequency", default='draw',
        help="calls to compare [default: %default]")
    optparser.add_option(
        '--diff-state',
        action='store_true', dest='diff_state', default=False,
        help='diff state between failing calls')
    optparser.add_option(
        '-o', '--output', metavar='FILE',
        type="string", dest="output",
        help="output file [default: stdout]")
    (options, args) = optparser.parse_args(sys.argv[1:])
    ref_env = parse_env(optparser, options.ref_env)
    src_env = parse_env(optparser, options.src_env)
    if not args:
        optparser.error("incorrect number of arguments")
    if options.ref_driver:
        options.ref_args.insert(0, '--driver=' + options.ref_driver)
    if options.src_driver:
        options.src_args.insert(0, '--driver=' + options.src_driver)
    # Two independent retracer processes over the same trace file(s).
    refRetracer = Retracer(options.retrace, options.ref_args + args, ref_env)
    srcRetracer = Retracer(options.retrace, options.src_args + args, src_env)
    if options.output:
        output = open(options.output, 'wt')
    else:
        output = sys.stdout
    highligher = AutoHighlighter(output)
    highligher.write('call\tprecision\n')
    # last_good/last_bad track the most recent passing/failing call numbers,
    # used to bound the optional state diff below.
    last_bad = -1
    last_good = 0
    refRun = refRetracer.snapshot(options.snapshot_frequency)
    try:
        srcRun = srcRetracer.snapshot(options.snapshot_frequency)
        try:
            while True:
                # Get the reference image
                refImage, refCallNo = refRun.nextSnapshot()
                if refImage is None:
                    break
                # Get the source image
                srcImage, srcCallNo = srcRun.nextSnapshot()
                if srcImage is None:
                    break
                assert refCallNo == srcCallNo
                callNo = refCallNo
                # Compare the two images
                if isinstance(refImage, Image.Image) and isinstance(srcImage, Image.Image):
                    # Using PIL
                    numpyImages = False
                    comparer = Comparer(refImage, srcImage)
                    precision = comparer.precision()
                else:
                    # Using numpy (for floating point images)
                    # TODO: drop PIL when numpy path becomes general enough
                    import numpy
                    assert not isinstance(refImage, Image.Image)
                    assert not isinstance(srcImage, Image.Image)
                    numpyImages = True
                    assert refImage.shape == srcImage.shape
                    # Precision = -log2 of the mean squared error (with an
                    # epsilon so identical images do not divide by zero).
                    diffImage = numpy.square(srcImage - refImage)
                    height, width, channels = diffImage.shape
                    square_error = numpy.sum(diffImage)
                    square_error += numpy.finfo(numpy.float32).eps
                    rel_error = square_error / float(height*width*channels)
                    bits = -math.log(rel_error)/math.log(2.0)
                    precision = bits
                mismatch = precision < options.threshold
                if mismatch:
                    highligher.color(highligher.red)
                    highligher.bold()
                highligher.write('%u\t%f\n' % (callNo, precision))
                if mismatch:
                    highligher.normal()
                if mismatch:
                    if options.diff_prefix:
                        # Dump ref/src (and, for PIL images, diff) images.
                        prefix = os.path.join(options.diff_prefix, '%010u' % callNo)
                        prefix_dir = os.path.dirname(prefix)
                        if not os.path.isdir(prefix_dir):
                            os.makedirs(prefix_dir)
                        if numpyImages:
                            dumpNumpyImage(output, refImage, prefix + '.ref.png')
                            dumpNumpyImage(output, srcImage, prefix + '.src.png')
                        else:
                            refImage.save(prefix + '.ref.png')
                            srcImage.save(prefix + '.src.png')
                            comparer.write_diff(prefix + '.diff.png')
                    if last_bad < last_good and options.diff_state:
                        srcRetracer.diff_state(last_good, callNo, output)
                    last_bad = callNo
                else:
                    last_good = callNo
                highligher.flush()
        finally:
            srcRun.terminate()
    finally:
        refRun.terminate()
| {
"content_hash": "ddbe24d470e840395ac801026ef0b22e",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 99,
"avg_line_length": 32.54520547945206,
"alnum_prop": 0.5527401296405421,
"repo_name": "schulmar/apitrace",
"id": "55f771e2184e0d4d6746d1ee70ca7393467840a9",
"size": "13167",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/retracediff.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9563"
},
{
"name": "C++",
"bytes": "7969805"
},
{
"name": "CMake",
"bytes": "66632"
},
{
"name": "Emacs Lisp",
"bytes": "204"
},
{
"name": "Java",
"bytes": "16257"
},
{
"name": "Makefile",
"bytes": "1056"
},
{
"name": "Objective-C++",
"bytes": "14170"
},
{
"name": "Python",
"bytes": "2018017"
},
{
"name": "Shell",
"bytes": "1037"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter LogEntry.object_pk to an indexed CharField(max_length=255)."""

    dependencies = [
        ('auditlog', '0006_object_pk_index'),
    ]

    operations = [
        migrations.AlterField(
            model_name='logentry',
            name='object_pk',
            field=models.CharField(verbose_name='object pk', max_length=255, db_index=True),
        ),
    ]
| {
"content_hash": "1a4408655df6d51c5e81e1d33e2a7e87",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 92,
"avg_line_length": 23.5,
"alnum_prop": 0.6052009456264775,
"repo_name": "Zmeylol/auditlog",
"id": "3a724e8d734e7222b823cb4ffcfb66353ba42836",
"size": "447",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/auditlog/migrations/0007_object_pk_type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54175"
}
],
"symlink_target": ""
} |
from xpy.Anymethod import anymethod
# When True, the demo class hierarchy at the bottom of this module is defined
# (exercises SuperMethod's MRO dispatch).  Off by default.
_is_test_classes = False
class SuperMethod(object):
    """
    class MyClass(...):
        @SuperMethod
        def foo(super, self, *args, **kwargs):
            #
            # equivalent to super(MyClass, self).foo()
            #
            super.foo()
    #
    # A SuperMethod is provided with an appropriately populated super argument
    # which is equivalent to super(MyClass, self) for methods which do not have
    # access to a reference to *their own defining* class.
    #
    # This occurs in metaclasses which instantiate their own classes and
    # instances of the class which *itself* is in the process of being
    # constructed.  In that event, there is no reference to the class available
    # in the namespace of the function body.
    #
    """
    def __init__(self, fn):
        # The wrapped function; it receives (super, self/cls, *args, **kwargs).
        self.fn = fn
    def __call__(self, *args, **kwargs):
        # Direct invocation bypasses the descriptor protocol entirely.
        return self.fn(*args, **kwargs)
    def __get__(self, obj, typ = None):
        # Debug.show_args()
        #
        # pre: the object super() will be bound to (instance if available,
        # otherwise the class); cls: the class whose MRO we walk.
        pre = obj if obj is not None else typ
        cls = obj if isinstance(obj, type) else typ
        # func_name on Python 2, __name__ on Python 3.
        fn_name = getattr(self.fn, 'func_name', getattr(self.fn, '__name__'))
        # Walk the MRO to find the class whose dict holds *this* descriptor;
        # that class is the one super() must be anchored at.
        for c in cls.__mro__:
            m = c.__dict__.get(fn_name)
            if m is not None:
                if m is self:
                    assert m is self
                    # found method
                    S = super(c, pre)
                    break
            else:
                S = None
        def _super(*args, **kwargs):
            # Prepend the computed super object to the call.
            return self(S, pre, *args, **kwargs)
        result = _super
        return result
# Demo diamond hierarchy, only defined when _is_test_classes is True.
if _is_test_classes:
    # SuperFoo.foo will only be invoked once between SuperBar and SuperCar
    #
    #######################
    #                     #
    #      SuperFoo       #
    #       /    \        #
    #  SuperBar SuperCar  #
    #       \    /        #
    #      SuperBaz       #
    #          |          #
    #      SuperChaz      #
    #                     #
    #######################
    class SuperFoo(object):
        @anymethod
        def foo(self, x = None):
            print('SuperFoo', 'self', self, 'x', x)
    class SuperBar(SuperFoo):
        # Uses SuperMethod: receives its super() as the first argument.
        @SuperMethod
        def foo(super, self, x = None):
            super.foo('bar')
            print('SuperBar', 'self', self)
    class SuperCar(SuperFoo):
        # Plain explicit super() for contrast with SuperBar.
        @anymethod
        def foo(self, x = None):
            super(SuperCar, self).foo('car')
            print('SuperCar', 'self', self)
    class SuperBaz(SuperBar, SuperCar):
        @SuperMethod
        def foo(super, self, x = None):
            super.foo()
            # assert self is super.__self_class__
            # assert SuperBaz is super.__thisclass__
            print('SuperBaz', 'self', self)
            # print('bar', 'self', self)
    class SuperChaz(SuperBaz):
        pass
| {
"content_hash": "3b375c6d92fe7b34ac5620f840c4f8b1",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 79,
"avg_line_length": 27.941747572815533,
"alnum_prop": 0.49339819318971506,
"repo_name": "dbeal/xpy",
"id": "47c2b6b10eefd94bed1d802a3f5eaeeccc77e9a7",
"size": "2971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xpy/SuperMethod.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80012"
},
{
"name": "Shell",
"bytes": "82"
}
],
"symlink_target": ""
} |
import requests as r
import unittest
import json
import time
class ApiUsersTest(unittest.TestCase):
    """Live-server smoke tests against the /api/v1 users endpoints.

    NOTE(review): requires the service to be running on localhost:5000;
    these are integration tests, not unit tests.  (Python 2 syntax.)
    """
    def setUp(self):
        self.URL_PRE = "http://localhost:5000/api/v1"
        pass
    def test_list_users(self):
        response = r.get(self.URL_PRE + "/users")
        print "Testing LIST USERS\t\t",
        self.assertTrue(response.status_code == 200)
        print "PASSED"
    def test_create_users(self):
        ##########################################################
        print "Testing CREATE USER ENDPOINT\t\t",
        # Timestamped name keeps repeated runs from colliding.
        username = "UnitTest@" + str(int(time.time()))
        resp = r.post(self.URL_PRE + "/users", data=json.dumps({
            "user": {
                "username": username,
                "password": username + "pw",
                "email": username + "email"
            }
        }))
        self.assertTrue(resp.status_code == 200)
        print "PASSED"
        ##########################################################
        ##########################################################
        userid = resp.json()['user']['id']
        print "Testing CREATE USER actually worked\t",
        resp = r.get(self.URL_PRE + "/users/" + userid)
        self.assertTrue(resp.status_code)
        jsonresp = resp.json()
        self.assertTrue(jsonresp['user']['id'] == userid)
        self.assertTrue(jsonresp['user']['username'] == username)
        print "PASSED"
        ##########################################################
| {
"content_hash": "a908493357e9e31bafd083362ea08e80",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 66,
"avg_line_length": 31,
"alnum_prop": 0.4596774193548387,
"repo_name": "krrg/gnomon",
"id": "35171c32a4348d28455e091af3e12293525fd404",
"size": "1753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_api_users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3175"
},
{
"name": "JavaScript",
"bytes": "17605"
},
{
"name": "Python",
"bytes": "44037"
}
],
"symlink_target": ""
} |
import json
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from data_api.models import System, Signal, LocalComputer, Blob
import django.utils.timezone as tmz
class TestViews(TestCase):
    """Endpoint tests for signal point data and blob data (GET and POST)."""

    def setUp(self):
        # One test client is enough (the original constructed Client twice).
        self.client = Client()
        # when creating a System
        self.system = System.objects.create(name="a_name")
        self.local_computer = LocalComputer.objects.create(name="local_computer")

    def test_get_points(self):
        """Points added to a signal come back from the signal_data endpoint."""
        signal = Signal.objects.create(system=self.system, name='a_signal')
        n1 = Signal.utc_to_millisec(tmz.now())
        n2 = Signal.utc_to_millisec(tmz.now() + tmz.timedelta(seconds=1))
        signal.add_points([[1, n1], [2, n2]])

        response = self.client.get(reverse('signal_data', args=(signal.id,)))

        points = json.loads(response.content)
        self.assertEqual(2, len(points))
        self.assertEqual(1, points[0][0])
        self.assertEqual(2, points[1][0])
        self.assertAlmostEqual(n1, points[0][1], 2)
        self.assertAlmostEqual(n2, points[1][1], 2)

    def test_append_points(self):
        """Point data POSTed to signal_data is persisted on the signal."""
        signal = Signal.objects.create(system=self.system, name='a_signal')
        n1 = Signal.utc_to_millisec(tmz.now())
        n2 = Signal.utc_to_millisec(tmz.now() + tmz.timedelta(seconds=1))
        json_data = json.dumps([[1, n1], [2, n2]])

        self.client.post(reverse('signal_data', args=(signal.id,)), data=json_data,
                         content_type="application/json")

        points = signal.get_data()
        self.assertEqual(2, len(points))
        self.assertEqual(1, points[0][0])
        self.assertEqual(2, points[1][0])
        self.assertAlmostEqual(n1, points[0][1], 2)
        self.assertAlmostEqual(n2, points[1][1], 2)

    def test_get_blob(self):
        """Blob data is returned verbatim by the blob_data endpoint."""
        # with a json blob
        json_blob = Blob.objects.create(local_computer=self.local_computer, name='a_signal_blob')
        # that has some data
        data = [{'a': 1, 'b': [1, 2, 3]}, {'a': 4.0, 'b': 'a_string'}]
        json_blob.set_data(json.dumps(data))

        # should be able to get the data.
        response = self.client.get(reverse('blob_data', args=(json_blob.id,)), content_type="application/octet-stream")
        json_out = json.loads(response.content)
        self.assertDictEqual(json_out[0], data[0])
        self.assertDictEqual(json_out[1], data[1])

    def test_post_blob(self):
        """Data POSTed to blob_data is persisted on the blob."""
        # with a blob
        json_blob = Blob.objects.create(local_computer=self.local_computer, name='a_signal_blob')

        # when posting data.
        data = [{'a': 1, 'b': [1, 2, 3]}, {'a': 4.0, 'b': 'a_string'}]
        self.client.post(reverse('blob_data', args=(json_blob.id,)), data=json.dumps(data),
                         content_type="application/octet-stream")

        # when we refresh the data
        json_blob = Blob.objects.get(id=json_blob.id)
        persisted_json = json.loads(json_blob.get_data())

        # we should get the same data out.
        self.assertDictEqual(persisted_json[0], data[0])
        self.assertDictEqual(persisted_json[1], data[1])
| {
"content_hash": "f6282c4d24fce6e481f905618063e3f3",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 118,
"avg_line_length": 42.67948717948718,
"alnum_prop": 0.6094923400420547,
"repo_name": "kietdlam/Dator",
"id": "4ce137f726fa735603c95fe903cf286064a2fb36",
"size": "3329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_api/tests/test_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "16722"
},
{
"name": "JavaScript",
"bytes": "10757113"
},
{
"name": "Python",
"bytes": "76849"
},
{
"name": "Shell",
"bytes": "2807"
}
],
"symlink_target": ""
} |
import os
import re
import subprocess
import time
import sys
import random
import string
from Queue import Queue, Empty
from threading import Thread
from datetime import date
from sonLib.bioio import logger
from sonLib.bioio import system
from jobTree.batchSystems.abstractBatchSystem import AbstractBatchSystem
from jobTree.src.master import getParasolResultsFileName
class MemoryString:
    """A memory quantity such as '512M' or '2.5G', comparable by byte count.

    Suffixes are decimal (K = 1000), matching LSF's reporting.  A string
    with no recognized suffix is taken as a plain byte count.
    """
    def __init__(self, string):
        suffix = string[-1]
        if suffix in ('K', 'M', 'G', 'T'):
            self.unit = suffix
            self.val = float(string[:-1])
        else:
            self.unit = 'B'
            self.val = float(string)
        # Cache the byte count; __cmp__ uses it on every comparison.
        self.bytes = self.byteVal()
    def __str__(self):
        # Plain byte counts are printed without a suffix.
        if self.unit == 'B':
            return str(self.val)
        return str(self.val) + self.unit
    def byteVal(self):
        """Return this quantity expressed in bytes."""
        multipliers = {'B': 1,
                       'K': 1000,
                       'M': 1000000,
                       'G': 1000000000,
                       'T': 1000000000000}
        return self.val * multipliers[self.unit]
    def __cmp__(self, other):
        # Python 2 ordering hook: order by absolute byte count.
        return cmp(self.bytes, other.bytes)
def prepareBsub(cpu, mem):
    """Build the common bsub argument list for a job submission.

    cpu: number of slots (int) or None for no constraint.
    mem: requested memory in bytes or None; translated to LSF's MB-based
         select/rusage resource string and -M limit.
    Returns the argument list (empty strings stand in for omitted options).
    """
    if mem is None:
        memOption = ''
    else:
        megabytes = int(mem / 1000000)
        memOption = ('-R "select[type==X86_64 && mem > %d] rusage[mem=%d]" -M%d000'
                     % (megabytes, megabytes, megabytes))
    cpuOption = '' if cpu is None else '-n %d' % int(cpu)
    # Output/error go to /dev/null; job results are collected via bjobs/bacct.
    return ["bsub", memOption, cpuOption, "-cwd", ".", "-o", "/dev/null", "-e", "/dev/null"]
def bsub(bsubline):
    """Submit a job via bsub and return its integer LSF job id.

    Expects the first line of bsub's output to carry the job id as the
    second whitespace token wrapped in angle brackets (stripped below).
    """
    process = subprocess.Popen(" ".join(bsubline), shell=True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
    liney = process.stdout.readline()
    logger.info("BSUB: " + liney)
    result = int(liney.strip().split()[1].strip('<>'))
    logger.debug("Got the job id: %s" % (str(result)))
    return result
def getjobexitcode(lsfJobID):
    """Return 0 if the LSF job finished OK, 1 if it failed, None if unknown/running.

    lsfJobID is a (job, task) pair; only the job number is used here.
    Tries bjobs first, falling back to the slower bacct when bjobs no longer
    knows the job.
    """
    job, task = lsfJobID
    #first try bjobs to find out job state
    args = ["bjobs", "-l", str(job)]
    logger.info("Checking job exit code for job via bjobs: " + str(job))
    process = subprocess.Popen(" ".join(args), shell=True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
    started = 0
    # Scan bjobs' long-format output for known status phrases.
    for line in process.stdout:
        if line.find("Done successfully") > -1:
            logger.info("bjobs detected job completed for job: " + str(job))
            return 0
        elif line.find("Completed <exit>") > -1:
            logger.info("bjobs detected job failed for job: " + str(job))
            return 1
        elif line.find("New job is waiting for scheduling") > -1:
            logger.info("bjobs detected job pending scheduling for job: " + str(job))
            return None
        elif line.find("PENDING REASONS") > -1:
            logger.info("bjobs detected job pending for job: " + str(job))
            return None
        elif line.find("Started on ") > -1:
            started = 1
    if started == 1:
        logger.info("bjobs detected job started but not completed: " + str(job))
        return None
    #if not found in bjobs, then try bacct (slower than bjobs)
    logger.info("bjobs failed to detect job - trying bacct: " + str(job))
    args = ["bacct", "-l", str(job)]
    logger.info("Checking job exit code for job via bacct:" + str(job))
    process = subprocess.Popen(" ".join(args), shell=True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
    for line in process.stdout:
        if line.find("Completed <done>") > -1:
            logger.info("Detected job completed for job: " + str(job))
            return 0
        elif line.find("Completed <exit>") > -1:
            logger.info("Detected job failed for job: " + str(job))
            return 1
    logger.info("Cant determine exit code for job or job still running: " + str(job))
    return None
class Worker(Thread):
    """Daemon thread that submits queued jobs to LSF and polls for completions.

    Communicates with LSFBatchSystem through newJobsQueue (in) and
    updatedJobsQueue (out), and writes id mappings directly onto *boss*.
    """
    def __init__(self, newJobsQueue, updatedJobsQueue, boss):
        Thread.__init__(self)
        self.newJobsQueue = newJobsQueue
        self.updatedJobsQueue = updatedJobsQueue
        self.currentjobs = list()   # jobs taken off the queue, not yet submitted
        self.runningjobs = set()    # (lsfJobID, None) pairs submitted to LSF
        self.boss = boss
    def run(self):
        # Poll loop: drain new jobs, submit them, then check running jobs.
        while True:
            # Load new job ids:
            while not self.newJobsQueue.empty():
                self.currentjobs.append(self.newJobsQueue.get())
            # Launch jobs as necessary:
            while len(self.currentjobs) > 0:
                jobID, bsubline = self.currentjobs.pop()
                lsfJobID = bsub(bsubline)
                self.boss.jobIDs[(lsfJobID, None)] = jobID
                self.boss.lsfJobIDs[jobID] = (lsfJobID, None)
                self.runningjobs.add((lsfJobID, None))
            # Test known job list
            for lsfJobID in list(self.runningjobs):
                exit = getjobexitcode(lsfJobID)
                if exit is not None:
                    self.updatedJobsQueue.put((lsfJobID, exit))
                    self.runningjobs.remove(lsfJobID)
            # Fixed 10s poll interval between sweeps.
            time.sleep(10)
class LSFBatchSystem(AbstractBatchSystem):
"""The interface for running jobs on lsf, runs all the jobs you
give it as they come in, but in parallel.
"""
    def __init__(self, config, maxCpus, maxMemory):
        """Set up id maps, queues and the background submission Worker."""
        AbstractBatchSystem.__init__(self, config, maxCpus, maxMemory) #Call the parent constructor
        self.lsfResultsFile = getParasolResultsFileName(config.attrib["job_tree"])
        #Reset the job queue and results (initially, we do this again once we've killed the jobs)
        self.lsfResultsFileHandle = open(self.lsfResultsFile, 'w')
        self.lsfResultsFileHandle.close() #We lose any previous state in this file, and ensure the files existence
        self.currentjobs = set()          # jobTree job ids issued and not yet killed
        self.obtainSystemConstants()
        self.jobIDs = dict()              # (lsfJobID, task) -> jobTree job id
        self.lsfJobIDs = dict()           # jobTree job id -> (lsfJobID, task)
        self.nextJobID = 0
        self.newJobsQueue = Queue()
        self.updatedJobsQueue = Queue()
        # Daemon worker thread handles the actual bsub submissions and polling.
        self.worker = Worker(self.newJobsQueue, self.updatedJobsQueue, self)
        self.worker.setDaemon(True)
        self.worker.start()
def __des__(self):
#Closes the file handle associated with the results file.
self.lsfResultsFileHandle.close() #Close the results file, cos were done.
def issueJob(self, command, memory, cpu):
jobID = self.nextJobID
self.nextJobID += 1
self.currentjobs.add(jobID)
bsubline = prepareBsub(cpu, memory) + [command]
self.newJobsQueue.put((jobID, bsubline))
logger.info("Issued the job command: %s with job id: %s " % (command, str(jobID)))
return jobID
def getLsfID(self, jobID):
if not jobID in self.lsfJobIDs:
RuntimeError("Unknown jobID, could not be converted")
(job,task) = self.lsfJobIDs[jobID]
if task is None:
return str(job)
else:
return str(job) + "." + str(task)
def killJobs(self, jobIDs):
"""Kills the given job IDs.
"""
for jobID in jobIDs:
logger.info("DEL: " + str(self.getLsfID(jobID)))
self.currentjobs.remove(jobID)
process = subprocess.Popen(["bkill", self.getLsfID(jobID)])
del self.jobIDs[self.lsfJobIDs[jobID]]
del self.lsfJobIDs[jobID]
toKill = set(jobIDs)
while len(toKill) > 0:
for jobID in list(toKill):
if getjobexitcode(self.lsfJobIDs[jobID]) is not None:
toKill.remove(jobID)
if len(toKill) > 0:
logger.critical("Tried to kill some jobs, but something happened and they are still going, so I'll try again")
time.sleep(5)
def getIssuedJobIDs(self):
"""A list of jobs (as jobIDs) currently issued (may be running, or maybe
just waiting).
"""
return self.currentjobs
def getRunningJobIDs(self):
"""Gets a map of jobs (as jobIDs) currently running (not just waiting)
and a how long they have been running for (in seconds).
"""
times = {}
currentjobs = set(self.lsfJobIDs[x] for x in self.getIssuedJobIDs())
process = subprocess.Popen(["bjobs"], stdout = subprocess.PIPE)
for currline in process.stdout:
items = curline.strip().split()
if (len(items) > 9 and (items[0]) in currentjobs) and items[2] == 'RUN':
jobstart = "/".join(items[7:9]) + '/' + str(date.today().year)
jobstart = jobstart + ' ' + items[9]
jobstart = time.mktime(time.strptime(jobstart,"%b/%d/%Y %H:%M"))
jobstart = time.mktime(time.strptime(jobstart,"%m/%d/%Y %H:%M:%S"))
times[self.jobIDs[(items[0])]] = time.time() - jobstart
return times
def getUpdatedJob(self, maxWait):
i = None
try:
sgeJobID, retcode = self.updatedJobsQueue.get(timeout=maxWait)
self.updatedJobsQueue.task_done()
i = (self.jobIDs[sgeJobID], retcode)
self.currentjobs -= set([self.jobIDs[sgeJobID]])
except Empty:
pass
return i
def getWaitDuration(self):
"""We give parasol a second to catch its breath (in seconds)
"""
#return 0.0
return 15
def getRescueJobFrequency(self):
"""Parasol leaks jobs, but rescuing jobs involves calls to parasol list jobs and pstat2,
making it expensive. We allow this every 10 minutes..
"""
return 1800
def obtainSystemConstants(self):
p = subprocess.Popen(["lshosts"], stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
line = p.stdout.readline()
items = line.strip().split()
num_columns = len(items)
cpu_index = None
mem_index = None
for i in range(num_columns):
if items[i] == 'ncpus':
cpu_index = i
elif items[i] == 'maxmem':
mem_index = i
if cpu_index is None or mem_index is None:
RuntimeError("lshosts command does not return ncpus or maxmem columns")
p.stdout.readline()
self.maxCPU = 0
self.maxMEM = MemoryString("0")
for line in p.stdout:
items = line.strip().split()
if len(items) < num_columns:
RuntimeError("lshosts output has a varying number of columns")
if items[cpu_index] != '-' and items[cpu_index] > self.maxCPU:
self.maxCPU = items[cpu_index]
if items[mem_index] != '-' and MemoryString(items[mem_index]) > self.maxMEM:
self.maxMEM = MemoryString(items[mem_index])
if self.maxCPU is 0 or self.maxMEM is 0:
RuntimeError("lshosts returns null ncpus or maxmem info")
logger.info("Got the maxCPU: %s" % (self.maxMEM))
def main():
    """Placeholder entry point; the batch system is driven by jobTree itself."""
    return None
def _test():
    """Run this module's doctests and return the doctest results."""
    import doctest
    return doctest.testmod()
# When executed directly, run the doctests first and then the (no-op) main().
if __name__ == '__main__':
    _test()
    main()
| {
"content_hash": "d9488bb55daf8bc8d4a762e3508d4430",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 176,
"avg_line_length": 38.72789115646258,
"alnum_prop": 0.5706130335499736,
"repo_name": "benedictpaten/jobTree",
"id": "4bf02c7295bd8d5701f2263f97bc31a55f87277c",
"size": "12507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batchSystems/lsf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "956"
},
{
"name": "Python",
"bytes": "219203"
}
],
"symlink_target": ""
} |
import logging
import re
from random import choice
import requests
from lxml import etree
from spider import consts
def filter_http_tag(string):
    """Remove HTML ``<br>``/``<br/>`` tags and newlines, then trim whitespace.

    :param string: raw text scraped from a web page
    :return: the cleaned, stripped text
    """
    cleaned = re.sub(r'<br/?>|\n', '', string)
    return cleaned.strip()
def generate_http_headers():
    """
    Build HTTP request headers with a randomly chosen User-Agent.

    BUGFIX: the original did ``headers = consts.HTTP_HEADER`` and then
    mutated it, permanently overwriting the shared header template's
    User-Agent for every later caller.  A shallow copy keeps the template
    pristine while returning a fresh dict each call.

    :return: dict of header name -> value
    """
    headers = dict(consts.HTTP_HEADER)
    headers['User-Agent'] = choice(consts.USER_AGENT_LIST)
    return headers
def filter_unavailable_proxy(proxy_list, proxy_type='HTTPS'):
    """
    Filter out proxies that fail a live probe request.

    Each candidate proxy is used to fetch a known lagou.com JSON endpoint;
    only proxies that answer with HTTP success and a parseable payload
    containing 'totalCount' survive.

    :param proxy_list: candidate proxy URLs
    :param proxy_type: 'HTTPS' probes over https, anything else over http
    :return: list of working proxies
    """
    available_proxy_list = []
    # The probe scheme is the same for every candidate, so decide it once
    # instead of on every loop iteration.
    protocol = 'https' if proxy_type == 'HTTPS' else 'http'
    for proxy in proxy_list:
        try:
            response = requests.get('https://www.lagou.com/gongsi/0-0-0.json',
                                    proxies={protocol: proxy},
                                    timeout=1)
            if response.status_code == consts.HTTP_SUCCESS and 'totalCount' in response.json():
                available_proxy_list.append(proxy)
                logging.info('可用代理数量 {}'.format(len(available_proxy_list)))
        except Exception:
            # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit still propagate; any request or JSON-decoding error
            # simply disqualifies this proxy.
            pass
    return available_proxy_list
def get_proxys(numbers=400):
    """Collect proxies, topping up from a second source if the first falls short.

    :param numbers: how many proxies to request from the primary source
    :return: list of proxy URLs
    """
    collected = get_proxys_from_66ip(numbers=numbers)
    if len(collected) < numbers:
        collected.extend(get_proxys_from_niaoshao())
    return collected
def get_proxys_from_66ip(numbers=200):
    """Fetch up to *numbers* proxies from 66ip.cn as ``http://ip:port`` URLs."""
    url = 'http://www.66ip.cn/nmtq.php?getnum={numbers}&isp=0&anonymoustype=0&start=&ports=&export=&ipaddress=&area=0&proxytype=1&api=66ip'.format(
        numbers=numbers)
    page = requests.get(url=url)
    ip_ports = re.findall(r'(\d+.\d+.\d+.\d+:\d+)', page.text)
    return ['http://' + ip_port for ip_port in ip_ports]
def get_proxys_from_niaoshao(page=40):
    """Scrape proxies from nianshao.me across the first *page* result pages."""
    proxy_list = []
    base_url = 'http://www.nianshao.me/?stype=2&page={page_no}'
    for page_no in range(1, page + 1):
        response = requests.get(url=base_url.format(page_no=page_no), timeout=10)
        doc = etree.HTML(response.text)
        ips = doc.xpath('//tbody/tr/td[1]/text()')
        ports = doc.xpath('//tbody/tr/td[2]/text()')
        # Each table row must yield exactly one ip and one port cell.
        assert len(ips) == len(ports)
        proxy_list.extend(consts.HTTP_PROXY_FORMATTER.format(ip=ip, port=port)
                          for ip, port in zip(ips, ports))
    return proxy_list
| {
"content_hash": "5c3868d245cee57bc026878db5274e40",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 147,
"avg_line_length": 29.303370786516854,
"alnum_prop": 0.5966257668711656,
"repo_name": "hehanlin/jobbole",
"id": "19efdf1a7af429af861066fd0fbc03a7f88a13b5",
"size": "2739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spider/utils/http_tools.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "962638"
},
{
"name": "Python",
"bytes": "37012"
}
],
"symlink_target": ""
} |
# Name of the Sublime Text output view used to display gjslint results.
RESULT_VIEW_NAME = 'gjslint_result_view'
# Settings file consulted for this plugin's configuration.
SETTINGS_FILE = "sublime-closure-linter.sublime-settings"
| {
"content_hash": "76ba1cbb8289e3f818071b3ee0543636",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 57,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.78,
"repo_name": "fbzhong/sublime-closure-linter",
"id": "476560615a0f891800b1ef7b93fbe11dad9a2915",
"size": "101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "const.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11585"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from collections import OrderedDict
from operator import attrgetter
from flask import flash, request, session
from sqlalchemy.orm import joinedload, subqueryload
from indico.core.db import db
from indico.modules.events.abstracts.models.abstracts import Abstract, AbstractState
from indico.modules.events.abstracts.models.fields import AbstractFieldValue
from indico.modules.events.abstracts.models.reviews import AbstractReview
from indico.modules.events.contributions.models.fields import ContributionField
from indico.modules.events.tracks.models.tracks import Track
from indico.modules.events.util import ListGeneratorBase
from indico.util.i18n import _
from indico.web.flask.templating import get_template_module
class AbstractListGeneratorBase(ListGeneratorBase):
    """Listing and filtering actions in an abstract list."""
    # Subclasses may disable the event's custom contribution-field columns
    # (the reviewer display view does).
    show_contribution_fields = True
    def __init__(self, event):
        """Prepare the default list config and static column/filter metadata."""
        super(AbstractListGeneratorBase, self).__init__(event)
        self.default_list_config = {
            'items': (),
            'filters': {'fields': {}, 'items': {}, 'extra': {}}
        }
        # Extra "no value" choices (keyed by None) so abstracts without a
        # track/type can be filtered for explicitly.
        track_empty = {None: _('No track')}
        type_empty = {None: _('No type')}
        track_choices = OrderedDict((unicode(t.id), t.title) for t in sorted(self.event.tracks,
                                                                             key=attrgetter('title')))
        type_choices = OrderedDict((unicode(t.id), t.name) for t in sorted(self.event.contribution_types,
                                                                           key=attrgetter('name')))
        # Static (non contribution-field) columns and their filter choices.
        self.static_items = OrderedDict([
            ('state', {'title': _('State'), 'filter_choices': {state.value: state.title for state in AbstractState}}),
            ('submitter', {'title': _('Submitter')}),
            ('authors', {'title': _('Primary authors')}),
            ('accepted_track', {'title': _('Accepted track'),
                                'filter_choices': OrderedDict(track_empty.items() + track_choices.items())}),
            ('submitted_for_tracks', {'title': _('Submitted for tracks'),
                                      'filter_choices': OrderedDict(track_empty.items() + track_choices.items())}),
            ('reviewed_for_tracks', {'title': _('Reviewed for tracks'),
                                     'filter_choices': OrderedDict(track_empty.items() + track_choices.items())}),
            ('accepted_contrib_type', {'title': _('Accepted type'),
                                       'filter_choices': OrderedDict(type_empty.items() + type_choices.items())}),
            ('submitted_contrib_type', {'title': _('Submitted type'),
                                        'filter_choices': OrderedDict(type_empty.items() + type_choices.items())}),
            ('score', {'title': _('Score')}),
            ('submitted_dt', {'title': _('Submission date')}),
            ('modified_dt', {'title': _('Modification date')})
        ])
        self.extra_filters = {}
        self.list_config = self._get_config()
    def _get_static_columns(self, ids):
        """
        Retrieve information needed for the header of the static columns.
        :return: a list of {'id': ..., 'caption': ...} dicts
        """
        return [{'id': id_, 'caption': self.static_items[id_]['title']} for id_ in self.static_items if id_ in ids]
    def get_all_contribution_fields(self):
        """Return the list of contribution fields for the event"""
        return self.event.contribution_fields if self.show_contribution_fields else []
    def _get_sorted_contribution_fields(self, item_ids):
        """Return the contribution fields ordered by their position in the abstract form."""
        if not item_ids or not self.show_contribution_fields:
            return []
        return (ContributionField.query
                .with_parent(self.event)
                .filter(ContributionField.id.in_(item_ids))
                .order_by(ContributionField.position)
                .all())
    def _get_filters_from_request(self):
        """Extend the base filters with single-choice contribution-field filters."""
        filters = super(AbstractListGeneratorBase, self)._get_filters_from_request()
        for field in self.event.contribution_fields:
            if field.field_type == 'single_choice':
                options = request.form.getlist('field_{}'.format(field.id))
                if options:
                    filters['fields'][unicode(field.id)] = options
        return filters
    def _build_query(self):
        # Eagerly load every relationship the list template touches to
        # avoid N+1 queries when rendering the rows.
        return (Abstract.query
                .with_parent(self.event)
                .options(joinedload('submitter'),
                         joinedload('accepted_track'),
                         joinedload('accepted_contrib_type'),
                         joinedload('submitted_contrib_type'),
                         joinedload('contribution').load_only('id', 'event_id'),
                         subqueryload('field_values'),
                         subqueryload('submitted_for_tracks'),
                         subqueryload('reviewed_for_tracks'),
                         subqueryload('person_links'),
                         subqueryload('reviews').joinedload('ratings'))
                .order_by(Abstract.friendly_id))
    def _filter_list_entries(self, query, filters):
        """Apply field/item/extra filters to *query*; no-op when none are set."""
        criteria = []
        field_filters = filters.get('fields')
        item_filters = filters.get('items')
        extra_filters = filters.get('extra')
        if not (field_filters or item_filters or extra_filters):
            return query
        if field_filters:
            for contribution_type_id, field_values in field_filters.iteritems():
                criteria.append(Abstract.field_values.any(db.and_(
                    AbstractFieldValue.contribution_field_id == contribution_type_id,
                    AbstractFieldValue.data.op('#>>')('{}').in_(field_values)
                )))
        if item_filters:
            static_filters = {
                'accepted_track': Abstract.accepted_track_id,
                'accepted_contrib_type': Abstract.accepted_contrib_type_id,
                'submitted_contrib_type': Abstract.submitted_contrib_type_id,
                'submitted_for_tracks': Abstract.submitted_for_tracks,
                'reviewed_for_tracks': Abstract.reviewed_for_tracks
            }
            for key, column in static_filters.iteritems():
                ids = set(item_filters.get(key, ()))
                if not ids:
                    continue
                column_criteria = []
                # Relationship columns (*_for_tracks) need .any(); plain FK
                # columns use IS NULL / IN.  A None id means "no value".
                if '_for_tracks' in key:
                    if None in ids:
                        column_criteria.append(~column.any())
                        ids.discard(None)
                    if ids:
                        column_criteria.append(column.any(Track.id.in_(ids)))
                else:
                    if None in ids:
                        column_criteria.append(column.is_(None))
                        ids.discard(None)
                    if ids:
                        column_criteria.append(column.in_(ids))
                criteria.append(db.or_(*column_criteria))
            if 'state' in item_filters:
                states = [AbstractState(int(state)) for state in item_filters['state']]
                criteria.append(Abstract.state.in_(states))
        if extra_filters:
            if extra_filters.get('multiple_tracks'):
                # Count the track associations per abstract via a subquery.
                submitted_for_count = (db.select([db.func.count()])
                                       .as_scalar()
                                       .where(Abstract.submitted_for_tracks.prop.primaryjoin))
                criteria.append(submitted_for_count > 1)
            if extra_filters.get('comments'):
                criteria.append(Abstract.submission_comment != '')
        return query.filter(db.and_(*criteria))
    def get_list_kwargs(self):
        """Assemble the rows and column definitions the list template needs."""
        list_config = self._get_config()
        abstracts_query = self._build_query()
        total_entries = abstracts_query.count()
        abstracts = self._filter_list_entries(abstracts_query, list_config['filters']).all()
        dynamic_item_ids, static_item_ids = self._split_item_ids(list_config['items'], 'dynamic')
        static_columns = self._get_static_columns(static_item_ids)
        dynamic_columns = self._get_sorted_contribution_fields(dynamic_item_ids)
        return {
            'abstracts': abstracts,
            'total_abstracts': total_entries,
            'static_columns': static_columns,
            'dynamic_columns': dynamic_columns,
            'filtering_enabled': total_entries != len(abstracts)
        }
    def get_list_export_config(self):
        """Return the column configuration used by the list export."""
        list_config = self._get_config()
        static_item_ids, dynamic_item_ids = self._split_item_ids(list_config['items'], 'static')
        return {
            'static_item_ids': static_item_ids,
            'dynamic_items': self._get_sorted_contribution_fields(dynamic_item_ids)
        }
    def render_list(self, abstract=None):
        """Render the list HTML plus filter statistics.

        If *abstract* is given, also report whether it is hidden by the
        currently active filters.
        """
        list_kwargs = self.get_list_kwargs()
        tpl = get_template_module('events/abstracts/management/_abstract_list.html')
        filtering_enabled = list_kwargs.pop('filtering_enabled')
        tpl_lists = get_template_module('events/management/_lists.html')
        filter_statistics = tpl_lists.render_displayed_entries_fragment(len(list_kwargs['abstracts']),
                                                                        list_kwargs['total_abstracts'])
        return {
            'html': tpl.render_abstract_list(**list_kwargs),
            'filtering_enabled': filtering_enabled,
            'filter_statistics': filter_statistics,
            'hide_abstract': abstract not in list_kwargs['abstracts'] if abstract else None
        }
    def flash_info_message(self, abstract):
        """Tell the user why *abstract* is not visible in the filtered list."""
        flash(_("The abstract '{}' is not displayed in the list due to the enabled filters")
              .format(abstract.title), 'info')
class AbstractListGeneratorManagement(AbstractListGeneratorBase):
    """Listing and filtering actions in the abstract list in the management view"""
    list_link_type = 'abstract_management'
    endpoint = '.manage_abstract_list'
    def __init__(self, event):
        """Set manager-view default columns and manager-only extra filters."""
        super(AbstractListGeneratorManagement, self).__init__(event)
        # Default columns; track columns are added only if the event uses tracks.
        self.default_list_config['items'] = ('submitted_contrib_type', 'accepted_contrib_type', 'state')
        if event.tracks:
            self.default_list_config['items'] += ('submitted_for_tracks', 'reviewed_for_tracks', 'accepted_track')
        # Boolean filters available exclusively in the management view.
        self.extra_filters = OrderedDict([
            ('multiple_tracks', {'title': _('Proposed for multiple tracks'), 'type': 'bool'}),
            ('comments', {'title': _('Must have comments'), 'type': 'bool'})
        ])
class AbstractListGeneratorDisplay(AbstractListGeneratorBase):
    """Listing and filtering actions in the abstract list in the display view"""
    list_link_type = 'abstract_display'
    endpoint = '.display_reviewable_track_abstracts'
    # Reviewers never see the event's custom contribution fields.
    show_contribution_fields = False
    def __init__(self, event, track):
        """Restrict the columns to what a reviewer of *track* may see."""
        super(AbstractListGeneratorDisplay, self).__init__(event)
        self.track = track
        self.default_list_config['items'] = ('accepted_contrib_type', 'state')
        items = {'submitted_contrib_type', 'submitter', 'accepted_contrib_type', 'state'}
        # Track conveners may additionally see the review score column.
        if self.track.can_convene(session.user):
            items.add('score')
        self.static_items = OrderedDict((key, value)
                                        for key, value in self.static_items.iteritems()
                                        if key in items)
    def _build_query(self):
        # Restrict the base query to abstracts reviewable in this track.
        return (super(AbstractListGeneratorDisplay, self)._build_query()
                .filter(Abstract.state != AbstractState.invited,
                        Abstract.reviewed_for_tracks.contains(self.track)))
    def get_user_reviewed_abstracts_for_track(self, user, track):
        """Return the non-deleted abstracts in *track* already reviewed by *user*."""
        return (Abstract.query
                .join(Abstract.reviews)
                .filter(AbstractReview.user == user,
                        Abstract.state != AbstractState.invited,
                        Abstract.reviewed_for_tracks.contains(track),
                        ~Abstract.is_deleted)
                .all())
    def get_list_kwargs(self):
        """Add the current user's already-reviewed abstracts to the template args."""
        kwargs = super(AbstractListGeneratorDisplay, self).get_list_kwargs()
        kwargs['reviewed_abstracts'] = self.get_user_reviewed_abstracts_for_track(session.user, self.track)
        return kwargs
| {
"content_hash": "e30aa764eeabaa7d5fa5396a9e9cbb87",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 118,
"avg_line_length": 48.76171875,
"alnum_prop": 0.5767844268204758,
"repo_name": "OmeGak/indico",
"id": "2dffedc10d3bb06df3687fe30a1fdf66296eaf33",
"size": "12697",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/modules/events/abstracts/lists.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "547418"
},
{
"name": "HTML",
"bytes": "1366687"
},
{
"name": "JavaScript",
"bytes": "1678182"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4488419"
},
{
"name": "Shell",
"bytes": "2724"
},
{
"name": "TeX",
"bytes": "23051"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
"""The tests for the time automation."""
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.bootstrap import _setup_component
import homeassistant.util.dt as dt_util
import homeassistant.components.automation as automation
from tests.common import fire_time_changed, get_test_home_assistant
class TestAutomationTime(unittest.TestCase):
    """Test the event automation."""
    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.hass.config.components.append('group')
        # Collect service invocations so each test can assert on them.
        self.calls = []
        def record_call(service):
            self.calls.append(service)
        self.hass.services.register('test', 'automation', record_call)
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()
    def test_if_fires_when_hour_matches(self):
        """Test for firing if hour is matching."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'hours': 0,
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        # Simulate a clock tick whose hour matches the trigger.
        fire_time_changed(self.hass, dt_util.utcnow().replace(hour=0))
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))
    def test_if_fires_when_minute_matches(self):
        """Test for firing if minutes are matching."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'minutes': 0,
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        fire_time_changed(self.hass, dt_util.utcnow().replace(minute=0))
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))
    def test_if_fires_when_second_matches(self):
        """Test for firing if seconds are matching."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'seconds': 0,
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        fire_time_changed(self.hass, dt_util.utcnow().replace(second=0))
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))
    def test_if_fires_when_all_matches(self):
        """Test for firing if everything matches."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'hours': 1,
                    'minutes': 2,
                    'seconds': 3,
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        fire_time_changed(self.hass, dt_util.utcnow().replace(
            hour=1, minute=2, second=3))
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))
    def test_if_fires_periodic_seconds(self):
        """Test for firing periodically every second."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    # "/2" means every second divisible by two.
                    'seconds': "/2",
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        fire_time_changed(self.hass, dt_util.utcnow().replace(
            hour=0, minute=0, second=2))
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))
    def test_if_fires_periodic_minutes(self):
        """Test for firing periodically every minute."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'minutes': "/2",
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        fire_time_changed(self.hass, dt_util.utcnow().replace(
            hour=0, minute=2, second=0))
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))
    def test_if_fires_periodic_hours(self):
        """Test for firing periodically every hour."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'hours': "/2",
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        fire_time_changed(self.hass, dt_util.utcnow().replace(
            hour=2, minute=0, second=0))
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))
    def test_if_fires_using_after(self):
        """Test for firing after."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'after': '5:00:00',
                },
                'action': {
                    'service': 'test.automation',
                    # The template exposes trigger data to verify it is passed in.
                    'data_template': {
                        'some': '{{ trigger.platform }} - '
                                '{{ trigger.now.hour }}'
                    },
                }
            }
        })
        fire_time_changed(self.hass, dt_util.utcnow().replace(
            hour=5, minute=0, second=0))
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))
        self.assertEqual('time - 5', self.calls[0].data['some'])
    def test_if_not_working_if_no_values_in_conf_provided(self):
        """Test for failure if no configuration."""
        assert not _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        # Even a matching tick must not fire since setup was rejected.
        fire_time_changed(self.hass, dt_util.utcnow().replace(
            hour=5, minute=0, second=0))
        self.hass.pool.block_till_done()
        self.assertEqual(0, len(self.calls))
    def test_if_not_fires_using_wrong_after(self):
        """YAML translates time values to total seconds.

        This should break the before rule.
        """
        assert not _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'after': 3605,
                    # Total seconds. Hour = 3600 second
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        fire_time_changed(self.hass, dt_util.utcnow().replace(
            hour=1, minute=0, second=5))
        self.hass.pool.block_till_done()
        self.assertEqual(0, len(self.calls))
    def test_if_action_before(self):
        """Test for if action before."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'event',
                    'event_type': 'test_event'
                },
                'condition': {
                    'platform': 'time',
                    'before': '10:00',
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        before_10 = dt_util.now().replace(hour=8)
        after_10 = dt_util.now().replace(hour=14)
        # Condition passes before 10:00 ...
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=before_10):
            self.hass.bus.fire('test_event')
            self.hass.pool.block_till_done()
            self.assertEqual(1, len(self.calls))
        # ... and blocks the action afterwards (call count unchanged).
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=after_10):
            self.hass.bus.fire('test_event')
            self.hass.pool.block_till_done()
            self.assertEqual(1, len(self.calls))
    def test_if_action_after(self):
        """Test for if action after."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'event',
                    'event_type': 'test_event'
                },
                'condition': {
                    'platform': 'time',
                    'after': '10:00',
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        before_10 = dt_util.now().replace(hour=8)
        after_10 = dt_util.now().replace(hour=14)
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=before_10):
            self.hass.bus.fire('test_event')
            self.hass.pool.block_till_done()
            self.assertEqual(0, len(self.calls))
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=after_10):
            self.hass.bus.fire('test_event')
            self.hass.pool.block_till_done()
            self.assertEqual(1, len(self.calls))
    def test_if_action_one_weekday(self):
        """Test for if action with one weekday."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'event',
                    'event_type': 'test_event'
                },
                'condition': {
                    'platform': 'time',
                    'weekday': 'mon',
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        # Compute this week's Monday relative to today.
        days_past_monday = dt_util.now().weekday()
        monday = dt_util.now() - timedelta(days=days_past_monday)
        tuesday = monday + timedelta(days=1)
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=monday):
            self.hass.bus.fire('test_event')
            self.hass.pool.block_till_done()
            self.assertEqual(1, len(self.calls))
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=tuesday):
            self.hass.bus.fire('test_event')
            self.hass.pool.block_till_done()
            self.assertEqual(1, len(self.calls))
    def test_if_action_list_weekday(self):
        """Test for action with a list of weekdays."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'event',
                    'event_type': 'test_event'
                },
                'condition': {
                    'platform': 'time',
                    'weekday': ['mon', 'tue'],
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        days_past_monday = dt_util.now().weekday()
        monday = dt_util.now() - timedelta(days=days_past_monday)
        tuesday = monday + timedelta(days=1)
        wednesday = tuesday + timedelta(days=1)
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=monday):
            self.hass.bus.fire('test_event')
            self.hass.pool.block_till_done()
            self.assertEqual(1, len(self.calls))
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=tuesday):
            self.hass.bus.fire('test_event')
            self.hass.pool.block_till_done()
            self.assertEqual(2, len(self.calls))
        # Wednesday is outside the list, so no additional call.
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=wednesday):
            self.hass.bus.fire('test_event')
            self.hass.pool.block_till_done()
            self.assertEqual(2, len(self.calls))
| {
"content_hash": "f303c7d71e9fd59323728def1bf62277",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 72,
"avg_line_length": 32.78238341968912,
"alnum_prop": 0.48759285601390867,
"repo_name": "sffjunkie/home-assistant",
"id": "b36ce8c92b554d76182ad8303f67d25ae63ebf6f",
"size": "12654",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tests/components/automation/test_time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1308067"
},
{
"name": "JavaScript",
"bytes": "10846"
},
{
"name": "Python",
"bytes": "2458134"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6430"
}
],
"symlink_target": ""
} |
from django.views.generic.base import TemplateView
from django.conf import settings
from corehq.apps.app_manager.dbaccessors import get_app
class PreviewAppView(TemplateView):
    """Render the app-preview page for the application identified by app_id."""
    template_name = 'preview_app/base.html'
    urlname = 'preview_app'
    def get(self, request, *args, **kwargs):
        """Look up the app in the request's domain and render the preview."""
        application = get_app(request.domain, kwargs.pop('app_id'))
        context = {
            'app': application,
            'formplayer_url': settings.FORMPLAYER_URL,
        }
        return self.render_to_response(context)
| {
"content_hash": "199db23c300a769036e4021ae791c75b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 59,
"avg_line_length": 30.5,
"alnum_prop": 0.6680327868852459,
"repo_name": "qedsoftware/commcare-hq",
"id": "f7cf48054f46720907522366d4eec5d789fa3bef",
"size": "488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/preview_app/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from oslo_config import cfg
import testtools
class ConfigurationTest(testtools.TestCase):
    """Sanity-check the default values of congress's oslo.config options."""
    def test_defaults(self):
        """Every registered option should expose its expected default value."""
        self.assertEqual('0.0.0.0', cfg.CONF.bind_host)
        self.assertEqual(1789, cfg.CONF.bind_port)
        self.assertFalse(cfg.CONF.tcp_keepalive)
        self.assertEqual(600, cfg.CONF.tcp_keepidle)
        self.assertEqual(1, cfg.CONF.api_workers)
        self.assertEqual('api-paste.ini', cfg.CONF.api_paste_config)
        self.assertEqual('keystone', cfg.CONF.auth_strategy)
        self.assertEqual(False, cfg.CONF.datasources)
        self.assertEqual(False, cfg.CONF.api)
        self.assertEqual(False, cfg.CONF.policy_engine)
        self.assertTrue(hasattr(cfg.CONF, 'node_id'))  # default varies
        self.assertEqual(False, cfg.CONF.delete_missing_driver_datasources)
| {
"content_hash": "368a1639b7018734da198154d96e9956",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 75,
"avg_line_length": 40.21739130434783,
"alnum_prop": 0.7027027027027027,
"repo_name": "openstack/congress",
"id": "4dec24cf8e57bf8388620650c725a5fb1dc44d7e",
"size": "1501",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "congress/tests/test_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "7778"
},
{
"name": "Makefile",
"bytes": "228"
},
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "2614028"
},
{
"name": "Shell",
"bytes": "45786"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.cosmosdb import CosmosDBManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-cosmosdb
# USAGE
python cosmos_db_sql_container_migrate_to_manual_throughput.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Migrate a Cosmos DB SQL container to manual (fixed) throughput.

    Authenticates via DefaultAzureCredential, starts the long-running
    migrate operation, waits for it to finish, and prints the response.
    """
    mgmt_client = CosmosDBManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )
    poller = mgmt_client.sql_resources.begin_migrate_sql_container_to_manual_throughput(
        resource_group_name="rg1",
        account_name="ddb1",
        database_name="databaseName",
        container_name="containerName",
    )
    # Block until the LRO completes, then show the final resource state.
    result = poller.result()
    print(result)


# x-ms-original-file: specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2022-08-15-preview/examples/CosmosDBSqlContainerMigrateToManualThroughput.json
if __name__ == "__main__":
    main()
| {
"content_hash": "4c2e89c0d0123997e60ff9950e167969",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 170,
"avg_line_length": 35.65714285714286,
"alnum_prop": 0.7355769230769231,
"repo_name": "Azure/azure-sdk-for-python",
"id": "3e8ed45524df489d0752ba560b28e44df23ca81c",
"size": "1716",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/cosmos/azure-mgmt-cosmosdb/generated_samples/cosmos_db_sql_container_migrate_to_manual_throughput.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import random
import uuid
from keystone.auth import controllers as auth_controllers
from keystone.common import dependency
from keystone.common import serializer
from keystone.common import sql
from keystone.common.sql import migration_helpers
from keystone import config
from keystone import contrib
from keystone.contrib.federation import controllers as federation_controllers
from keystone.contrib.federation import utils as mapping_utils
from keystone import exception
from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common import importutils
from keystone.openstack.common import jsonutils
from keystone.openstack.common import log
from keystone.tests import mapping_fixtures
from keystone.tests import test_v3
# Module-level aliases for the global configuration object and a logger,
# following the usual keystone module convention.
CONF = config.CONF
LOG = log.getLogger(__name__)
def dummy_validator(*args, **kwargs):
    """No-op validator: accepts any arguments and validates nothing.

    Passed wherever the response-assertion helpers require a validator
    callable but the test only cares about selected keys.
    """
    return None
@dependency.requires('federation_api')
class FederationTests(test_v3.RestfulTestCase):
    """Base class that wires the federation extension into the test DB."""

    EXTENSION_NAME = 'federation'
    EXTENSION_TO_ADD = 'federation_extension'

    def setup_database(self):
        """Run the federation extension migrations after base setup."""
        super(FederationTests, self).setup_database()
        module_path = '%s.%s' % (contrib.__name__, self.EXTENSION_NAME)
        extension_module = importutils.import_module(module_path)
        repo_path = migration_helpers.find_migrate_repo(extension_module)
        migration.db_version_control(sql.get_engine(), repo_path)
        migration.db_sync(sql.get_engine(), repo_path)
class FederatedIdentityProviderTests(FederationTests):
    """A test class for Identity Providers."""

    # Mutable attributes expected in every IdP response body.
    idp_keys = ['description', 'enabled']
    # Default request payload used when a test supplies no body of its own.
    default_body = {'description': None, 'enabled': True}

    def base_url(self, suffix=None):
        """Return the IdP collection URL, or an entity URL when ``suffix`` is given."""
        if suffix is not None:
            return '/OS-FEDERATION/identity_providers/' + str(suffix)
        return '/OS-FEDERATION/identity_providers'

    def _fetch_attribute_from_response(self, resp, parameter,
                                       assert_is_not_none=True):
        """Fetch single attribute from TestResponse object."""
        result = resp.result.get(parameter)
        if assert_is_not_none:
            self.assertIsNotNone(result)
        return result

    def _create_and_decapsulate_response(self, body=None):
        """Create IdP and fetch its random id along with entity."""
        default_resp = self._create_default_idp(body=body)
        idp = self._fetch_attribute_from_response(default_resp,
                                                  'identity_provider')
        self.assertIsNotNone(idp)
        idp_id = idp.get('id')
        return (idp_id, idp)

    def _get_idp(self, idp_id):
        """Fetch IdP entity based on its id."""
        url = self.base_url(suffix=idp_id)
        resp = self.get(url)
        return resp

    def _create_default_idp(self, body=None):
        """Create default IdP."""
        # A random hex string serves as the new IdP's id in the PUT URL.
        url = self.base_url(suffix=uuid.uuid4().hex)
        if body is None:
            body = self._http_idp_input()
        resp = self.put(url, body={'identity_provider': body},
                        expected_status=201)
        return resp

    def _http_idp_input(self, **kwargs):
        """Create default input for IdP data."""
        body = None
        if 'body' not in kwargs:
            body = self.default_body.copy()
            body['description'] = uuid.uuid4().hex
        else:
            body = kwargs['body']
        return body

    def _assign_protocol_to_idp(self, idp_id=None, proto=None, url=None,
                                mapping_id=None, validate=True, **kwargs):
        """PUT a protocol under an IdP, generating missing pieces on demand.

        Any of idp_id/proto/mapping_id/url left as None is created or
        defaulted here; extra kwargs (e.g. expected_status) are forwarded
        to ``self.put``.  Returns (response, idp_id, protocol_id).
        """
        if url is None:
            url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
        if idp_id is None:
            idp_id, _ = self._create_and_decapsulate_response()
        if proto is None:
            proto = uuid.uuid4().hex
        if mapping_id is None:
            mapping_id = uuid.uuid4().hex
        body = {'mapping_id': mapping_id}
        url = url % {'idp_id': idp_id, 'protocol_id': proto}
        resp = self.put(url, body={'protocol': body}, **kwargs)
        if validate:
            self.assertValidResponse(resp, 'protocol', dummy_validator,
                                     keys_to_check=['id', 'mapping_id'],
                                     ref={'id': proto,
                                          'mapping_id': mapping_id})
        return (resp, idp_id, proto)

    def _get_protocol(self, idp_id, protocol_id):
        """GET a single protocol entity tied to the given IdP."""
        url = "%s/protocols/%s" % (idp_id, protocol_id)
        url = self.base_url(suffix=url)
        r = self.get(url)
        return r

    def test_create_idp(self):
        """Creates the IdentityProvider entity."""
        keys_to_check = self.idp_keys
        body = self._http_idp_input()
        resp = self._create_default_idp(body=body)
        self.assertValidResponse(resp, 'identity_provider', dummy_validator,
                                 keys_to_check=keys_to_check,
                                 ref=body)

    def test_list_idps(self, iterations=5):
        """Lists all available IdentityProviders.

        This test collects ids of created IdPs and
        intersects it with the list of all available IdPs.
        List of all IdPs can be a superset of IdPs created in this test,
        because other tests also create IdPs.
        """
        def get_id(resp):
            r = self._fetch_attribute_from_response(resp,
                                                    'identity_provider')
            return r.get('id')

        ids = []
        for _ in range(iterations):
            id = get_id(self._create_default_idp())
            ids.append(id)
        ids = set(ids)

        keys_to_check = self.idp_keys
        url = self.base_url()
        resp = self.get(url)
        self.assertValidListResponse(resp, 'identity_providers',
                                     dummy_validator,
                                     keys_to_check=keys_to_check)
        entities = self._fetch_attribute_from_response(resp,
                                                       'identity_providers')
        entities_ids = set([e['id'] for e in entities])
        # Every IdP created above must appear in the listing.
        ids_intersection = entities_ids.intersection(ids)
        self.assertEqual(ids_intersection, ids)

    def test_check_idp_uniqueness(self):
        """Add same IdP twice.

        Expect HTTP 409 code for the latter call.
        """
        url = self.base_url(suffix=uuid.uuid4().hex)
        body = self._http_idp_input()
        self.put(url, body={'identity_provider': body},
                 expected_status=201)
        self.put(url, body={'identity_provider': body},
                 expected_status=409)

    def test_get_idp(self):
        """Create and later fetch IdP."""
        body = self._http_idp_input()
        default_resp = self._create_default_idp(body=body)
        default_idp = self._fetch_attribute_from_response(default_resp,
                                                          'identity_provider')
        idp_id = default_idp.get('id')
        url = self.base_url(suffix=idp_id)
        resp = self.get(url)
        self.assertValidResponse(resp, 'identity_provider',
                                 dummy_validator, keys_to_check=body.keys(),
                                 ref=body)

    def test_get_nonexisting_idp(self):
        """Fetch nonexisting IdP entity.

        Expected HTTP 404 status code.
        """
        idp_id = uuid.uuid4().hex
        self.assertIsNotNone(idp_id)
        url = self.base_url(suffix=idp_id)
        self.get(url, expected_status=404)

    def test_delete_existing_idp(self):
        """Create and later delete IdP.

        Expect HTTP 404 for the GET IdP call.
        """
        default_resp = self._create_default_idp()
        default_idp = self._fetch_attribute_from_response(default_resp,
                                                          'identity_provider')
        idp_id = default_idp.get('id')
        self.assertIsNotNone(idp_id)
        url = self.base_url(suffix=idp_id)
        self.delete(url)
        self.get(url, expected_status=404)

    def test_delete_nonexisting_idp(self):
        """Delete nonexisting IdP.

        Expect HTTP 404 for the GET IdP call.
        """
        idp_id = uuid.uuid4().hex
        url = self.base_url(suffix=idp_id)
        self.delete(url, expected_status=404)

    def test_update_idp_mutable_attributes(self):
        """Update IdP's mutable parameters."""
        default_resp = self._create_default_idp()
        default_idp = self._fetch_attribute_from_response(default_resp,
                                                          'identity_provider')
        idp_id = default_idp.get('id')
        url = self.base_url(suffix=idp_id)
        self.assertIsNotNone(idp_id)

        # Flip the enabled flag and pick a fresh description.
        _enabled = not default_idp.get('enabled')
        body = {'description': uuid.uuid4().hex, 'enabled': _enabled}

        body = {'identity_provider': body}
        resp = self.patch(url, body=body)
        updated_idp = self._fetch_attribute_from_response(resp,
                                                          'identity_provider')
        body = body['identity_provider']
        for key in body.keys():
            self.assertEqual(body[key], updated_idp.get(key))

        # A follow-up GET must reflect the same updated values.
        resp = self.get(url)
        updated_idp = self._fetch_attribute_from_response(resp,
                                                          'identity_provider')
        for key in body.keys():
            self.assertEqual(body[key], updated_idp.get(key))

    def test_update_idp_immutable_attributes(self):
        """Update IdP's immutable parameters.

        Expect HTTP 403 code.
        """
        default_resp = self._create_default_idp()
        default_idp = self._fetch_attribute_from_response(default_resp,
                                                          'identity_provider')
        idp_id = default_idp.get('id')
        self.assertIsNotNone(idp_id)

        body = self._http_idp_input()
        body['id'] = uuid.uuid4().hex
        body['protocols'] = [uuid.uuid4().hex, uuid.uuid4().hex]

        url = self.base_url(suffix=idp_id)
        self.patch(url, body={'identity_provider': body}, expected_status=403)

    def test_update_nonexistent_idp(self):
        """Update nonexistent IdP.

        Expect HTTP 404 code.
        """
        idp_id = uuid.uuid4().hex
        url = self.base_url(suffix=idp_id)
        body = self._http_idp_input()
        body['enabled'] = False
        body = {'identity_provider': body}

        self.patch(url, body=body, expected_status=404)

    def test_assign_protocol_to_idp(self):
        """Assign a protocol to existing IdP."""
        self._assign_protocol_to_idp(expected_status=201)

    def test_protocol_composite_pk(self):
        """Test that Keystone lets us add two entities with identical
        names, however attached to different IdPs.

        1. Add IdP and assign it protocol with predefined name
        2. Add another IdP and assign it a protocol with same name.

        Expect HTTP 201 code
        """
        url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')

        kwargs = {'expected_status': 201}
        self._assign_protocol_to_idp(proto='saml2',
                                     url=url, **kwargs)

        self._assign_protocol_to_idp(proto='saml2',
                                     url=url, **kwargs)

    def test_protocol_idp_pk_uniqueness(self):
        """Test whether Keystone checks for unique idp/protocol values.

        Add same protocol twice, expect Keystone to reject a latter call and
        return HTTP 409 code.
        """
        url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')

        kwargs = {'expected_status': 201}
        resp, idp_id, proto = self._assign_protocol_to_idp(proto='saml2',
                                                           url=url, **kwargs)
        kwargs = {'expected_status': 409}
        # validate=False: a 409 response carries no protocol entity to check.
        resp, idp_id, proto = self._assign_protocol_to_idp(idp_id=idp_id,
                                                           proto='saml2',
                                                           validate=False,
                                                           url=url, **kwargs)

    def test_assign_protocol_to_nonexistent_idp(self):
        """Assign protocol to IdP that doesn't exist.

        Expect HTTP 404 code.
        """
        idp_id = uuid.uuid4().hex
        kwargs = {'expected_status': 404}
        self._assign_protocol_to_idp(proto='saml2',
                                     idp_id=idp_id,
                                     validate=False,
                                     **kwargs)

    def test_get_protocol(self):
        """Create and later fetch protocol tied to IdP."""
        resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
        proto_id = self._fetch_attribute_from_response(resp, 'protocol')['id']
        url = "%s/protocols/%s" % (idp_id, proto_id)
        url = self.base_url(suffix=url)

        resp = self.get(url)

        reference = {'id': proto_id}
        self.assertValidResponse(resp, 'protocol',
                                 dummy_validator,
                                 keys_to_check=reference.keys(),
                                 ref=reference)

    def test_list_protocols(self):
        """Create set of protocols and later list them.

        Compare input and output id sets.
        """
        resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
        iterations = random.randint(0, 16)
        protocol_ids = []
        for _ in range(iterations):
            resp, _, proto = self._assign_protocol_to_idp(idp_id=idp_id,
                                                          expected_status=201)
            proto_id = self._fetch_attribute_from_response(resp, 'protocol')
            proto_id = proto_id['id']
            protocol_ids.append(proto_id)

        url = "%s/protocols" % idp_id
        url = self.base_url(suffix=url)
        resp = self.get(url)
        self.assertValidListResponse(resp, 'protocols',
                                     dummy_validator,
                                     keys_to_check=['id'])
        entities = self._fetch_attribute_from_response(resp, 'protocols')
        entities = set([entity['id'] for entity in entities])
        # The listing must contain every protocol created in the loop.
        protocols_intersection = entities.intersection(protocol_ids)
        self.assertEqual(protocols_intersection, set(protocol_ids))

    def test_update_protocols_attribute(self):
        """Update protocol's attribute."""
        resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
        new_mapping_id = uuid.uuid4().hex

        url = "%s/protocols/%s" % (idp_id, proto)
        url = self.base_url(suffix=url)
        body = {'mapping_id': new_mapping_id}
        resp = self.patch(url, body={'protocol': body})
        self.assertValidResponse(resp, 'protocol', dummy_validator,
                                 keys_to_check=['id', 'mapping_id'],
                                 ref={'id': proto,
                                      'mapping_id': new_mapping_id}
                                 )

    def test_delete_protocol(self):
        """Delete protocol.

        Expect HTTP 404 code for the GET call after the protocol is deleted.
        """
        # NOTE(review): the leading '/' in the suffix yields a double slash
        # after base_url's own separator — presumably the router tolerates
        # it, but worth confirming.
        url = self.base_url(suffix='/%(idp_id)s/'
                                   'protocols/%(protocol_id)s')
        resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
        url = url % {'idp_id': idp_id,
                     'protocol_id': proto}
        self.delete(url)
        self.get(url, expected_status=404)
class MappingCRUDTests(FederationTests):
    """A class for testing CRUD operations for Mappings."""

    MAPPING_URL = '/OS-FEDERATION/mappings/'

    def assertValidMappingListResponse(self, resp, *args, **kwargs):
        """Validate a list-of-mappings response body."""
        return self.assertValidListResponse(
            resp,
            'mappings',
            self.assertValidMapping,
            keys_to_check=[],
            *args,
            **kwargs)

    def assertValidMappingResponse(self, resp, *args, **kwargs):
        """Validate a single-mapping response body."""
        return self.assertValidResponse(
            resp,
            'mapping',
            self.assertValidMapping,
            keys_to_check=[],
            *args,
            **kwargs)

    def assertValidMapping(self, entity, ref=None):
        """Check a mapping entity has id and rules; compare rules to ref."""
        self.assertIsNotNone(entity.get('id'))
        self.assertIsNotNone(entity.get('rules'))
        if ref:
            # Entity rules come back JSON-encoded; decode before comparing.
            self.assertEqual(jsonutils.loads(entity['rules']), ref['rules'])
        return entity

    def _create_default_mapping_entry(self):
        """PUT the MAPPING_LARGE fixture under a random mapping id."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        resp = self.put(url,
                        body={'mapping': mapping_fixtures.MAPPING_LARGE},
                        expected_status=201)
        return resp

    def _get_id_from_response(self, resp):
        """Extract the mapping id from a create/update response."""
        r = resp.result.get('mapping')
        return r.get('id')

    def test_mapping_create(self):
        """Create a mapping and validate the response entity."""
        resp = self._create_default_mapping_entry()
        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)

    def test_mapping_list(self):
        """Create one mapping and expect exactly one entry in the listing."""
        url = self.MAPPING_URL
        self._create_default_mapping_entry()
        resp = self.get(url)
        entities = resp.result.get('mappings')
        self.assertIsNotNone(entities)
        self.assertResponseStatus(resp, 200)
        self.assertValidListLinks(resp.result.get('links'))
        self.assertEqual(len(entities), 1)

    def test_mapping_delete(self):
        """Delete a mapping; a subsequent GET must return HTTP 404."""
        url = self.MAPPING_URL + '%(mapping_id)s'
        resp = self._create_default_mapping_entry()
        mapping_id = self._get_id_from_response(resp)
        url = url % {'mapping_id': str(mapping_id)}
        resp = self.delete(url)
        self.assertResponseStatus(resp, 204)
        self.get(url, expected_status=404)

    def test_mapping_get(self):
        """Create and later fetch a mapping entity."""
        url = self.MAPPING_URL + '%(mapping_id)s'
        resp = self._create_default_mapping_entry()
        mapping_id = self._get_id_from_response(resp)
        url = url % {'mapping_id': mapping_id}
        resp = self.get(url)
        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)

    def test_mapping_update(self):
        """PATCH a mapping and verify both the response and a re-fetch."""
        url = self.MAPPING_URL + '%(mapping_id)s'
        resp = self._create_default_mapping_entry()
        mapping_id = self._get_id_from_response(resp)
        url = url % {'mapping_id': mapping_id}
        resp = self.patch(url,
                          body={'mapping': mapping_fixtures.MAPPING_SMALL})
        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
        resp = self.get(url)
        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)

    def test_delete_mapping_dne(self):
        """Deleting a nonexistent mapping returns HTTP 404."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.delete(url, expected_status=404)

    def test_get_mapping_dne(self):
        """Fetching a nonexistent mapping returns HTTP 404."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.get(url, expected_status=404)

    def test_create_mapping_bad_requirements(self):
        """Malformed remote requirements are rejected with HTTP 400."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_BAD_REQ})

    def test_create_mapping_no_rules(self):
        """A mapping without rules is rejected with HTTP 400."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_NO_RULES})

    def test_create_mapping_no_remote_objects(self):
        """A mapping without remote objects is rejected with HTTP 400."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_NO_REMOTE})

    def test_create_mapping_bad_value(self):
        """A mapping with an invalid value is rejected with HTTP 400."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_BAD_VALUE})

    def test_create_mapping_missing_local(self):
        """A mapping missing the local section is rejected with HTTP 400."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_MISSING_LOCAL})

    def test_create_mapping_missing_type(self):
        """A remote rule missing 'type' is rejected with HTTP 400."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_MISSING_TYPE})

    def test_create_mapping_wrong_type(self):
        """A rule with the wrong type is rejected with HTTP 400."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_WRONG_TYPE})

    def test_create_mapping_extra_remote_properties_not_any_of(self):
        """Extra remote properties beside not_any_of are rejected (400)."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_NOT_ANY_OF
        self.put(url, expected_status=400, body={'mapping': mapping})

    def test_create_mapping_extra_remote_properties_any_one_of(self):
        """Extra remote properties beside any_one_of are rejected (400)."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_ANY_ONE_OF
        self.put(url, expected_status=400, body={'mapping': mapping})

    def test_create_mapping_extra_remote_properties_just_type(self):
        """Extra remote properties beside 'type' are rejected (400)."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_JUST_TYPE
        self.put(url, expected_status=400, body={'mapping': mapping})

    def test_create_mapping_empty_map(self):
        """An empty mapping body is rejected with HTTP 400."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': {}})

    def test_create_mapping_extra_rules_properties(self):
        """Unexpected properties in the rules are rejected with HTTP 400."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_EXTRA_RULES_PROPS})
class MappingRuleEngineTests(FederationTests):
    """A class for testing the mapping rule engine."""

    def test_rule_engine_any_one_of_and_direct_mapping(self):
        """Should return user's name and group id EMPLOYEE_GROUP_ID.

        The ADMIN_ASSERTION should successfully have a match in MAPPING_LARGE.
        This will test the case where `any_one_of` is valid, and there is
        a direct mapping for the user's name.
        """
        mapping = mapping_fixtures.MAPPING_LARGE
        assertion = mapping_fixtures.ADMIN_ASSERTION
        rp = mapping_utils.RuleProcessor(mapping['rules'])
        values = rp.process(assertion)

        # The mapping builds the user name from first and last name.
        fn = assertion.get('FirstName')
        ln = assertion.get('LastName')
        full_name = '%s %s' % (fn, ln)

        group_ids = values.get('group_ids')
        name = values.get('name')

        self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
        self.assertEqual(name, full_name)

    def test_rule_engine_no_regex_match(self):
        """Should deny authorization, the email of the tester won't match.

        This will not match since the email in the assertion will fail
        the regex test. It is set to match any @example.com address.
        But the incoming value is set to eviltester@example.org.
        RuleProcessor should raise exception.Unauthorized exception.
        """
        mapping = mapping_fixtures.MAPPING_LARGE
        assertion = mapping_fixtures.BAD_TESTER_ASSERTION
        rp = mapping_utils.RuleProcessor(mapping['rules'])
        self.assertRaises(exception.Unauthorized,
                          rp.process, assertion)

    def test_rule_engine_any_one_of_many_rules(self):
        """Should return group CONTRACTOR_GROUP_ID.

        The CONTRACTOR_ASSERTION should successfully have a match in
        MAPPING_SMALL. This will test the case where many rules
        must be matched, including an `any_one_of`, and a direct
        mapping.
        """
        mapping = mapping_fixtures.MAPPING_SMALL
        assertion = mapping_fixtures.CONTRACTOR_ASSERTION
        rp = mapping_utils.RuleProcessor(mapping['rules'])
        values = rp.process(assertion)

        user_name = assertion.get('UserName')
        group_ids = values.get('group_ids')
        name = values.get('name')

        self.assertEqual(user_name, name)
        self.assertIn(mapping_fixtures.CONTRACTOR_GROUP_ID, group_ids)

    def test_rule_engine_not_any_of_and_direct_mapping(self):
        """Should return user's name and email.

        The CUSTOMER_ASSERTION should successfully have a match in
        MAPPING_LARGE. This will test the case where a requirement
        has `not_any_of`, and direct mapping to a username, no group.
        """
        mapping = mapping_fixtures.MAPPING_LARGE
        assertion = mapping_fixtures.CUSTOMER_ASSERTION
        rp = mapping_utils.RuleProcessor(mapping['rules'])
        values = rp.process(assertion)

        user_name = assertion.get('UserName')
        group_ids = values.get('group_ids')
        name = values.get('name')

        self.assertEqual(name, user_name)
        # No group rule matched, so the group list must be empty.
        self.assertEqual(group_ids, [])

    def test_rule_engine_not_any_of_many_rules(self):
        """Should return group EMPLOYEE_GROUP_ID.

        The EMPLOYEE_ASSERTION should successfully have a match in
        MAPPING_SMALL. This will test the case where many remote
        rules must be matched, including a `not_any_of`.
        """
        mapping = mapping_fixtures.MAPPING_SMALL
        assertion = mapping_fixtures.EMPLOYEE_ASSERTION
        rp = mapping_utils.RuleProcessor(mapping['rules'])
        values = rp.process(assertion)

        user_name = assertion.get('UserName')
        group_ids = values.get('group_ids')
        name = values.get('name')

        self.assertEqual(name, user_name)
        self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)

    def _rule_engine_regex_match_and_many_groups(self, assertion):
        """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.

        A helper function injecting assertion passed as an argument.
        Expect DEVELOPER_GROUP_ID and TESTER_GROUP_ID in the results.
        """
        mapping = mapping_fixtures.MAPPING_LARGE
        rp = mapping_utils.RuleProcessor(mapping['rules'])
        values = rp.process(assertion)

        user_name = assertion.get('UserName')
        group_ids = values.get('group_ids')
        name = values.get('name')

        self.assertEqual(user_name, name)
        self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids)
        self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)

    def test_rule_engine_regex_match_and_many_groups(self):
        """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.

        The TESTER_ASSERTION should successfully have a match in
        MAPPING_LARGE. This will test a successful regex match
        for an `any_one_of` evaluation type, and will have many
        groups returned.
        """
        self._rule_engine_regex_match_and_many_groups(
            mapping_fixtures.TESTER_ASSERTION)

    def test_rule_engine_discards_nonstring_objects(self):
        """Check whether RuleProcessor discards non string objects.

        Despite the fact that assertion is malformed and contains
        non string objects, RuleProcessor should correctly discard them and
        successfully have a match in MAPPING_LARGE.
        """
        self._rule_engine_regex_match_and_many_groups(
            mapping_fixtures.MALFORMED_TESTER_ASSERTION)

    def test_rule_engine_fails_after_discarding_nonstring(self):
        """Check whether RuleProcessor discards non string objects.

        Expect RuleProcessor to discard non string object, which
        is required for a correct rule match. Since no rules are
        matched expect RuleProcessor to raise exception.Unauthorized
        exception.
        """
        mapping = mapping_fixtures.MAPPING_SMALL
        rp = mapping_utils.RuleProcessor(mapping['rules'])
        assertion = mapping_fixtures.CONTRACTOR_MALFORMED_ASSERTION
        self.assertRaises(exception.Unauthorized,
                          rp.process, assertion)
class FederatedTokenTests(FederationTests):
IDP = 'ORG_IDP'
PROTOCOL = 'saml2'
AUTH_METHOD = 'saml2'
USER = 'user@ORGANIZATION'
ASSERTION_PREFIX = 'PREFIX_'
UNSCOPED_V3_SAML2_REQ = {
"identity": {
"methods": [AUTH_METHOD],
AUTH_METHOD: {
"identity_provider": IDP,
"protocol": PROTOCOL
}
}
}
AUTH_URL = '/auth/tokens'
def setUp(self):
super(FederationTests, self).setUp()
self.load_sample_data()
self.load_federation_sample_data()
def idp_ref(self, id=None):
idp = {
'id': id or uuid.uuid4().hex,
'enabled': True,
'description': uuid.uuid4().hex
}
return idp
def proto_ref(self, mapping_id=None):
proto = {
'id': uuid.uuid4().hex,
'mapping_id': mapping_id or uuid.uuid4().hex
}
return proto
def mapping_ref(self, rules=None):
return {
'id': uuid.uuid4().hex,
'rules': rules or self.rules['rules']
}
def _assertSerializeToXML(self, json_body):
"""Serialize JSON body to XML.
Serialize JSON body to XML, then deserialize to JSON
again. Expect both JSON dictionaries to be equal.
"""
xml_body = serializer.to_xml(json_body)
json_deserialized = serializer.from_xml(xml_body)
self.assertDictEqual(json_deserialized, json_body)
def _scope_request(self, unscoped_token_id, scope, scope_id):
return {
'auth': {
'identity': {
'methods': [
self.AUTH_METHOD
],
self.AUTH_METHOD: {
'id': unscoped_token_id
}
},
'scope': {
scope: {
'id': scope_id
}
}
}
}
def _project(self, project):
return (project['id'], project['name'])
def _roles(self, roles):
return set([(r['id'], r['name']) for r in roles])
def _check_projects_and_roles(self, token, roles, projects):
"""Check whether the projects and the roles match."""
token_roles = token.get('roles')
if token_roles is None:
raise AssertionError('Roles not found in the token')
token_roles = self._roles(token_roles)
roles_ref = self._roles(roles)
self.assertEqual(token_roles, roles_ref)
token_projects = token.get('project')
if token_projects is None:
raise AssertionError('Projects not found in the token')
token_projects = self._project(token_projects)
projects_ref = self._project(projects)
self.assertEqual(token_projects, projects_ref)
def _check_scoped_token_attributes(self, token):
def xor_project_domain(iterable):
return sum(('project' in iterable, 'domain' in iterable)) % 2
for obj in ('user', 'catalog', 'expires_at', 'issued_at',
'methods', 'roles'):
self.assertIn(obj, token)
# Check for either project or domain
if not xor_project_domain(token.keys()):
raise AssertionError("You must specify either"
"project or domain.")
def _issue_unscoped_token(self, assertion='EMPLOYEE_ASSERTION'):
api = federation_controllers.Auth()
context = {'environment': {}}
self._inject_assertion(context, assertion)
r = api.federated_authentication(context, self.IDP, self.PROTOCOL)
return r
def test_issue_unscoped_token(self):
r = self._issue_unscoped_token()
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_serialize_to_xml(self):
"""Issue unscoped token and serialize to XML.
Make sure common.serializer doesn't complain about
the response structure and tag names.
"""
r = self._issue_unscoped_token()
token_resp = r.json_body
# Remove 'extras' if empty or None,
# as JSON and XML (de)serializers treat
# them differently, making dictionaries
# comparisions fail.
if not token_resp['token'].get('extras'):
token_resp['token'].pop('extras')
self._assertSerializeToXML(token_resp)
def test_issue_unscoped_token_no_groups(self):
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token,
assertion='BAD_TESTER_ASSERTION')
def test_issue_unscoped_token_malformed_environment(self):
"""Test whether non string objects are filtered out.
Put non string objects into the environment, inject
correct assertion and try to get an unscoped token.
Expect server not to fail on using split() method on
non string objects and return token id in the HTTP header.
"""
api = auth_controllers.Auth()
context = {
'environment': {
'malformed_object': object(),
'another_bad_idea': tuple(xrange(10)),
'yet_another_bad_param': dict(zip(uuid.uuid4().hex,
range(32)))
}
}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_scope_to_project_once(self):
r = self.post(self.AUTH_URL,
body=self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self.assertEqual(project_id, self.proj_employees['id'])
self._check_scoped_token_attributes(token_resp)
roles_ref = [self.role_employee]
projects_ref = self.proj_employees
self._check_projects_and_roles(token_resp, roles_ref, projects_ref)
def test_scope_to_bad_project(self):
"""Scope unscoped token with a project we don't have access to."""
self.post(self.AUTH_URL,
body=self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
expected_status=401)
def test_scope_to_project_multiple_times(self):
"""Try to scope the unscoped token multiple times.
The new tokens should be scoped to:
* Customers' project
* Employees' project
"""
bodies = (self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN,
self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN)
project_ids = (self.proj_employees['id'],
self.proj_customers['id'])
for body, project_id_ref in zip(bodies, project_ids):
r = self.post(self.AUTH_URL, body=body)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self.assertEqual(project_id, project_id_ref)
self._check_scoped_token_attributes(token_resp)
def test_scope_token_from_nonexistent_unscoped_token(self):
"""Try to scope token from non-existent unscoped token."""
self.post(self.AUTH_URL,
body=self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN,
expected_status=404)
def test_issue_token_from_rules_without_user(self):
api = auth_controllers.Auth()
context = {'environment': {}}
self._inject_assertion(context, 'BAD_TESTER_ASSERTION')
self.assertRaises(exception.Unauthorized,
api.authenticate_for_token,
context, self.UNSCOPED_V3_SAML2_REQ)
def test_issue_token_with_nonexistent_group(self):
"""Inject assertion that matches rule issuing bad group id.
Expect server to find out that some groups are missing in the
backend and raise exception.MappedGroupNotFound exception.
"""
self.assertRaises(exception.MappedGroupNotFound,
self._issue_unscoped_token,
assertion='CONTRACTOR_ASSERTION')
def test_scope_to_domain_once(self):
r = self.post(self.AUTH_URL,
body=self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER)
token_resp = r.result['token']
domain_id = token_resp['domain']['id']
self.assertEqual(domain_id, self.domainA['id'])
self._check_scoped_token_attributes(token_resp)
def test_scope_to_domain_multiple_tokens(self):
"""Issue multiple tokens scoping to different domains.
The new tokens should be scoped to:
* domainA
* domainB
* domainC
"""
bodies = (self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN,
self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN,
self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN)
domain_ids = (self.domainA['id'],
self.domainB['id'],
self.domainC['id'])
for body, domain_id_ref in zip(bodies, domain_ids):
r = self.post(self.AUTH_URL, body=body)
token_resp = r.result['token']
domain_id = token_resp['domain']['id']
self.assertEqual(domain_id, domain_id_ref)
self._check_scoped_token_attributes(token_resp)
def test_list_projects(self):
url = '/OS-FEDERATION/projects'
token = (self.tokens['CUSTOMER_ASSERTION'],
self.tokens['EMPLOYEE_ASSERTION'],
self.tokens['ADMIN_ASSERTION'])
projects_refs = (set([self.proj_customers['id']]),
set([self.proj_employees['id'],
self.project_all['id']]),
set([self.proj_employees['id'],
self.project_all['id'],
self.proj_customers['id']]))
for token, projects_ref in zip(token, projects_refs):
r = self.get(url, token=token)
projects_resp = r.result['projects']
projects = set(p['id'] for p in projects_resp)
self.assertEqual(projects, projects_ref)
def test_list_domains(self):
url = '/OS-FEDERATION/domains'
tokens = (self.tokens['CUSTOMER_ASSERTION'],
self.tokens['EMPLOYEE_ASSERTION'],
self.tokens['ADMIN_ASSERTION'])
domain_refs = (set([self.domainA['id']]),
set([self.domainA['id'],
self.domainB['id']]),
set([self.domainA['id'],
self.domainB['id'],
self.domainC['id']]))
for token, domains_ref in zip(tokens, domain_refs):
r = self.get(url, token=token)
domains_resp = r.result['domains']
domains = set(p['id'] for p in domains_resp)
self.assertEqual(domains, domains_ref)
def test_full_workflow(self):
"""Test 'standard' workflow for granting access tokens.
* Issue unscoped token
* List available projects based on groups
* Scope token to a one of available projects
"""
r = self._issue_unscoped_token()
employee_unscoped_token_id = r.headers.get('X-Subject-Token')
r = self.get('/OS-FEDERATION/projects',
token=employee_unscoped_token_id)
projects = r.result['projects']
random_project = random.randint(0, len(projects)) - 1
project = projects[random_project]
v3_scope_request = self._scope_request(employee_unscoped_token_id,
'project', project['id'])
r = self.post(self.AUTH_URL, body=v3_scope_request)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self.assertEqual(project_id, project['id'])
self._check_scoped_token_attributes(token_resp)
    def test_workflow_with_groups_deletion(self):
        """Test full workflow with groups deletion before token scoping.

        The test scenario is as follows:
         - Create group ``group``
         - Create and assign roles to ``group`` and ``project_all``
         - Patch mapping rules for existing IdP so it issues group id
         - Issue unscoped token with ``group``'s id
         - Delete group ``group``
         - Scope token to ``project_all``
         - Expect HTTP 500 response
        """
        # create group and role
        group = self.new_group_ref(
            domain_id=self.domainA['id'])
        self.identity_api.create_group(group['id'],
                                       group)
        role = self.new_role_ref()
        self.assignment_api.create_role(role['id'],
                                        role)
        # assign role to group and project_admins
        self.assignment_api.create_grant(role['id'],
                                         group_id=group['id'],
                                         project_id=self.project_all['id'])
        # Mapping rule that maps assertions with LastName == 'Account'
        # onto the freshly-created group's id.
        rules = {
            'rules': [
                {
                    'local': [
                        {
                            'group': {
                                'id': group['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName'
                        },
                        {
                            'type': 'LastName',
                            'any_one_of': [
                                'Account'
                            ]
                        }
                    ]
                }
            ]
        }
        self.federation_api.update_mapping(self.mapping['id'], rules)
        # The TESTER assertion matches the rule above, so the unscoped
        # token carries the group id that is about to be deleted.
        r = self._issue_unscoped_token(assertion='TESTER_ASSERTION')
        token_id = r.headers.get('X-Subject-Token')
        # delete group
        self.identity_api.delete_group(group['id'])
        # scope token to project_all, expect HTTP 500
        scoped_token = self._scope_request(
            token_id, 'project',
            self.project_all['id'])
        self.post(self.AUTH_URL,
                  body=scoped_token,
                  expected_status=500)
def test_assertion_prefix_parameter(self):
"""Test parameters filtering based on the prefix.
With ``assertion_prefix`` set to fixed, non defailt value,
issue an unscoped token from assertion EMPLOYEE_ASSERTION_PREFIXED.
Expect server to return unscoped token.
"""
self.config_fixture.config(group='federation',
assertion_prefix=self.ASSERTION_PREFIX)
r = self._issue_unscoped_token(assertion='EMPLOYEE_ASSERTION_PREFIXED')
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_assertion_prefix_parameter_expect_fail(self):
"""Test parameters filtering based on the prefix.
With ``assertion_prefix`` default value set to empty string
issue an unscoped token from assertion EMPLOYEE_ASSERTION.
Next, configure ``assertion_prefix`` to value ``UserName``.
Try issuing unscoped token with EMPLOYEE_ASSERTION.
Expect server to raise exception.Unathorized exception.
"""
r = self._issue_unscoped_token()
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
self.config_fixture.config(group='federation',
assertion_prefix='UserName')
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token)
    def load_federation_sample_data(self):
        """Inject additional data.

        Creates three domains (A, B, C), three projects in domainA,
        three groups (employees, customers, admins), three roles, and a
        set of grants giving each group progressively wider access.  It
        then installs an IdP, a mapping and a SAML protocol, issues one
        fake token per assertion variant, and precomputes the scope
        request bodies used by the tests.
        """
        # Create and add domains
        self.domainA = self.new_domain_ref()
        self.assignment_api.create_domain(self.domainA['id'],
                                          self.domainA)
        self.domainB = self.new_domain_ref()
        self.assignment_api.create_domain(self.domainB['id'],
                                          self.domainB)
        self.domainC = self.new_domain_ref()
        self.assignment_api.create_domain(self.domainC['id'],
                                          self.domainC)
        # Create and add projects
        self.proj_employees = self.new_project_ref(
            domain_id=self.domainA['id'])
        self.assignment_api.create_project(self.proj_employees['id'],
                                           self.proj_employees)
        self.proj_customers = self.new_project_ref(
            domain_id=self.domainA['id'])
        self.assignment_api.create_project(self.proj_customers['id'],
                                           self.proj_customers)
        self.project_all = self.new_project_ref(
            domain_id=self.domainA['id'])
        self.assignment_api.create_project(self.project_all['id'],
                                           self.project_all)
        # Create and add groups
        self.group_employees = self.new_group_ref(
            domain_id=self.domainA['id'])
        self.identity_api.create_group(self.group_employees['id'],
                                       self.group_employees)
        self.group_customers = self.new_group_ref(
            domain_id=self.domainA['id'])
        self.identity_api.create_group(self.group_customers['id'],
                                       self.group_customers)
        self.group_admins = self.new_group_ref(
            domain_id=self.domainA['id'])
        self.identity_api.create_group(self.group_admins['id'],
                                       self.group_admins)
        # Create and add roles
        self.role_employee = self.new_role_ref()
        self.assignment_api.create_role(self.role_employee['id'],
                                        self.role_employee)
        self.role_customer = self.new_role_ref()
        self.assignment_api.create_role(self.role_customer['id'],
                                        self.role_customer)
        self.role_admin = self.new_role_ref()
        self.assignment_api.create_role(self.role_admin['id'],
                                        self.role_admin)
        # Employees can access
        # * proj_employees
        # * project_all
        self.assignment_api.create_grant(self.role_employee['id'],
                                         group_id=self.group_employees['id'],
                                         project_id=self.proj_employees['id'])
        self.assignment_api.create_grant(self.role_employee['id'],
                                         group_id=self.group_employees['id'],
                                         project_id=self.project_all['id'])
        # Customers can access
        # * proj_customers
        self.assignment_api.create_grant(self.role_customer['id'],
                                         group_id=self.group_customers['id'],
                                         project_id=self.proj_customers['id'])
        # Admins can access:
        # * proj_customers
        # * proj_employees
        # * project_all
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         project_id=self.proj_customers['id'])
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         project_id=self.proj_employees['id'])
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         project_id=self.project_all['id'])
        # NOTE(review): this grant duplicates the "Customers can access
        # domain A" grant created immediately below; one of the two is
        # redundant.
        self.assignment_api.create_grant(self.role_customer['id'],
                                         group_id=self.group_customers['id'],
                                         domain_id=self.domainA['id'])
        # Customers can access:
        # * domain A
        self.assignment_api.create_grant(self.role_customer['id'],
                                         group_id=self.group_customers['id'],
                                         domain_id=self.domainA['id'])
        # Employees can access:
        # * domain A
        # * domain B
        self.assignment_api.create_grant(self.role_employee['id'],
                                         group_id=self.group_employees['id'],
                                         domain_id=self.domainA['id'])
        self.assignment_api.create_grant(self.role_employee['id'],
                                         group_id=self.group_employees['id'],
                                         domain_id=self.domainB['id'])
        # Admins can access:
        # * domain A
        # * domain B
        # * domain C
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         domain_id=self.domainA['id'])
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         domain_id=self.domainB['id'])
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         domain_id=self.domainC['id'])
        # Mapping rules used by the sample IdP.  Each rule maps a remote
        # assertion pattern onto one or more local groups plus the user
        # name from the first remote attribute ('{0}').
        self.rules = {
            'rules': [
                # UserName + orgPersonType == 'Employee' -> employees group
                {
                    'local': [
                        {
                            'group': {
                                'id': self.group_employees['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName'
                        },
                        {
                            'type': 'orgPersonType',
                            'any_one_of': [
                                'Employee'
                            ]
                        }
                    ]
                },
                # Prefixed attributes (see test_assertion_prefix_parameter)
                {
                    'local': [
                        {
                            'group': {
                                'id': self.group_employees['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': self.ASSERTION_PREFIX + 'UserName'
                        },
                        {
                            'type': self.ASSERTION_PREFIX + 'orgPersonType',
                            'any_one_of': [
                                'SuperEmployee'
                            ]
                        }
                    ]
                },
                # orgPersonType == 'Customer' -> customers group
                {
                    'local': [
                        {
                            'group': {
                                'id': self.group_customers['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName'
                        },
                        {
                            'type': 'orgPersonType',
                            'any_one_of': [
                                'Customer'
                            ]
                        }
                    ]
                },
                # 'Admin'/'Chief' -> all three groups
                {
                    'local': [
                        {
                            'group': {
                                'id': self.group_admins['id']
                            }
                        },
                        {
                            'group': {
                                'id': self.group_employees['id']
                            }
                        },
                        {
                            'group': {
                                'id': self.group_customers['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName'
                        },
                        {
                            'type': 'orgPersonType',
                            'any_one_of': [
                                'Admin',
                                'Chief'
                            ]
                        }
                    ]
                },
                # Jill Smith: maps to customers plus a random (nonexistent)
                # group id, exercising partial group resolution.
                {
                    'local': [
                        {
                            'group': {
                                'id': uuid.uuid4().hex
                            }
                        },
                        {
                            'group': {
                                'id': self.group_customers['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName',
                        },
                        {
                            'type': 'FirstName',
                            'any_one_of': [
                                'Jill'
                            ]
                        },
                        {
                            'type': 'LastName',
                            'any_one_of': [
                                'Smith'
                            ]
                        }
                    ]
                },
                # Tester: maps to a group id that no longer exists.
                {
                    'local': [
                        {
                            'group': {
                                'id': 'this_group_no_longer_exists'
                            }
                        },
                        {
                            'user': {
                                'name': '{0}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName',
                        },
                        {
                            'type': 'Email',
                            'any_one_of': [
                                'testacct@example.com'
                            ]
                        },
                        {
                            'type': 'orgPersonType',
                            'any_one_of': [
                                'Tester'
                            ]
                        }
                    ]
                },
            ]
        }
        # Add IDP
        self.idp = self.idp_ref(id=self.IDP)
        self.federation_api.create_idp(self.idp['id'],
                                       self.idp)
        # Add a mapping
        self.mapping = self.mapping_ref()
        self.federation_api.create_mapping(self.mapping['id'],
                                           self.mapping)
        # Add protocols
        self.proto_saml = self.proto_ref(mapping_id=self.mapping['id'])
        self.proto_saml['id'] = self.PROTOCOL
        self.federation_api.create_protocol(self.idp['id'],
                                            self.proto_saml['id'],
                                            self.proto_saml)
        # Generate fake tokens
        context = {'environment': {}}
        self.tokens = {}
        VARIANTS = ('EMPLOYEE_ASSERTION', 'CUSTOMER_ASSERTION',
                    'ADMIN_ASSERTION')
        api = auth_controllers.Auth()
        for variant in VARIANTS:
            self._inject_assertion(context, variant)
            r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
            self.tokens[variant] = r.headers.get('X-Subject-Token')
        # Precomputed request bodies used by the scope tests.
        self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN = self._scope_request(
            uuid.uuid4().hex, 'project', self.proj_customers['id'])
        self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE = self._scope_request(
            self.tokens['EMPLOYEE_ASSERTION'], 'project',
            self.proj_employees['id'])
        self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN = self._scope_request(
            self.tokens['ADMIN_ASSERTION'], 'project',
            self.proj_employees['id'])
        self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN = self._scope_request(
            self.tokens['ADMIN_ASSERTION'], 'project',
            self.proj_customers['id'])
        self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER = self._scope_request(
            self.tokens['CUSTOMER_ASSERTION'], 'project',
            self.proj_employees['id'])
        self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER = self._scope_request(
            self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainA['id'])
        # NOTE(review): TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER is assigned
        # twice with equivalent values; the second assignment is
        # redundant.
        self.TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER = self._scope_request(
            self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainB['id'])
        self.TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER = self._scope_request(
            self.tokens['CUSTOMER_ASSERTION'], 'domain',
            self.domainB['id'])
        self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN = self._scope_request(
            self.tokens['ADMIN_ASSERTION'], 'domain', self.domainA['id'])
        self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN = self._scope_request(
            self.tokens['ADMIN_ASSERTION'], 'domain', self.domainB['id'])
        self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN = self._scope_request(
            self.tokens['ADMIN_ASSERTION'], 'domain',
            self.domainC['id'])
def _inject_assertion(self, context, variant):
assertion = getattr(mapping_fixtures, variant)
context['environment'].update(assertion)
context['query_string'] = []
| {
"content_hash": "361001bf191af602984783f90193ff1d",
"timestamp": "",
"source": "github",
"line_count": 1544,
"max_line_length": 79,
"avg_line_length": 38.15349740932643,
"alnum_prop": 0.5162708584426828,
"repo_name": "JioCloud/keystone",
"id": "890934b61b8d95e05eb1f1e9f4faf350e94a63cb",
"size": "59455",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "keystone/tests/test_v3_federation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3007474"
},
{
"name": "Shell",
"bytes": "10512"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, authenticate
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from app.core.forms import RegistroDesocupado, RegistroEmpresa
from app.core.forms import RegistroOferta
from app.core.models import *
@login_required
def home(request):
    """Render the landing page for the authenticated user."""
    current_user = request.user
    # Reload from the database so stale session state is not displayed.
    current_user.refresh_from_db()
    return render(request, 'home.html', {'user': current_user})
def registro_desocupado(request):
    """Sign-up view for unemployed users.

    A request reaches this view either as GET (the URL was typed or a
    link followed) or as POST (the sign-up form was submitted); each
    case is delegated to its own helper.
    """
    if request.method == 'GET':
        # Just display the page.
        return get_registro_desocupado_form(request)
    if request.method == 'POST':
        # Process the submitted form.
        return handle_registro_desocupado_form(request)
def get_registro_desocupado_form(request):
    """Render an empty sign-up form for unemployed users."""
    return render(request, 'signup.html', {'form': RegistroDesocupado()})
def handle_registro_desocupado_form(request):
    """Validate and persist the unemployed-user sign-up form.

    The form is bound to the POST data; since the Django form is tied to
    the model, a valid form can be persisted directly with ``save()``.
    """
    form = RegistroDesocupado(request.POST)
    if not form.is_valid():
        # Stay on the sign-up page and display the validation errors.
        return render(request, 'signup.html', {'form': form})
    form.save()
    # Registration succeeded: send the user to the login screen.
    return redirect('login')
def registro_empresa(request):
    """Sign-up view for companies: GET shows the form, POST processes it."""
    if request.method == 'GET':
        return get_registro_empresa_form(request)
    if request.method == 'POST':
        return handle_registro_empresa_form(request)
def get_registro_empresa_form(request):
    """Render an empty company sign-up form."""
    return render(request, 'signup.html', {'form': RegistroEmpresa()})
def handle_registro_empresa_form(request):
    """Validate the submitted company sign-up form and create the account."""
    form = RegistroEmpresa(request.POST)
    if not form.is_valid():
        # Redisplay the page with the validation errors.
        return render(request, 'signup.html', {'form': form})
    form.save()
    return redirect('login')
def registro_oferta(request):
    """Job-offer registration view: GET shows the form, POST processes it."""
    if request.method == 'GET':
        return get_registro_oferta_form(request)
    if request.method == 'POST':
        return handle_registro_oferta_form(request)
def get_registro_oferta_form(request):
    """Render an empty job-offer registration form."""
    return render(request, 'registrar_oferta.html', {'form': RegistroOferta()})
def handle_registro_oferta_form(request):
    """Validate the submitted job-offer form and persist the offer."""
    form = RegistroOferta(request.POST)
    if not form.is_valid():
        # Redisplay the page with the validation errors.
        return render(request, 'registrar_oferta.html', {'form': form})
    form.save()
    return redirect('home')
def eliminar(request):
    """Delete the account of the requesting user and confirm it.

    NOTE(review): there is no @login_required guard, so an anonymous
    request reaches ``User.objects.get`` with ``AnonymousUser.id``
    (None) and raises -- confirm this view is only reachable when
    authenticated.
    """
    user_id = request.user.id
    User.objects.get(id=user_id).delete()
    return render(request, 'eliminar.html', {'id': user_id})
def modificar_desocupado(request):
    """Dispatch the 'edit unemployed user' view by HTTP method.

    NOTE(review): neither ``get_modificar_desocupado_form`` nor
    ``handle_modificar_desocupado_form`` is defined in this module
    (only ``handle_modificar_desocupado``, without the ``_form``
    suffix, exists), so both branches raise NameError at runtime.
    The helper names need to be reconciled.
    """
    if request.method == "GET":
        return get_modificar_desocupado_form(request)
    elif request.method == 'POST':
        return handle_modificar_desocupado_form(request)
def modificar_empresa(request):
    """Dispatch the 'edit company' view by HTTP method.

    NOTE(review): neither ``get_modificar_empresa_form`` nor
    ``handle_modificar_empresa_form`` is defined in this module (only
    ``handle_modificar_empresa``, without the ``_form`` suffix,
    exists), so both branches raise NameError at runtime.  The helper
    names need to be reconciled.
    """
    if request.method == "GET":
        return get_modificar_empresa_form(request)
    elif request.method == 'POST':
        return handle_modificar_empresa_form(request)
def handle_modificar_desocupado(request):
    """Validate and save the 'edit unemployed user' form.

    NOTE(review): ``ModificarDesocupado`` is not imported in this
    module (only the Registro* forms are), so calling this raises
    NameError; it is also never reached because the dispatcher calls
    ``handle_modificar_desocupado_form``.  Import the intended form
    class and wire the dispatcher to this handler.
    """
    form = ModificarDesocupado(request.POST)
    if form.is_valid():
        form.save()
        return redirect('login')
    else:
        return render(request, 'modificar_desocupado.html', {'form': form})
def handle_modificar_empresa(request):
    """Validate and save the 'edit company' form.

    NOTE(review): ``ModificarEmpresa`` is not imported in this module,
    so calling this raises NameError; it is also never reached because
    the dispatcher calls ``handle_modificar_empresa_form``.  The
    template name 'modificar_Empresa.html' has an unusual capital 'E'
    -- presumably a typo; verify against the templates directory.
    """
    form = ModificarEmpresa(request.POST)
    if form.is_valid():
        form.save()
        return redirect('login')
    else:
        return render(request, 'modificar_Empresa.html', {'form': form})
| {
"content_hash": "ffa8a2ae50d9977177c436b9616cbcf7",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 80,
"avg_line_length": 35.432,
"alnum_prop": 0.6884172499435538,
"repo_name": "YumiBunny/Gusoktor",
"id": "7e55935f349f8c2b5a60150923f602da18236b0d",
"size": "4446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/core/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5520"
},
{
"name": "HTML",
"bytes": "14277"
},
{
"name": "Python",
"bytes": "28024"
}
],
"symlink_target": ""
} |
class EventHandler(list):
    """
    A list of callable objects that can be fired as a single event.
    Invoking an instance calls every registered handler in ascending
    order by index, forwarding all positional and keyword arguments.

    Code taken from: https://stackoverflow.com/a/2022629

    To subscribe to the event simply append a function to the event handler:

    ``event_handler.append(fcn_to_call_on_event)``
    """

    def __call__(self, *args, **kwargs):
        # Fire in registration (list) order.
        for handler in list(self):
            handler(*args, **kwargs)

    def __repr__(self):
        return "Event(%s)" % list.__repr__(self)
| {
"content_hash": "bd6eb9a7d15e5dffaa47d3633e3339f5",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 115,
"avg_line_length": 31.470588235294116,
"alnum_prop": 0.6242990654205608,
"repo_name": "EmuKit/emukit",
"id": "dc1770bacc9c490c197f76830d6329c98a3af48b",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "emukit/core/event_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "972291"
},
{
"name": "Stan",
"bytes": "1413"
}
],
"symlink_target": ""
} |
from lflib.lightfield import LightField
from lflib.imageio import save_image
import numpy as np
import time
from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.dia import dia_matrix
from pylflib import compute_long_object_compensation
# -------- Variance stabilizing transforms -----------
def anscombe(x):
    """Anscombe variance-stabilizing transform: 2*sqrt(x + 3/8)."""
    shifted = x + 3.0 / 8.0
    return 2.0 * np.sqrt(shifted)
def inverse_anscombe(z):
    """Algebraic inverse of the Anscombe transform: (z/2)^2 - 3/8."""
    half = z / 2.0
    return half ** 2 - 3.0 / 8.0
def generalized_anscombe(x, mu, sigma, gain=1.0):
    """Generalized Anscombe transform for mixed Poisson-Gaussian noise.

    mu and sigma are the mean and standard deviation of the Gaussian
    component; gain is the detector gain of the Poisson component.
    Reduces to the plain Anscombe transform for gain=1, mu=0, sigma=0.
    """
    stabilized = gain * x + (gain ** 2) * 3.0 / 8.0 + sigma ** 2 - gain * mu
    return (2.0 / gain) * np.sqrt(stabilized)
def inverse_generalized_anscombe(z, mu, sigma, gain=1.0):
    """Algebraic inverse of the generalized Anscombe transform."""
    scaled = gain * z / 2.0
    result = scaled ** 2 - (gain ** 2) * 3.0 / 8.0
    result = result - sigma ** 2
    return result + gain * mu
# ------------------------------------------------------
# TODO:
#
# Not sure if this is necessary, but it's a handy snippet of code.
#
# # Scale A by dividing by its largest eigenvalue. This keeps the algorithm stable!
# print '\t--> Estimating largest eigenvalue of A using power iteration.'
# (A,b,scale_factor) = ScaleA(A,b)
# print '\t Scaled A by the its largest eigenvalue: ', 1.0/np.square(scale_factor)
#
def ScaleA(A,b):
    '''
    Returns: [A,b,scale_factor]

    Scales A and b so that the largest eigenvalue of A.T*A is 1, making
    the scaled least-squares problem equivalent to the original one but
    numerically stable for iterative solvers.

    The dominant eigenvalue of A.T*A is estimated with at most 10 power
    iterations (tolerance 0.05).  If it exceeds 1, both A and b are
    divided by its square root; the third return value is the applied
    scale factor (1/sqrt(norm_z)).

    NOTE(review): this module is Python 2 (print statement below).  If
    the eigenvalue estimate is <= 1 + 1e-10, A and b are returned
    unscaled but the third return value is still 1/sqrt(norm_z) --
    confirm callers expect that.
    '''
    tol = .05;
    max_iterations = 10;
    # PORT : Handle real complex matrices here!
    #
    # if not isreal(A(rand(n,1),1)):
    #    eopts.isreal = false;
    # Compute largest eigenvalue by power iteration
    x = np.random.rand(A.shape[1])
    iteration = 0
    norm_z = tol + 1;
    while (iteration < max_iterations) and (norm_z > tol):
        # One power-iteration step on A^T A.
        z = A.rmatvec(A.matvec(x))
        norm_z = np.linalg.norm(z)
        print norm_z
        x = z / norm_z
        iteration += 1
    if np.real(norm_z) > 1 + 1e-10:
        b = b/np.sqrt(norm_z);
        A_operator = ScaledLinearOperator(A, 1.0/np.sqrt(norm_z))
        A = LinearOperator(A.shape,
                           matvec=A_operator.matvec,
                           rmatvec=A_operator.rmatvec,
                           dtype='float')
    return (A, b, 1.0/np.sqrt(norm_z))
class ScaledLinearOperator(object):
    """Wraps a linear operator so that both matvec and rmatvec results
    are multiplied by a constant scale factor."""

    def __init__(self, A, scale_factor):
        self.A = A
        self.scale_factor = scale_factor

    def matvec(self, x):
        """Apply the wrapped forward operator, then scale the result."""
        return self.scale_factor * self.A.matvec(x)

    def rmatvec(self, x):
        """Apply the wrapped adjoint operator, then scale the result."""
        return self.scale_factor * self.A.rmatvec(x)
class LightFieldOperator(object):
    """Matrix-free linear operator mapping a flattened volume to a
    flattened light field via the projection engine.

    An optional element-wise ``left_preconditioner`` vector, when set,
    is multiplied into the light-field side of both matvec and rmatvec.

    Fix: None tests now use ``is None`` / ``is not None``.  The old
    ``== None`` comparison evaluates element-wise once a numpy array is
    assigned to ``left_preconditioner``, which raises on truth-testing.
    """

    def __init__(self, sirt, db):
        self.sirt = sirt
        self.db = db
        self.left_preconditioner = None

    def matvec(self, vol_vec):
        """Forward-project a flattened volume into a flattened light field."""
        vol = np.reshape(vol_vec, (self.db.ny, self.db.nx, self.db.nz))
        im = self.sirt.project(vol).asimage(representation = LightField.TILED_SUBAPERTURE)
        b = np.reshape(im, (im.shape[0]*im.shape[1]))
        if self.left_preconditioner is None:
            return b
        else:
            return self.left_preconditioner * b

    def rmatvec(self, lf_vec):
        """Back-project a flattened light field into a flattened volume."""
        if self.left_preconditioner is None:
            lf = np.reshape(lf_vec, (self.db.nt*self.db.nv, self.db.ns*self.db.nu))
        else:
            lf = np.reshape(self.left_preconditioner*lf_vec, (self.db.nt*self.db.nv, self.db.ns*self.db.nu))
        vol = self.sirt.backproject(LightField(lf, self.db.nu, self.db.nv, self.db.ns, self.db.nt,
                                               representation = LightField.TILED_SUBAPERTURE))
        return np.reshape(vol, (self.db.nx * self.db.ny * self.db.nz))

    def as_linear_operator(self, nrays, nvoxels):
        """Return a scipy LinearOperator view of this operator."""
        return LinearOperator((nrays, nvoxels),
                              matvec=self.matvec,
                              rmatvec=self.rmatvec,
                              dtype='float')
class NormalEquationLightFieldOperator(object):
    """Matrix-free normal-equation operator: applies A^T A to a volume
    by forward-projecting it to a light field and immediately
    back-projecting the result."""

    def __init__(self, sirt, db):
        self.sirt = sirt
        self.db = db

    def matvec(self, vol_vec):
        """Return A^T A x for a flattened volume vector x."""
        volume = np.reshape(vol_vec.astype(np.float32),
                            (self.db.ny, self.db.nx, self.db.nz))
        lf_image = self.sirt.project(volume).asimage(
            representation = LightField.TILED_SUBAPERTURE)
        lightfield = LightField(lf_image, self.db.nu, self.db.nv,
                                self.db.ns, self.db.nt,
                                representation = LightField.TILED_SUBAPERTURE)
        result = self.sirt.backproject(lightfield)
        return np.reshape(result, (self.db.nx * self.db.ny * self.db.nz))
class RegularizedNormalEquationLightFieldOperator(object):
    """Matrix-free operator applying A^T W A x + lambda^2 x, where A is
    the light-field projection, W an optional squared element-wise
    preconditioner, and lambda the Tikhonov regularization weight.

    Fixes relative to the previous revision:
      * the ``left_preconditioner`` constructor argument was discarded
        (the attribute was unconditionally set to None);
      * ``matvec`` referenced the bare name ``left_preconditioner``
        (a NameError) instead of the instance attribute;
      * None tests use ``is (not) None`` instead of ``!= None``, which
        is element-wise on numpy arrays.
    The commented-out Laplacian regularization experiment was removed;
    see version control history if it is needed again.
    """

    def __init__(self, sirt, db, regularization_lambda, left_preconditioner = None):
        self.sirt = sirt
        self.db = db
        self.regularization_lambda = regularization_lambda
        self.left_preconditioner = left_preconditioner

    def matvec(self, vol_vec):
        """Return (A^T W A + lambda^2 I) x for a flattened volume x."""
        input_vol = np.reshape(vol_vec.astype(np.float32), (self.db.ny, self.db.nx, self.db.nz))
        lf = self.sirt.project(input_vol).asimage(representation = LightField.TILED_SUBAPERTURE)
        if self.left_preconditioner is not None:
            # Apply the squared preconditioner on the light-field side.
            lf *= np.square(self.left_preconditioner)
        output_vol = self.sirt.backproject(LightField(lf, self.db.nu, self.db.nv,
                                                      self.db.ns, self.db.nt,
                                                      representation = LightField.TILED_SUBAPERTURE))
        # L2-Norm (Tikhonov) regularization term.
        output_vol += self.regularization_lambda * self.regularization_lambda * input_vol
        return np.reshape(output_vol, (self.db.nx * self.db.ny * self.db.nz))
class CgIterationFunctor:
    """Per-iteration callback for scipy's cg(): prints timing, residual
    and update norms.

    NOTE(review): this module is Python 2 (print statements below).
    The ``self.prev_x != None`` / ``self.linear_operator != None``
    tests compare against None with ``!=``; on modern numpy the first
    is an element-wise comparison once prev_x holds an array -- prefer
    ``is not None`` when porting.
    """
    def __init__(self, algorithm_name, linear_operator, b, nvoxels, nrays):
        # linear_operator and b are retained so the per-iteration
        # residual ||b - A x|| can be reported; nrays/nvoxels normalize
        # the printed norms.
        self.algorithm_name = algorithm_name
        self.iterations = 0
        self.prev_x = None
        self.last_time = 0
        self.linear_operator = linear_operator
        self.b = b
        self.nvoxels = nvoxels
        self.nrays = nrays
    def iter_callback(self, x):
        toc = time.time()
        # Nothing to report on the very first call (no previous iterate).
        if self.prev_x != None:
            print '\t--> [ CG Iteration %d (%0.2f seconds) ] ' % (self.iterations,
                                                                  toc - self.last_time)
            if self.linear_operator != None:
                residual_norm = np.linalg.norm(self.b - self.linear_operator.matvec(x)) / self.nrays
                print '\t      Residual Norm: %0.4g' % (residual_norm)
            update_norm = np.linalg.norm(self.prev_x-x) / self.nvoxels
            print '\t      Update Norm:   %0.4g' % (update_norm)
        self.last_time = toc
        self.prev_x = np.copy(x)
        self.iterations += 1
# ----------------------------------------------------------------------------------------
# EXPERIMENTAL DECONVOLUTION METHODS
# ----------------------------------------------------------------------------------------
def conjugate_gradient_reconstruction(lfcal, lightfield, alpha,
convergence_threshold, max_iterations, regularization_lambda,
disable_gpu = False, gpu_id = 0):
if lfcal.psf_db != None:
db = lfcal.psf_db
else:
db = lfcal.rayspread_db
from lflib.volume import LightFieldProjection
lfproj = LightFieldProjection(lfcal.rayspread_db, lfcal.psf_db, disable_gpu = disable_gpu, gpu_id = gpu_id)
lfproj.set_premultiplier(lfcal.radiometric_correction)
# Form the b matrix
im_subaperture = lightfield.asimage(representation = LightField.TILED_SUBAPERTURE)
b = np.reshape(im_subaperture, (im_subaperture.shape[0]*im_subaperture.shape[1]))
# Apply the preconditioner
preconditioner = None
# preconditioner = 1.0 / np.sqrt(7.0 + b) # Read noise variance of 7 for now...
if preconditioner != None:
b *= preconditioner
# Uncomment to swap between zero initial volume and focal stack
# volume. (Zero volume start seems to converge faster, though).
vol_vec = np.zeros((db.ny*db.nx*db.nz), dtype=np.float32)
#vol_vec = np.reshape(lfproj.backproject(lightfield), (db.nx * db.ny * db.nz))
# Conjugate gradient requires a square A matrix, so we solve the
# normal equations below, rather than the original problem Ax = b.
A_operator = LightFieldOperator(lfproj, db)
AtA_operator = RegularizedNormalEquationLightFieldOperator(lfproj, db, regularization_lambda, preconditioner)
A = LinearOperator((db.nt*db.nv*db.ns*db.nu, db.nx*db.ny*db.nz),
matvec=A_operator.matvec, rmatvec=A_operator, dtype='float')
AtA = LinearOperator((db.nx*db.ny*db.nz, db.nx*db.ny*db.nz),
matvec=AtA_operator.matvec, rmatvec=AtA_operator.matvec, dtype='float')
At_b = A_operator.rmatvec(b)
print 'Calling Conjugate Gradient solver...'
from scipy.sparse.linalg import cg
nrays = db.ns*db.nt*db.nu*db.nv
nvoxels = db.nx*db.ny*db.nz
iter_func = CgIterationFunctor("CG", A, b, nvoxels, nrays)
(vol_vec, info) = cg(AtA, At_b, x0 = vol_vec, tol = convergence_threshold, maxiter = max_iterations, callback=iter_func.iter_callback)
# Slight hack: zero out the outermost XY "shell" of pixels, since
# these are often subject to radiometry artifacts.
vol = np.reshape(vol_vec, (db.ny, db.nx, db.nz))
min_val = vol[db.supersample_factor:-db.supersample_factor,
db.supersample_factor:-db.supersample_factor, :].min()
print '\t--> Replacing border values with min value: ', min_val
vol[0:db.supersample_factor, :, :] = min_val
vol[-db.supersample_factor:, :, :] = min_val
vol[:, 0:db.supersample_factor, :] = min_val
vol[:, -db.supersample_factor:, :] = min_val
return vol.astype(np.float32)
# ------------------------------- ADMM TOTAL VARIATION SOLVER ----------------------------------
def l1_shrinkage(x, kappa):
    """Soft-thresholding operator (proximal operator of the L1 norm).

    Shrinks x toward zero by kappa; values with |x| <= kappa become 0.
    """
    positive_part = np.maximum(0, x - kappa)
    negative_part = np.maximum(0, -x - kappa)
    return positive_part - negative_part
def sparse_total_variation_matrix(nx, ny, nz):
    '''
    Builds a sparse matrix of first differences for a (ny, nx, nz)
    volume flattened in (y, x, z) order.  The returned matrix is the
    vertical stack of the per-axis difference matrices (Dy, Dx, Dz),
    i.e. shape (3*nvoxels, nvoxels); multiplying it by a flattened
    volume yields the forward differences along each axis (zero at the
    axis boundaries).

    NOTE(review): the docstring below the original described a single
    square Laplacian-like matrix F; the code actually builds and stacks
    three difference matrices.  Also note:
      * LASSO_REG is assigned but never used;
      * the "Tests" section references a name F that is never defined,
        so both try blocks raise NameError, which the bare excepts
        swallow -- the failure messages are always printed (this module
        is Python 2).
    '''
    nvoxels = nx*ny*nz

    # DEBUGGING: ADD A TINY BIT OF LASSO REGULARIZATION TO STABILIZE THE RECONSTRUCTION
    LASSO_REG = 0.0 # 0.1 seems to work well on 1x volumes

    from scipy.sparse import coo_matrix
    # Per-voxel (y, x, z) coordinates for the flattened ordering.
    y_coords = np.reshape(np.tile(np.arange(ny, dtype=np.int32), (nx*nz, 1)), (nvoxels), order='f')
    x_coords = np.reshape(np.tile(np.reshape(np.tile(np.arange(nx, dtype=np.int32), (nz, 1)),
                                             (nz*nx), order='f'), (ny, 1)), (nx*ny*nz))
    z_coords = np.tile(np.arange(nz, dtype=np.int32), (nx*ny))
    # Linear index into coordinates involves combining 3D coordinates in (y, x, z) order (for now).
    diag_coords = y_coords*nx*nz + x_coords*nz + z_coords

    # Form the z+1 difference entries
    valid_idxs = np.nonzero(np.logical_and(z_coords-1 >= 0, z_coords+1 < nz))
    diff_coords = y_coords*nx*nz + x_coords*nz + (z_coords+1)
    Dz = coo_matrix((np.ones(len(valid_idxs[0]))*1.0, (diag_coords[valid_idxs], diff_coords[valid_idxs])),
                    shape = (nvoxels, nvoxels), dtype=np.float32)
    # Form the z-1 difference entries
    diff_coords = y_coords*nx*nz + x_coords*nz + (z_coords-1)
    Dz = Dz + coo_matrix((np.ones(len(valid_idxs[0]))*-1.0, (diag_coords[valid_idxs], diff_coords[valid_idxs])),
                         shape = (nvoxels, nvoxels), dtype=np.float32)

    # Form the x+1 difference entries
    valid_idxs = np.nonzero(np.logical_and(x_coords-1 >= 0, x_coords+1 < nx))
    diff_coords = y_coords*nx*nz + (x_coords+1)*nz + z_coords
    Dx = coo_matrix((np.ones(len(valid_idxs[0]))*1.0, (diag_coords[valid_idxs], diff_coords[valid_idxs])),
                    shape = (nvoxels, nvoxels), dtype=np.float32)
    # Form the x-1 difference entries
    diff_coords = y_coords*nx*nz + (x_coords-1)*nz + z_coords
    Dx = Dx + coo_matrix((np.ones(len(valid_idxs[0]))*-1.0, (diag_coords[valid_idxs], diff_coords[valid_idxs])),
                         shape = (nvoxels, nvoxels), dtype=np.float32)

    # Form the y+1 difference entries
    valid_idxs = np.nonzero(np.logical_and(y_coords-1 >= 0, y_coords+1 < ny))
    diff_coords = (y_coords+1)*nx*nz + x_coords*nz + z_coords
    Dy = coo_matrix((np.ones(len(valid_idxs[0]))*1.0, (diag_coords[valid_idxs], diff_coords[valid_idxs])),
                    shape = (nvoxels, nvoxels), dtype=np.float32)
    # Form the y-1 difference entries
    diff_coords = (y_coords-1)*nx*nz + x_coords*nz + z_coords
    Dy = Dy + coo_matrix((np.ones(len(valid_idxs[0]))*-1.0, (diag_coords[valid_idxs], diff_coords[valid_idxs])),
                         shape = (nvoxels, nvoxels), dtype=np.float32)

    # Tests
    # NOTE(review): F is undefined here; both checks always fall into
    # the except branches (NameError is swallowed by the bare except).
    print 'Testing F matrix...'
    try:
        ones = np.ones((F.shape[0],))
        Finner1 = (F*ones).reshape(ny,nx,nz)[3:-3,3:-3,3:-3]
        print "\t--> Sum of F inner product with ones, cropped:", np.sum(Finner1)
        assert(np.sum(Finner1) == 0.0)
    except:
        print "\t--> First differences matrix has non-zero inner product with vector of ones!!!"
    try:
        ones = np.ones((F.shape[0],))
        Finner1 = (F.T*ones).reshape(ny,nx,nz)[3:-3,3:-3,3:-3]
        print "\t--> Sum of F.T inner product with ones, cropped:", np.sum(Finner1)
        assert(np.sum(Finner1) == 0.0)
    except:
        print "\t--> First differences matrix transpose has non-zero inner product with vector of ones!!!"

    # TODO: Think about relative weight of Z edges vs. X/Y edges.
    import scipy.sparse
    vs = scipy.sparse.vstack((Dy, Dx, Dz))
    return vs
class AugmentedLightFieldOperator(object):
    """Matrix-free operator for the ADMM x-update least-squares step.

    matvec stacks the light-field projection A x on top of the
    regularization term sqrt(rho) * F x (or sqrt(rho) * x when no
    structure matrix F is supplied); rmatvec applies the corresponding
    adjoint.

    Fix: None tests now use ``is None`` / ``is not None``.  The old
    ``!= None`` comparison is element-wise on the scipy sparse
    ``structure_matrix``, not an identity test.
    """

    def __init__(self, sirt, db, rho, structure_matrix):
        self.sirt = sirt
        self.db = db
        self.rho = rho
        self.structure_matrix = structure_matrix

    def matvec(self, vol_vec):
        """Return [A x ; sqrt(rho) F x] for a flattened volume x."""
        # Compute A*x
        vol = np.reshape(vol_vec, (self.db.ny, self.db.nx, self.db.nz))
        im = self.sirt.project(vol).asimage(representation = LightField.TILED_SUBAPERTURE)
        im_vec = np.reshape(im, (im.shape[0]*im.shape[1]))
        # Regularization half of the stacked system.
        if self.structure_matrix is not None:
            reg_vec = np.sqrt(self.rho) * self.structure_matrix * vol_vec
        else:
            reg_vec = np.sqrt(self.rho) * vol_vec
        return np.concatenate((im_vec, reg_vec), axis=0)

    def rmatvec(self, vec):
        """Return A^T v1 + sqrt(rho) F^T v2 for a stacked vector [v1; v2]."""
        # Compute transpose(A)*x on the light-field portion.
        lf_vec_len = (self.db.ns*self.db.nt*self.db.nu*self.db.nv)
        lf_vec = vec[0:lf_vec_len]
        lf = np.reshape(lf_vec, (self.db.nt*self.db.nv, self.db.ns*self.db.nu))
        vol = self.sirt.backproject(LightField(lf, self.db.nu, self.db.nv, self.db.ns, self.db.nt,
                                               representation = LightField.TILED_SUBAPERTURE))
        vol_vec = np.reshape(vol, (self.db.nx * self.db.ny * self.db.nz))
        # Adjoint of the regularization half.
        if self.structure_matrix is not None:
            reg_vec = np.sqrt(self.rho) * self.structure_matrix.T * vec[lf_vec_len:]
        else:
            reg_vec = np.sqrt(self.rho) * vec[lf_vec_len:]
        return vol_vec + reg_vec
def admm_total_variation_reconstruction(lfcal, lightfield, alpha,
                                        convergence_threshold, max_iterations,
                                        lambda_tv, lambda_lasso,
                                        initial_volume = None, debug = False,
                                        disable_gpu = False, gpu_id = 0):
    """Reconstruct a volume from a light field with ADMM + a total-variation prior.

    Solves  min_x  0.5*||A*x - b||^2 + lambda_tv*||F*x||_1  where A is the
    light field projection operator and F is a sparse finite-difference
    (TV) matrix, using the scaled-dual ADMM formulation with an adaptive
    penalty parameter rho.

    Arguments:
      lfcal                 -- calibration object; its psf_db (preferred) or
                               rayspread_db supplies the optical model and
                               the volume geometry (nx, ny, nz, ...).
      lightfield            -- observed LightField; flattened into the right
                               hand side vector b.
      alpha                 -- unused here; over-relaxation uses the
                               hard-coded ALPHA below.  # NOTE(review): confirm
      convergence_threshold -- absolute tolerance ABSTOL of the ADMM stop test.
      max_iterations        -- maximum number of outer ADMM iterations.
      lambda_tv             -- TV regularization weight; must be > 0.
      lambda_lasso          -- damping passed to the inner LSMR solves.
      initial_volume, debug -- currently unused.  # NOTE(review): confirm

    Returns the reconstructed volume as a float32 array of shape (ny, nx, nz).
    """
    # Prefer the wave optics (PSF) model over the geometric rayspread model.
    if lfcal.psf_db != None:
        db = lfcal.psf_db
    else:
        db = lfcal.rayspread_db

    ABSTOL = convergence_threshold  # 1e-4 seems to work well
    RELTOL = 1e-2;
    ALPHA = 1.5            # over-relaxation factor for the z-update
    LSQR_ITERATIONS = 2    # inner LSMR iterations per ADMM x-update
    ADMM_ITERATIONS = max_iterations

    # Penalty param hyperparameters
    rho = 0.2        # initial ADMM penalty parameter
    mu = 10.0        # primal/dual residual ratio that triggers rho adaptation
    tau_incr = 2.0   # factor by which rho grows
    tau_decr = 2.0   # factor by which rho shrinks

    from lflib.volume import LightFieldProjection
    lfproj = LightFieldProjection(lfcal.rayspread_db, lfcal.psf_db, disable_gpu = disable_gpu, gpu_id = gpu_id)
    # lfproj.set_premultiplier(lfcal.radiometric_correction)

    nu = db.nu
    nv = db.nv
    ns = db.ns
    nt = db.nt

    # Produce the right hand side
    im_subaperture = lightfield.asimage(representation = LightField.TILED_SUBAPERTURE)
    b = np.reshape(im_subaperture, (im_subaperture.shape[0]*im_subaperture.shape[1]))

    # Sparse finite-difference operator (3 stacked gradients -> 3*nvoxels rows).
    F = sparse_total_variation_matrix(db.nx, db.ny, db.nz)
    if lambda_tv <= 0.0:
        print 'Error: you must specify a non-zero regularization value (lambda) for total variation.'
        exit(1)

    # if lambda_lasso > 0.0:
    #     print '\t--> Stabilizing solution with LASSO structure matrix (i.e. the identity)...'
    #     from scipy.sparse import eye
    #     nvoxels = db.nx * db.ny * db.nz
    #     F = F + lambda_lasso/lambda_tv * eye(nvoxels, nvoxels, dtype=np.float32)

    # Create the linear operator
    nrays = db.ns*db.nu*db.nt*db.nv
    nvoxels = db.nx*db.ny*db.nz
    # Stacked operator [A; sqrt(rho)*F], so the x-update is one least squares
    # solve against the stacked RHS [b; sqrt(rho)*(z-u)].
    A_operator = AugmentedLightFieldOperator(lfproj, db, rho, structure_matrix = F)
    A = LinearOperator((nrays + 3*nvoxels, nvoxels),
                       matvec=A_operator.matvec,
                       rmatvec=A_operator.rmatvec,
                       dtype='float')

    print 'Calling ADMM/LSMR total variation solver...'
    from lflib.lsmr import lsmr

    # ADMM state: z is the auxiliary variable for F*x, u is the scaled dual.
    z = np.zeros(3*(nvoxels), dtype=np.float32)
    u = np.zeros(3*(nvoxels), dtype=np.float32)
    x = np.zeros((nvoxels), dtype=np.float32)
    for i in range(ADMM_ITERATIONS):
        tic = time.time()

        warm_start = True
        if warm_start:
            # Solve the X update with LSQR (with warm start)
            rhs = np.concatenate(( b, np.sqrt(rho)*(z-u) ), axis=0)
            # Solve for a correction dx about the current x rather than from scratch.
            c = rhs - A.matvec(x)
            result = lsmr(A, c, damp = lambda_lasso, maxiter = LSQR_ITERATIONS, show=False)
            dx = result[0]
            x = x + dx
        else:
            # Solve the X update with LSQR (with warm start)
            rhs = np.concatenate(( b, np.sqrt(rho)*(z-u) ), axis=0)
            result = lsmr(A, rhs, damp = lambda_lasso, maxiter = LSQR_ITERATIONS, show=False)
            x = result[0]

        # Enforce non-negativity constraint
        x[x<0]=0

        # Update z & u
        z_old = z
        x_hat = ALPHA * F*x + (1 - ALPHA) * z_old;       # over-relaxation
        z = l1_shrinkage( x_hat + u, lambda_tv/rho )     # proximal step for the l1 term
        u = u + x_hat - z

        # Compute primary and dual residuals
        r_norm = np.linalg.norm(F*x - z)
        s_norm = np.linalg.norm(-rho * F.T * ( z - z_old ))

        # Check for convergence
        eps_pri = np.sqrt(nvoxels)*ABSTOL + RELTOL*max(np.linalg.norm(F*x), np.linalg.norm(-z));
        eps_dual = np.sqrt(nvoxels)*ABSTOL + RELTOL*np.linalg.norm(rho*F.T*u);

        toc = time.time()
        print '\t--> [ ADMM total variation iteration %d, rho = %0.2f (%0.2f seconds) ] ' % (i, rho, toc-tic)
        print '\t Primal norm: %0.4g\t (eps_pri : %0.4g)' % (r_norm, eps_pri)
        print '\t Dual norm: %0.4g\t (eps_dual : %0.4g)' % (s_norm, eps_dual)

        # ---- DEBUG ----
        # NOTE(review): this extra forward projection runs every iteration and
        # roughly doubles the per-iteration cost; consider gating it on `debug`.
        A_operator_debug = LightFieldOperator(lfproj, db)
        error = A_operator_debug.matvec(x) - b
        nrays = db.ns*db.nt*db.nu*db.nv
        residual_norm = np.linalg.norm(error) / nrays
        print '\t Residual Norm: %0.4g ' % (residual_norm)
        # ---------------

        if r_norm < eps_pri and s_norm < eps_dual:
            print 'ADMM converged.'
            break;

        # Update the penalty parameter
        if r_norm > mu * s_norm:
            rho *= tau_incr
            u /= tau_incr  # The residual must be updated as well!
        elif s_norm > mu * r_norm:
            rho /= tau_decr
            u *= tau_decr  # The residual must be updated as well!
        # NOTE(review): this looks intended to push the adapted rho back into
        # the augmented operator, but it assigns an attribute named
        # `lambda_tv` -- confirm the attribute name against
        # AugmentedLightFieldOperator (its constructor takes `rho`).
        A_operator.lambda_tv = rho

    # DEBUG: Save out gradient info
    print "\t--> Saving gradient volume (for debugging)"
    # Squared gradient magnitude accumulated from the three TV components of z.
    grad_vec = np.power(z[0:nvoxels],2) + np.power(z[nvoxels:2*nvoxels],2) + np.power(z[2*nvoxels:],2)
    grad_vol = np.reshape(grad_vec, (db.ny, db.nx, db.nz))
    save_image('gradvol.tif', grad_vol, dtype=np.float32)

    vol = np.reshape(x, (db.ny, db.nx, db.nz))

    # Slight hack: zero out the outermost XY "shell" of pixels, since
    # these are often subject to radiometry artifacts.
    min_val = vol[db.supersample_factor:-db.supersample_factor,
                  db.supersample_factor:-db.supersample_factor, :].min()
    print '\t--> Replacing border values with min value: ', min_val
    vol[0:db.supersample_factor, :, :] = min_val
    vol[-db.supersample_factor:, :, :] = min_val
    vol[:, 0:db.supersample_factor, :] = min_val
    vol[:, -db.supersample_factor:, :] = min_val
    return vol.astype(np.float32)
# ------------------------------- ADMM HUBER SOLVER ----------------------------------
def huber_shrinkage(x, kappa):
    """Elementwise shrinkage operator used in the Huber-loss proximal step.

    Computes max(0, 1 - kappa/|x|) * x for each element: values with
    magnitude <= kappa are driven to zero, larger values are pulled toward
    zero by kappa while keeping their sign.
    """
    magnitude = np.abs(x)
    scale = np.maximum(0, 1.0 - kappa / magnitude)
    return scale * x
def admm_huber_reconstruction(lfcal, lightfield, alpha,
                              convergence_threshold, max_iterations,
                              regularization_lambda,
                              initial_volume = None, debug = False,
                              disable_gpu = False, gpu_id = 0):
    """Reconstruct a volume with ADMM using a Huber data-fidelity loss.

    The splitting is z = A*x - b, so the z-update applies the proximal
    operator of the Huber function (a blend of a scaled identity and the
    shrinkage operator, see `huber_shrinkage`), which makes the data term
    robust to outlier rays.

    Arguments:
      lfcal                 -- calibration; psf_db (preferred) or
                               rayspread_db supplies the optical model.
      lightfield            -- observed LightField (right hand side b).
      alpha                 -- over-relaxation factor used in the z-update.
      convergence_threshold -- absolute tolerance ABSTOL of the stop test.
      max_iterations        -- maximum number of outer ADMM iterations.
      regularization_lambda -- l2 damping applied inside the LSMR x-update.
      initial_volume, debug -- currently unused.  # NOTE(review): confirm

    Returns the reconstructed volume as a float32 array of shape (ny, nx, nz).
    """
    # Prefer the wave optics (PSF) model over the geometric rayspread model.
    if lfcal.psf_db != None:
        db = lfcal.psf_db
    else:
        db = lfcal.rayspread_db

    ABSTOL = convergence_threshold  # 1e-4 seems to work well
    RELTOL = 1e-2;
    # NOTE(review): ALPHA is defined but unused; the `alpha` argument is what
    # the relaxation below actually uses.
    ALPHA = 1.8
    LSQR_ITERATIONS = 2
    ADMM_ITERATIONS = max_iterations

    # Penalty param hyperparameters
    rho = 0.01       # initial ADMM penalty parameter
    mu = 10.0        # residual ratio threshold for adapting rho
    tau_incr = 2.0
    tau_decr = 2.0

    from lflib.volume import LightFieldProjection
    lfproj = LightFieldProjection(lfcal.rayspread_db, lfcal.psf_db, disable_gpu = disable_gpu, gpu_id = gpu_id)

    nu = db.nu
    nv = db.nv
    ns = db.ns
    nt = db.nt

    # Produce the right hand side
    im_subaperture = lightfield.asimage(representation = LightField.TILED_SUBAPERTURE)
    b = np.reshape(im_subaperture, (im_subaperture.shape[0]*im_subaperture.shape[1]))

    nrays = db.ns*db.nu*db.nt*db.nv
    nvoxels = db.nx*db.ny*db.nz
    A_operator = LightFieldOperator(lfproj, db)
    A = LinearOperator((nrays, nvoxels),
                       matvec=A_operator.matvec,
                       rmatvec=A_operator.rmatvec,
                       dtype='float')

    print 'Calling ADMM/LSQR huber/l1 solver...'
    from lflib.lsmr import lsmr

    # ADMM state: z approximates A*x - b, u is the scaled dual variable.
    z = np.zeros((nrays), dtype=np.float32)
    u = np.zeros((nrays), dtype=np.float32)
    x = np.zeros((nvoxels), dtype=np.float32)
    for i in range(ADMM_ITERATIONS):
        tic = time.time()

        # Update RHS
        v = b + z - u
        result = lsmr(A, v, damp = np.sqrt(2*regularization_lambda/rho),
                      maxiter = LSQR_ITERATIONS, show=False)
        x = result[0]

        # Update z with relaxation
        zold = z
        Ax = A_operator.matvec(x)
        Ax_hat = alpha * Ax + (1-alpha) * (zold + b)
        tmp = Ax_hat - b + u
        # Huber loss
        z = rho/(rho+1.0) * tmp + 1.0/(rho+1.0) * huber_shrinkage( tmp, 1.0+1.0/rho )
        # Enforce non-negativity constraint
        # z[z<0]=0
        u = u + Ax_hat - z - b

        # Compute primary and dual residuals
        r_norm = np.linalg.norm( Ax - z - b )
        s_norm = np.linalg.norm( -rho * A_operator.rmatvec( z - zold ) )

        # Check for convergence
        eps_pri = np.sqrt(nvoxels)*ABSTOL + RELTOL*max(np.linalg.norm(Ax),
                                                       np.linalg.norm(-z),
                                                       np.linalg.norm(b));
        eps_dual = np.sqrt(nvoxels)*ABSTOL + RELTOL*np.linalg.norm(rho*u);

        toc = time.time()
        print '\t--> [ ADMM huber/l1 iteration %d, rho = %0.2f (%0.2f seconds) ] ' % (i, rho, toc-tic)
        print '\t Primal norm: %0.4g\t (eps_pri : %0.4g)' % (r_norm, eps_pri)
        print '\t Dual norm: %0.4g\t (eps_dual : %0.4g)' % (s_norm, eps_dual)

        # ---- DEBUG ----
        # NOTE(review): extra forward projection every iteration, debug only.
        A_operator_debug = LightFieldOperator(lfproj, db)
        error = A_operator_debug.matvec(x) - b
        nrays = db.ns*db.nt*db.nu*db.nv
        residual_norm = np.linalg.norm(error) / nrays
        print '\t Residual Norm: %0.4g ' % (residual_norm)
        # ---------------

        if r_norm < eps_pri and s_norm < eps_dual:
            print 'ADMM converged.'
            break;

        # Update the penalty parameter
        if r_norm > mu * s_norm:
            rho *= tau_incr
            u /= tau_incr  # The residual must be updated as well!
        elif s_norm > mu * r_norm:
            rho /= tau_decr
            u *= tau_decr  # The residual must be updated as well!

    vol = np.reshape(x, (db.ny, db.nx, db.nz))
    return vol.astype(np.float32)
# ------------------------------- TVAL3 SOLVER ---------------------------------
def tval3_reconstruction(db, lightfield, alpha,
                         convergence_threshold, max_iterations,
                         lambda_tv,
                         initial_volume = None, debug = False,
                         disable_gpu = False, gpu_id = 0):
    """Reconstruct a volume by delegating to the third-party TVAL3 TV solver.

    NOTE(review): unlike the other solvers in this file this one takes the
    database `db` directly rather than an `lfcal` calibration object, and the
    `alpha`, `convergence_threshold`, `max_iterations`, `lambda_tv`,
    `initial_volume` and `debug` arguments are not forwarded to TVAL3 (its
    options are hard-coded below) -- confirm whether that is intentional.

    Returns the TVAL3 solution as a float32 array of shape (ny, nx, nz).
    """
    from lflib.volume import LightFieldProjection
    lfproj = LightFieldProjection(db, disable_gpu = disable_gpu, gpu_id = gpu_id)

    nu = db.nu
    nv = db.nv
    ns = db.ns
    nt = db.nt

    # Produce the right hand side
    im_subaperture = lightfield.asimage(representation = LightField.TILED_SUBAPERTURE)
    b = np.reshape(im_subaperture, (im_subaperture.shape[0]*im_subaperture.shape[1]))

    # Create the linear operator
    nrays = db.ns*db.nu*db.nt*db.nv
    nvoxels = db.nx*db.ny*db.nz
    A_operator = LightFieldOperator(lfproj, db)
    A = LinearOperator((nrays, nvoxels),
                       matvec=A_operator.matvec,
                       rmatvec=A_operator.rmatvec,
                       dtype='float')

    print 'Calling TVAL3 total variation solver...'

    ## Run TVAL3
    from PyTVAL3 import TVAL3
    # Hard-coded TVAL3 options (see the TVAL3 user guide for meanings).
    opts = {}
    opts['mu'] = np.power(2,8);
    opts['beta'] = np.power(2,5);
    opts['tol'] = 1E-3;
    opts['maxit'] = 300;
    opts['TVnorm'] = 1;
    opts['nonneg'] = True;
    opts['TVL2'] = True;
    opts['disp'] = False;
    t = time.time();
    (U, out) = TVAL3(A,b,(db.ny,db.nx,db.nz),opts);
    print U.max()
    print U.min()
    # NOTE(review): unconditional debug output written to the working
    # directory; consider gating this on `debug`.
    save_image('test.tif', U, dtype=np.float32)
    t = time.time() - t;

    # toc = time.time()
    # print '\t--> [ ADMM total variation iteration %d, rho = %0.2f (%0.2f seconds) ] ' % (i, rho, toc-tic)
    # print '\t Primal norm: %0.4g\t (eps_pri : %0.4g)' % (r_norm, eps_pri)
    # print '\t Dual norm: %0.4g\t (eps_dual : %0.4g)' % (s_norm, eps_dual)
    # # ---- DEBUG ----
    # A_operator_debug = LightFieldOperator(lfproj, db)
    # error = A_operator_debug.matvec(x) - b
    # nrays = db.ns*db.nt*db.nu*db.nv
    # residual_norm = np.linalg.norm(error) / nrays
    # print '\t Residual Norm: %0.4g ' % (residual_norm)
    # # ---------------
    return U.astype(np.float32)
# ------------------------------- LSQR SOLVER ----------------------------------
def lsqr_reconstruction(lfcal, lightfield, alpha,
                        convergence_threshold, max_iterations,
                        regularization_lambda,
                        initial_volume = None, debug = False,
                        disable_gpu = False, gpu_id = 0):
    """Reconstruct a volume with damped least squares via SciPy's LSQR.

    Solves  min_x ||A*x - b||^2 + regularization_lambda^2 * ||x||^2  where A
    is the (radiometrically premultiplied) light field projection operator.

    Arguments:
      lfcal                 -- calibration; psf_db (preferred) or
                               rayspread_db supplies the optical model, and
                               radiometric_correction the premultiplier.
      lightfield            -- observed LightField (right hand side b).
      max_iterations        -- LSQR iteration limit (iter_lim).
      regularization_lambda -- LSQR damping term.
      alpha, convergence_threshold, initial_volume, debug -- currently
                               unused.  # NOTE(review): confirm

    Returns the reconstructed volume as a float32 array of shape (ny, nx, nz).
    """
    # Prefer the wave optics (PSF) model over the geometric rayspread model.
    if lfcal.psf_db != None:
        db = lfcal.psf_db
    else:
        db = lfcal.rayspread_db

    from lflib.volume import LightFieldProjection
    lfproj = LightFieldProjection(lfcal.rayspread_db, lfcal.psf_db, disable_gpu = disable_gpu, gpu_id = gpu_id)
    lfproj.set_premultiplier(lfcal.radiometric_correction)

    # # DEBUG - MANUAL RADIOMETRY FOR TRYING VARIOUS STUFF OUT
    # lightfield_im = lightfield.asimage(representation = LightField.TILED_LENSLET)
    # vol_ones = db.ones_volume()
    # lf_ones = lfproj.project(vol_ones)
    # vol_weights = lfproj.backproject(lf_ones)
    # ideal_lf = lf_ones.asimage(representation = LightField.TILED_LENSLET)
    # rectified_radiometry = lfcal.rectified_radiometry
    # # # radiometric_correction = (rectified_radiometry) / (ideal_lf + 1e-16)
    # radiometric_correction = 1.0 / (rectified_radiometry)
    # radiometric_correction[np.nonzero(rectified_radiometry == 0)] = 0.0 # Prevent NaNs!
    # radiometric_correction[np.nonzero(ideal_lf == 0)] = 0.0 # Prevent NaNs!
    # lightfield_im *= radiometric_correction
    # lightfield = LightField(lightfield_im, db.nu, db.nv, db.ns, db.nt,
    #                         representation = LightField.TILED_LENSLET)
    # # # /DEBUG

    # Generate the right hand side vector
    im_subaperture = lightfield.asimage(representation = LightField.TILED_SUBAPERTURE)
    b = np.reshape(im_subaperture, (im_subaperture.shape[0]*im_subaperture.shape[1]))

    nrays = db.ns*db.nu*db.nt*db.nv
    nvoxels = db.nx*db.ny*db.nz
    A_operator = LightFieldOperator(lfproj, db)
    A = A_operator.as_linear_operator(nrays, nvoxels)

    print 'Calling LSQR solver...'
    from scipy.sparse.linalg import lsqr
    result = lsqr(A, b, damp = regularization_lambda, iter_lim = max_iterations, show=True)
    vol = np.reshape(result[0], (db.ny, db.nx, db.nz))

    # Slight hack: zero out the outermost XY "shell" of pixels, since
    # these are often subject to radiometry artifacts.
    min_val = vol[db.supersample_factor:-db.supersample_factor,
                  db.supersample_factor:-db.supersample_factor, :].min()
    print '\t--> Replacing border values with min value: ', min_val
    vol[0:db.supersample_factor, :, :] = min_val
    vol[-db.supersample_factor:, :, :] = min_val
    vol[:, 0:db.supersample_factor, :] = min_val
    vol[:, -db.supersample_factor:, :] = min_val
    return vol.astype(np.float32)
# ----------------------------------------------------------------------------------------
# SIRT ITERATIVE DECONVOLUTION
# ----------------------------------------------------------------------------------------
def sirt_reconstruction(lfcal, lightfield, alpha,
                        convergence_threshold, max_iterations,
                        regularization_lambda,
                        debug = False,
                        long_object = False,
                        disable_gpu = False, gpu_id = 0,save_errors=False,
                        debug_path = 'sirt_debug'):
    """Simultaneous Iterative Reconstruction Technique (SIRT) deconvolution.

    Iteratively refines the volume estimate x by forward projecting it
    through the optical model A, comparing against the observed light field
    b, and back-projecting the weight-normalized error:

        x <- x + alpha * ( A^T((b - A x) / b_weights) / x_weights )

    with a non-negativity clamp after each update.  The weights are SIRT's
    row/column sums, built by projecting all-ones vectors through A and A^T.

    Arguments:
      lfcal                 -- calibration; psf_db (preferred) or
                               rayspread_db supplies the optical model and
                               the radiometric premultiplier.
      lightfield            -- observed LightField (right hand side b).
      alpha                 -- step size of each SIRT update.
      convergence_threshold -- stop when the normalized update norm drops
                               below this (checked from iteration 1 on).
      max_iterations        -- maximum number of SIRT iterations.
      regularization_lambda -- weight of the quadratic penalty applied in
                               the update (0 disables it).
      save_errors           -- if True, the per-iteration error vectors are
                               written to "iteration_errors.npz" at the end.
      debug, long_object, debug_path -- currently unused here.
                               # NOTE(review): confirm; some debug output
                               # below runs unconditionally.

    Returns the reconstructed volume as a float32 array of shape (ny, nx, nz).
    """
    # Prefer wave optics model over geometric optics model
    if lfcal.psf_db != None:
        db = lfcal.psf_db
    else:
        db = lfcal.rayspread_db

    from lflib.volume import LightFieldProjection
    lfproj = LightFieldProjection(lfcal.rayspread_db, lfcal.psf_db, disable_gpu = disable_gpu, gpu_id = gpu_id)
    lfproj.set_premultiplier(lfcal.radiometric_correction)

    # DEBUG - MANUAL RADIOMETRY FOR TRYING VARIOUS STUFF OUT
    #lightfield_im = lightfield.asimage(representation = LightField.TILED_LENSLET)
    #save_image("lambda.tif", lightfield_im, dtype=np.float32);
    #lightfield_im /= 10
    #lightfield = LightField(lightfield_im, db.nu, db.nv, db.ns, db.nt, representation = LightField.TILED_LENSLET)
    # vol_ones = db.ones_volume()
    # lf_ones = lfproj.project(vol_ones)
    # vol_weights = lfproj.backproject(lf_ones)
    # ideal_lf = lf_ones.asimage(representation = LightField.TILED_LENSLET)
    # # save_image("ideal_im.jpg", ideal_lf / ideal_lf.max()*255, dtype=np.uint8)
    # rectified_radiometry = lfcal.rectified_radiometry
    # self.radiometric_correction = self.rectified_radiometry / (self.ideal_lf + 1e-16)
    # /DEBUG

    # Create a linear operator for the optical model A. This model
    # allows us to compute A or A.T by calling its matvec() and
    # rmatvec() methods.
    nrays = db.ns*db.nu*db.nt*db.nv
    nvoxels = db.nx*db.ny*db.nz
    A_operator = LightFieldOperator(lfproj, db)
    A = A_operator.as_linear_operator(nrays, nvoxels)

    # Generate the b vector, which contains the observed lightfield;
    # and the initial volume x containing all zeros.
    im_subaperture = lightfield.asimage(representation = LightField.TILED_SUBAPERTURE)
    b = np.reshape(im_subaperture, np.prod(im_subaperture.shape))
    x = np.zeros((nvoxels), dtype=np.float32)

    # Anscombe params
    # NOTE(review): ANSCOMBE is hard-coded to 0, so the variance
    # stabilization branches below are effectively disabled -- yet
    # inverse_anscombe is still applied in the debug writes inside the loop.
    ANSCOMBE = 0
    camera_gain = 2.0;
    readnoise_mean = 0.0;
    readnoise_sigma = 2.51;
    if ANSCOMBE == 1:
        print '\t--> Stabilizing variance using the anscombe transform'
        b = anscombe(b)
    elif ANSCOMBE == 2:
        print '\t--> Stabilizing variance using the generalized anscombe transform'
        b = generalized_anscombe(b, readnoise_mean, readnoise_sigma, camera_gain)
    else:
        print '\t--> Variance stabilization disabled.'

    # Model photon shot noise by setting the noise variance at every
    # sensor pixel to be equal to that pixels intenstity plus a
    # stationary read noise term. In essence, this approximates the
    # photon shot noise + read noise as a non-stationary gaussian
    # distribution. See this paper for more on this approximation:
    #
    # L. Mugnier et. al. MISTAL: a myopic edg-preserving image
    # restoration method, and application to astronomical
    # adaptive-optics-corrected long-exposure images.
    #
    # The Neo has 2e- RMS read noise, but I measured it to be 2.81 DN so we use that here.
    readnoise_variance = 7.84;
    #photon_shotnoise_variance = b

    # DEBUG: Experimental code for low SNR zebrafish images
    #
    # from lflib.imageio import load_image
    # variance_lf = LightField(load_image("lambda.tif"), db.nu, db.nv, db.ns, db.nt,
    #                          representation = LightField.TILED_LENSLET)
    # variance_photon = variance_lf.asimage(LightField.TILED_SUBAPERTURE)
    # photon_shotnoise_variance = np.reshape(variance_photon, np.prod(im_subaperture.shape))
    # A_operator.left_preconditioner = 1.0/np.square(photon_shotnoise_variance + readnoise_variance)
    # A = A_operator.as_linear_operator(nrays, nvoxels)
    # b *= 1.0/np.square(photon_shotnoise_variance + readnoise_variance)
    # preconditioner = 1.0/np.square(b + readnoise_variance)
    # A_operator.left_preconditioner = preconditioner
    # A = A_operator.as_linear_operator(nrays, nvoxels)
    # b *= preconditioner

    # Create the SIRT weight volume and light field. These are
    # created by projecting a volume containing all ones (to create
    # the weight lightfield), and then back-projecting a light field
    # with all ones (to create the weight volume).
    #
    # x_weights = A.T * b_ones; b_weights = A * x_ones
    import time
    tic = time.time()
    b_weights = A.matvec(np.ones((nvoxels), dtype=np.float32))
    print '\t--> Time for one forward projection: %0.2f seconds.' % (time.time()-tic)
    tic = time.time()
    x_weights = A.rmatvec(np.ones((nrays), dtype=np.float32))
    print '\t Time for one back projection: %0.2f seconds.' % (time.time()-tic)

    # --------------------------------------------------------------------
    # Make sure that any zero valued weights are set to nonzero so
    # that they don't lead to division by zero below. We then
    # normalize the starting volume using the volume weights.
    min_bweight = b_weights[np.nonzero(b_weights != 0.0)].min()
    min_xweight = x_weights[np.nonzero(x_weights != 0.0)].min()
    b_weights[np.nonzero(b_weights < min_bweight)] = min_bweight;
    x_weights[np.nonzero(x_weights < min_xweight)] = min_xweight;

    iteration_error = []
    for i in range(max_iterations):
        tic = time.time()
        # In each iteration, forward and backproject error from all views at once, then update the volume
        #
        # STEP 1: forward projection of volume to create sub-aperture images.
        b_hat = A.matvec(x)

        # DEBUGGING
        # if i == 1:
        #     b_debug = np.reshape(b_hat, (db.nt*db.nv, db.ns*db.nu))
        #     save_image("lf_" + str(i) + ".tif", b_debug);

        # STEP 2: Compute error between computed and observed sub-aperture images
        error = b - b_hat
        # After the first iteration, ignore rays the model cannot explain at
        # all (zero forward projection) so they do not dominate the update.
        if i >= 1:
            error[np.nonzero(b_hat == 0.0)] = 0.0

        # Debug: save error images
        #
        #if i == max_iterations-1:
        # NOTE(review): the two save_image() calls below run unconditionally
        # on every iteration (writing files into the working directory) and
        # apply inverse_anscombe even when ANSCOMBE == 0; they look like
        # debug leftovers that should be gated on `debug`.
        error_test = np.reshape(inverse_anscombe(error), (db.nv*db.nt, db.nu*db.ns))
        save_image("error_" + str(i) + ".tif", error_test, dtype=np.float32);
        if i == 14:
            print 'DEBUG TIME'
            save_image("bhat_b_" + str(i) + ".tif", np.reshape(inverse_anscombe(b_hat),
                                                               (db.nt*db.nv, db.ns*db.nu)), dtype=np.float32);

        # collect the unweighted error in light field space
        if save_errors:
            iteration_error.append( error )

        # the reweighted error
        reweighted_error = error / b_weights

        # STEP 3: back-project error onto the volume
        if ANSCOMBE == 1:
            error_backprojected = A.rmatvec(inverse_anscombe(reweighted_error))
        elif ANSCOMBE == 2:
            error_backprojected = A.rmatvec(inverse_generalized_anscombe(reweighted_error,
                                                                         readnoise_mean,
                                                                         readnoise_sigma,
                                                                         camera_gain))
        else:
            error_backprojected = A.rmatvec(reweighted_error)

        # Graph Laplacian Regularization
        #
        # WARNING: This code has not been carefully checked and may not work! -broxton
        # if regularization_lambda > 0.0:
        #     vol = np.reshape(x, (db.ny, db.nx, db.nz))
        #     lapvol = np.copy(vol)
        #     lapvol[0:ny-1,:,:] -= 1/6.0 * vol[1:ny ,:,:]
        #     lapvol[1:ny ,:,:] -= 1/6.0 * vol[0:ny-1,:,:]
        #     lapvol[:,0:nx-1,:] -= 1/6.0 * vol[:,1:nx ,:]
        #     lapvol[:,1:nx ,:] -= 1/6.0 * vol[:,0:nx-1,:]
        #     lapvol[:,:,0:nz-1] -= 1/6.0 * vol[:,:,1:nz ]
        #     lapvol[:,:,1:nz ] -= 1/6.0 * vol[:,:,0:nz-1]
        #     # Zero out laplacian around the edges.
        #     lapvol[0,:,:] = 0.0;
        #     lapvol[:,0,:] = 0.0;
        #     lapvol[:,:,0] = 0.0;
        #     lapvol[ny-1,:,:] = 0.0;
        #     lapvol[:,nx-1,:] = 0.0;
        #     lapvol[:,:,nz-1] = 0.0;
        #     lapvol_vec = np.reshape(lapvol, (db.ny*db.nx*db.nz))
        #     # Apply the reweighting and step size.
        #     x_update = (error_backprojected / x_weights - regularization_lambda * lapvol_vec)
        # else:
        #     x_update = (error_backprojected / x_weights)

        # L2 Penalty (Tihkonov) Regularization
        #
        # NOTE(review): the penalty term applied here is lambda * x * x
        # (elementwise x^2) rather than the classic Tikhonov lambda * x --
        # confirm this is intentional.
        if regularization_lambda > 0.0:
            x_update = (error_backprojected / x_weights - regularization_lambda * x * x)
        else:
            x_update = error_backprojected / x_weights

        # Apply the update
        x += alpha * x_update

        # Debugging
        # if i == 1:
        #     vol_debug = np.reshape(x, (db.ny, db.nx, db.nz))
        #     save_image("vol_" + str(i) + ".tif", vol_debug)

        # Enforce non-negativity constraint
        x[x<0]=0

        # CHECK FOR CONVERGENCE
        #
        # normalize MSE using input LF
        nrays = db.ns*db.nt*db.nu*db.nv
        residual_norm = np.linalg.norm(error) / nrays
        # normalize MSE using input LF
        nvoxels = db.nx*db.ny*db.nz
        update_norm = np.linalg.norm(alpha * x_update) / nvoxels
        toc = time.time()
        print '\t--> [ SIRT Iteration %d (%0.2f seconds) ] ' % (i, toc-tic)
        print '\t Residual Norm: %0.4g' % (residual_norm)
        print '\t Update Norm: %0.4g (tol = %0.2e) ' % (update_norm, convergence_threshold)
        # check if convergence criteria met
        if i > 0 and update_norm < convergence_threshold:
            break

    # save out iteration errors
    if save_errors:
        np.savez( "iteration_errors", iteration_error )

    vol = np.reshape(x, (db.ny, db.nx, db.nz)) # Note that the default order for np.reshape in 'C' (row-major)

    # Slight hack: zero out the outermost XY "shell" of pixels, since
    # these are often subject to radiometry artifacts.
    min_val = vol[db.supersample_factor:-db.supersample_factor,
                  db.supersample_factor:-db.supersample_factor, :].min()
    print '\t--> Replacing border values with min value: ', min_val
    vol[0:db.supersample_factor, :, :] = min_val
    vol[-db.supersample_factor:, :, :] = min_val
    vol[:, 0:db.supersample_factor, :] = min_val
    vol[:, -db.supersample_factor:, :] = min_val
    return vol.astype(np.float32)
# ----------------------------------------------------------------------------------------
# AMP ITERATIVE DECONVOLUTION
# ----------------------------------------------------------------------------------------
def amp_reconstruction(lfcal, lightfield, alpha,
                       convergence_threshold, max_iterations,
                       regularization_lambda,
                       debug = False,
                       long_object = False,
                       disable_gpu = False, gpu_id = 0,save_errors=False,
                       debug_path = 'sart_debug',
                       wavelet_smoothing = True):
    """Approximate Message Passing (AMP)-style deconvolution (EXPERIMENTAL).

    NOTE(review): this function appears unfinished and cannot run as
    written.  Several names are used but never defined in this scope:
      * `x` is read in the first loop iteration before any initialization;
      * `delta`, `last_estimate` and `last_error` (the AMP Onsager
        correction state) are never set;
      * `reweighted_error` is back-projected, but only `adjusted_error` is
        computed -- these look like they should be the same variable;
      * `x_weights` (non-wavelet branch) and `wavelet_pos_thresh` (wavelet
        branch) do not exist here.
    Treat this as a work in progress; see sirt_reconstruction for the
    structure it was evidently copied from.
    """
    # import wavelet functions if needed
    if wavelet_smoothing:
        from lflib.wavelet3d import undecimated_3d, inverse_undecimated_3d, modify_wavelet_coefs, wavelet_threshold

    # Prefer wave optics model over geometric optics model
    if lfcal.psf_db != None:
        db = lfcal.psf_db
    else:
        db = lfcal.rayspread_db

    from lflib.volume import LightFieldProjection
    lfproj = LightFieldProjection(lfcal.rayspread_db, lfcal.psf_db, disable_gpu = disable_gpu, gpu_id = gpu_id)
    # NOTE(review): other solvers in this file call set_premultiplier();
    # confirm set_radiometry_correction is the intended API here.
    lfproj.set_radiometry_correction(lfcal.radiometric_correction)

    nu = db.nu
    nv = db.nv
    ns = db.ns
    nt = db.nt

    # Generate the b vector, which contains the observed lightfield
    im_subaperture = lightfield.asimage(representation = LightField.TILED_SUBAPERTURE)
    b = np.reshape(im_subaperture, (im_subaperture.shape[0]*im_subaperture.shape[1]))

    # Create a linear operator for the optical model A. This model
    # allows us to compute A or A.T by calling its matvec() and
    # rmatvec() methods.
    nrays = db.ns*db.nu*db.nt*db.nv
    nvoxels = db.nx*db.ny*db.nz
    A_operator = LightFieldOperator(lfproj, db)
    A = A_operator.as_linear_operator(nrays, nvoxels)

    # Model photon shot noise by setting the noise variance at every
    # sensor pixel to be equal to that pixels intenstity. This should
    # be true if photon shot noise is the dominating noise term.
    #
    EPS = 1e-1 # Avoid dividing by zero! This value works well on fish volumes, but maybe needs tuning?
    # Left-precondition by the inverse noise std-dev, then rescale b to match.
    A_operator.left_preconditioner = 1.0/np.sqrt(b+EPS)
    A = A_operator.as_linear_operator(nrays, nvoxels)
    b *= 1.0/np.sqrt(b+EPS)
    # --------------------------------------------------------------------

    if save_errors:
        iteration_error = []
    for i in range(max_iterations):
        tic = time.time()
        # In each iteration, forward and backproject error from all views at once, then update the volume
        #
        # STEP 1: forward projection of volume to create sub-aperture images.
        if wavelet_smoothing:
            # A \Phi x
            # NOTE(review): `x` has not been initialized before this read.
            b_hat = A.matvec(inverse_undecimated_3d(x))
        else:
            # Ax
            b_hat = A.matvec(x)

        # DEBUGGING
        # if i == 1:
        #     b_debug = np.reshape(b_hat, (db.nt*db.nv, db.ns*db.nu))
        #     save_image("lf_" + str(i) + ".tif", b_debug);

        # STEP 2: Compute error between computed and observed sub-aperture images
        error = b - b_hat

        # collect the unweighted error in light field space
        if save_errors:
            iteration_error.append( error )

        # add AMP adjustment
        # NOTE(review): `delta`, `last_estimate` and `last_error` are never
        # defined, and `adjusted_error` is never consumed below.
        adjusted_error = error + (1.0/delta)*np.sum(last_estimate)*last_error

        # STEP 3: back-project error onto the volume or wavelet coefficient space
        # NOTE(review): `reweighted_error` is undefined -- probably should be
        # `adjusted_error`.
        if wavelet_smoothing:
            error_backprojected = undecimated_3d( A.rmatvec(reweighted_error) )
        else:
            error_backprojected = A.rmatvec(reweighted_error)

        # Apply the update -- should we be using x for wavelet coefs and volume interchangably,
        # or should we use a for the wavelet coefs if wavelet_smoothing = True?
        if wavelet_smoothing:
            x = modify_wavelet_coefs(x, error_backprojected, scale = alpha) # modify wavelets in R
            x = wavelet_threshold(x) # threshold modified coefs
        else:
            # NOTE(review): `x_weights` is undefined in this function.
            x_update = (error_backprojected / x_weights)
            x += alpha * x_update

        # Debugging
        # if i == 1:
        #     vol_debug = np.reshape(x, (db.ny, db.nx, db.nz))
        #     save_image("vol_" + str(i) + ".tif", vol_debug)

        # Enforce non-negativity constraint
        if wavelet_smoothing:
            # NOTE(review): `wavelet_pos_thresh` is not imported or defined.
            x = wavelet_pos_thresh(x)
        else:
            x[x<0]=0

        # CHECK FOR CONVERGENCE
        #
        # normalize MSE using input LF
        nrays = db.ns*db.nt*db.nu*db.nv
        residual_norm = np.linalg.norm(error) / nrays
        # normalize MSE using input LF
        nvoxels = db.nx*db.ny*db.nz
        # NOTE(review): in the wavelet branch `x_update` is never assigned.
        update_norm = np.linalg.norm(alpha * x_update) / nvoxels
        toc = time.time()
        print '\t--> [ SIRT Iteration %d (%0.2f seconds) ] ' % (i, toc-tic)
        print '\t Residual Norm: %0.4g' % (residual_norm)
        print '\t Update Norm: %0.4g (tol = %0.2e) ' % (update_norm, convergence_threshold)
        # check if convergence criteria met
        if i > 0 and update_norm < convergence_threshold:
            break

    # save out iteration errors
    if save_errors:
        np.savez( "iteration_errors", iteration_error )

    vol = np.reshape(x, (db.ny, db.nx, db.nz))

    # Slight hack: zero out the outermost XY "shell" of pixels, since
    # these are often subject to radiometry artifacts.
    vol[0:db.supersample_factor, :, :] = 0.0
    vol[-db.supersample_factor:, :, :] = 0.0
    vol[:, 0:db.supersample_factor, :] = 0.0
    vol[:, -db.supersample_factor:, :] = 0.0
    return vol.astype(np.float32)
| {
"content_hash": "dc0b3bbbfd68edb981ad42ede9c5fef7",
"timestamp": "",
"source": "github",
"line_count": 1197,
"max_line_length": 173,
"avg_line_length": 40.12698412698413,
"alnum_prop": 0.5682461692205196,
"repo_name": "sophie63/FlyLFM",
"id": "881ddec7d56a9ff38595addcef1cc469b759503a",
"size": "48145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stanford_lfanalyze_v0.4/lflib/iterative_deconvolution.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "1662434"
},
{
"name": "C++",
"bytes": "29242"
},
{
"name": "HTML",
"bytes": "25263"
},
{
"name": "Jupyter Notebook",
"bytes": "2841158885"
},
{
"name": "Limbo",
"bytes": "108"
},
{
"name": "M",
"bytes": "22084"
},
{
"name": "MATLAB",
"bytes": "403956"
},
{
"name": "Python",
"bytes": "652037"
},
{
"name": "Shell",
"bytes": "3933"
}
],
"symlink_target": ""
} |
import os
import unittest
from tempfile import TemporaryDirectory
import mock
import unicodecsv as csv
from mock import MagicMock
from airflow.providers.microsoft.azure.operators.oracle_to_azure_data_lake_transfer import (
OracleToAzureDataLakeTransferOperator,
)
class TestOracleToAzureDataLakeTransfer(unittest.TestCase):
    """Unit tests for OracleToAzureDataLakeTransferOperator."""

    # Import path of the module under test; used to patch its hook classes.
    mock_module_path = 'airflow.providers.microsoft.azure.operators.oracle_to_azure_data_lake_transfer'

    def test_write_temp_file(self):
        """_write_temp_file dumps cursor rows to a delimited CSV whose header
        row comes from the cursor description."""
        task_id = "some_test_id"
        sql = "some_sql"
        sql_params = {':p_data': "2018-01-01"}
        oracle_conn_id = "oracle_conn_id"
        filename = "some_filename"
        azure_data_lake_conn_id = 'azure_data_lake_conn_id'
        azure_data_lake_path = 'azure_data_lake_path'
        delimiter = '|'
        encoding = 'utf-8'
        cursor_description = [
            ('id', "<class 'cx_Oracle.NUMBER'>", 39, None, 38, 0, 0),
            ('description', "<class 'cx_Oracle.STRING'>", 60, 240, None, None, 1)
        ]
        cursor_rows = [[1, 'description 1'], [2, 'description 2']]

        # Fake cx_Oracle cursor: exposes `description` (an attribute, not a
        # method) and yields the rows on iteration.
        mock_cursor = MagicMock()
        mock_cursor.description = cursor_description
        mock_cursor.__iter__.return_value = cursor_rows

        op = OracleToAzureDataLakeTransferOperator(
            task_id=task_id,
            filename=filename,
            oracle_conn_id=oracle_conn_id,
            sql=sql,
            sql_params=sql_params,
            azure_data_lake_conn_id=azure_data_lake_conn_id,
            azure_data_lake_path=azure_data_lake_path,
            delimiter=delimiter,
            encoding=encoding)

        with TemporaryDirectory(prefix='airflow_oracle_to_azure_op_') as temp:
            temp_file_path = os.path.join(temp, filename)
            op._write_temp_file(mock_cursor, temp_file_path)
            self.assertTrue(os.path.exists(temp_file_path))
            with open(temp_file_path, 'rb') as csvfile:
                temp_file = csv.reader(csvfile, delimiter=delimiter, encoding=encoding)
                for rownum, row in enumerate(temp_file):
                    if rownum == 0:
                        # Header row mirrors the cursor description names.
                        self.assertEqual(row[0], 'id')
                        self.assertEqual(row[1], 'description')
                    else:
                        self.assertEqual(row[0], str(cursor_rows[rownum - 1][0]))
                        self.assertEqual(row[1], cursor_rows[rownum - 1][1])

    @mock.patch(mock_module_path + '.OracleHook',
                autospec=True)
    @mock.patch(mock_module_path + '.AzureDataLakeHook',
                autospec=True)
    def test_execute(self, mock_data_lake_hook, mock_oracle_hook):
        """execute() instantiates both hooks with the configured connection
        ids.  Decorators apply bottom-up, so the first mock argument is the
        AzureDataLakeHook patch and the second is the OracleHook patch."""
        task_id = "some_test_id"
        sql = "some_sql"
        sql_params = {':p_data': "2018-01-01"}
        oracle_conn_id = "oracle_conn_id"
        filename = "some_filename"
        azure_data_lake_conn_id = 'azure_data_lake_conn_id'
        azure_data_lake_path = 'azure_data_lake_path'
        delimiter = '|'
        encoding = 'latin-1'
        cursor_description = [
            ('id', "<class 'cx_Oracle.NUMBER'>", 39, None, 38, 0, 0),
            ('description', "<class 'cx_Oracle.STRING'>", 60, 240, None, None, 1)
        ]
        cursor_rows = [[1, 'description 1'], [2, 'description 2']]

        # BUG FIX: the hooks are used via their *instances*, so the mocks
        # must be wired through `.return_value` chains, and `description` is
        # a plain attribute on a cx_Oracle cursor.  The previous
        # `mock_x.attr().return_value = ...` form configured mock objects the
        # operator never touches, so the cursor fake never reached execute().
        cursor_mock = MagicMock()
        cursor_mock.description = cursor_description
        cursor_mock.__iter__.return_value = cursor_rows
        mock_oracle_conn = MagicMock()
        mock_oracle_conn.cursor.return_value = cursor_mock
        mock_oracle_hook.return_value.get_conn.return_value = mock_oracle_conn

        op = OracleToAzureDataLakeTransferOperator(
            task_id=task_id,
            filename=filename,
            oracle_conn_id=oracle_conn_id,
            sql=sql,
            sql_params=sql_params,
            azure_data_lake_conn_id=azure_data_lake_conn_id,
            azure_data_lake_path=azure_data_lake_path,
            delimiter=delimiter,
            encoding=encoding)

        op.execute(None)

        mock_oracle_hook.assert_called_once_with(oracle_conn_id=oracle_conn_id)
        mock_data_lake_hook.assert_called_once_with(
            azure_data_lake_conn_id=azure_data_lake_conn_id)
| {
"content_hash": "41222f780b758a71c6c91fad183d4b55",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 103,
"avg_line_length": 39.52336448598131,
"alnum_prop": 0.5845353511468432,
"repo_name": "wooga/airflow",
"id": "06f6a8c1dc1216866bb001d32780d834c882707d",
"size": "5017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/microsoft/azure/operators/test_oracle_to_azure_data_lake_transfer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4084"
},
{
"name": "HTML",
"bytes": "128446"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5879650"
},
{
"name": "Shell",
"bytes": "41820"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import numpy as np
from fylearn import garules
from sklearn.datasets import load_iris
import pytest
def test_classifier():
    """Fit the GA rule classifier on two trivially separable points and
    check that nearby queries are assigned to the matching class."""
    features = np.array([
        [1, 2, 4],
        [2, 4, 8]
    ])
    labels = np.array([0, 1])

    clf = garules.MultimodalEvolutionaryClassifier(n_iterations=100)
    clf.fit(features, labels)

    # Queries close to each training point must recover that point's label.
    assert clf.predict([[0.9, 1.7, 4.5]]) == [0]
    assert clf.predict([[2.1, 3.9, 7.8]]) == [1]
def test_classifier_iris():
    """Cross-validate the GA rule classifier on the iris dataset.

    With a fixed random_state the mean 10-fold accuracy is deterministic,
    so it can be pinned (approximately) to 0.93.
    """
    dataset = load_iris()

    from sklearn.preprocessing import MinMaxScaler
    scaled = MinMaxScaler().fit_transform(dataset.data)

    clf = garules.MultimodalEvolutionaryClassifier(n_iterations=100, random_state=1)

    from sklearn.model_selection import cross_val_score
    mean_score = np.mean(cross_val_score(clf, scaled, dataset.target, cv=10))
    # using the same random state expect same
    assert mean_score == pytest.approx(0.93, 0.01)
# def test_compare_diabetes():
# import os
# csv_file = os.path.join(os.path.dirname(__file__), "diabetes.csv")
# data = np.genfromtxt(csv_file, dtype=float, delimiter=',', names=True)
# X = np.array([data["preg"], data["plas"], data["pres"], data["skin"],
# data["insu"], data["mass"], data["pedi"], data["age"]]).T
# y = data["class"]
# from sklearn.preprocessing import MinMaxScaler
# X = MinMaxScaler().fit_transform(X)
# l = garules.MultimodalEvolutionaryClassifier(n_iterations=100)
# from sklearn import cross_validation
# scores = cross_validation.cross_val_score(l, X, y, cv=10)
# mean = np.mean(scores)
# print "mean", mean
# assert_true(0.68 < mean)
# from sklearn.ensemble import BaggingClassifier
# l = BaggingClassifier(garules.MultimodalEvolutionaryClassifier(n_iterations=100))
# scores = cross_validation.cross_val_score(l, X, y, cv=10)
# mean = np.mean(scores)
# print "mean", mean
# assert_true(0.80 < mean)
| {
"content_hash": "2d76d59990d02eeb512e8ddbfb178fb5",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 87,
"avg_line_length": 24.432098765432098,
"alnum_prop": 0.6260737746336533,
"repo_name": "sorend/fylearn",
"id": "600c71cd11bfe668a22719faa884a4b25cd3ef9b",
"size": "1979",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_garules.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "138621"
}
],
"symlink_target": ""
} |
"""
Common work items
"""
| {
"content_hash": "4e8ae6c638ab5bec5b68c27e99b68149",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 17,
"avg_line_length": 8.666666666666666,
"alnum_prop": 0.5769230769230769,
"repo_name": "macosforge/ccs-calendarserver",
"id": "347748cd3b133a0041593bb369bf4707f766b238",
"size": "633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txdav/common/datastore/work/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import time
from typing import Optional
from .api_jwk import PyJWKSet, PyJWTSetWithTimestamp
class JWKSetCache:
def __init__(self, lifespan: int):
self.jwk_set_with_timestamp: Optional[PyJWTSetWithTimestamp] = None
self.lifespan = lifespan
def put(self, jwk_set: PyJWKSet):
if jwk_set is not None:
self.jwk_set_with_timestamp = PyJWTSetWithTimestamp(jwk_set)
else:
# clear cache
self.jwk_set_with_timestamp = None
def get(self) -> Optional[PyJWKSet]:
if self.jwk_set_with_timestamp is None or self.is_expired():
return None
return self.jwk_set_with_timestamp.get_jwk_set()
def is_expired(self) -> bool:
return (
self.jwk_set_with_timestamp is not None
and self.lifespan > -1
and time.monotonic()
> self.jwk_set_with_timestamp.get_timestamp() + self.lifespan
)
| {
"content_hash": "482e48967adca23f68c45d84ac771b28",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 75,
"avg_line_length": 29.5,
"alnum_prop": 0.6165254237288136,
"repo_name": "progrium/pyjwt",
"id": "e8c2a7e0a7760b06a2aef00ca2127608ef2f1a1f",
"size": "944",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jwt/jwk_set_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187509"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
def copy_dataset_from_selection(apps, schema_editor):
    """Backfill ``Task.dataset`` from each task's ``selection.dataset``.

    Data-migration helper: iterates over every Task row and persists the
    copied value.  ``schema_editor`` is unused but required by the
    RunPython calling convention.
    """
    task_model = apps.get_model('project.Task')
    for row in task_model.objects.all():
        row.dataset = row.selection.dataset
        row.save()
class Migration(migrations.Migration):
    # Adds a denormalized Task.dataset foreign key and backfills it from
    # Task.selection.dataset via copy_dataset_from_selection.

    dependencies = [
        ('dataset', '0008_message_metadata'),
        ('project', '0006_auto_20150507_1847'),
    ]

    operations = [
        migrations.AddField(
            model_name='task',
            name='dataset',
            # default=None with preserve_default=False: the default is only a
            # placeholder while the column is added; real values come from the
            # RunPython step below.
            field=models.ForeignKey(related_name='tasks', default=None, to='dataset.Dataset'),
            preserve_default=False,
        ),
        migrations.RunPython(copy_dataset_from_selection),
    ]
| {
"content_hash": "342ae2a0af08b06207b7cd7c5074efa6",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 94,
"avg_line_length": 29.153846153846153,
"alnum_prop": 0.6253298153034301,
"repo_name": "michaelbrooks/uw-message-coding",
"id": "97d725cbc5dbc957ce8d9f3b88d5d24772bc2874",
"size": "782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "message_coding/apps/project/migrations/0007_task_dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20931"
},
{
"name": "HTML",
"bytes": "47615"
},
{
"name": "JavaScript",
"bytes": "30966"
},
{
"name": "Python",
"bytes": "123216"
},
{
"name": "Shell",
"bytes": "13307"
}
],
"symlink_target": ""
} |
""" Various utilities useful when using kernels
"""
import logging
from multiprocessing import cpu_count
import numpy as np
import numexpr as ne
def get_kernel_object(name, sparse=True, **kwargs):
    """Factory returning a kernel object of the requested flavour.

    Imports are kept local so only the chosen backend gets loaded.
    """
    if not sparse:
        from .dense.dense_kernels import get_dense_kernel
        return get_dense_kernel(name, **kwargs)
    from .sparse.sparse_kernels import SparseKernel
    return SparseKernel(name, **kwargs)
def get_heuristic_gamma(dist_vals, gamma=None, dist_is_libsvm_fmt=False):
    """ Return a heuristic gamma value for Gaussian RBF kernels

    The bandwidth is derived from the median (or, for very large matrices,
    the mean) of the distance values.

    Parameters
    ----------
    dist_vals: numpy array,
        pairwise distance values
    gamma: float or None, optional, default: None,
        None or gamma <= 0 requests the heuristic; a negative value acts as
        a multiplicative factor (gamma = -gamma / sigma2); a positive value
        is returned unchanged
    dist_is_libsvm_fmt: boolean, optional, default: False,
        if True, dist_vals is 2D with an extra leading ID column (libsvm
        format) which is ignored

    Returns
    -------
    gamma: float,
        the (possibly heuristic) gamma value

    Raises
    ------
    ValueError, if dist_vals contains non-finite values, is not 2D while
    dist_is_libsvm_fmt is set, or yields an invalid sigma2
    """
    if gamma is None:
        gamma = -1
    if dist_is_libsvm_fmt:
        # make a view of dist_vals without the 1st column (ID)
        if dist_vals.ndim != 2:
            raise ValueError(
                'dist_vals is {}-D not in libsvm_fmt!'.format(dist_vals.ndim))
        dist_vals = dist_vals[:, 1:]
    if gamma > 0:
        # explicit bandwidth given: nothing to estimate
        return gamma
    if dist_vals.size > 1e6:
        # median uses a sorted copy of dist_vals: use mean when too large
        _sigma2 = dist_vals.mean()
    else:
        # Note: if NaN in dist_vals, median forgets about it,
        # whereas mean is NaN => force (ugly) check here
        # (np.all replaces np.alltrue, removed in NumPy 2.0)
        if not np.all(np.isfinite(dist_vals)):
            raise ValueError('NaN in the dist_vals')
        _sigma2 = np.median(dist_vals)
        if _sigma2 == 0:
            # may happen if many zeros (i.e many similar points)
            _sigma2 = dist_vals.mean()
    if _sigma2 == 0:
        logging.warning("constant kernel matrix: use gamma = 1")
        gamma = 1.0
    elif np.isfinite(_sigma2):
        if gamma < 0:
            # negative gamma scales the heuristic bandwidth
            gamma /= - float(_sigma2)
        else:
            gamma = 1.0 / _sigma2
    else:
        raise ValueError(
            'Invalid kernel values'
            ' yielding incorrect _sigma2 ({})'.format(_sigma2))
    return gamma
def safe_sparse_dot(a, b, dense_output=True):
    """Dot product that works for sparse as well as dense operands.

    Note: from sklearn.utils.extmath
    """
    from scipy import sparse
    if not (sparse.issparse(a) or sparse.issparse(b)):
        return np.dot(a, b)
    product = a * b  # matrix multiplication
    if dense_output and hasattr(product, "toarray"):
        product = product.toarray()
    return product
def safe_len(a):
    """ Length of array-like a (number of rows for 2D arrays)

    Prefers the numpy-style ``shape`` attribute when present, falling back
    to ``len()`` for plain sequences.
    """
    try:
        return a.shape[0]
    except (AttributeError, IndexError):
        # no .shape (plain sequence) or 0-d array: fall back to len()
        # (was a bare 'except:', which also swallowed KeyboardInterrupt)
        return len(a)
def center_gram(kern_mat, is_sym=True):
    """ Center (in place) the Gram (kernel) matrix in the feature space

    Mathematical operation: K <- PKP where P = eye(n) - 1/n ones((n,n))

    Parameters
    ----------
    kern_mat: (nr, nc) numpy array,
        positve semi-definite kernel matrix
    is_sym: boolean, optional, default: True,
        assume the matrix is symmetric

    Returns
    -------
    cms: (1, nc) numpy array,
        column means of the original kernel matrix
    mcm: double,
        mean of the original column means, which, like cms, are parameters
        needed to center in the same way the future kernel evaluations
    """
    n_rows, n_cols = kern_mat.shape
    assert not is_sym or n_rows == n_cols, "Matrix cannot be symmetric if not square!"
    # column means of the ORIGINAL matrix, kept as a (1, nc) row vector
    col_means = kern_mat.mean(axis=0)[np.newaxis, :]
    # row means: for a symmetric matrix they are just the transposed column means
    if is_sym:
        row_means = col_means.T
    else:
        row_means = kern_mat.mean(axis=1)[:, np.newaxis]
    # grand mean of the full matrix
    grand_mean = col_means.mean()
    # double-center in place via broadcasting: K + m - cms - rms
    kern_mat += grand_mean
    kern_mat -= col_means
    kern_mat -= row_means
    return col_means, grand_mean
def center_rows(kern_rows, cms, mcm):
    """ Center (in place) kernel rows in the feature space

    WARNING: assumes kernel row NOT IN LIBSVM FORMAT!

    Parameters
    ----------
    kern_rows: (m, n) numpy array,
        rows of kernel evaluations k(x,x_i) of m test samples x
        with a (training) set {x_i}, i=1...n
    cms: (1, nc) numpy array,
        column means of the original kernel matrix
    mcm: double,
        mean of the original column means
    """
    if kern_rows.ndim == 2:
        # one mean per row, kept as a column vector for broadcasting
        sample_means = kern_rows.mean(axis=-1)[:, np.newaxis]
    else:
        # single 1-D row: scalar mean; flatten cms so it broadcasts correctly
        sample_means = kern_rows.mean()
        cms = cms.squeeze()
    kern_rows += mcm
    kern_rows -= cms
    kern_rows -= sample_means
def kpca(centered_kern_mat, k=2):
    """ Perform kernel PCA

    The kernel has to be centered and not in libsvm format

    Parameters
    ----------
    centered_kern_mat: (n, n) numpy array,
        CENTERED Gram array
        (NOT in libsvm format and assumed centered!)
    k: int, optional, default: 2,
        number of (largest) principal components kept

    Returns
    -------
    eigvals: (k,) numpy array,
        k largest eigenvalues of the kernel matrix (same as covariance
        operator) sorted in ascending order
    neigvects: (n, k) numpy array,
        corresponding k NORMALIZED eigenvectors of the kernel matrix
    m_proj: (k, n) numpy array,
        columns of projections onto the k principal components

    Notes
    -----
    The residual (reconstruction error) can be obtained by doing:
    r = 1.0/n * (K.trace() - eigvals.sum()) = mean of the smallest n-k eigvals
    To project a new vector K[xtest, :] do: dot(K[xtest,:], neigvects).T
    """
    from scipy.linalg import eigh
    n = centered_kern_mat.shape[0]
    # k largest (eigen-value, eigen-vector) pairs of the kernel matrix.
    # SciPy removed the 'eigvals' keyword (deprecated 1.5, removed 1.11) in
    # favor of 'subset_by_index'; fall back for older SciPy versions.
    try:
        eigvals, eigvects = eigh(
            centered_kern_mat, subset_by_index=(n - k, n - 1))
    except TypeError:
        eigvals, eigvects = eigh(centered_kern_mat, eigvals=(n - k, n - 1))
    # Note: ascending order
    sqrt_eigvals = np.sqrt(eigvals)
    # project the data onto the principal (normalized) eigen-vectors
    m_proj = eigvects * sqrt_eigvals
    # Note: equivalent to: dot(centered_kern_mat, eigvects/sqrt_eigvals)
    # normalize eigenvectors (useful for projecting new vectors)
    eigvects /= sqrt_eigvals
    # checks
    assert np.all(np.isfinite(eigvals)), \
        "Some Nans or Infs in the eigenvalues"
    assert np.all(np.isfinite(eigvects)), \
        "Some Nans or Infs in the normalized eigenvectors"
    assert np.all(np.isfinite(m_proj)), \
        "Some Nans or Infs in the projections"
    # return the results
    return eigvals, eigvects, m_proj.T
def kpca_proj(K_rows, neigvects):
    """ Project samples using pre-computed kPCA (normalized) eigen-vectors

    Parameters
    ----------
    K_row: (m, n) numpy array,
        column vectors containing K[x, xtrains] where x is the sample to
        project and xtrains are the vectors used during kPCA
    neigvects: (n, k) numpy array,
        normalized principal eigen-vectors as returned by kpca

    Returns
    -------
    proj_col: (k, m) numpy array,
        column vectors corresponding to the projections on the principal
        components
    """
    projections = np.dot(K_rows, neigvects)
    return projections.T
def _get_kmat(dm2, w, gamma, sim_b):
    """Evaluate one weighted kernel term with numexpr.

    Similarities are just scaled; distances are RBF'd with *gamma* first.
    """
    expr = "w * dm2" if sim_b else "w * exp(-gamma*dm2)"
    return ne.evaluate(expr)
def combine_kernels(dist_matrices_arg, gamma=None,
                    is_sim_l=None, weights_l=None, libsvm_fmt=False):
    """ Returns a kernel matrix that is the weighted sum of the kernel
    matrices built from the distances in dist_matrices_arg

    is_sim_l: list of booleans,
        stating if the distance matrix is actually a similarity (hence
        directly added) or not (in which case its RBF'd using gamma
        before adding it)

    if libsvm_fmt is True, then distances are assumed with an extra 1st column
    and the kernel is returned with an extra ID column for libsvm

    Note: with the default weights the combination is the average of kernels
    """
    if gamma is None:
        gamma = -1
    # normalize the argument to a list of matrices
    if isinstance(dist_matrices_arg, (tuple, list)):
        dist_matrices = dist_matrices_arg
        N = len(dist_matrices)
    elif isinstance(dist_matrices_arg, np.ndarray):
        dist_matrices = [dist_matrices_arg]
        N = 1
    else:
        raise ValueError(
            "Invalid type for 'dist_matrices_arg' ({})".format(
                type(dist_matrices_arg)))
    # defaults: plain distances, uniform weights
    if is_sim_l is None:
        is_sim_l = [False] * N
    if weights_l is None:
        weights_l = [1.0 / N] * N
    if len(is_sim_l) != N or len(weights_l) != N:
        raise ValueError(
            "Invalid combination parameter length "
            "(N={}, is_sim_l={}, weights_l={})".format(N, is_sim_l, weights_l))
    kernel_matrix = None
    for weight, is_sim, dmat in zip(weights_l, is_sim_l, dist_matrices):
        if weight <= 0:
            # zero-weighted terms contribute nothing: skip them
            continue
        if not is_sim and gamma <= 0:
            # estimate gamma once, from the first RBF'd matrix
            gamma = get_heuristic_gamma(
                dmat, dist_is_libsvm_fmt=libsvm_fmt)
        term = _get_kmat(dmat, weight, gamma, is_sim)
        kernel_matrix = term if kernel_matrix is None else kernel_matrix + term
    if libsvm_fmt:
        # set first column for libsvm (1-based sample IDs)
        kernel_matrix[:, 0] = np.arange(
            1, kernel_matrix.shape[0] + 1, dtype=kernel_matrix.dtype)
    return kernel_matrix
def mpmap(func, inputs, ncpus=0, with_progressbar=True):
    """Apply function 'func' to all inputs (any iterable)
    Use 'ncpus' processes -- defaults to ncores.
    Return list of results (in the order of 'inputs')
    Note: only worth it if more than 2 cpus
    (need one process to serialize/deserialize across channels)

    NOTE(review): Python 2 only (uses 'xrange' and relies on 'map'
    returning a list); requires the third-party 'pprocess' (and optionally
    'progressbar') modules for the parallel path.
    """
    ncpus = int(ncpus)
    tot_cpus = cpu_count()
    # ncpus == 0 (or too large) means "use all cores";
    # a negative value means "all cores but that many", with a floor of 1
    if ncpus == 0 or ncpus > tot_cpus:
        ncpus = tot_cpus
    elif ncpus < 0:
        ncpus = max(1, tot_cpus + ncpus)
    # not more processes than inputs
    ncpus = min(ncpus, len(inputs))
    # activate parallelism only if possible
    try:
        import pprocess
    except ImportError:
        logging.warning("Could not find pprocess module: no parallelism")
        ncpus = 1
    # launch the computations
    if ncpus >= 2:
        # version with reusable processes
        results = pprocess.Map(limit=ncpus, reuse=1, continuous=0)
        calc = results.manage(pprocess.MakeReusable(func))
        # submit all work items; results are collected below
        for arg in inputs:
            calc(arg)
        # store the results (same order as 'inputs')
        full_res = []
        if with_progressbar:
            from progressbar import ProgressBar
            idx_inputs = ProgressBar()(xrange(len(inputs)))
        else:
            idx_inputs = xrange(len(inputs))
        # indexing into 'results' blocks until the corresponding item is done
        for _i in idx_inputs:
            full_res.append(results[_i])
    else:
        # use normal map
        full_res = map(func, inputs)
    return full_res
| {
"content_hash": "598e8018786264b63bcea987a0b2b07e",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 79,
"avg_line_length": 32.054755043227665,
"alnum_prop": 0.5973208666726603,
"repo_name": "daien/ekovof",
"id": "c68adebc7d223d2a9ff1a0620361258543b44ff7",
"size": "11123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ekovof/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "310398"
},
{
"name": "Makefile",
"bytes": "689"
},
{
"name": "Python",
"bytes": "81901"
},
{
"name": "Shell",
"bytes": "2089"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from userprofile import views
urlpatterns = [
    # <username> -> UserProfileDetailView (reverse name 'user_profile')
    url(r'^(?P<username>\w+)$',
        views.UserProfileDetailView.as_view(), name='user_profile'),
    # <username>/edit -> UserProfileEditView (reverse name 'edit_user_profile')
    url(r'^(?P<username>\w+)/edit$',
        views.UserProfileEditView.as_view(), name='edit_user_profile'),
    # <username>/follow -> FollowUserView (reverse name 'follow_user')
    url(r'^(?P<username>\w+)/follow$',
        views.FollowUserView.as_view(), name='follow_user'),
    # <username>/(following|followers) -> FollowListView; NOTE(review): the
    # pattern has no trailing '$', so longer paths also match this route
    url(r'^(?P<username>\w+)/(?P<direction>following|followers)',
        views.FollowListView.as_view(), name='following'),
]
| {
"content_hash": "4ba7569a1578887edbfd50e6445bd078",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 71,
"avg_line_length": 37,
"alnum_prop": 0.638996138996139,
"repo_name": "andela-ooshodi/codango-debug",
"id": "7da8ddfe6ba1ee85ccb0b2472f3a42fddbf06f14",
"size": "518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codango/userprofile/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9396"
},
{
"name": "HTML",
"bytes": "54686"
},
{
"name": "JavaScript",
"bytes": "21439"
},
{
"name": "Python",
"bytes": "82991"
}
],
"symlink_target": ""
} |
import sys
import re
import argparse
import socket
import json
import httplib2
import ssl
import fiapProto
import fiapConfig
FIAPC_TIMEOUT = 10
class CertificateValidationError(httplib2.HttpLib2Error):
    """Raised when the TLS peer presents no usable server certificate."""
    pass
def validating_server_factory(config):
    """Return an HTTPS connection class that validates the server cert
    according to *config* (security level, ciphers, client cert, CA list).

    NOTE(review): Python 2 code; also references the globals 'opt' (CLI
    options) and 'socks' (not imported here -- the proxy branch would raise
    NameError if taken; verify).
    """
    # we need to define a closure here because we don't control
    # the arguments this class is instantiated with
    class ValidatingHTTPSConnection(httplib2.HTTPSConnectionWithTimeout):
        def connect(self):
            # begin copypasta from HTTPSConnectionWithTimeout
            "Connect to a host on a given (SSL) port."
            if self.proxy_info and self.proxy_info.isgood():
                sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
                sock.setproxy(*self.proxy_info.astuple())
            else:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            if self.timeout != 0:
                sock.settimeout(self.timeout)
            sock.connect((self.host, self.port))
            # end copypasta
            # security level 2 forces TLSv1.2 and the system trust store;
            # anything else negotiates via SSLv23 with config's ciphers only
            if config.security_level == 2:
                ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
                ctx.load_default_certs(purpose=ssl.Purpose.SERVER_AUTH)
            else:
                ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.set_ciphers(config.ciphers)
            if config:
                # optional client certificate for mutual TLS
                if config.key_file and config.cert_file:
                    ctx.load_cert_chain(keyfile=config.key_file,
                                        certfile=config.cert_file)
                ca_certs = config.ca_certs
                if ca_certs:
                    ctx.load_verify_locations(cafile=ca_certs)
                if config.cert_request:
                    ctx.verify_mode = ssl.CERT_REQUIRED
            try:
                self.sock = ctx.wrap_socket(sock)
            except ssl.SSLError:
                # we have to capture the exception here and raise later because
                # httplib2 tries to ignore exceptions on connect
                import sys
                self._exc_info = sys.exc_info()
                raise
            else:
                self._exc_info = None
            # this might be redundant
            server_cert = self.sock.getpeercert()
            if opt.debug >= 2:
                print 'DEBUG: server cert=:', server_cert
            if not server_cert:
                raise CertificateValidationError(repr(server_cert))
            # dump the certificate's subjectAltName entries when debugging
            for i in server_cert['subjectAltName']:
                if opt.debug >= 2:
                    print 'DEBUG: SAN=', i
        def getresponse(self):
            # re-raise the SSL error captured in connect() (httplib2 swallowed it)
            if not self._exc_info:
                return httplib2.HTTPSConnectionWithTimeout.getresponse(self)
            else:
                raise self._exc_info[1], None, self._exc_info[2]
    return ValidatingHTTPSConnection
def postrequest(url, body=None, ctype='text/xml; charset=utf-8', config=None):
    """POST *body* to *url* with content type *ctype* and return the
    response body (None is never returned: errors exit the process).

    When config.security_level is set, the request goes through the
    certificate-validating connection class from validating_server_factory.
    NOTE(review): relies on the global 'opt' for debug output; a config of
    None would raise AttributeError on config.security_level.
    """
    #
    # set headers
    #
    headers = {}
    headers['Content-Type'] = ctype
    headers['Content-Length'] = str(len(body))
    #
    # set http_args
    #
    http_args = {}
    if config.security_level:
        http_args['connection_type'] = validating_server_factory(config)
    #
    # start the http connection
    #
    http = httplib2.Http(timeout=FIAPC_TIMEOUT)
    try:
        res_headers, res_body = http.request(url, method='POST',
                                             body=body, headers=headers, **http_args)
    except Exception as e:
        # any transport/TLS failure is fatal for this CLI tool
        print e, str(type(e))
        exit(1)
    if opt.debug >= 1:
        print 'DEBUG: HTTP: %s %s' % (res_headers.status, res_headers.reason)
    if opt.debug >= 2:
        print 'DEBUG: === BEGIN: response headers'
        for k, v in res_headers.iteritems():
            print 'DEBUG: %s: %s' % (k, v)
        print 'DEBUG: === END: response headers'
    return res_body
def soapGetAddressLocation(wsdl):
    """Extract (host, port) from the soap:address location of a WSDL file.

    Fixes over the previous version: actually uses the *wsdl* argument
    (it used to read the global opt.wsdl), iterates the file line by line
    (re.search was called on the list returned by readlines()), closes the
    file, and parses 'scheme://host:port/path' URLs without the ValueError
    the plain split(':') produced.

    Parameters
    ----------
    wsdl: path of the WSDL file to scan

    Returns
    -------
    (host, port) tuple of strings, or None when no soap:address location
    is found; port is None when the location URL carries no explicit port.
    """
    location = None
    with open(wsdl) as f:
        for line in f:
            r = re.search(r'<soap:address location="([^"]+)"', line)
            if r is not None:
                location = r.group(1)
                break
    if location is None:
        return None
    # drop the URL scheme ('http://', ...) and any trailing path
    netloc = location.split('://', 1)[-1].split('/', 1)[0]
    if ':' in netloc:
        host, port = netloc.rsplit(':', 1)
    else:
        host, port = netloc, None
    return (host, port)
def set_default_port(url):
    """Ensure *url* carries an explicit port; return (url, 'host:port').

    Without an explicit port, 18880 is used for http and 18883 for https
    and the URL is rebuilt; otherwise both values are returned as-is.
    """
    scheme, _, remainder = url.split('/', 2)
    if '/' in remainder:
        host, path = remainder.split('/', 1)
    else:
        host, path = remainder, ''
    if ':' in host:
        # port already explicit: keep the URL untouched
        return url, host
    port = 18883 if scheme == 'https:' else 18880
    return '%s//%s:%d/%s' % (scheme, host, port, path), '%s:%s' % (host, port)
#
# parser
#
def parse_args():
    """Build the command-line parser and parse sys.argv.

    Returns the argparse namespace; only -e (server URL) is mandatory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', action='store', dest='url', default=None,
                        required=True,
                        help='specify the access point of the server in URL.')
    parser.add_argument('-f', action='store', dest='bfile', default=None,
                        help='specify the filename of the request.')
    parser.add_argument('-c', action='store', dest='cfile', default=None,
                        help='specify the file name of the configuration.')
    parser.add_argument('-x', action='store_true', dest='req_to_xml', default=False,
                        help='specify to send an XML request.')
    parser.add_argument('-X', action='store_true', dest='res_to_xml', default=False,
                        help='specify to output an XML response.')
    parser.add_argument('-s', action='store', dest='sec_lv', default=None,
                        help='specify the security level. 0, 1, or 2')
    parser.add_argument('-w', action='store', dest='wsdl', default=None,
                        help='specify the wsdl.')
    parser.add_argument('-d', action='store', dest='debug', default=0,
                        help='specify the debug level. 0, 1, or 2')
    return parser.parse_args()
#
# main: parse options, build the request, POST it, print the response
#
opt = parse_args()
# NOTE(review): this https special-case is dead code -- sec_lv is
# unconditionally overwritten by opt.sec_lv on the next statement; verify
# the intended precedence.
if opt.url.startswith('https://'):
    sec_lv = 1
sec_lv = opt.sec_lv
url, host = set_default_port(opt.url)
# NOTE(review): opt.debug comes from argparse as a string when -d is given
# (default is int 0); the '>=' comparisons below rely on Python 2's
# cross-type ordering.
if opt.debug >= 1:
    print 'DEBUG: connect to', host
#soapGetAddressLocation(opt.wsdl)
# read the request document from -f FILE, or stdin by default
if opt.bfile != None:
    fp = open(opt.bfile)
else:
    fp = sys.stdin
src = fp.read()
if src == None:
    print 'ERROR: src document is nothing'
    exit(1)
fiap = fiapProto.fiapProto(debug=opt.debug)
#
# make a request: convert the input to XML (-x) or JSON (default),
# detecting the input format from a leading '<?xml' declaration
#
req_doc = ''
if opt.req_to_xml == True:
    ctype = 'text/xml; charset=utf-8'
    if re.match('^\<\?xml', src):
        req_doc = src
    else:
        req_doc = fiap.JSONtoXML(src)
else:
    ctype = 'text/json; charset=utf-8'
    if re.match('^\<\?xml', src) == None:
        req_doc = src
    else:
        req_doc = fiap.XMLtoJSON(src)
if req_doc == None:
    print 'ERROR: %s' % fiap.getemsg()
    exit(1)
if opt.debug >= 1:
    print 'DEBUG: Request:', req_doc
#
# parse the configuration file if specified.
#
cf = fiapConfig.fiapConfig(opt.cfile, security_level=sec_lv, debug=opt.debug)
#
# send the request and get a response.
#
res = postrequest(url, body=req_doc, ctype=ctype, config=cf)
if res == None:
    print 'ERROR(FIAP): ' + fiap.emsg
    exit(1)
if opt.debug >= 1:
    print 'DEBUG: Response:', res
#
# print the response, converted to the format requested with -X
#
if opt.res_to_xml == True:
    if re.match('^\<\?xml', res):
        res_doc = res
    else:
        res_doc = fiap.JSONtoXML(res)
else:
    if re.match('^\<\?xml', res):
        res_doc = fiap.XMLtoJSON(res)
    else:
        res_doc = res
# NOTE(review): this pretty-print runs even when res_doc is XML (-X),
# in which case json.loads fails and the tool exits with a parse error;
# confirm whether XML output was meant to skip this step.
try:
    res_doc = json.dumps(json.loads(res_doc), indent=2)
except ValueError as e:
    print 'ERROR: JSON parse error', e
    exit(1)
# NOTE(review): probably meant to check res_doc here, not req_doc (which
# was already validated above).
if req_doc == None:
    print 'ERROR: %s' % fiap.getemsg()
    exit(1)
print res_doc
| {
"content_hash": "9f3042b0e7fd9a28ef4cbc8e45c590c7",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 82,
"avg_line_length": 30.632653061224488,
"alnum_prop": 0.5733510992671552,
"repo_name": "momotaro98/fiapy",
"id": "6e8f3364e119b2875872764bb715e1f98233e55f",
"size": "7554",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fiapc.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "83924"
},
{
"name": "Shell",
"bytes": "1638"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from sentry import analytics
class PluginEnabledEvent(analytics.Event):
    # Analytics event recorded when a plugin is enabled.
    type = "plugin.enabled"
    attributes = (
        # who enabled it, and on which organization/project
        analytics.Attribute("user_id"),
        analytics.Attribute("organization_id"),
        analytics.Attribute("project_id"),
        analytics.Attribute("plugin"),
    )
# make the event type known to the analytics registry
analytics.register(PluginEnabledEvent)
| {
"content_hash": "b1c8573877164b017ccac9c7bbd24c4e",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 47,
"avg_line_length": 22.294117647058822,
"alnum_prop": 0.6886543535620053,
"repo_name": "mvaled/sentry",
"id": "686b01c62f5f4abe05c919a340b7938503c7ab12",
"size": "379",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/analytics/events/plugin_enabled.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
} |
"""
This module contains all code for using the VMware SOAP API/SDK
"""
from time import sleep
import re
import os
import shutil
from suds.client import Client, WebFault
from suds.cache import ObjectCache
from suds.sudsobject import Property
from suds.plugin import MessagePlugin
from ovs.log.logHandler import LogHandler
logger = LogHandler('extensions', name='vmware sdk')
class NotAuthenticatedException(BaseException):
    """Signals that the current API session is not authenticated; the
    'authenticated' decorator catches it and retries after a re-login."""
    pass
def authenticated(force=False):
    """
    Decorator to make that a login is executed in case the current session isn't valid anymore
    @param force: Force a (re)login, as some methods also work when not logged in
    """
    from functools import wraps

    def wrapper(function):
        # functools.wraps preserves the wrapped method's __name__/__doc__,
        # which the previous version lost
        @wraps(function)
        def new_function(self, *args, **kwargs):
            # legacy side effect kept for backward compatibility: mirrors the
            # wrapped function's docstring onto the instance on every call
            self.__doc__ = function.__doc__
            try:
                if force:
                    self._login()
                return function(self, *args, **kwargs)
            except WebFault as fault:
                # suds reports an expired session as a WebFault; re-login and
                # retry once, re-raise anything else
                if 'The session is not authenticated' in str(fault):
                    logger.debug('Received WebFault authentication failure, logging in...')
                    self._login()
                    return function(self, *args, **kwargs)
                raise
            except NotAuthenticatedException:
                logger.debug('Received NotAuthenticatedException, logging in...')
                self._login()
                return function(self, *args, **kwargs)
        return new_function
    return wrapper
class ValueExtender(MessagePlugin):
    """
    Plugin for SUDS for compatibility with VMware SDK
    """
    def addAttributeForValue(self, node):
        """
        Adds an attribute to a given node
        """
        # tag every 'value' element with an explicit xsd:string type
        if node.name == 'value':
            node.set('xsi:type', 'xsd:string')
    def marshalled(self, context):
        """
        Hook up the plugin
        """
        # called by suds on every outgoing message: walk the whole envelope
        context.envelope.walk(self.addAttributeForValue)
class Sdk(object):
"""
This class contains all SDK related methods
"""
    def __init__(self, host, login, passwd):
        """
        Initializes the SDK

        Connects to https://<host>/sdk, retrieves the ServiceContent and,
        when talking directly to an ESXi host (not vCenter), resolves and
        caches the host's own HostSystem reference.
        """
        self._host = host
        self._username = login
        self._password = passwd
        self._sessionID = None
        self._check_session = True
        # cache the parsed WSDL for a week to avoid re-fetching it
        self._cache = ObjectCache()
        self._cache.setduration(weeks=1)
        self._client = Client('https://%s/sdk/vimService?wsdl' % host,
                              cache=self._cache,
                              cachingpolicy=1)
        self._client.set_options(location='https://%s/sdk' % host,
                                 plugins=[ValueExtender()])
        service_reference = self._build_property('ServiceInstance')
        self._serviceContent = self._client.service.RetrieveServiceContent(
            service_reference)
        # In case of an ESXi host, this would be 'HostAgent'
        self._is_vcenter = self._serviceContent.about.apiType == 'VirtualCenter'
        if not self._is_vcenter:
            # pylint: disable=line-too-long
            # direct ESXi connection: find the HostSystem by walking
            # rootFolder -> Datacenter -> hostFolder -> ComputeResource -> host
            self._login()
            self._esxHost = self._get_object(
                self._serviceContent.rootFolder,
                prop_type='HostSystem',
                traversal={'name': 'FolderTraversalSpec',
                           'type': 'Folder',
                           'path': 'childEntity',
                           'traversal': {'name': 'DatacenterTraversalSpec',
                                         'type': 'Datacenter',
                                         'path': 'hostFolder',
                                         'traversal': {'name': 'DFolderTraversalSpec',
                                                       'type': 'Folder',
                                                       'path': 'childEntity',
                                                       'traversal': {'name': 'ComputeResourceTravelSpec',  # noqa
                                                                     'type': 'ComputeResource',
                                                                     'path': 'host'}}}},
                properties=['name']
            ).obj_identifier
            # pylint: enable=line-too-long
        else:
            self._esxHost = None
    @authenticated(force=True)
    def _get_vcenter_hosts(self):
        """
        reload vCenter info (host name and summary)

        Returns the HostSystem objects of every host known to the vCenter,
        each carrying its name, runtime summary (power state) and virtual
        NIC configuration.  Raises RuntimeError on a direct ESXi connection.
        """
        if not self._is_vcenter:
            raise RuntimeError('Must be connected to a vCenter Server API.')
        # walk rootFolder -> Datacenter -> hostFolder -> ComputeResource -> host
        datacenter_info = self._get_object(
            self._serviceContent.rootFolder,
            prop_type='HostSystem',
            traversal={'name': 'FolderTraversalSpec',
                       'type': 'Folder',
                       'path': 'childEntity',
                       'traversal': {'name': 'DatacenterTraversalSpec',
                                     'type': 'Datacenter',
                                     'path': 'hostFolder',
                                     'traversal': {'name': 'DFolderTraversalSpec',
                                                   'type': 'Folder',
                                                   'path': 'childEntity',
                                                   'traversal': {'name': 'ComputeResourceTravelSpec',  # noqa
                                                                 'type': 'ComputeResource',
                                                                 'path': 'host'}}}},
            properties=['name', 'summary.runtime', 'config.virtualNicManagerInfo.netConfig']
        )
        return datacenter_info
def get_host_status_by_ip(self, host_ip):
"""
Return host status by ip, from vcenter info
Must be connected to a vcenter server api
"""
host = self._get_host_info_by_ip(host_ip)
return host.summary.runtime.powerState
def get_host_status_by_pk(self, pk):
"""
Return host status by pk, from vcenter info
Must be connected to a vcenter server api
"""
host = self._get_host_info_by_pk(pk)
return host.summary.runtime.powerState
def get_host_primary_key(self, host_ip):
"""
Return host primary key, based on current ip
Must be connected to a vcenter server api
"""
host = self._get_host_info_by_ip(host_ip)
return host.obj_identifier.value
def _get_host_info_by_ip(self, host_ip):
"""
Return HostSystem object by ip, from vcenter info
Must be connected to a vcenter server api
"""
datacenter_info = self._get_vcenter_hosts()
for host in datacenter_info:
for nic in host.config.virtualNicManagerInfo.netConfig.VirtualNicManagerNetConfig:
if nic.nicType == 'management':
for vnic in nic.candidateVnic:
if vnic.spec.ip.ipAddress == host_ip:
return host
raise RuntimeError('Host with ip {0} not found in datacenter info'.format(host_ip))
def _get_host_info_by_pk(self, pk):
"""
Return HostSystem object by pk, from vcenter info
Must be connected to a vcenter server api
"""
datacenter_info = self._get_vcenter_hosts()
for host in datacenter_info:
if host.obj_identifier.value == pk:
return host
def get_hosts(self):
"""
Gets a neutral list of all hosts available
"""
host_data = self._get_vcenter_hosts()
host_data = [] if host_data is None else host_data
hosts = {}
for host in host_data:
ips = []
for nic in host.config.virtualNicManagerInfo.netConfig.VirtualNicManagerNetConfig:
if nic.nicType == 'management':
for vnic in nic.candidateVnic:
ips.append(vnic.spec.ip.ipAddress)
hosts[host.obj_identifier.value] = {'name': host.name,
'ips': ips}
return hosts
    def test_connection(self):
        """
        Tests the connection

        Attempts a login against the configured host and returns True;
        NOTE(review): presumably _login raises on failure -- verify.
        """
        self._login()
        return True
def list_hosts_in_datacenter(self):
"""
return a list of registered host names in vCenter
must be connected to a vcenter server api
"""
datacenter_info = self._get_vcenter_hosts()
return [host.name for host in datacenter_info]
def validate_result(self, result, message=None):
"""
Validates a given result. Returning True if the task succeeded, raising an error if not
"""
if hasattr(result, '_type') and result._type == 'Task':
return self.validate_result(self.get_task_info(result), message)
elif hasattr(result, 'info'):
if result.info.state == 'success':
return True
elif result.info.state == 'error':
error = result.info.error.localizedMessage
raise Exception(('%s: %s' % (message, error)) if message else error)
raise Exception(('%s: %s' % (message, 'Unexpected result'))
if message else 'Unexpected result')
    @authenticated()
    def get_task_info(self, task):
        """
        Loads the task details

        Fetches the full managed object behind the given task reference.
        """
        return self._get_object(task)
    @authenticated()
    def get_vm_ip_information(self):
        """
        Get the IP information for all vms on a given esxi host

        Returns a list of dicts with 'id', 'vmxpath', 'name' and 'net'
        (one entry per NIC with its MAC and IP addresses).
        """
        esxhost = self._validate_host(None)
        configuration = []
        # walk every VirtualMachine under the host
        for vm in self._get_object(esxhost,
                                   prop_type='VirtualMachine',
                                   traversal={
                                       'name': 'HostSystemTraversalSpec',
                                       'type': 'HostSystem',
                                       'path': 'vm'},
                                   properties=['name', 'guest.net', 'config.files']):
            vmi = {'id': str(vm.obj_identifier.value),
                   'vmxpath': str(vm.config.files.vmPathName),
                   'name': str(vm.name),
                   'net': []}
            # guest.net is only populated when guest networking info exists
            if vm.guest.net:
                for net in vm.guest.net[0]:
                    vmi['net'].append({'mac': str(net.macAddress),
                                       'ipaddresses': [str(i.ipAddress)
                                                       for i in net.ipConfig.ipAddress]})
            configuration.append(vmi)
        return configuration
@authenticated()
def exists(self, name=None, key=None):
"""
Checks whether a vm with a given name or key exists on a given esxi host
"""
esxhost = self._validate_host(None)
if name is not None or key is not None:
try:
if name is not None:
vms = [vm for vm in
self._get_object(esxhost,
prop_type='VirtualMachine',
traversal={'name': 'HostSystemTraversalSpec',
'type': 'HostSystem',
'path': 'vm'},
properties=['name']) if vm.name == name]
if len(vms) == 0:
return None
else:
return vms[0].obj_identifier
if key is not None:
return self._get_object(
self._build_property('VirtualMachine', key),
properties=['name']).obj_identifier
except:
return None
else:
raise Exception('A name or key should be passed.')
@authenticated()
def get_vm(self, key):
"""
Retreives a vm object, based on its key
"""
vmid = self.exists(key=key)
if vmid is None:
raise RuntimeError('Virtual Machine with key {} could not be found.'.format(key))
vm = self._get_object(vmid)
return vm
@authenticated()
def get_vms(self, ip, mountpoint):
"""
Get all vMachines using a given nfs share
"""
esxhost = self._validate_host(None)
datastore = self.get_datastore(ip, mountpoint)
filtered_vms = []
vms = self._get_object(esxhost,
prop_type='VirtualMachine',
traversal={'name': 'HostSystemTraversalSpec',
'type': 'HostSystem',
'path': 'vm'},
properties=['name', 'config'])
for vm in vms:
mapping = self._get_vm_datastore_mapping(vm)
if datastore.name in mapping:
filtered_vms.append(vm)
return filtered_vms
    @authenticated()
    def set_disk_mode(self, vmid, disks, mode, wait=True):
        """
        Sets the disk mode for a set of disks

        Builds a reconfigure spec editing every VirtualDisk whose backing
        file name is in *disks*, then runs ReconfigVM_Task (optionally
        waiting for completion).  Returns the task reference.
        """
        config = self._client.factory.create('ns0:VirtualMachineConfigSpec')
        config.deviceChange = []
        # concrete suds class used to recognize disk devices below
        disk_type = type(self._client.factory.create('ns0:VirtualDisk'))
        vmid = self.exists(key=vmid)
        vm = self._get_object(vmid)
        for device in vm.config.hardware.devices:
            if type(device) == disk_type and hasattr(device, 'backing') \
                    and device.backing.fileName in disks:
                backing = self._client.factory.create(
                    'ns0:VirtualDiskFlatVer2BackingInfo')
                backing.diskMode = mode
                # NOTE(review): the loop variable is rebound to a fresh
                # VirtualDisk carrying only the new backing (no key/unit
                # copied from the matched device) -- confirm this is the
                # intended edit payload.
                device = self._client.factory.create('ns0:VirtualDisk')
                device.backing = backing
                diskSpec = self._client.factory.create(
                    'ns0:VirtualDeviceConfigSpec')
                diskSpec.operation = 'edit'
                diskSpec.fileOperation = None
                diskSpec.device = device
                config.deviceChange.append(diskSpec)
        task = self._client.service.ReconfigVM_Task(vm.obj_identifier, config)
        if wait:
            self.wait_for_task(task)
        return task
def _create_disk(self, factory, key, disk, unit, datastore):
"""
Creates a disk spec for a given backing device
Example for parameter disk: {'name': diskname, 'backingdevice': 'disk-flat.vmdk'}
"""
deviceInfo = factory.create('ns0:Description')
deviceInfo.label = disk['name']
deviceInfo.summary = 'Disk %s' % disk['name']
backing = factory.create('ns0:VirtualDiskFlatVer2BackingInfo')
backing.diskMode = 'persistent'
backing.fileName = '[%s] %s' % (datastore.name, disk['backingdevice'])
backing.thinProvisioned = True
device = factory.create('ns0:VirtualDisk')
device.controllerKey = key
device.key = -200 - unit
device.unitNumber = unit
device.deviceInfo = deviceInfo
device.backing = backing
diskSpec = factory.create('ns0:VirtualDeviceConfigSpec')
diskSpec.operation = 'add'
diskSpec.fileOperation = None
diskSpec.device = device
return diskSpec
def _create_file_info(self, factory, datastore):
"""
Creates a file info object
"""
fileInfo = factory.create('ns0:VirtualMachineFileInfo')
fileInfo.vmPathName = '[%s]' % datastore
return fileInfo
def _create_nic(self, factory, device_type, device_label, device_summary, network, unit):
"""
Creates a NIC spec
"""
deviceInfo = factory.create('ns0:Description')
deviceInfo.label = device_label
deviceInfo.summary = device_summary
backing = factory.create('ns0:VirtualEthernetCardNetworkBackingInfo')
backing.deviceName = network
device = factory.create('ns0:%s' % device_type)
device.addressType = 'Generated'
device.wakeOnLanEnabled = True
device.controllerKey = 100 # PCI Controller
device.key = -300 - unit
device.unitNumber = unit
device.backing = backing
device.deviceInfo = deviceInfo
nicSpec = factory.create('ns0:VirtualDeviceConfigSpec')
nicSpec.operation = 'add'
nicSpec.fileOperation = None
nicSpec.device = device
return nicSpec
def _create_disk_controller(self, factory, key):
"""
Create a disk controller
"""
deviceInfo = self._client.factory.create('ns0:Description')
deviceInfo.label = 'SCSI controller 0'
deviceInfo.summary = 'LSI Logic SAS'
controller = factory.create('ns0:VirtualLsiLogicSASController')
controller.busNumber = 0
controller.key = key
controller.sharedBus = 'noSharing'
controller.deviceInfo = deviceInfo
controllerSpec = factory.create('ns0:VirtualDeviceConfigSpec')
controllerSpec.operation = 'add'
controllerSpec.fileOperation = None
controllerSpec.device = controller
return controllerSpec
def _create_option_value(self, factory, key, value):
"""
Create option values
"""
option = factory.create('ns0:OptionValue')
option.key = key
option.value = value
return option
    @authenticated()
    def copy_file(self, source, destination, wait=True):
        """
        Copies a file on the datastore

        @param source: full datastore path of the source file,
            e.g. '[datastore] dir/file.vmdk'
        @param destination: full datastore path of the target file
        @param wait: block until the copy task completes
        @return: the CopyDatastoreFile task object
        """
        task = self._client.service.CopyDatastoreFile_Task(
            _this=self._serviceContent.fileManager,
            sourceName=source,
            destinationName=destination)
        if wait:
            self.wait_for_task(task)
        return task
    @authenticated()
    def create_vm_from_template(self, name, source_vm, disks, ip, mountpoint, wait=True):
        """
        Create a vm based on an existing vtemplate on specified tgt hypervisor

        Copies CPU count, memory and guest id from ``source_vm``, adds a SCSI
        controller, the given ``disks`` and the template's NICs, then creates
        the VM on the datastore mounted at (ip, mountpoint).

        @param name: name of the new VM
        @param source_vm: template VM object whose hardware config is copied
        @param disks: list of disk dicts ({'name': ..., 'backingdevice': ...})
        @param wait: block until the create task completes
        @return: the CreateVM task object

        Raises RuntimeError if datastore is not available at (ip, mountpoint)
        NOTE(review): get_datastore returns None when nothing matches, in
        which case the 'datastore.name' access below raises AttributeError
        rather than RuntimeError -- confirm intended behavior.
        """
        esxhost = self._validate_host(None)
        host_data = self._get_host_data(esxhost)
        datastore = self.get_datastore(ip, mountpoint)
        # Build basic config information
        config = self._client.factory.create('ns0:VirtualMachineConfigSpec')
        config.name = name
        config.numCPUs = source_vm.config.hardware.numCPU
        config.memoryMB = source_vm.config.hardware.memoryMB
        config.guestId = source_vm.config.guestId
        config.deviceChange = []
        config.extraConfig = []
        config.files = self._create_file_info(self._client.factory, datastore.name)
        # Negative key marks the controller as new in this spec
        disk_controller_key = -101
        config.deviceChange.append(
            self._create_disk_controller(self._client.factory,
                                         disk_controller_key))
        # Add disk devices
        for disk in disks:
            config.deviceChange.append(
                self._create_disk(self._client.factory, disk_controller_key,
                                  disk, disks.index(disk), datastore))
        # Add network: replicate every template NIC that has a plain
        # network backing
        nw_type = type(self._client.factory.create('ns0:VirtualEthernetCardNetworkBackingInfo'))
        for device in source_vm.config.hardware.device:
            if hasattr(device, 'backing') and type(device.backing) == nw_type:
                config.deviceChange.append(
                    self._create_nic(self._client.factory,
                                     device.__class__.__name__,
                                     device.deviceInfo.label,
                                     device.deviceInfo.summary,
                                     device.backing.deviceName,
                                     device.unitNumber))
        # Copy additional properties (extraConfig), except the skip list
        extraconfigstoskip = ['nvram']
        for item in source_vm.config.extraConfig:
            if not item.key in extraconfigstoskip:
                config.extraConfig.append(
                    self._create_option_value(self._client.factory,
                                              item.key,
                                              item.value))
        task = self._client.service.CreateVM_Task(host_data['folder'],
                                                  config=config,
                                                  pool=host_data['resourcePool'],
                                                  host=host_data['host'])
        if wait:
            self.wait_for_task(task)
        return task
    @authenticated()
    def clone_vm(self, vmid, name, disks, wait=True):
        """
        Clone a existing VM configuration

        The clone is created on the same datastore as the source VM; for
        every disk the source vmdk is copied to the clone's backing device.

        @param vmid: unique id of the vm
        @param name: name of the clone vm
        @param disks: list of disks to use in vm configuration
        @param wait: wait for task to complete or not (True/False)
        @return: the CreateVM task object
        """
        esxhost = self._validate_host(None)
        host_data = self._get_host_data(esxhost)
        source_vm_object = self.exists(key=vmid)
        if not source_vm_object:
            raise Exception('VM with key reference %s not found' % vmid)
        source_vm = self._get_object(source_vm_object)
        # Clone onto the source VM's first datastore
        datastore = self._get_object(source_vm.datastore[0][0])
        # Build basic config information
        config = self._client.factory.create('ns0:VirtualMachineConfigSpec')
        config.name = name
        config.numCPUs = source_vm.config.hardware.numCPU
        config.memoryMB = source_vm.config.hardware.memoryMB
        config.guestId = source_vm.config.guestId
        config.deviceChange = []
        config.extraConfig = []
        config.files = self._create_file_info(
            self._client.factory, datastore.name)
        # Negative key marks the controller as new in this spec
        disk_controller_key = -101
        config.deviceChange.append(
            self._create_disk_controller(self._client.factory,
                                         disk_controller_key))
        # Add disk devices
        for disk in disks:
            config.deviceChange.append(
                self._create_disk(self._client.factory, disk_controller_key,
                                  disk, disks.index(disk), datastore))
            # Copy the source vmdk to the clone's backing device; the source
            # file name is derived from the disk name ('-clone' stripped)
            self.copy_file(
                '[{0}] {1}'.format(datastore.name, '%s.vmdk'
                                   % disk['name'].split('_')[-1].replace('-clone', '')),
                '[{0}] {1}'.format(datastore.name, disk['backingdevice']))
        # Add network
        nw_type = type(self._client.factory.create(
            'ns0:VirtualEthernetCardNetworkBackingInfo'))
        for device in source_vm.config.hardware.device:
            if hasattr(device, 'backing') and type(device.backing) == nw_type:
                config.deviceChange.append(
                    self._create_nic(self._client.factory,
                                     device.__class__.__name__,
                                     device.deviceInfo.label,
                                     device.deviceInfo.summary,
                                     device.backing.deviceName,
                                     device.unitNumber))
        # Copy additional properties (extraConfig), except the skip list
        extraconfigstoskip = ['nvram']
        for item in source_vm.config.extraConfig:
            if not item.key in extraconfigstoskip:
                config.extraConfig.append(
                    self._create_option_value(self._client.factory,
                                              item.key,
                                              item.value))
        task = self._client.service.CreateVM_Task(host_data['folder'],
                                                  config=config,
                                                  pool=host_data['resourcePool'],
                                                  host=host_data['host'])
        if wait:
            self.wait_for_task(task)
        return task
@authenticated()
def get_vm(self, key):
vmid = self.exists(key=key)
if vmid is None:
raise RuntimeError('Virtual Machine with key {} could not be found.'.format(key))
vm = self._get_object(vmid)
return vm
    @authenticated()
    def get_datastore(self, ip, mountpoint):
        """
        @param ip : hypervisor ip to query for datastore presence
        @param mountpoint: nfs mountpoint on hypervisor
        @rtype: sdk datastore object
        @return: object when found else None
        @raise RuntimeError: raised when ANY datastore on the host is
            inaccessible, even one unrelated to (ip, mountpoint).
            NOTE(review): the message always names the requested mountpoint,
            not the failing store's -- confirm this blanket check is intended.
        """
        datastore = None
        esxhost = self._validate_host(None)
        host_system = self._get_object(esxhost, properties=['datastore'])
        # host_system.datastore[0] holds the array of datastore references
        for store in host_system.datastore[0]:
            store = self._get_object(store)
            if not store.summary.accessible:
                raise RuntimeError('Datastore {0} is not accessible at mountpoint {1}'.format(store.name, mountpoint))
            # Only NFS-backed stores carry an 'info.nas' attribute
            if hasattr(store.info, 'nas'):
                if store.info.nas.remoteHost == ip and store.info.nas.remotePath == mountpoint:
                    datastore = store
        return datastore
@authenticated()
def is_datastore_available(self, ip, mountpoint):
"""
@param ip : hypervisor ip to query for datastore presence
@param mountpoint: nfs mountpoint on hypervisor
@rtype: boolean
@return: True | False
"""
if self.get_datastore(ip, mountpoint):
return True
else:
return False
def make_agnostic_config(self, vm_object):
regex = '\[([^\]]+)\]\s(.+)'
match = re.search(regex, vm_object.config.files.vmPathName)
esxhost = self._validate_host(None)
config = {'name': vm_object.config.name,
'id': vm_object.obj_identifier.value,
'backing': {'filename': match.group(2),
'datastore': match.group(1)},
'disks': [],
'datastores': {}}
for device in vm_object.config.hardware.device:
if device.__class__.__name__ == 'VirtualDisk':
if device.backing is not None and \
device.backing.fileName is not None:
backingfile = device.backing.fileName
match = re.search(regex, backingfile)
if match:
filename = match.group(2)
backingfile = filename.replace('.vmdk', '-flat.vmdk')
config['disks'].append({'filename': filename,
'backingfilename': backingfile,
'datastore': match.group(1),
'name': device.deviceInfo.label,
'order': device.unitNumber})
host_system = self._get_object(esxhost, properties=['datastore'])
for store in host_system.datastore[0]:
store = self._get_object(store)
if hasattr(store.info, 'nas'):
config['datastores'][store.info.name] = '{}:{}'.format(store.info.nas.remoteHost,
store.info.nas.remotePath)
return config
@authenticated()
def delete_vm(self, vmid, storagedriver_mountpoint, storagedriver_storage_ip, devicename, wait=False):
"""
Delete a given vm
"""
machine = None
if vmid:
try:
machine = self._build_property('VirtualMachine', vmid)
except Exception as ex:
logger.error('SDK domain retrieve failed by vmid: {}'.format(ex))
elif storagedriver_mountpoint and storagedriver_storage_ip and devicename:
try:
machine_info = self.get_nfs_datastore_object(storagedriver_storage_ip, storagedriver_mountpoint, devicename)[0]
machine = self._build_property('VirtualMachine', machine_info.obj_identifier.value)
except Exception as ex:
logger.error('SDK domain retrieve failed by nfs datastore info: {}'.format(ex))
if machine:
task = self._client.service.Destroy_Task(machine)
if wait:
self.wait_for_task(task)
if storagedriver_mountpoint and devicename:
vmx_path = os.path.join(storagedriver_mountpoint, devicename)
if os.path.exists(vmx_path):
dir_name = os.path.dirname(vmx_path)
logger.debug('Removing leftover files in {0}'.format(dir_name))
try:
shutil.rmtree(dir_name)
logger.debug('Removed dir tree {}'.format(dir_name))
except Exception as exception:
logger.error('Failed to remove dir tree {0}. Reason: {1}'.format(dir_name, str(exception)))
return task
@authenticated()
def get_power_state(self, vmid):
"""
Get the power state of a given vm
"""
return self._get_object(self._build_property('VirtualMachine', vmid),
properties=['runtime.powerState']).runtime.powerState
    @authenticated()
    def mount_nfs_datastore(self, name, remote_host, remote_path):
        """
        Mounts a given NFS export as a datastore

        @param name: local datastore name
        @param remote_host: NFS server host
        @param remote_path: exported path on the NFS server
        @return: result of CreateNasDatastore (the new datastore)
        @raise RuntimeError: when a datastore with this name already exists
            but points at a different export
        """
        esxhost = self._validate_host(None)
        host = self._get_object(esxhost, properties=['datastore',
                                                     'name',
                                                     'configManager',
                                                     'configManager.datastoreSystem'])
        for store in host.datastore[0]:
            store = self._get_object(store)
            # Only NFS-backed stores carry an 'info.nas' attribute
            if hasattr(store.info, 'nas'):
                if store.info.name == name:
                    if store.info.nas.remoteHost == remote_host and \
                            store.info.nas.remotePath == remote_path:
                        # We'll remove this store, as it's identical to the one
                        # we'll add, forcing a refresh
                        self._client.service.RemoveDatastore(host.configManager.datastoreSystem,
                                                             store.obj_identifier)
                        break
                    else:
                        raise RuntimeError('A datastore {0} already exists, pointing to {1}:{2}'.format(
                            name, store.info.nas.remoteHost, store.info.nas.remotePath
                        ))
        spec = self._client.factory.create('ns0:HostNasVolumeSpec')
        spec.accessMode = 'readWrite'
        spec.localPath = name
        spec.remoteHost = remote_host
        spec.remotePath = remote_path
        spec.type = 'nfs'
        return self._client.service.CreateNasDatastore(host.configManager.datastoreSystem, spec)
@authenticated()
def wait_for_task(self, task):
"""
Wait for a task to be completed
"""
state = self.get_task_info(task).info.state
while state in ['running', 'queued']:
sleep(1)
state = self.get_task_info(task).info.state
    @authenticated()
    def get_nfs_datastore_object(self, ip, mountpoint, filename):
        """
        ip : "10.130.12.200", string
        mountpoint: "/srv/volumefs", string
        filename: "cfovs001/vhd0(-flat).vmdk"
        identify nfs datastore on this esx host based on ip and mount
        check if filename is present on datastore
        if file is .vmdk return VirtualDisk object for corresponding virtual disk
        if file is .vmx return VirtualMachineConfigInfo for corresponding vm
        @rtype: tuple
        @return: A tuple. First item: vm config, second item: Device if a vmdk was given
        @raise ValueError: when filename is neither .vmdk nor .vmx
        @raise RuntimeError: when the datastore, any VM, or the file cannot
            be located
        """
        filename = filename.replace('-flat.vmdk', '.vmdk')  # Support both -flat.vmdk and .vmdk
        if not filename.endswith('.vmdk') and not filename.endswith('.vmx'):
            raise ValueError('Unexpected filetype')
        esxhost = self._validate_host(None)
        datastore = self.get_datastore(ip, mountpoint)
        if not datastore:
            raise RuntimeError('Could not find datastore')
        # Collect every VM on the host (name + config only)
        vms = self._get_object(esxhost,
                               prop_type='VirtualMachine',
                               traversal={'name': 'HostSystemTraversalSpec',
                                          'type': 'HostSystem',
                                          'path': 'vm'},
                               properties=['name', 'config'])
        if not vms:
            raise RuntimeError('No vMachines found')
        # Search each VM's datastore/file mapping for the requested file
        for vm in vms:
            mapping = self._get_vm_datastore_mapping(vm)
            if datastore.name in mapping:
                if filename in mapping[datastore.name]:
                    return vm, mapping[datastore.name][filename]
        raise RuntimeError('Could not locate given file on the given datastore')
def _get_vm_datastore_mapping(self, vm):
"""
Creates a datastore mapping for a vm's devices
Structure
{<datastore name>: {<backing filename>: <device>,
<backing filename>: <device>},
<datastore name>: {<backing filename>: <device>}}
Example
{'datastore A': {'/machine1/machine1.vmx': <device>,
'/machine1/disk1.vmdk': <device>},
'datastore B': {'/machine1/disk2.vmdk': <device>}}
"""
def extract_names(backingfile, given_mapping, metadata=None):
match = re.search('\[([^\]]+)\]\s(.+)', backingfile)
if match:
datastore_name = match.group(1)
filename = match.group(2)
if datastore_name not in mapping:
given_mapping[datastore_name] = {}
given_mapping[datastore_name][filename] = metadata
return given_mapping
virtual_disk_type = self._client.factory.create('ns0:VirtualDisk')
flat_type = self._client.factory.create('ns0:VirtualDiskFlatVer2BackingInfo')
mapping = {}
for device in vm.config.hardware.device:
if isinstance(device, type(virtual_disk_type)):
if device.backing is not None and isinstance(device.backing, type(flat_type)):
mapping = extract_names(device.backing.fileName, mapping, device)
mapping = extract_names(vm.config.files.vmPathName, mapping)
return mapping
    def _get_host_data(self, esxhost):
        """
        Get host data for a given esxhost

        Resolves, starting from the host: its first datastore's info, the
        owning ComputeResource (with resource pool), the Datacenter and its
        vmFolder, plus the host's first network.

        @return: dict with keys host / computeResource / resourcePool /
            datacenter / folder / datastore / network
        """
        hostobject = self._get_object(
            esxhost, properties=['parent', 'datastore', 'network'])
        # First datastore of the host
        datastore = self._get_object(
            hostobject.datastore[0][0], properties=['info']).info
        # Walk up: host -> ComputeResource -> Datacenter -> vmFolder
        computeresource = self._get_object(
            hostobject.parent, properties=['resourcePool', 'parent'])
        datacenter = self._get_object(
            computeresource.parent, properties=['parent']).parent
        vm_folder = self._get_object(
            datacenter, properties=['vmFolder']).vmFolder
        return {'host': esxhost,
                'computeResource': computeresource,
                'resourcePool': computeresource.resourcePool,
                'datacenter': datacenter,
                'folder': vm_folder,
                'datastore': datastore,
                'network': hostobject.network[0]}
    def _get_host_iqn_mapping(self, esxhost, rescan=False):
        """
        Get the IQN mapping for a given esx host, optionally rescanning the host

        @param esxhost: host to inspect
        @param rescan: when True, force a VMFS rescan before reading the
            storage topology
        @return: dict mapping iqn -> {'eui': ..., 'lun': ..., 'uuid': ...}
        """
        # pylint: disable=line-too-long
        # Matches plug store topology path keys of the form
        # 'key-vim.host.PlugStoreTopology.Path-iqn...,<iqn>,t,1-<eui>'
        regex = re.compile('^key-vim.host.PlugStoreTopology.Path-iqn.+?,(?P<iqn>iqn.*?),t,1-(?P<eui>eui.+)$')  # noqa
        # pylint: enable=line-too-long
        hostobject = self._get_object(
            esxhost, properties=['configManager.storageSystem'])
        stg_ssystem = self._get_object(hostobject.configManager.storageSystem,
                                       properties=['storageDeviceInfo',
                                                   'storageDeviceInfo.plugStoreTopology.device'])
        if rescan:
            # Force a rescan of the vmfs
            self._client.service.RescanVmfs(stg_ssystem.obj_identifier)
            # Re-read the storage topology after the rescan
            stg_ssystem = self._get_object(
                hostobject.configManager.storageSystem,
                properties=['storageDeviceInfo',
                            'storageDeviceInfo.plugStoreTopology.device'])
        # Map scsi lun key -> uuid for the lookup below
        device_info_mapping = {}
        for disk in stg_ssystem.storageDeviceInfo.scsiLun:
            device_info_mapping[disk.key] = disk.uuid
        iqn_mapping = {}
        for device in stg_ssystem.storageDeviceInfo.plugStoreTopology\
                .device.HostPlugStoreTopologyDevice:
            for path in device.path:
                match = regex.search(path)
                if match:
                    groups = match.groupdict()
                    iqn_mapping[groups['iqn']] = {'eui': groups['eui'],
                                                  'lun': device.lun,
                                                  'uuid': device_info_mapping[device.lun]}
        return iqn_mapping
    def _get_object(self, key_object, prop_type=None, traversal=None, properties=None):
        """
        Gets an object based on a given set of query parameters. Only the requested properties
        will be loaded. If no properties are specified, all will be loaded

        @param key_object: managed object reference to start from
        @param prop_type: type of object to collect; defaults to the type
            of key_object
        @param traversal: optional (nested) traversal spec dict with keys
            'name', 'type', 'path' and optionally 'traversal' for the next hop
        @param properties: list of property paths to fetch; None fetches all
        @return: a single object, a list of objects, or None when nothing
            was found
        @raise NotAuthenticatedException: when the property collector
            reports the session is no longer authenticated
        """
        object_spec = self._client.factory.create('ns0:ObjectSpec')
        object_spec.obj = key_object
        property_spec = self._client.factory.create('ns0:PropertySpec')
        property_spec.type = key_object._type if prop_type is None else prop_type
        if properties is None:
            property_spec.all = True
        else:
            property_spec.all = False
            property_spec.pathSet = properties
        if traversal is not None:
            # Chain the (possibly nested) traversal specs onto the object spec
            select_set_ptr = object_spec
            while True:
                select_set_ptr.selectSet = self._client.factory.create(
                    'ns0:TraversalSpec')
                select_set_ptr.selectSet.name = traversal['name']
                select_set_ptr.selectSet.type = traversal['type']
                select_set_ptr.selectSet.path = traversal['path']
                if 'traversal' in traversal:
                    traversal = traversal['traversal']
                    select_set_ptr = select_set_ptr.selectSet
                else:
                    break
        property_filter_spec = self._client.factory.create(
            'ns0:PropertyFilterSpec')
        property_filter_spec.objectSet = [object_spec]
        property_filter_spec.propSet = [property_spec]
        found_objects = self._client.service.RetrieveProperties(
            self._serviceContent.propertyCollector,
            [property_filter_spec]
        )
        if len(found_objects) > 0:
            for item in found_objects:
                # Keep the reference under 'obj_identifier' and flatten the
                # returned property set onto the item as plain attributes
                item.obj_identifier = item.obj
                del item.obj
                if hasattr(item, 'missingSet'):
                    for missing_item in item.missingSet:
                        if missing_item.fault.fault.__class__.__name__ == 'NotAuthenticated':
                            raise NotAuthenticatedException()
                for propSet in item.propSet:
                    if '.' in propSet.name:
                        # Dotted property paths (e.g. 'runtime.powerState')
                        # become nested anonymous objects on the item
                        working_item = item
                        path = str(propSet.name).split('.')
                        part_counter = 0
                        for part in path:
                            part_counter += 1
                            if part_counter < len(path):
                                if not part in working_item.__dict__:
                                    setattr(working_item, part, type(part, (), {})())
                                working_item = working_item.__dict__[part]
                            else:
                                setattr(working_item, part, propSet.val)
                    else:
                        setattr(item, propSet.name, propSet.val)
                del item.propSet
            # Single hits are unwrapped for caller convenience
            if len(found_objects) == 1:
                return found_objects[0]
            else:
                return found_objects
        return None
def _build_property(self, property_name, value=None):
"""
Create a property object with given name and value
"""
new_property = Property(property_name)
new_property._type = property_name
if value is not None:
new_property.value = value
return new_property
def _validate_host(self, host):
"""
Validates wheteher a given host is valid
"""
if host is None:
if self._is_vcenter:
raise Exception(
'A HostSystem reference is mandatory for a vCenter Server')
else:
return self._esxHost
else:
if hasattr(host, '_type') and host._type == 'HostSystem':
return self._get_object(host, properties=['name']).obj_identifier
else:
return self._get_object(
self._build_property('HostSystem', host),
properties=['name']).obj_identifier
    def _login(self):
        """
        Executes a logout (to make sure we're logged out), and logs in again

        The key of the new session is stored on ``self._sessionID``.
        """
        self._logout()
        # Login(sessionManager, userName, password, locale); passing None as
        # locale lets the server pick its default
        self._sessionID = self._client.service.Login(
            self._serviceContent.sessionManager,
            self._username,
            self._password,
            None
        ).key
def _logout(self):
"""
Logs out the current session
"""
try:
self._client.service.Logout(self._serviceContent.sessionManager)
except:
pass
| {
"content_hash": "98ca0849595326264ec321ffdd388e28",
"timestamp": "",
"source": "github",
"line_count": 1023,
"max_line_length": 127,
"avg_line_length": 41.251221896383186,
"alnum_prop": 0.5281279620853081,
"repo_name": "mflu/openvstorage_centos",
"id": "75098324a8b6858fa708b35bcbda356534157349",
"size": "42781",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ovs/extensions/hypervisor/apis/vmware/sdk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10475"
},
{
"name": "JavaScript",
"bytes": "698676"
},
{
"name": "Makefile",
"bytes": "1269"
},
{
"name": "Python",
"bytes": "1500612"
},
{
"name": "Shell",
"bytes": "16586"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before importing anything that
    # requires configuration.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "daily_expenses.settings")
    from django.core.management import execute_from_command_line
    # Dispatch the command-line arguments (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| {
"content_hash": "d61ba09709332e5d17a5539af0c6288b",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 78,
"avg_line_length": 26.11111111111111,
"alnum_prop": 0.7148936170212766,
"repo_name": "maths22/daily-expenses",
"id": "66567d4bfdd3cbafcc69327ed74f100a391e3e73",
"size": "257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "1196"
},
{
"name": "Python",
"bytes": "9982"
}
],
"symlink_target": ""
} |
import datetime
import multiprocessing
import os
import socket
from django.http import JsonResponse
from cuckoo.common.files import Files
from cuckoo.core.database import Database
from cuckoo.core.rooter import rooter
from cuckoo.core.startup import check_version
from cuckoo.misc import cwd, version
from cuckoo.web.utils import json_fatal_response, api_get
db = Database()
updates = {}
def latest_updates():
    """Updates the latest Cuckoo version & blogposts at maximum once a day.

    Returns the module-level ``updates`` cache dict (always carrying a
    'timestamp' key; 'version'/'blogposts' etc. when check_version()
    succeeded at some point).
    """
    next_check = datetime.datetime.now() - datetime.timedelta(days=1)
    if updates and updates["timestamp"] > next_check:
        return updates
    # It is possible for check_version() to fail. In that case we'll just have
    # Cuckoo wait another day to try again (for now anyway).
    latest = check_version()
    if latest:
        updates.update(latest)
    updates["timestamp"] = datetime.datetime.now()
    # Fix: also return the cache on the refresh path, so the return value is
    # consistent with the cached branch above (previously returned None).
    return updates
class CuckooApi(object):
    # Web controller exposing Cuckoo status endpoints as JSON responses.
    @api_get
    def status(request):
        """
        Returns a variety of information about both
        Cuckoo and the operating system.
        :return: Dictionary
        """
        # In order to keep track of the diskspace statistics of the temporary
        # directory we create a temporary file so we can statvfs() on that.
        temp_file = Files.temp_put("")
        paths = dict(
            binaries=cwd("storage", "binaries"),
            analyses=cwd("storage", "analyses"),
            temporary=os.path.dirname(temp_file),
        )
        # statvfs() is POSIX-only; on other platforms diskspace stays empty
        diskspace = {}
        for key, path in paths.items():
            if hasattr(os, "statvfs") and os.path.isdir(path):
                stats = os.statvfs(path)
                diskspace[key] = dict(
                    free=stats.f_bavail * stats.f_frsize,
                    total=stats.f_blocks * stats.f_frsize,
                    used=(stats.f_blocks - stats.f_bavail) * stats.f_frsize,
                )
        # Remove the temporary file again (only the file itself is removed).
        os.unlink(temp_file)
        # Get the CPU load (POSIX-only; empty list elsewhere).
        if hasattr(os, "getloadavg"):
            cpuload = os.getloadavg()
        else:
            cpuload = []
        try:
            cpucount = multiprocessing.cpu_count()
        except NotImplementedError:
            cpucount = 1
        # Memory stats come from /proc/meminfo (Linux only); values are in
        # kB, 'memory' is the used percentage.
        if os.path.isfile("/proc/meminfo"):
            values = {}
            for line in open("/proc/meminfo"):
                key, value = line.split(":", 1)
                values[key.strip()] = value.replace("kB", "").strip()
            if "MemAvailable" in values and "MemTotal" in values:
                memavail = int(values["MemAvailable"])
                memtotal = int(values["MemTotal"])
                memory = 100 - 100.0 * memavail / memtotal
            else:
                memory = memavail = memtotal = None
        else:
            memory = memavail = memtotal = None
        # Refresh the module-level 'updates' cache (at most once a day).
        latest_updates()
        data = dict(
            version=version,
            hostname=socket.gethostname(),
            machines=dict(
                total=len(db.list_machines()),
                available=db.count_machines_available()
            ),
            tasks=dict(
                total=db.count_tasks(),
                pending=db.count_tasks("pending"),
                running=db.count_tasks("running"),
                completed=db.count_tasks("completed"),
                reported=db.count_tasks("reported")
            ),
            diskspace=diskspace,
            cpucount=cpucount,
            cpuload=cpuload,
            memory=memory,
            memavail=memavail,
            memtotal=memtotal,
            latest_version=updates.get("version"),
            blogposts=updates.get("blogposts", []),
        )
        return JsonResponse({"status": True, "data": data})
    @api_get
    def vpn_status(request):
        # Report rooter-provided VPN status; fatal JSON error when the
        # rooter is unavailable.
        status = rooter("vpn_status")
        if status is None:
            return json_fatal_response("Rooter not available")
        return JsonResponse({"status": True, "vpns": status})
| {
"content_hash": "2b8db5d90095406276021686ce0261f7",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 78,
"avg_line_length": 32.885245901639344,
"alnum_prop": 0.5685443668993021,
"repo_name": "cuckoobox/cuckoo",
"id": "5b36ab17bd84e6b626789df89eb2276aab1178a4",
"size": "4181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cuckoo/web/controllers/cuckoo/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9652"
},
{
"name": "CSS",
"bytes": "6810"
},
{
"name": "DTrace",
"bytes": "8609"
},
{
"name": "HTML",
"bytes": "233053"
},
{
"name": "JavaScript",
"bytes": "21397"
},
{
"name": "Makefile",
"bytes": "58"
},
{
"name": "Mako",
"bytes": "1078"
},
{
"name": "Python",
"bytes": "1101334"
},
{
"name": "Shell",
"bytes": "59602"
},
{
"name": "Visual Basic",
"bytes": "1101"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds 'boosted_search_terms',
    # 'hero_image' and 'list_image' to EventListingPage. Do not edit field
    # definitions by hand once this migration has been applied anywhere.
    dependencies = [
        ('icekit_plugins_image', '0001_initial'),
        ('eventlistingfordate', '0003_auto_20161019_1906'),
    ]
    operations = [
        migrations.AddField(
            model_name='eventlistingpage',
            name='boosted_search_terms',
            field=models.TextField(blank=True, help_text='Words (space-separated) added here are boosted in relevance for search results increasing the chance of this appearing higher in the search results.'),
        ),
        migrations.AddField(
            model_name='eventlistingpage',
            name='hero_image',
            field=models.ForeignKey(related_name='+', blank=True, null=True, help_text=b'The hero image for this content.', to='icekit_plugins_image.Image'),
        ),
        migrations.AddField(
            model_name='eventlistingpage',
            name='list_image',
            field=models.ImageField(blank=True, help_text=b"image to use in listings. Default image is used if this isn't given", upload_to=b'icekit/listable/list_image/'),
        ),
    ]
| {
"content_hash": "94c07cd6f075595e388a826996974160",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 209,
"avg_line_length": 40.689655172413794,
"alnum_prop": 0.6423728813559322,
"repo_name": "ic-labs/django-icekit",
"id": "b1ac6b1084c69978bb2ad59b56b4af74130962c8",
"size": "1204",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "icekit_events/page_types/eventlistingfordate/migrations/0004_auto_20161115_1118.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18019"
},
{
"name": "HTML",
"bytes": "92605"
},
{
"name": "JavaScript",
"bytes": "27803"
},
{
"name": "Python",
"bytes": "1476354"
},
{
"name": "Shell",
"bytes": "37850"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from muntjac.ui.table import Table
from muntjac.data.util.indexed_container import IndexedContainer
class TestFooter(TestCase):
    """Test case for testing the footer API.

    Uses assertEqual/assertIsNone instead of the deprecated
    assertEquals/assertNotEquals aliases (removed in Python 3.12).
    """
    def testFooterVisibility(self):
        """Tests if setting the footer visibility works properly"""
        table = Table('Test table', self.createContainer())
        # The footer should by default be hidden
        self.assertFalse(table.isFooterVisible())
        # Setting footer visibility to true should be reflected in the
        # isFooterVisible() method
        table.setFooterVisible(True)
        self.assertTrue(table.isFooterVisible())
    def testAddingFooters(self):
        """Tests adding footers to the columns"""
        table = Table('Test table', self.createContainer())
        # Table should not contain any footers at initialization
        self.assertIsNone(table.getColumnFooter('col1'))
        self.assertIsNone(table.getColumnFooter('col2'))
        self.assertIsNone(table.getColumnFooter('col3'))
        # Adding column footer
        table.setColumnFooter('col1', 'Footer1')
        self.assertEqual('Footer1', table.getColumnFooter('col1'))
        # Add another footer
        table.setColumnFooter('col2', 'Footer2')
        self.assertEqual('Footer2', table.getColumnFooter('col2'))
        # Add footer for a non-existing column; should not raise
        table.setColumnFooter('fail', 'FooterFail')
    def testRemovingFooters(self):
        """Test removing footers"""
        table = Table('Test table', self.createContainer())
        table.setColumnFooter('col1', 'Footer1')
        table.setColumnFooter('col2', 'Footer2')
        # Test removing footer
        self.assertIsNotNone(table.getColumnFooter('col1'))
        table.setColumnFooter('col1', None)
        self.assertIsNone(table.getColumnFooter('col1'))
        # The other footer should still be there
        self.assertIsNotNone(table.getColumnFooter('col2'))
        # Remove non-existing footer; should not raise
        table.setColumnFooter('fail', None)
    @classmethod
    def createContainer(cls):
        """Creates a container with three properties "col1,col2,col3"
        with 100 items
        @return: Returns the created container
        """
        container = IndexedContainer()
        container.addContainerProperty('col1', str, '')
        container.addContainerProperty('col2', str, '')
        container.addContainerProperty('col3', str, '')
        for i in range(100):
            item = container.addItem('item %d' % i)
            item.getItemProperty('col1').setValue('first%d' % i)
            item.getItemProperty('col2').setValue('middle%d' % i)
            item.getItemProperty('col3').setValue('last%d' % i)
        return container
| {
"content_hash": "5ada39bc0e69ac104164d68dc95d8850",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 69,
"avg_line_length": 33.54216867469879,
"alnum_prop": 0.6522988505747126,
"repo_name": "rwl/muntjac",
"id": "d6a251828244c652886226be5bcf8f5ddb6974ea",
"size": "2827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "muntjac/test/server/component/table/test_footer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8602"
},
{
"name": "Java",
"bytes": "2243"
},
{
"name": "JavaScript",
"bytes": "32438"
},
{
"name": "Python",
"bytes": "3212361"
}
],
"symlink_target": ""
} |
import logging
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormView, UpdateView
from django.views.generic import View
from django.http import JsonResponse
from django.core.urlresolvers import reverse
from django.template import Context
from django.template.loader import get_template
from crispy_forms.utils import render_crispy_form
from rest_framework import viewsets
from rest_framework import permissions
from sorl.thumbnail import get_thumbnail
from .models import Photo
from .forms import UploadForm, BasicEditForm
from .serializers import BasicPhotoSerializer
from .permissions import IsOwnerOrReadOnly
logger = logging.getLogger('dev.console')
class PhotoDetail(DetailView):
    """Detail page for a single Photo; rendering is handled entirely by
    Django's DetailView defaults."""
    model = Photo
class UploadView(FormView):
    """Photo upload page: renders UploadForm and, on a valid POST, saves the
    image with the requesting user as owner and returns JSON for inline
    editing."""
    template_name = 'glair/upload.html'
    form_class = UploadForm
    def get_context_data(self, **kwargs):
        # Expose an empty BasicEditForm to the template alongside the
        # upload form.
        context = super(UploadView, self).get_context_data(**kwargs)
        context['edit_form'] = BasicEditForm()
        return context
    def form_valid(self, form):
        # Stamp ownership, persist the image, and reply with its pk plus a
        # rendered inline-edit HTML fragment.
        form.instance.owner = self.request.user
        image = form.save()
        template = get_template('glair/photo_inline_edit.html')
        html = template.render(Context({'object': image}))
        return JsonResponse({
            'pk': image.pk,
            'html': html
        })
class BasicPhotoViewset(viewsets.ModelViewSet):
    """REST endpoint for Photo objects: read access for everyone, write
    access only for authenticated owners (IsOwnerOrReadOnly)."""
    queryset = Photo.objects.all()
    serializer_class = BasicPhotoSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly,)
    def pre_save(self, obj):
        # Stamp the requesting user as owner before the object is saved.
        obj.owner = self.request.user
    def post_save(self, obj, *args, **kwargs):
        # After save, copy tags submitted as a plain list onto the persisted
        # row. isinstance replaces the exact-type check 'type(obj.tags) is
        # list' (the recommended idiom; also accepts list subclasses).
        if isinstance(obj.tags, list):
            saved_photo = Photo.objects.get(pk=obj.pk)
            for tag in obj.tags:
                saved_photo.tags.add(tag)
"content_hash": "2c85f4f4b9f48330b86c6cba72c6458e",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 68,
"avg_line_length": 30.258064516129032,
"alnum_prop": 0.7014925373134329,
"repo_name": "ateoto/django-glair",
"id": "2eed85f6134b04a6d1a8b1a5c829fbaee05f17cf",
"size": "1876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glair/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23105"
},
{
"name": "Python",
"bytes": "90293"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create a database index on SmsBillable.log_id."""
        # Adding index on 'SmsBillable', fields ['log_id']
        db.create_index(u'smsbillables_smsbillable', ['log_id'])
def backwards(self, orm):
# Removing index on 'SmsBillable', fields ['log_id']
db.delete_index(u'smsbillables_smsbillable', ['log_id'])
models = {
u'accounting.currency': {
'Meta': {'object_name': 'Currency'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'date_updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'rate_to_default': ('django.db.models.fields.DecimalField', [], {'default': "'1.0'", 'max_digits': '20', 'decimal_places': '9'}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'smsbillables.smsbillable': {
'Meta': {'object_name': 'SmsBillable'},
'api_response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_sent': ('django.db.models.fields.DateField', [], {}),
'direction': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
'domain': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'gateway_fee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['smsbillables.SmsGatewayFee']", 'null': 'True'}),
'gateway_fee_conversion_rate': ('django.db.models.fields.DecimalField', [], {'default': "'1.0'", 'null': 'True', 'max_digits': '20', 'decimal_places': '9'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'log_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'usage_fee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['smsbillables.SmsUsageFee']", 'null': 'True'})
},
u'smsbillables.smsgatewayfee': {
'Meta': {'object_name': 'SmsGatewayFee'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '10', 'decimal_places': '4'}),
'criteria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['smsbillables.SmsGatewayFeeCriteria']"}),
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Currency']"}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'smsbillables.smsgatewayfeecriteria': {
'Meta': {'object_name': 'SmsGatewayFeeCriteria'},
'backend_api_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'backend_instance': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'country_code': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'direction': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'db_index': 'True', 'blank': 'True'})
},
u'smsbillables.smsusagefee': {
'Meta': {'object_name': 'SmsUsageFee'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '10', 'decimal_places': '4'}),
'criteria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['smsbillables.SmsUsageFeeCriteria']"}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'smsbillables.smsusagefeecriteria': {
'Meta': {'object_name': 'SmsUsageFeeCriteria'},
'direction': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
'domain': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['smsbillables']
| {
"content_hash": "995b1aaba6ea52044b687ddeeb05e398",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 169,
"avg_line_length": 68.18181818181819,
"alnum_prop": 0.5575238095238095,
"repo_name": "puttarajubr/commcare-hq",
"id": "a4e56452e9807fede3d74355b53328e3856dc3c0",
"size": "5268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/smsbillables/migrations/0010_add_index_to_log_id.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
} |
from django.conf import settings
def get_site_url_root():
    """Build the site's root URL (scheme://host[:port]) from the optional
    MY_SITE_* Django settings, defaulting to http://localhost."""
    scheme = getattr(settings, 'MY_SITE_PROTOCOL', 'http')
    host = getattr(settings, 'MY_SITE_DOMAIN', 'localhost')
    port = getattr(settings, 'MY_SITE_PORT', '')
    # Port is appended only when configured to a truthy value.
    suffix = ':%s' % port if port else ''
    return '%s://%s%s' % (scheme, host, suffix)
#
# Utility methods for transforming shapefile columns into useful representations
#
class static_namer():
    """Callable that names every feature with one fixed string."""

    def __init__(self, name):
        self.name = name

    def __call__(self, feature):
        # The feature itself is ignored; the configured name always wins.
        return self.name
class index_namer():
    """Callable that names features "<prefix><N>", with N counting up
    from zero across calls."""

    def __init__(self, prefix):
        self.prefix = prefix
        self.i = 0

    def __call__(self, feature):
        name = '%s%i' % (self.prefix, self.i)
        # Advance the counter after composing the name, so the first
        # feature gets index 0.
        self.i += 1
        return name
class simple_namer():
    """Name features by joining selected attribute values with a
    separator, optionally post-processing the result with a normalizer.

    Raises ValueError when the normalizer returns a falsy result.
    """

    def __init__(self, attribute_names, seperator=' ', normalizer=None):
        # NB: "seperator" spelling is part of the public signature.
        self.attribute_names = attribute_names
        self.seperator = seperator
        self.normalizer = normalizer

    def __call__(self, feature):
        pieces = [str(feature.get(attr)) for attr in self.attribute_names]
        name = self.seperator.join(pieces).strip()
        if self.normalizer:
            normed = self.normalizer(name)
            if not normed:
                raise ValueError('Failed to normalize "%s".' % name)
            name = normed
        return name
| {
"content_hash": "37b7ff77287aba0eda6148b02d5b6015",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 80,
"avg_line_length": 27.174603174603174,
"alnum_prop": 0.5823598130841121,
"repo_name": "SpokesmanReview/django-boundaryservice",
"id": "59cea5c5fdee018bb00e496c297106a34f3aff5b",
"size": "1712",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "boundaryservice/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35973"
}
],
"symlink_target": ""
} |
import os
import sys

# Make the project root importable when running this example directly
# from a source checkout.
sys.path.insert(0, os.getcwd())

from twisted.internet import reactor
from txjsonrpc.netstring.jsonrpc import Proxy
def printValue(value):
    # Success callback (Python 2 example code): show the RPC result,
    # then stop the reactor so the script exits.
    print "Result: %s" % str(value)
    reactor.stop()
def printError(error):
    # Failure callback: report the error and stop the event loop.
    print 'error', error
    reactor.stop()
# Call the remote "add" method on the local JSON-RPC server and print
# either the result or the failure before shutting down.
proxy = Proxy('127.0.0.1', 7080)
proxy.callRemote('add', 3, 5).addCallbacks(printValue, printError)
reactor.run()
| {
"content_hash": "1a8b0085281d614297dcc77dad2e5cca",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 66,
"avg_line_length": 18.318181818181817,
"alnum_prop": 0.7096774193548387,
"repo_name": "aborilov/txjsonrpc",
"id": "4b9f9162009c9bf7be8eafe31091c76868377871",
"size": "403",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/tcp/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5256"
},
{
"name": "Python",
"bytes": "72022"
},
{
"name": "Shell",
"bytes": "2986"
}
],
"symlink_target": ""
} |
import os
from mock import Mock
from mock import ANY
from amen.audio import Audio
from queue_functions import do_work
from queue_functions import make_audio
from server import handle_post
from uploaders.s3 import get_url
class MockAnalysis():
    """Stand-in for an Amen analysis object whose serialization always
    succeeds."""

    def to_json(self):
        return True
def faux_analysis(filepath):
    # Test double for the real analyze function: ignores the path and
    # returns a canned analysis object.
    return MockAnalysis()
def test_do_work():
    """do_work must analyze the given file, then upload both the analysis
    JSON and the original audio under their derived filenames."""
    faux_data = MockAnalysis()
    faux_upload = Mock()
    faux_analyze = Mock(return_value=faux_data)

    filepath = "faux/filepath.wav"
    audio_filename = "filepath.wav"
    analysis_filename = "filepath.analysis.json"

    # Pass the named variables instead of repeating the literals (as the
    # original did) so the call arguments and the assertions below cannot
    # silently drift apart.
    do_work([filepath, audio_filename, analysis_filename, faux_upload, faux_analyze])

    faux_analyze.assert_called_with(filepath)
    # The analysis payload is whatever do_work produced, hence ANY.
    faux_upload.assert_any_call(ANY, analysis_filename)
    faux_upload.assert_any_call(filepath, audio_filename)
| {
"content_hash": "7e25133d7809cbb1e8d6a53cb0911bb4",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 92,
"avg_line_length": 26.4375,
"alnum_prop": 0.7269503546099291,
"repo_name": "algorithmic-music-exploration/amen-server",
"id": "f80e3089e25ca654e7463c54b7c33a89b2abce16",
"size": "846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_queue_functions.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "8862"
},
{
"name": "Shell",
"bytes": "1014"
}
],
"symlink_target": ""
} |
import pytest
import requests
from json.decoder import JSONDecodeError
from backend.util.response.cart import CartSchema
from backend.util.response.error import ErrorSchema
def test_remove(domain_url, es_create):
    """End-to-end cart removal test against a live server.

    Seeds two products, adds both to the cart, then removes them one by
    one -- also probing an invalid id -- while checking response schemas,
    status codes, and that the session cookie rotates on every request.
    """
    prod_list = es_create("products", 2)
    item_id = prod_list[0].meta["id"]
    item_id_2 = prod_list[1].meta["id"]

    with requests.Session() as sess:
        # Self-signed certs in the test environment; skip TLS verification.
        sess.verify = False

        # Initial page load establishes the session cookie.
        response = sess.get(
            domain_url
        )
        cookie = response.cookies.get("session")

        # Add 15 units of the first product; cookie must rotate.
        response = sess.post(
            domain_url + "/api/cart/update/%s/15" % item_id
        )
        data = response.json()
        CartSchema().load(data)
        assert response.status_code == 200
        assert cookie != response.cookies.get("session")
        cookie = response.cookies.get("session")

        # Add 3 units of the second product.
        response = sess.post(
            domain_url + "/api/cart/update/%s/3" % item_id_2
        )
        data = response.json()
        CartSchema().load(data)
        assert response.status_code == 200
        assert cookie != response.cookies.get("session")
        cookie = response.cookies.get("session")

        # Remove the first product; cart still has the second, so the
        # response carries a cart payload.
        response = sess.post(
            domain_url + "/api/cart/remove/%s" % item_id
        )
        data = response.json()
        CartSchema().load(data)
        assert response.status_code == 200
        assert cookie != response.cookies.get("session")
        cookie = response.cookies.get("session")

        # Removing a non-existent id yields a 400 with an error body.
        response = sess.post(
            domain_url + "/api/cart/remove/invalid"
        )
        data = response.json()
        ErrorSchema().load(data)
        assert response.status_code == 400
        assert cookie != response.cookies.get("session")

        # Removing the last item empties the cart: 204 with no JSON body.
        response = sess.post(
            domain_url + "/api/cart/remove/%s" % item_id_2
        )
        with pytest.raises(JSONDecodeError):
            response.json()
        assert response.status_code == 204
        assert cookie != response.cookies.get("session")
| {
"content_hash": "7a3933534283d0c57fa4ae7670a67cbb",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 60,
"avg_line_length": 27.26027397260274,
"alnum_prop": 0.5859296482412061,
"repo_name": "willrp/willbuyer",
"id": "7da23b24356753fbd4d2cfdc0c0fe1f837349ee8",
"size": "1990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/tests/functional/api/cart/test_remove.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8210"
},
{
"name": "Dockerfile",
"bytes": "1242"
},
{
"name": "HTML",
"bytes": "999"
},
{
"name": "JavaScript",
"bytes": "162138"
},
{
"name": "Python",
"bytes": "250597"
}
],
"symlink_target": ""
} |
import sys
import random
import warnings
from functools import reduce
from html import escape as html_escape
from pyspark import copy_func, since, _NoValue
from pyspark.rdd import RDD, _load_from_socket, _local_iterator_from_socket
from pyspark.serializers import BatchedSerializer, PickleSerializer, \
UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter, DataFrameWriterV2
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
from pyspark.sql.pandas.conversion import PandasConversionMixin
from pyspark.sql.pandas.map_ops import PandasMapOpsMixin
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SparkSession`::
people = spark.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the :class:`DataFrame`, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SparkSession
people = spark.read.parquet("...")
department = spark.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
# Check whether _repr_html is supported or not, we use it to avoid calling _jdf twice
# by __repr__ and _repr_html_ while eager evaluation opened.
self._support_repr_html = False
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
@since(1.3)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
>>> df.toJSON().first()
'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
@since(1.3)
def registerTempTable(self, name):
"""Registers this DataFrame as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
.. note:: Deprecated in 2.0, use createOrReplaceTempView instead.
"""
warnings.warn(
"Deprecated in 2.0, use createOrReplaceTempView instead.", DeprecationWarning)
self._jdf.createOrReplaceTempView(name)
@since(2.0)
def createTempView(self, name):
"""Creates a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
@since(2.0)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
@since(2.1)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this :class:`DataFrame`.
The lifetime of this temporary view is tied to this Spark application.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
@since(2.2)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
@since(1.4)
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
:return: :class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
@since(2.0)
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. note:: Evolving.
:return: :class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
@since(1.3)
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse datatype from schema. %s" % e)
return self._schema
@since(1.3)
def printSchema(self):
"""Prints out the schema in the tree format.
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
@since(1.3)
def explain(self, extended=None, mode=None):
"""Prints the (logical and physical) plans to the console for debugging purpose.
:param extended: boolean, default ``False``. If ``False``, prints only the physical plan.
When this is a string without specifying the ``mode``, it works as the mode is
specified.
:param mode: specifies the expected output format of plans.
* ``simple``: Print only a physical plan.
* ``extended``: Print both logical and physical plans.
* ``codegen``: Print a physical plan and generated codes if they are available.
* ``cost``: Print a logical plan and statistics if they are available.
* ``formatted``: Split explain output into two sections: a physical plan outline \
and node details.
>>> df.explain()
== Physical Plan ==
*(1) Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> df.explain(mode="formatted")
== Physical Plan ==
* Scan ExistingRDD (1)
(1) Scan ExistingRDD [codegen id : 1]
Output [2]: [age#0, name#1]
...
>>> df.explain("cost")
== Optimized Logical Plan ==
...Statistics...
...
.. versionchanged:: 3.0.0
Added optional argument `mode` to specify the expected output format of plans.
"""
if extended is not None and mode is not None:
raise Exception("extended and mode should not be set together.")
# For the no argument case: df.explain()
is_no_argument = extended is None and mode is None
# For the cases below:
# explain(True)
# explain(extended=False)
is_extended_case = isinstance(extended, bool) and mode is None
# For the case when extended is mode:
# df.explain("formatted")
is_extended_as_mode = isinstance(extended, str) and mode is None
# For the mode specified:
# df.explain(mode="formatted")
is_mode_case = extended is None and isinstance(mode, str)
if not (is_no_argument or is_extended_case or is_extended_as_mode or is_mode_case):
argtypes = [
str(type(arg)) for arg in [extended, mode] if arg is not None]
raise TypeError(
"extended (optional) and mode (optional) should be a string "
"and bool; however, got [%s]." % ", ".join(argtypes))
# Sets an explain mode depending on a given argument
if is_no_argument:
explain_mode = "simple"
elif is_extended_case:
explain_mode = "extended" if extended else "simple"
elif is_mode_case:
explain_mode = mode
elif is_extended_as_mode:
explain_mode = extended
print(self._sc._jvm.PythonSQLUtils.explainString(self._jdf.queryExecution(), explain_mode))
@since(2.4)
def exceptAll(self, other):
"""Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but
not in another :class:`DataFrame` while preserving duplicates.
This is equivalent to `EXCEPT ALL` in SQL.
>>> df1 = spark.createDataFrame(
... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.exceptAll(df2).show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| a| 2|
| c| 4|
+---+---+
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx)
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
@since(2.0)
def isStreaming(self):
"""Returns ``True`` if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. note:: Evolving
"""
return self._jdf.isStreaming()
@since(1.3)
def show(self, n=20, truncate=True, vertical=False):
"""Prints the first ``n`` rows to the console.
:param n: Number of rows to show.
:param truncate: If set to ``True``, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length ``truncate``
and align cells right.
:param vertical: If set to ``True``, print output rows vertically (one line
per column value).
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
>>> df.show(vertical=True)
-RECORD 0-----
age | 2
name | Alice
-RECORD 1-----
age | 5
name | Bob
"""
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20, vertical))
else:
print(self._jdf.showString(n, int(truncate), vertical))
def __repr__(self):
if not self._support_repr_html and self.sql_ctx._conf.isReplEagerEvalEnabled():
vertical = False
return self._jdf.showString(
self.sql_ctx._conf.replEagerEvalMaxNumRows(),
self.sql_ctx._conf.replEagerEvalTruncate(), vertical)
else:
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
def _repr_html_(self):
"""Returns a :class:`DataFrame` with html code when you enabled eager evaluation
by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are
using support eager evaluation with HTML.
"""
if not self._support_repr_html:
self._support_repr_html = True
if self.sql_ctx._conf.isReplEagerEvalEnabled():
max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)
sock_info = self._jdf.getRowsToPython(
max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate())
rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
head = rows[0]
row_data = rows[1:]
has_more_data = len(row_data) > max_num_rows
row_data = row_data[:max_num_rows]
html = "<table border='1'>\n"
# generate table head
html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: html_escape(x), head))
# generate table rows
for row in row_data:
html += "<tr><td>%s</td></tr>\n" % "</td><td>".join(
map(lambda x: html_escape(x), row))
html += "</table>\n"
if has_more_data:
html += "only showing top %d %s\n" % (
max_num_rows, "row" if max_num_rows == 1 else "rows")
return html
else:
return None
@since(2.1)
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this :class:`DataFrame`, which is especially useful in iterative algorithms
where the plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with :meth:`SparkContext.setCheckpointDir`.
:param eager: Whether to checkpoint this :class:`DataFrame` immediately
.. note:: Experimental
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
@since(2.3)
def localCheckpoint(self, eager=True):
"""Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this :class:`DataFrame`, which is especially useful in
iterative algorithms where the plan may grow exponentially. Local checkpoints are
stored in the executors using the caching subsystem and therefore they are not reliable.
:param eager: Whether to checkpoint this :class:`DataFrame` immediately
.. note:: Experimental
"""
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sql_ctx)
@since(2.1)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
:param eventTime: the name of the column that contains the event time of the row.
:param delayThreshold: the minimum delay to wait to data to arrive late, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
.. note:: Evolving
>>> from pyspark.sql.functions import timestamp_seconds
>>> sdf.select(
... 'name',
... timestamp_seconds(sdf.time).alias('time')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
@since(2.2)
def hint(self, name, *parameters):
"""Specifies some hint on the current :class:`DataFrame`.
:param name: A name of the hint.
:param parameters: Optional parameters.
:return: :class:`DataFrame`
>>> df.join(df2.hint("broadcast"), "name").show()
+----+---+------+
|name|age|height|
+----+---+------+
| Bob| 5| 85|
+----+---+------+
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(name)))
allowed_types = (str, list, float, int)
for p in parameters:
if not isinstance(p, allowed_types):
raise TypeError(
"all parameters should be in {0}, got {1} of type {2}".format(
allowed_types, p, type(p)))
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
>>> df.count()
2
"""
return int(self._jdf.count())
@since(1.3)
def collect(self):
"""Returns all the records as a list of :class:`Row`.
>>> df.collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectToPython()
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
@since(2.0)
def toLocalIterator(self, prefetchPartitions=False):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this
:class:`DataFrame`. With prefetch it may consume up to the memory of the 2 largest
partitions.
:param prefetchPartitions: If Spark should pre-fetch the next partition
before it is needed.
>>> list(df.toLocalIterator())
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.toPythonIterator(prefetchPartitions)
return _local_iterator_from_socket(sock_info, BatchedSerializer(PickleSerializer()))
@since(1.3)
def limit(self, num):
"""Limits the result count to the number specified.
>>> df.limit(1).collect()
[Row(age=2, name='Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
>>> df.take(2)
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
return self.limit(num).collect()
@since(3.0)
def tail(self, num):
"""
Returns the last ``num`` rows as a :class:`list` of :class:`Row`.
Running tail requires moving data into the application's driver process, and doing so with
a very large ``num`` can crash the driver process with OutOfMemoryError.
>>> df.tail(1)
[Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc):
sock_info = self._jdf.tailToPython(num)
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
@since(1.3)
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
@since(1.3)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
This a shorthand for ``df.rdd.foreachPartition()``.
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
@since(1.3)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (`MEMORY_AND_DISK`).
.. note:: The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0.
"""
self.is_cached = True
self._jdf.cache()
return self
    @since(1.3)
    def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK_DESER):
        """Sets the storage level to persist the contents of the :class:`DataFrame` across
        operations after the first time it is computed. This can only be used to assign
        a new storage level if the :class:`DataFrame` does not have a storage level set yet.
        If no storage level is specified defaults to (`MEMORY_AND_DISK_DESER`)

        .. note:: The default storage level has changed to `MEMORY_AND_DISK_DESER` to match Scala
            in 3.0.

        :param storageLevel: a :class:`StorageLevel` describing how the data should be stored.
        :return: this :class:`DataFrame`, so calls can be chained.
        """
        self.is_cached = True
        # Translate the Python StorageLevel into its JVM counterpart before delegating.
        javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
        self._jdf.persist(javaStorageLevel)
        return self
@property
@since(2.1)
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
    @since(1.3)
    def unpersist(self, blocking=False):
        """Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
        memory and disk.

        .. note:: `blocking` default has changed to ``False`` to match Scala in 2.0.

        :param blocking: whether to block until the blocks are deleted.
        :return: this :class:`DataFrame`, so calls can be chained.
        """
        self.is_cached = False
        self._jdf.unpersist(blocking)
        return self
    @since(1.4)
    def coalesce(self, numPartitions):
        """
        Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.

        :param numPartitions: int, to specify the target number of partitions

        Similar to coalesce defined on an :class:`RDD`, this operation results in a
        narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
        there will not be a shuffle, instead each of the 100 new partitions will
        claim 10 of the current partitions. If a larger number of partitions is requested,
        it will stay at the current number of partitions.

        However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
        this may result in your computation taking place on fewer nodes than
        you like (e.g. one node in the case of numPartitions = 1). To avoid this,
        you can call repartition(). This will add a shuffle step, but means the
        current upstream partitions will be executed in parallel (per whatever
        the current partitioning is).

        >>> df.coalesce(1).rdd.getNumPartitions()
        1
        """
        return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
@since(1.3)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is hash partitioned.
:param numPartitions:
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
@since("2.4.0")
def repartitionByRange(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is range partitioned.
:param numPartitions:
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
At least one partition-by expression must be specified.
When no explicit sort order is specified, "ascending nulls first" is assumed.
Note that due to performance reasons this method uses sampling to estimate the ranges.
Hence, the output may not be consistent, since sampling can return different values.
The sample size can be controlled by the config
`spark.sql.execution.rangeExchange.sampleSizePerPartition`.
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions()
2
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.repartitionByRange(1, "age").rdd.getNumPartitions()
1
>>> data = df.repartitionByRange("age")
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return ValueError("At least one partition-by expression must be specified.")
else:
return DataFrame(
self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions,) + cols
return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int, string or Column")
    @since(1.3)
    def distinct(self):
        """Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.

        :return: a new, deduplicated :class:`DataFrame`.

        >>> df.distinct().count()
        2
        """
        return DataFrame(self._jdf.distinct(), self.sql_ctx)
@since(1.3)
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
:param withReplacement: Sample with replacement or not (default ``False``).
:param fraction: Fraction of rows to generate, range [0.0, 1.0].
:param seed: Seed for sampling (default a random seed).
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
.. note:: `fraction` is required and, `withReplacement` and `seed` are optional.
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
# sample(faction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = int(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx)
@since(1.5)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
:param col: column that defines strata
:param fractions:
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
:param seed: random seed
:return: a new :class:`DataFrame` that represents the stratified sample
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 3|
| 1| 6|
+---+-----+
>>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count()
33
.. versionchanged:: 3.0
Added sampling by a column of :class:`Column`
"""
if isinstance(col, str):
col = Column(col)
elif not isinstance(col, Column):
raise ValueError("col must be a string or a column, but got %r" % type(col))
if not isinstance(fractions, dict):
raise ValueError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, str)):
raise ValueError("key must be float, int, or string, but got %r" % type(k))
fractions[k] = float(v)
col = col._jc
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
@since(1.4)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
:param weights: list of doubles as weights with which to split the :class:`DataFrame`.
Weights will be normalized if they don't sum up to 1.0.
:param seed: The seed for sampling.
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
2
>>> splits[1].count()
2
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), int(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
@since(1.3)
def dtypes(self):
"""Returns all column names and their data types as a list.
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
@since(1.3)
def columns(self):
"""Returns all column names as a list.
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
@since(2.3)
def colRegex(self, colName):
"""
Selects column based on the column name specified as a regex and returns it
as :class:`Column`.
:param colName: string, column name specified as a regex.
>>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
>>> df.select(df.colRegex("`(Col1)?+.+`")).show()
+----+
|Col2|
+----+
| 1|
| 2|
| 3|
+----+
"""
if not isinstance(colName, str):
raise ValueError("colName should be provided as string")
jc = self._jdf.colRegex(colName)
return Column(jc)
@since(1.3)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
:param alias: string, an alias name to be set for the :class:`DataFrame`.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age") \
.sort(desc("df_as1.name")).collect()
[Row(name='Bob', name='Bob', age=5), Row(name='Alice', name='Alice', age=2)]
"""
assert isinstance(alias, str), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
    @since(2.1)
    def crossJoin(self, other):
        """Returns the cartesian product with another :class:`DataFrame`.

        :param other: Right side of the cartesian product.
        :return: a new :class:`DataFrame` with every pairing of rows from both sides.

        >>> df.select("age", "name").collect()
        [Row(age=2, name='Alice'), Row(age=5, name='Bob')]
        >>> df2.select("name", "height").collect()
        [Row(name='Tom', height=80), Row(name='Bob', height=85)]
        >>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
        [Row(age=2, name='Alice', height=80), Row(age=2, name='Alice', height=85),
         Row(age=5, name='Bob', height=80), Row(age=5, name='Bob', height=85)]
        """
        jdf = self._jdf.crossJoin(other._jdf)
        return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
:param other: Right side of the join
:param on: a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
:param how: str, default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``fullouter``, ``full_outer``, ``left``, ``leftouter``, ``left_outer``,
``right``, ``rightouter``, ``right_outer``, ``semi``, ``leftsemi``, ``left_semi``,
``anti``, ``leftanti`` and ``left_anti``.
The following performs a full outer join between ``df1`` and ``df2``.
>>> from pyspark.sql.functions import desc
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height) \
.sort(desc("name")).collect()
[Row(name='Bob', height=85), Row(name='Alice', height=None), Row(name=None, height=80)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').sort(desc("name")).collect()
[Row(name='Tom', height=80), Row(name='Bob', height=85), Row(name='Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name='Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name='Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], str):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
if on is None:
on = self._jseq([])
assert isinstance(how, str), "how should be a string"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
    @since(1.6)
    def sortWithinPartitions(self, *cols, **kwargs):
        """Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).

        :param cols: list of :class:`Column` or column names to sort by.
        :param ascending: boolean or list of boolean (default ``True``).
            Sort ascending vs. descending. Specify list for multiple sort orders.
            If a list is specified, length of the list must equal length of the `cols`.

        >>> df.sortWithinPartitions("age", ascending=False).show()
        +---+-----+
        |age| name|
        +---+-----+
        |  2|Alice|
        |  5|  Bob|
        +---+-----+
        """
        # Sorts rows locally inside each partition (no global ordering across partitions).
        jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
        return DataFrame(jdf, self.sql_ctx)
    @since(1.3)
    def sort(self, *cols, **kwargs):
        """Returns a new :class:`DataFrame` sorted by the specified column(s).

        :param cols: list of :class:`Column` or column names to sort by.
        :param ascending: boolean or list of boolean (default ``True``).
            Sort ascending vs. descending. Specify list for multiple sort orders.
            If a list is specified, length of the list must equal length of the `cols`.

        >>> df.sort(df.age.desc()).collect()
        [Row(age=5, name='Bob'), Row(age=2, name='Alice')]
        >>> df.sort("age", ascending=False).collect()
        [Row(age=5, name='Bob'), Row(age=2, name='Alice')]
        >>> df.orderBy(df.age.desc()).collect()
        [Row(age=5, name='Bob'), Row(age=2, name='Alice')]
        >>> from pyspark.sql.functions import *
        >>> df.sort(asc("age")).collect()
        [Row(age=2, name='Alice'), Row(age=5, name='Bob')]
        >>> df.orderBy(desc("age"), "name").collect()
        [Row(age=5, name='Bob'), Row(age=2, name='Alice')]
        >>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
        [Row(age=5, name='Bob'), Row(age=2, name='Alice')]
        """
        jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
        return DataFrame(jdf, self.sql_ctx)

    # SQL-style alias: ``orderBy`` behaves exactly like ``sort``.
    orderBy = sort
    def _jseq(self, cols, converter=None):
        """Return a JVM Seq of Columns from a list of Column or names.

        ``converter`` (e.g. ``_to_java_column``) is applied to each element when given.
        """
        return _to_seq(self.sql_ctx._sc, cols, converter)
    def _jmap(self, jm):
        """Return a JVM Scala Map from a dict."""
        # Delegates to the module-level converter using this DataFrame's SparkContext.
        return _to_scala_map(self.sql_ctx._sc, jm)
    def _jcols(self, *cols):
        """Return a JVM Seq of Columns from a list of Column or column names.

        If `cols` has only one list in it, cols[0] will be used as the list.
        """
        # Support both f(col1, col2) and f([col1, col2]) calling conventions.
        if len(cols) == 1 and isinstance(cols[0], list):
            cols = cols[0]
        return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
    @since("1.3.1")
    def describe(self, *cols):
        """Computes basic statistics for numeric and string columns.

        This include count, mean, stddev, min, and max. If no columns are
        given, this function computes statistics for all numerical or string columns.

        .. note:: This function is meant for exploratory data analysis, as we make no
            guarantee about the backward compatibility of the schema of the resulting
            :class:`DataFrame`.

        >>> df.describe(['age']).show()
        +-------+------------------+
        |summary|               age|
        +-------+------------------+
        |  count|                 2|
        |   mean|               3.5|
        | stddev|2.1213203435596424|
        |    min|                 2|
        |    max|                 5|
        +-------+------------------+
        >>> df.describe().show()
        +-------+------------------+-----+
        |summary|               age| name|
        +-------+------------------+-----+
        |  count|                 2|    2|
        |   mean|               3.5| null|
        | stddev|2.1213203435596424| null|
        |    min|                 2|Alice|
        |    max|                 5|  Bob|
        +-------+------------------+-----+

        Use summary for expanded statistics and control over which statistics to compute.
        """
        # Accept either varargs or a single list of column names.
        if len(cols) == 1 and isinstance(cols[0], list):
            cols = cols[0]
        jdf = self._jdf.describe(self._jseq(cols))
        return DataFrame(jdf, self.sql_ctx)
    @since("2.3.0")
    def summary(self, *statistics):
        """Computes specified statistics for numeric and string columns. Available statistics are:
        - count
        - mean
        - stddev
        - min
        - max
        - arbitrary approximate percentiles specified as a percentage (eg, 75%)

        If no statistics are given, this function computes count, mean, stddev, min,
        approximate quartiles (percentiles at 25%, 50%, and 75%), and max.

        .. note:: This function is meant for exploratory data analysis, as we make no
            guarantee about the backward compatibility of the schema of the resulting
            :class:`DataFrame`.

        >>> df.summary().show()
        +-------+------------------+-----+
        |summary|               age| name|
        +-------+------------------+-----+
        |  count|                 2|    2|
        |   mean|               3.5| null|
        | stddev|2.1213203435596424| null|
        |    min|                 2|Alice|
        |    25%|                 2| null|
        |    50%|                 2| null|
        |    75%|                 5| null|
        |    max|                 5|  Bob|
        +-------+------------------+-----+

        >>> df.summary("count", "min", "25%", "75%", "max").show()
        +-------+---+-----+
        |summary|age| name|
        +-------+---+-----+
        |  count|  2|    2|
        |    min|  2|Alice|
        |    25%|  2| null|
        |    75%|  5| null|
        |    max|  5|  Bob|
        +-------+---+-----+

        To do a summary for specific columns first select them:

        >>> df.select("age", "name").summary("count").show()
        +-------+---+----+
        |summary|age|name|
        +-------+---+----+
        |  count|  2|   2|
        +-------+---+----+

        See also describe for basic statistics.
        """
        # Accept either varargs or a single list of statistic names.
        if len(statistics) == 1 and isinstance(statistics[0], list):
            statistics = statistics[0]
        jdf = self._jdf.summary(self._jseq(statistics))
        return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
:param n: int, default 1. Number of rows to return.
:return: If n is greater than 1, return a list of :class:`Row`.
If n is 1, return a single Row.
>>> df.head()
Row(age=2, name='Alice')
>>> df.head(1)
[Row(age=2, name='Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
    @since(1.3)
    def first(self):
        """Returns the first row as a :class:`Row`.

        Returns ``None`` when the :class:`DataFrame` is empty (see :func:`head`).

        >>> df.first()
        Row(age=2, name='Alice')
        """
        return self.head()
@since(1.3)
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name='Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name='Bob')]
"""
if isinstance(item, str):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
    @since(1.3)
    def __getattr__(self, name):
        """Returns the :class:`Column` denoted by ``name``.

        >>> df.select(df.age).collect()
        [Row(age=2), Row(age=5)]
        """
        # __getattr__ is only invoked for names that fail normal attribute lookup,
        # so real attributes and methods always shadow column access.
        if name not in self.columns:
            # Raising AttributeError (not KeyError) keeps hasattr()/getattr()
            # semantics intact for unknown names.
            raise AttributeError(
                "'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
        jc = self._jdf.apply(name)
        return Column(jc)
    @since(1.3)
    def select(self, *cols):
        """Projects a set of expressions and returns a new :class:`DataFrame`.

        :param cols: list of column names (string) or expressions (:class:`Column`).
            If one of the column names is '*', that column is expanded to include all columns
            in the current :class:`DataFrame`.
        :return: a new :class:`DataFrame` with the projected columns.

        >>> df.select('*').collect()
        [Row(age=2, name='Alice'), Row(age=5, name='Bob')]
        >>> df.select('name', 'age').collect()
        [Row(name='Alice', age=2), Row(name='Bob', age=5)]
        >>> df.select(df.name, (df.age + 10).alias('age')).collect()
        [Row(name='Alice', age=12), Row(name='Bob', age=15)]
        """
        jdf = self._jdf.select(self._jcols(*cols))
        return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
:param condition: a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
>>> df.filter(df.age > 3).collect()
[Row(age=5, name='Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name='Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name='Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name='Alice')]
"""
if isinstance(condition, str):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
    @since(1.3)
    def groupBy(self, *cols):
        """Groups the :class:`DataFrame` using the specified columns,
        so we can run aggregation on them. See :class:`GroupedData`
        for all the available aggregate functions.

        :func:`groupby` is an alias for :func:`groupBy`.

        :param cols: list of columns to group by.
            Each element should be a column name (string) or an expression (:class:`Column`).

        >>> df.groupBy().avg().collect()
        [Row(avg(age)=3.5)]
        >>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
        [Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
        >>> sorted(df.groupBy(df.name).avg().collect())
        [Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
        >>> sorted(df.groupBy(['name', df.age]).count().collect())
        [Row(name='Alice', age=2, count=1), Row(name='Bob', age=5, count=1)]
        """
        jgd = self._jdf.groupBy(self._jcols(*cols))
        # NOTE(review): imported locally rather than at module level, presumably to
        # avoid a circular import with pyspark.sql.group -- confirm before moving.
        from pyspark.sql.group import GroupedData
        return GroupedData(jgd, self)
    @since(1.4)
    def rollup(self, *cols):
        """
        Create a multi-dimensional rollup for the current :class:`DataFrame` using
        the specified columns, so we can run aggregation on them.

        >>> df.rollup("name", df.age).count().orderBy("name", "age").show()
        +-----+----+-----+
        | name| age|count|
        +-----+----+-----+
        | null|null|    2|
        |Alice|null|    1|
        |Alice|   2|    1|
        |  Bob|null|    1|
        |  Bob|   5|    1|
        +-----+----+-----+
        """
        jgd = self._jdf.rollup(self._jcols(*cols))
        # Imported locally, matching groupBy/cube.
        from pyspark.sql.group import GroupedData
        return GroupedData(jgd, self)
    @since(1.4)
    def cube(self, *cols):
        """
        Create a multi-dimensional cube for the current :class:`DataFrame` using
        the specified columns, so we can run aggregations on them.

        >>> df.cube("name", df.age).count().orderBy("name", "age").show()
        +-----+----+-----+
        | name| age|count|
        +-----+----+-----+
        | null|null|    2|
        | null|   2|    1|
        | null|   5|    1|
        |Alice|null|    1|
        |Alice|   2|    1|
        |  Bob|null|    1|
        |  Bob|   5|    1|
        +-----+----+-----+
        """
        jgd = self._jdf.cube(self._jcols(*cols))
        # Imported locally, matching groupBy/rollup.
        from pyspark.sql.group import GroupedData
        return GroupedData(jgd, self)
    @since(1.3)
    def agg(self, *exprs):
        """ Aggregate on the entire :class:`DataFrame` without groups
        (shorthand for ``df.groupBy.agg()``).

        >>> df.agg({"age": "max"}).collect()
        [Row(max(age)=5)]
        >>> from pyspark.sql import functions as F
        >>> df.agg(F.min(df.age)).collect()
        [Row(min(age)=2)]
        """
        # An empty groupBy() puts all rows into one implicit group.
        return self.groupBy().agg(*exprs)
    @since(2.0)
    def union(self, other):
        """ Return a new :class:`DataFrame` containing union of rows in this and another
        :class:`DataFrame`.

        This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
        (that does deduplication of elements), use this function followed by :func:`distinct`.

        Also as standard in SQL, this function resolves columns by position (not by name).

        :param other: another :class:`DataFrame`, matched to this one by column position.
        """
        return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
    @since(1.3)
    def unionAll(self, other):
        """ Return a new :class:`DataFrame` containing union of rows in this and another
        :class:`DataFrame`.

        This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
        (that does deduplication of elements), use this function followed by :func:`distinct`.

        Also as standard in SQL, this function resolves columns by position (not by name).
        """
        # Kept as a thin alias of union() for backward compatibility.
        return self.union(other)
    @since(2.3)
    def unionByName(self, other, allowMissingColumns=False):
        """ Returns a new :class:`DataFrame` containing union of rows in this and another
        :class:`DataFrame`.

        This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
        union (that does deduplication of elements), use this function followed by :func:`distinct`.

        The difference between this function and :func:`union` is that this function
        resolves columns by name (not by position):

        >>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
        >>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
        >>> df1.unionByName(df2).show()
        +----+----+----+
        |col0|col1|col2|
        +----+----+----+
        |   1|   2|   3|
        |   6|   4|   5|
        +----+----+----+

        When the parameter `allowMissingColumns` is ``True``,
        this function allows different set of column names between two :class:`DataFrame`\\s.
        Missing columns at each side, will be filled with null values.
        The missing columns at left :class:`DataFrame` will be added at the end in the schema
        of the union result:

        >>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
        >>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col3"])
        >>> df1.unionByName(df2, allowMissingColumns=True).show()
        +----+----+----+----+
        |col0|col1|col2|col3|
        +----+----+----+----+
        |   1|   2|   3|null|
        |null|   4|   5|   6|
        +----+----+----+----+

        .. versionchanged:: 3.1.0
           Added optional argument `allowMissingColumns` to specify whether to allow
           missing columns.
        """
        return DataFrame(self._jdf.unionByName(other._jdf, allowMissingColumns), self.sql_ctx)
    @since(1.3)
    def intersect(self, other):
        """ Return a new :class:`DataFrame` containing rows only in
        both this :class:`DataFrame` and another :class:`DataFrame`.

        This is equivalent to `INTERSECT` in SQL.

        :param other: another :class:`DataFrame`, matched by column position.
        """
        return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
    @since(2.4)
    def intersectAll(self, other):
        """ Return a new :class:`DataFrame` containing rows in both this :class:`DataFrame`
        and another :class:`DataFrame` while preserving duplicates.

        This is equivalent to `INTERSECT ALL` in SQL.

        >>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
        >>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
        >>> df1.intersectAll(df2).sort("C1", "C2").show()
        +---+---+
        | C1| C2|
        +---+---+
        |  a|  1|
        |  a|  1|
        |  b|  3|
        +---+---+

        Also as standard in SQL, this function resolves columns by position (not by name).
        """
        return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx)
    @since(1.3)
    def subtract(self, other):
        """ Return a new :class:`DataFrame` containing rows in this :class:`DataFrame`
        but not in another :class:`DataFrame`.

        This is equivalent to `EXCEPT DISTINCT` in SQL.
        """
        # ``except`` is a Python keyword, so the JVM method must be looked up
        # dynamically with getattr.
        return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
@since(1.4)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
duplicates rows. You can use :func:`withWatermark` to limit how late the duplicate data can
be and system will accordingly limit the state. In addition, too late data older than
watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
|Alice| 10| 80|
+-----+---+------+
>>> df.dropDuplicates(['name', 'height']).show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
+-----+---+------+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
@since("1.3.1")
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
:param how: 'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
:param thresh: int, default None
If specified, drop rows that have less than `thresh` non-null values.
This overwrites the `how` parameter.
:param subset: optional list of column names to consider.
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
@since("1.3.1")
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
:param value: int, float, string, bool or dict.
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, float, boolean, or string.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df5.na.fill(False).show()
+----+-------+-----+
| age| name| spy|
+----+-------+-----+
| 10| Alice|false|
| 5| Bob|false|
|null|Mallory| true|
+----+-------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, str, bool, dict)):
raise ValueError("value should be a float, int, string, bool or dict")
# Note that bool validates isinstance(int), but we don't want to
# convert bools to floats
if not isinstance(value, bool) and isinstance(value, int):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
    @since(1.4)
    def replace(self, to_replace, value=_NoValue, subset=None):
        """Returns a new :class:`DataFrame` replacing a value with another value.
        :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
        aliases of each other.
        Values to_replace and value must have the same type and can only be numerics, booleans,
        or strings. Value can have None. When replacing, the new value will be cast
        to the type of the existing column.
        For numeric replacements all values to be replaced should have unique
        floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`)
        and arbitrary replacement will be used.
        :param to_replace: bool, int, float, string, list or dict.
            Value to be replaced.
            If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
            must be a mapping between a value and a replacement.
        :param value: bool, int, float, string, list or None.
            The replacement value must be a bool, int, float, string or None. If `value` is a
            list, `value` should be of the same length and type as `to_replace`.
            If `value` is a scalar and `to_replace` is a sequence, then `value` is
            used as a replacement for each item in `to_replace`.
        :param subset: optional list of column names to consider.
            Columns specified in subset that do not have matching data type are ignored.
            For example, if `value` is a string, and subset contains a non-string column,
            then the non-string column is simply ignored.
        >>> df4.na.replace(10, 20).show()
        +----+------+-----+
        | age|height| name|
        +----+------+-----+
        |  20|    80|Alice|
        |   5|  null|  Bob|
        |null|  null|  Tom|
        |null|  null| null|
        +----+------+-----+
        >>> df4.na.replace('Alice', None).show()
        +----+------+----+
        | age|height|name|
        +----+------+----+
        |  10|    80|null|
        |   5|  null| Bob|
        |null|  null| Tom|
        |null|  null|null|
        +----+------+----+
        >>> df4.na.replace({'Alice': None}).show()
        +----+------+----+
        | age|height|name|
        +----+------+----+
        |  10|    80|null|
        |   5|  null| Bob|
        |null|  null| Tom|
        |null|  null|null|
        +----+------+----+
        >>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
        +----+------+----+
        | age|height|name|
        +----+------+----+
        |  10|    80|   A|
        |   5|  null|   B|
        |null|  null| Tom|
        |null|  null|null|
        +----+------+----+
        """
        # `_NoValue` distinguishes "value not supplied" from an explicit None
        # replacement; omitting `value` is only legal with a dict `to_replace`.
        if value is _NoValue:
            if isinstance(to_replace, dict):
                value = None
            else:
                raise TypeError("value argument is required when to_replace is not a dictionary.")
        # Helper functions
        def all_of(types):
            """Given a type or tuple of types and a sequence of xs
            check if each x is instance of type(s)
            >>> all_of(bool)([True, False])
            True
            >>> all_of(str)(["a", 1])
            False
            """
            def all_of_(xs):
                return all(isinstance(x, types) for x in xs)
            return all_of_
        # Predicates used below to reject mixed-type replacement mappings.
        all_of_bool = all_of(bool)
        all_of_str = all_of(str)
        all_of_numeric = all_of((float, int))
        # Validate input types
        valid_types = (bool, float, int, str, list, tuple)
        if not isinstance(to_replace, valid_types + (dict, )):
            raise ValueError(
                "to_replace should be a bool, float, int, string, list, tuple, or dict. "
                "Got {0}".format(type(to_replace)))
        if not isinstance(value, valid_types) and value is not None \
                and not isinstance(to_replace, dict):
            raise ValueError("If to_replace is not a dict, value should be "
                             "a bool, float, int, string, list, tuple or None. "
                             "Got {0}".format(type(value)))
        # Parallel lists must pair up one-to-one.
        if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
            if len(to_replace) != len(value):
                raise ValueError("to_replace and value lists should be of the same length. "
                                 "Got {0} and {1}".format(len(to_replace), len(value)))
        if not (subset is None or isinstance(subset, (list, tuple, str))):
            raise ValueError("subset should be a list or tuple of column names, "
                             "column name or None. Got {0}".format(type(subset)))
        # Reshape input arguments if necessary
        if isinstance(to_replace, (float, int, str)):
            to_replace = [to_replace]
        if isinstance(to_replace, dict):
            rep_dict = to_replace
            if value is not None:
                warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
        else:
            # Broadcast a scalar (or None) replacement across every key.
            if isinstance(value, (float, int, str)) or value is None:
                value = [value for _ in range(len(to_replace))]
            rep_dict = dict(zip(to_replace, value))
        if isinstance(subset, str):
            subset = [subset]
        # Verify we were not passed in mixed type generics.
        if not any(all_of_type(rep_dict.keys())
                   and all_of_type(x for x in rep_dict.values() if x is not None)
                   for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
            raise ValueError("Mixed type replacements are not supported")
        if subset is None:
            # '*' applies the replacement to every column of matching type.
            return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
        else:
            return DataFrame(
                self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
@since(2.0)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
:class:`DataFrame`.
The result of this algorithm has the following deterministic bound:
If the :class:`DataFrame` has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the :class:`DataFrame` so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
present in [[https://doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
:param col: str, list.
Can be a single column name, or a list of names for multiple columns.
:param probabilities: a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
:param relativeError: The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
:return: the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
.. versionchanged:: 2.2
Added support for multiple columns.
"""
if not isinstance(col, (str, list, tuple)):
raise ValueError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, str)
if isinstance(col, tuple):
col = list(col)
elif isStr:
col = [col]
for c in col:
if not isinstance(c, str):
raise ValueError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise ValueError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int)) or relativeError < 0:
raise ValueError("relativeError should be numerical (float, int) >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list
@since(1.4)
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a :class:`DataFrame` as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
:param col1: The name of the first column
:param col2: The name of the second column
:param method: The correlation method. Currently only supports "pearson"
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
@since(1.4)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
:param col1: The name of the first column
:param col2: The name of the second column
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
@since(1.4)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
:param col1: The name of the first column. Distinct items will make the first item of
each row.
:param col2: The name of the second column. Distinct items will make the column names
of the :class:`DataFrame`.
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
@since(1.4)
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
:param cols: Names of the columns to calculate frequent items for as a list or tuple of
strings.
:param support: The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
@since(1.3)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
The column expression must be an expression over this :class:`DataFrame`; attempting to add
a column from some other :class:`DataFrame` will raise an error.
:param colName: string, name of the new column.
:param col: a :class:`Column` expression for the new column.
.. note:: This method introduces a projection internally. Therefore, calling it multiple
times, for instance, via loops in order to add multiple columns can generate big
plans which can cause performance issues and even `StackOverflowException`.
To avoid this, use :func:`select` with the multiple columns at once.
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name='Alice', age2=4), Row(age=5, name='Bob', age2=7)]
"""
assert isinstance(col, Column), "col should be Column"
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
@since(1.3)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
:param existing: string, name of the existing column to rename.
:param new: string, new name of the column.
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name='Alice'), Row(age2=5, name='Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
@since(1.4)
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
:param cols: a string name of the column to drop, or a
:class:`Column` to drop, or a list of string name of the columns to drop.
>>> df.drop('age').collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.drop(df.age).collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name='Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name='Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, str):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, str):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def toDF(self, *cols):
"""Returns a new :class:`DataFrame` that with new specified column names
:param cols: list of new column names (string)
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2='Alice'), Row(f1=5, f2='Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@since(3.0)
def transform(self, func):
"""Returns a new :class:`DataFrame`. Concise syntax for chaining custom transformations.
:param func: a function that takes and returns a :class:`DataFrame`.
>>> from pyspark.sql.functions import col
>>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", "float"])
>>> def cast_all_to_int(input_df):
... return input_df.select([col(col_name).cast("int") for col_name in input_df.columns])
>>> def sort_columns_asc(input_df):
... return input_df.select(*sorted(input_df.columns))
>>> df.transform(cast_all_to_int).transform(sort_columns_asc).show()
+-----+---+
|float|int|
+-----+---+
| 1| 1|
| 2| 2|
+-----+---+
"""
result = func(self)
assert isinstance(result, DataFrame), "Func returned an instance of type [%s], " \
"should have been DataFrame." % type(result)
return result
@since(3.1)
def sameSemantics(self, other):
"""
Returns `True` when the logical query plans inside both :class:`DataFrame`\\s are equal and
therefore return same results.
.. note:: The equality comparison here is simplified by tolerating the cosmetic differences
such as attribute names.
.. note:: This API can compare both :class:`DataFrame`\\s very fast but can still return
`False` on the :class:`DataFrame` that return the same results, for instance, from
different plans. Such false negative semantic can be useful when caching as an example.
.. note:: DeveloperApi
>>> df1 = spark.range(10)
>>> df2 = spark.range(10)
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id * 2))
True
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id + 2))
False
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col0", df2.id * 2))
True
"""
if not isinstance(other, DataFrame):
raise ValueError("other parameter should be of DataFrame; however, got %s"
% type(other))
return self._jdf.sameSemantics(other._jdf)
    @since(3.1)
    def semanticHash(self):
        """
        Returns a hash code of the logical query plan against this :class:`DataFrame`.

        .. note:: Unlike the standard hash code, the hash is calculated against the query plan
            simplified by tolerating the cosmetic differences such as attribute names
            (hence the two doctest examples below hash to the same value).
        .. note:: DeveloperApi

        >>> spark.range(10).selectExpr("id as col0").semanticHash()  # doctest: +SKIP
        1855039936
        >>> spark.range(10).selectExpr("id as col1").semanticHash()  # doctest: +SKIP
        1855039936
        """
        # Delegates entirely to the JVM-side plan hashing.
        return self._jdf.semanticHash()
    @since(3.1)
    def inputFiles(self):
        """
        Returns a best-effort snapshot of the files that compose this :class:`DataFrame`.

        This method simply asks each constituent BaseRelation for its respective files and
        takes the union of all results. Depending on the source relations, this may not find
        all input files. Duplicates are removed.

        >>> df = spark.read.load("examples/src/main/resources/people.json", format="json")
        >>> len(df.inputFiles())
        1
        """
        # Materialize the Java collection into a plain Python list for callers.
        return list(self._jdf.inputFiles())
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
# Two aliases below were added for pandas compatibility many years ago.
# There are too many differences compared to pandas and we cannot just
# make it "compatible" by adding aliases. Therefore, we stop adding such
# aliases as of Spark 3.0. Two methods below remain just
# for legacy users currently.
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
@since(3.1)
def writeTo(self, table):
"""
Create a write configuration builder for v2 sources.
This builder is used to configure and execute write operations.
For example, to append or create or replace existing tables.
>>> df.writeTo("catalog.db.table").append() # doctest: +SKIP
>>> df.writeTo( # doctest: +SKIP
... "catalog.db.table"
... ).partitionedBy("col").createOrReplace()
"""
return DataFrameWriterV2(self, table)
def _to_scala_map(sc, jm):
    """
    Convert a dict into a JVM Map.

    :param sc: SparkContext whose ``_jvm`` gateway reaches the JVM-side
        ``PythonUtils`` helper.
    :param jm: the Python dict to convert.
    """
    return sc._jvm.PythonUtils.toScalaMap(jm)
class DataFrameNaFunctions(object):
    """Functionality for working with missing data in :class:`DataFrame`.

    Thin delegating wrapper: every method simply forwards to the
    corresponding :class:`DataFrame` method and borrows its docstring.

    .. versionadded:: 1.4
    """
    def __init__(self, df):
        # The DataFrame these missing-data operations apply to.
        self.df = df

    def drop(self, how='any', thresh=None, subset=None):
        return self.df.dropna(how, thresh, subset)

    drop.__doc__ = DataFrame.dropna.__doc__

    def fill(self, value, subset=None):
        return self.df.fillna(value, subset)

    fill.__doc__ = DataFrame.fillna.__doc__

    def replace(self, to_replace, value=_NoValue, subset=None):
        return self.df.replace(to_replace, value=value, subset=subset)

    replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
    """Functionality for statistic functions with :class:`DataFrame`.

    Thin delegating wrapper: every method simply forwards to the
    corresponding :class:`DataFrame` method and borrows its docstring.

    .. versionadded:: 1.4
    """
    def __init__(self, df):
        # The DataFrame these statistics are computed over.
        self.df = df

    def approxQuantile(self, col, probabilities, relativeError):
        return self.df.approxQuantile(col, probabilities, relativeError)

    approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__

    def corr(self, col1, col2, method=None):
        return self.df.corr(col1, col2, method=method)

    corr.__doc__ = DataFrame.corr.__doc__

    def cov(self, col1, col2):
        return self.df.cov(col1, col2)

    cov.__doc__ = DataFrame.cov.__doc__

    def crosstab(self, col1, col2):
        return self.df.crosstab(col1, col2)

    crosstab.__doc__ = DataFrame.crosstab.__doc__

    def freqItems(self, cols, support=None):
        return self.df.freqItems(cols, support=support)

    freqItems.__doc__ = DataFrame.freqItems.__doc__

    def sampleBy(self, col, fractions, seed=None):
        return self.df.sampleBy(col, fractions, seed=seed)

    sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
    """Run this module's doctests against a throwaway local SparkContext."""
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext, SparkSession
    import pyspark.sql.dataframe
    # Seed the doctest namespace from the module's own globals so every
    # public name referenced in the examples resolves.
    globs = pyspark.sql.dataframe.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['spark'] = SparkSession(sc)
    # Fixture DataFrames referenced throughout the doctests above.
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    globs['df2'] = sc.parallelize([Row(height=80, name='Tom'), Row(height=85, name='Bob')]).toDF()
    globs['df3'] = sc.parallelize([Row(age=2, name='Alice'),
                                   Row(age=5, name='Bob')]).toDF()
    globs['df4'] = sc.parallelize([Row(age=10, height=80, name='Alice'),
                                   Row(age=5, height=None, name='Bob'),
                                   Row(age=None, height=None, name='Tom'),
                                   Row(age=None, height=None, name=None)]).toDF()
    globs['df5'] = sc.parallelize([Row(age=10, name='Alice', spy=False),
                                   Row(age=5, name='Bob', spy=None),
                                   Row(age=None, name='Mallory', spy=True)]).toDF()
    globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
                                   Row(name='Bob', time=1479442946)]).toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.dataframe, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    globs['sc'].stop()
    # Non-zero exit status signals doctest failure to the calling harness.
    if failure_count:
        sys.exit(-1)
# Allow running the doctest suite directly: ``python dataframe.py``.
if __name__ == "__main__":
    _test()
| {
"content_hash": "34f402154b990d010b6fbef2c9c6ca57",
"timestamp": "",
"source": "github",
"line_count": 2371,
"max_line_length": 100,
"avg_line_length": 38.985660059046815,
"alnum_prop": 0.5657164493968735,
"repo_name": "ptkool/spark",
"id": "db2ddde00c88127f1e603f332f8b79d3682acda5",
"size": "93220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/sql/dataframe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "49912"
},
{
"name": "Batchfile",
"bytes": "31352"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26836"
},
{
"name": "Dockerfile",
"bytes": "9014"
},
{
"name": "HTML",
"bytes": "40529"
},
{
"name": "HiveQL",
"bytes": "1890736"
},
{
"name": "Java",
"bytes": "4138601"
},
{
"name": "JavaScript",
"bytes": "203741"
},
{
"name": "Makefile",
"bytes": "7776"
},
{
"name": "PLSQL",
"bytes": "9439"
},
{
"name": "PLpgSQL",
"bytes": "380488"
},
{
"name": "PowerShell",
"bytes": "3865"
},
{
"name": "Python",
"bytes": "3170742"
},
{
"name": "R",
"bytes": "1187040"
},
{
"name": "Roff",
"bytes": "36501"
},
{
"name": "SQLPL",
"bytes": "9325"
},
{
"name": "Scala",
"bytes": "32134649"
},
{
"name": "Shell",
"bytes": "204763"
},
{
"name": "TSQL",
"bytes": "474884"
},
{
"name": "Thrift",
"bytes": "67584"
},
{
"name": "q",
"bytes": "79845"
}
],
"symlink_target": ""
} |
"""
jinja2.environment
~~~~~~~~~~~~~~~~~~
Provides a class that holds runtime and parsing time options.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.lexer import get_lexer, TokenStream
from jinja2.parser import Parser
from jinja2.nodes import EvalContext
from jinja2.optimizer import optimize
from jinja2.compiler import generate
from jinja2.runtime import Undefined, new_context
from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
TemplatesNotFound, TemplateRuntimeError
from jinja2.utils import import_string, LRUCache, Markup, missing, \
concat, consume, internalcode
from jinja2._compat import imap, ifilter, string_types, iteritems, \
text_type, reraise, implements_iterator, implements_to_string, \
get_next, encode_filename, PY2, PYPY
from functools import reduce
# for direct template usage we have up to ten living environments
# (keyed by the full tuple of constructor arguments; see
# `get_spontaneous_environment` below).
_spontaneous_environments = LRUCache(10)
# the function to create jinja traceback objects. This is dynamically
# imported on the first exception in the exception handler.
_make_traceback = None
def get_spontaneous_environment(*args):
    """Return a new spontaneous environment.  A spontaneous environment is an
    unnamed and unaccessible (in theory) environment that is used for
    templates generated from a string and not from the file system.
    """
    # Unhashable constructor arguments cannot serve as a cache key; in that
    # case hand back a one-off, uncached environment.
    try:
        cached = _spontaneous_environments.get(args)
    except TypeError:
        return Environment(*args)
    if cached is not None:
        return cached
    # Cache miss: build, mark as shared (it must not be mutated), and cache.
    env = Environment(*args)
    env.shared = True
    _spontaneous_environments[args] = env
    return env
def create_cache(size):
    """Return the cache container appropriate for *size*.

    A size of ``0`` disables caching (``None``), a negative size means an
    unbounded cache (a plain dict), and a positive size yields an LRU cache
    bounded to that many entries.
    """
    if size < 0:
        return {}
    if size == 0:
        return None
    return LRUCache(size)
def copy_cache(cache):
    """Create an empty copy of the given cache.

    The copy is of the same kind (and, for LRU caches, capacity) as *cache*
    but starts out empty; ``None`` (caching disabled) passes through.
    """
    if cache is None:
        return None
    # Exact type check mirrors create_cache: a plain dict means "unbounded",
    # so its copy is a fresh empty dict.
    if type(cache) is dict:
        return {}
    return LRUCache(cache.capacity)
def load_extensions(environment, extensions):
    """Load the extensions from the list and bind it to the environment.
    Returns a dict of instantiated environments.
    """
    loaded = {}
    for ext in extensions:
        # Entries may be dotted import paths; resolve those to classes first.
        cls = import_string(ext) if isinstance(ext, string_types) else ext
        loaded[cls.identifier] = cls(environment)
    return loaded
def _environment_sanity_check(environment):
    """Perform a sanity check on the environment.

    Raises :exc:`AssertionError` when the configuration is internally
    inconsistent, otherwise returns the environment unchanged.
    """
    assert issubclass(environment.undefined, Undefined), 'undefined must ' \
        'be a subclass of undefined because filters depend on it.'
    # NOTE: a chained `a != b != c` comparison only checks *adjacent* pairs,
    # which would let block_start_string == comment_start_string slip
    # through; compare all three delimiters pairwise instead.
    delimiters = (
        environment.block_start_string,
        environment.variable_start_string,
        environment.comment_start_string,
    )
    assert len(set(delimiters)) == 3, 'block, variable and comment ' \
        'start strings must be different'
    assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
        'newline_sequence set to unknown line ending string.'
    return environment
class Environment(object):
r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
they are not shared and if no template was loaded so far.
Modifications on environments after the first template was loaded
will lead to surprising effects and undefined behavior.
Here the possible initialization parameters:
`block_start_string`
The string marking the begin of a block. Defaults to ``'{%'``.
`block_end_string`
The string marking the end of a block. Defaults to ``'%}'``.
`variable_start_string`
The string marking the begin of a print statement.
Defaults to ``'{{'``.
`variable_end_string`
The string marking the end of a print statement. Defaults to
``'}}'``.
`comment_start_string`
The string marking the begin of a comment. Defaults to ``'{#'``.
`comment_end_string`
The string marking the end of a comment. Defaults to ``'#}'``.
`line_statement_prefix`
If given and a string, this will be used as prefix for line based
statements. See also :ref:`line-statements`.
`line_comment_prefix`
If given and a string, this will be used as prefix for line based
based comments. See also :ref:`line-statements`.
.. versionadded:: 2.2
`trim_blocks`
If this is set to ``True`` the first newline after a block is
removed (block, not variable tag!). Defaults to `False`.
`lstrip_blocks`
If this is set to ``True`` leading spaces and tabs are stripped
from the start of a line to a block. Defaults to `False`.
`newline_sequence`
The sequence that starts a newline. Must be one of ``'\r'``,
``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
useful default for Linux and OS X systems as well as web
applications.
`keep_trailing_newline`
Preserve the trailing newline when rendering templates.
The default is ``False``, which causes a single newline,
if present, to be stripped from the end of the template.
.. versionadded:: 2.7
`extensions`
List of Jinja extensions to use. This can either be import paths
as strings or extension classes. For more information have a
look at :ref:`the extensions documentation <jinja-extensions>`.
`optimized`
should the optimizer be enabled? Default is `True`.
`undefined`
:class:`Undefined` or a subclass of it that is used to represent
undefined values in the template.
`finalize`
A callable that can be used to process the result of a variable
expression before it is output. For example one can convert
`None` implicitly into an empty string here.
`autoescape`
If set to true the XML/HTML autoescaping feature is enabled by
default. For more details about auto escaping see
:class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
be a callable that is passed the template name and has to
return `True` or `False` depending on autoescape should be
enabled by default.
.. versionchanged:: 2.4
`autoescape` can now be a function
`loader`
The template loader for this environment.
`cache_size`
The size of the cache. Per default this is ``50`` which means
that if more than 50 templates are loaded the loader will clean
out the least recently used template. If the cache size is set to
``0`` templates are recompiled all the time, if the cache size is
``-1`` the cache will not be cleaned.
`auto_reload`
Some loaders load templates from locations where the template
sources may change (ie: file system or database). If
`auto_reload` is set to `True` (default) every time a template is
requested the loader checks if the source changed and if yes, it
will reload the template. For higher performance it's possible to
disable that.
`bytecode_cache`
If set to a bytecode cache object, this object will provide a
cache for the internal Jinja bytecode so that templates don't
have to be parsed if they were not changed.
See :ref:`bytecode-cache` for more information.
"""
#: if this environment is sandboxed. Modifying this variable won't make
#: the environment sandboxed though. For a real sandboxed environment
#: have a look at jinja2.sandbox. This flag alone controls the code
#: generation by the compiler.
sandboxed = False
#: True if the environment is just an overlay
overlayed = False
#: the environment this environment is linked to if it is an overlay
linked_to = None
#: shared environments have this set to `True`. A shared environment
#: must not be modified
shared = False
#: these are currently EXPERIMENTAL undocumented features.
exception_handler = None
exception_formatter = None
    def __init__(self,
                 block_start_string=BLOCK_START_STRING,
                 block_end_string=BLOCK_END_STRING,
                 variable_start_string=VARIABLE_START_STRING,
                 variable_end_string=VARIABLE_END_STRING,
                 comment_start_string=COMMENT_START_STRING,
                 comment_end_string=COMMENT_END_STRING,
                 line_statement_prefix=LINE_STATEMENT_PREFIX,
                 line_comment_prefix=LINE_COMMENT_PREFIX,
                 trim_blocks=TRIM_BLOCKS,
                 lstrip_blocks=LSTRIP_BLOCKS,
                 newline_sequence=NEWLINE_SEQUENCE,
                 keep_trailing_newline=KEEP_TRAILING_NEWLINE,
                 extensions=(),
                 optimized=True,
                 undefined=Undefined,
                 finalize=None,
                 autoescape=False,
                 loader=None,
                 cache_size=50,
                 auto_reload=True,
                 bytecode_cache=None):
        """Create a new environment.  See the class docstring for the
        meaning of the individual keyword arguments.
        """
        # !!Important notice!!
        # The constructor accepts quite a few arguments that should be
        # passed by keyword rather than position. However it's important to
        # not change the order of arguments because it's used at least
        # internally in those cases:
        # - spontaneous environments (i18n extension and Template)
        # - unittests
        # If parameter changes are required only add parameters at the end
        # and don't change the arguments (or the defaults!) of the arguments
        # existing already.
        # lexer / parser information
        self.block_start_string = block_start_string
        self.block_end_string = block_end_string
        self.variable_start_string = variable_start_string
        self.variable_end_string = variable_end_string
        self.comment_start_string = comment_start_string
        self.comment_end_string = comment_end_string
        self.line_statement_prefix = line_statement_prefix
        self.line_comment_prefix = line_comment_prefix
        self.trim_blocks = trim_blocks
        self.lstrip_blocks = lstrip_blocks
        self.newline_sequence = newline_sequence
        self.keep_trailing_newline = keep_trailing_newline
        # runtime information
        self.undefined = undefined
        self.optimized = optimized
        self.finalize = finalize
        self.autoescape = autoescape
        # defaults: copies so per-environment registration does not leak
        # into the module-level default tables
        self.filters = DEFAULT_FILTERS.copy()
        self.tests = DEFAULT_TESTS.copy()
        self.globals = DEFAULT_NAMESPACE.copy()
        # set the loader provided
        self.loader = loader
        self.cache = create_cache(cache_size)
        self.bytecode_cache = bytecode_cache
        self.auto_reload = auto_reload
        # load extensions (after everything else so extensions see a fully
        # configured environment)
        self.extensions = load_extensions(self, extensions)
        _environment_sanity_check(self)
def add_extension(self, extension):
"""Adds an extension after the environment was created.
.. versionadded:: 2.5
"""
self.extensions.update(load_extensions(self, [extension]))
def extend(self, **attributes):
"""Add the items to the instance of the environment if they do not exist
yet. This is used by :ref:`extensions <writing-extensions>` to register
callbacks and configuration values without breaking inheritance.
"""
for key, value in iteritems(attributes):
if not hasattr(self, key):
setattr(self, key, value)
    def overlay(self, block_start_string=missing, block_end_string=missing,
                variable_start_string=missing, variable_end_string=missing,
                comment_start_string=missing, comment_end_string=missing,
                line_statement_prefix=missing, line_comment_prefix=missing,
                trim_blocks=missing, lstrip_blocks=missing,
                extensions=missing, optimized=missing,
                undefined=missing, finalize=missing, autoescape=missing,
                loader=missing, cache_size=missing, auto_reload=missing,
                bytecode_cache=missing):
        """Create a new overlay environment that shares all the data with the
        current environment except of cache and the overridden attributes.
        Extensions cannot be removed for an overlayed environment. An overlayed
        environment automatically gets all the extensions of the environment it
        is linked to plus optional extra extensions.
        Creating overlays should happen after the initial environment was set
        up completely. Not all attributes are truly linked, some are just
        copied over so modifications on the original environment may not shine
        through.
        """
        # NOTE: the parameter names double as attribute names here -- the
        # locals() snapshot below maps each overridden keyword straight onto
        # the overlay via setattr, so renaming a parameter would break this.
        args = dict(locals())
        del args['self'], args['cache_size'], args['extensions']
        # bypass __init__: the overlay starts as a shallow copy of this
        # environment's instance dict
        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        rv.overlayed = True
        rv.linked_to = self
        for key, value in iteritems(args):
            if value is not missing:
                setattr(rv, key, value)
        # the cache is never shared between environments
        if cache_size is not missing:
            rv.cache = create_cache(cache_size)
        else:
            rv.cache = copy_cache(self.cache)
        # re-bind every inherited extension to the overlay
        rv.extensions = {}
        for key, value in iteritems(self.extensions):
            rv.extensions[key] = value.bind(rv)
        if extensions is not missing:
            rv.extensions.update(load_extensions(rv, extensions))
        return _environment_sanity_check(rv)
lexer = property(get_lexer, doc="The lexer for this environment.")
def iter_extensions(self):
"""Iterates over the extensions by priority."""
return iter(sorted(self.extensions.values(),
key=lambda x: x.priority))
def getitem(self, obj, argument):
"""Get an item or attribute of an object but prefer the item."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, string_types):
try:
attr = str(argument)
except Exception:
pass
else:
try:
return getattr(obj, attr)
except AttributeError:
pass
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Get an item or attribute of an object but prefer the attribute.
Unlike :meth:`getitem` the attribute *must* be a bytestring.
"""
try:
return getattr(obj, attribute)
except AttributeError:
pass
try:
return obj[attribute]
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
def call_filter(self, name, value, args=None, kwargs=None,
context=None, eval_ctx=None):
"""Invokes a filter on a value the same way the compiler does it.
.. versionadded:: 2.7
"""
func = self.filters.get(name)
if func is None:
raise TemplateRuntimeError('no filter named %r' % name)
args = list(args or ())
if getattr(func, 'contextfilter', False):
if context is None:
raise TemplateRuntimeError('Attempted to invoke context '
'filter without context')
args.insert(0, context)
elif getattr(func, 'evalcontextfilter', False):
if eval_ctx is None:
if context is not None:
eval_ctx = context.eval_ctx
else:
eval_ctx = EvalContext(self)
args.insert(0, eval_ctx)
elif getattr(func, 'environmentfilter', False):
args.insert(0, self)
return func(value, *args, **(kwargs or {}))
def call_test(self, name, value, args=None, kwargs=None):
"""Invokes a test on a value the same way the compiler does it.
.. versionadded:: 2.7
"""
func = self.tests.get(name)
if func is None:
raise TemplateRuntimeError('no test named %r' % name)
return func(value, *(args or ()), **(kwargs or {}))
@internalcode
def parse(self, source, name=None, filename=None):
"""Parse the sourcecode and return the abstract syntax tree. This
tree of nodes is used by the compiler to convert the template into
executable source- or bytecode. This is useful for debugging or to
extract information from templates.
If you are :ref:`developing Jinja2 extensions <writing-extensions>`
this gives you a good overview of the node tree generated.
"""
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def _parse(self, source, name, filename):
"""Internal parsing function used by `parse` and `compile`."""
return Parser(self, source, name, encode_filename(filename)).parse()
def lex(self, source, name=None, filename=None):
"""Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
and debugging templates.
This does not perform preprocessing. If you want the preprocessing
of the extensions to be applied you have to filter source through
the :meth:`preprocess` method.
"""
source = text_type(source)
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def preprocess(self, source, name=None, filename=None):
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
"""
return reduce(lambda s, e: e.preprocess(s, name, filename),
self.iter_extensions(), text_type(source))
def _tokenize(self, source, name, filename=None, state=None):
"""Called by the parser to do the preprocessing and filtering
for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
"""
source = self.preprocess(source, name, filename)
stream = self.lexer.tokenize(source, name, filename, state)
for ext in self.iter_extensions():
stream = ext.filter_stream(stream)
if not isinstance(stream, TokenStream):
stream = TokenStream(stream, name, filename)
return stream
def _generate(self, source, name, filename, defer_init=False):
"""Internal hook that can be overridden to hook a different generate
method in.
.. versionadded:: 2.5
"""
return generate(source, self, name, filename, defer_init=defer_init)
def _compile(self, source, filename):
"""Internal hook that can be overridden to hook a different compile
method in.
.. versionadded:: 2.5
"""
return compile(source, filename, 'exec')
@internalcode
def compile(self, source, name=None, filename=None, raw=False,
defer_init=False):
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
the `filename` parameter is the estimated filename of the template on
the file system. If the template came from a database or memory this
can be omitted.
The return value of this method is a python code object. If the `raw`
parameter is `True` the return value will be a string with python
code equivalent to the bytecode returned otherwise. This method is
mainly used internally.
`defer_init` is use internally to aid the module code generator. This
causes the generated code to be able to import without the global
environment variable to be set.
.. versionadded:: 2.4
`defer_init` parameter added.
"""
source_hint = None
try:
if isinstance(source, string_types):
source_hint = source
source = self._parse(source, name, filename)
if self.optimized:
source = optimize(source, self)
source = self._generate(source, name, filename,
defer_init=defer_init)
if raw:
return source
if filename is None:
filename = '<template>'
else:
filename = encode_filename(filename)
return self._compile(source, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
    def compile_expression(self, source, undefined_to_none=True):
        """A handy helper method that returns a callable that accepts keyword
        arguments that appear as variables in the expression. If called it
        returns the result of the expression.
        This is useful if applications want to use the same rules as Jinja
        in template "configuration files" or similar situations.
        Example usage:
        >>> env = Environment()
        >>> expr = env.compile_expression('foo == 42')
        >>> expr(foo=23)
        False
        >>> expr(foo=42)
        True
        Per default the return value is converted to `None` if the
        expression returns an undefined value. This can be changed
        by setting `undefined_to_none` to `False`.
        >>> env.compile_expression('var')() is None
        True
        >>> env.compile_expression('var', undefined_to_none=False)()
        Undefined
        .. versionadded:: 2.1
        """
        # parse in 'variable' state so `source` is read as a bare expression
        parser = Parser(self, source, state='variable')
        exc_info = None
        try:
            expr = parser.parse_expression()
            # trailing tokens mean the input was more than one expression
            if not parser.stream.eos:
                raise TemplateSyntaxError('chunk after expression',
                                          parser.stream.current.lineno,
                                          None, None)
            expr.set_environment(self)
        except TemplateSyntaxError:
            exc_info = sys.exc_info()
        if exc_info is not None:
            # raises (or delegates to the exception handler); `expr` is
            # only used below when no syntax error occurred
            self.handle_exception(exc_info, source_hint=source)
        # wrap the expression in `result = <expr>` so TemplateExpression
        # can read the value back out of the rendered context
        body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
        template = self.from_string(nodes.Template(body, lineno=1))
        return TemplateExpression(template, undefined_to_none)
    def compile_templates(self, target, extensions=None, filter_func=None,
                          zip='deflated', log_function=None,
                          ignore_errors=True, py_compile=False):
        """Finds all the templates the loader can find, compiles them
        and stores them in `target`. If `zip` is `None`, instead of in a
        zipfile, the templates will be stored in a directory.
        By default a deflate zip algorithm is used, to switch to
        the stored algorithm, `zip` can be set to ``'stored'``.
        `extensions` and `filter_func` are passed to :meth:`list_templates`.
        Each template returned will be compiled to the target folder or
        zipfile.
        By default template compilation errors are ignored. In case a
        log function is provided, errors are logged. If you want template
        syntax errors to abort the compilation you can set `ignore_errors`
        to `False` and you will get an exception on syntax errors.
        If `py_compile` is set to `True` .pyc files will be written to the
        target instead of standard .py files. This flag does not do anything
        on pypy and Python 3 where pyc files are not picked up by itself and
        don't give much benefit.
        .. versionadded:: 2.4
        """
        from jinja2.loaders import ModuleLoader
        if log_function is None:
            log_function = lambda x: None
        if py_compile:
            if not PY2 or PYPY:
                from warnings import warn
                warn(Warning('py_compile has no effect on pypy or Python 3'))
                py_compile = False
            else:
                import imp, marshal
                # .pyc header: magic number plus a dummy timestamp
                py_header = imp.get_magic() + \
                    u'\xff\xff\xff\xff'.encode('iso-8859-15')
                # Python 3.3 added a source filesize to the header
                if sys.version_info >= (3, 3):
                    py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
        # NOTE: this closure reads ``zip_file``, which is only assigned
        # below when ``zip`` is not None -- the two branches stay in sync.
        def write_file(filename, data, mode):
            if zip:
                info = ZipInfo(filename)
                info.external_attr = 0o755 << 16
                zip_file.writestr(info, data)
            else:
                f = open(os.path.join(target, filename), mode)
                try:
                    f.write(data)
                finally:
                    f.close()
        if zip is not None:
            from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
            zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
                                                 stored=ZIP_STORED)[zip])
            log_function('Compiling into Zip archive "%s"' % target)
        else:
            if not os.path.isdir(target):
                os.makedirs(target)
            log_function('Compiling into folder "%s"' % target)
        try:
            for name in self.list_templates(extensions, filter_func):
                source, filename, _ = self.loader.get_source(self, name)
                try:
                    # raw=True, defer_init=True: get module source code
                    code = self.compile(source, name, filename, True, True)
                except TemplateSyntaxError as e:
                    if not ignore_errors:
                        raise
                    log_function('Could not compile "%s": %s' % (name, e))
                    continue
                filename = ModuleLoader.get_module_filename(name)
                if py_compile:
                    c = self._compile(code, encode_filename(filename))
                    write_file(filename + 'c', py_header +
                               marshal.dumps(c), 'wb')
                    log_function('Byte-compiled "%s" as %s' %
                                 (name, filename + 'c'))
                else:
                    write_file(filename, code, 'w')
                    log_function('Compiled "%s" as %s' % (name, filename))
        finally:
            if zip:
                zip_file.close()
        log_function('Finished compiling templates')
def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
.. versionadded:: 2.4
"""
x = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
raise TypeError('either extensions or filter_func '
'can be passed, but not both')
filter_func = lambda x: '.' in x and \
x.rsplit('.', 1)[1] in extensions
if filter_func is not None:
x = ifilter(filter_func, x)
return x
    def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
        """Exception handling helper. This is used internally to either raise
        rewritten exceptions or return a rendered traceback for the template.
        :param exc_info: a ``sys.exc_info()`` triple; defaults to the
                         exception currently being handled.
        :param rendered: if true and an ``exception_formatter`` is set,
                         return a rendered traceback instead of raising.
        :param source_hint: the template source string that caused the
                            error, used for traceback rewriting.
        """
        global _make_traceback
        if exc_info is None:
            exc_info = sys.exc_info()
        # the debugging module is imported when it's used for the first time.
        # we're doing a lot of stuff there and for applications that do not
        # get any exceptions in template rendering there is no need to load
        # all of that.  (the global is rebound so this import runs once)
        if _make_traceback is None:
            from jinja2.debug import make_traceback as _make_traceback
        traceback = _make_traceback(exc_info, source_hint)
        if rendered and self.exception_formatter is not None:
            return self.exception_formatter(traceback)
        if self.exception_handler is not None:
            self.exception_handler(traceback)
        exc_type, exc_value, tb = traceback.standard_exc_info
        # re-raise the rewritten exception with the original traceback
        reraise(exc_type, exc_value, tb)
def join_path(self, template, parent):
"""Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
parent template, this function can be used to calculate the real
template name.
Subclasses may override this method and implement template path
joining here.
"""
return template
@internalcode
def _load_template(self, name, globals):
if self.loader is None:
raise TypeError('no loader for this environment specified')
if self.cache is not None:
template = self.cache.get(name)
if template is not None and (not self.auto_reload or \
template.is_up_to_date):
return template
template = self.loader.load(self, name, globals)
if self.cache is not None:
self.cache[name] = template
return template
@internalcode
def get_template(self, name, parent=None, globals=None):
"""Load a template from the loader. If a loader is configured this
method ask the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading.
The `globals` parameter can be used to provide template wide globals.
These variables are available in the context at render time.
If the template does not exist a :exc:`TemplateNotFound` exception is
raised.
.. versionchanged:: 2.4
If `name` is a :class:`Template` object it is returned from the
function unchanged.
"""
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
return self._load_template(name, self.make_globals(globals))
@internalcode
def select_template(self, names, parent=None, globals=None):
"""Works like :meth:`get_template` but tries a number of templates
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
.. versionadded:: 2.3
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
"""
if not names:
raise TemplatesNotFound(message=u'Tried to select from an empty list '
u'of templates.')
globals = self.make_globals(globals)
for name in names:
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
except TemplateNotFound:
pass
raise TemplatesNotFound(names)
@internalcode
def get_or_select_template(self, template_name_or_list,
parent=None, globals=None):
"""Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
"""
if isinstance(template_name_or_list, string_types):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
return self.select_template(template_name_or_list, parent, globals)
def from_string(self, source, globals=None, template_class=None):
"""Load a template from a string. This parses the source given and
returns a :class:`Template` object.
"""
globals = self.make_globals(globals)
cls = template_class or self.template_class
return cls.from_code(self, self.compile(source), globals, None)
def make_globals(self, d):
"""Return a dict for the globals."""
if not d:
return self.globals
return dict(self.globals, **d)
class Template(object):
    """The central template object. This class represents a compiled template
    and is used to evaluate it.
    Normally the template object is generated from an :class:`Environment` but
    it also has a constructor that makes it possible to create a template
    instance directly using the constructor. It takes the same arguments as
    the environment constructor but it's not possible to specify a loader.
    Every template object has a few methods and members that are guaranteed
    to exist. However it's important that a template object should be
    considered immutable. Modifications on the object are not supported.
    Template objects created from the constructor rather than an environment
    do have an `environment` attribute that points to a temporary environment
    that is probably shared with other templates created with the constructor
    and compatible settings.
    >>> template = Template('Hello {{ name }}!')
    >>> template.render(name='John Doe')
    u'Hello John Doe!'
    >>> stream = template.stream(name='John Doe')
    >>> stream.next()
    u'Hello John Doe!'
    >>> stream.next()
    Traceback (most recent call last):
        ...
    StopIteration
    """
    def __new__(cls, source,
                block_start_string=BLOCK_START_STRING,
                block_end_string=BLOCK_END_STRING,
                variable_start_string=VARIABLE_START_STRING,
                variable_end_string=VARIABLE_END_STRING,
                comment_start_string=COMMENT_START_STRING,
                comment_end_string=COMMENT_END_STRING,
                line_statement_prefix=LINE_STATEMENT_PREFIX,
                line_comment_prefix=LINE_COMMENT_PREFIX,
                trim_blocks=TRIM_BLOCKS,
                lstrip_blocks=LSTRIP_BLOCKS,
                newline_sequence=NEWLINE_SEQUENCE,
                keep_trailing_newline=KEEP_TRAILING_NEWLINE,
                extensions=(),
                optimized=True,
                undefined=Undefined,
                finalize=None,
                autoescape=False):
        # Templates built via the constructor get a shared "spontaneous"
        # environment; the positional argument order here must match the
        # Environment constructor.
        env = get_spontaneous_environment(
            block_start_string, block_end_string, variable_start_string,
            variable_end_string, comment_start_string, comment_end_string,
            line_statement_prefix, line_comment_prefix, trim_blocks,
            lstrip_blocks, newline_sequence, keep_trailing_newline,
            frozenset(extensions), optimized, undefined, finalize, autoescape,
            None, 0, False, None)
        return env.from_string(source, template_class=cls)
    @classmethod
    def from_code(cls, environment, code, globals, uptodate=None):
        """Creates a template object from compiled code and the globals. This
        is used by the loaders and environment to create a template object.
        """
        namespace = {
            'environment': environment,
            '__file__': code.co_filename
        }
        # executing the generated module code populates the namespace with
        # 'name', 'blocks', 'root' and 'debug_info'
        exec(code, namespace)
        rv = cls._from_namespace(environment, namespace, globals)
        rv._uptodate = uptodate
        return rv
    @classmethod
    def from_module_dict(cls, environment, module_dict, globals):
        """Creates a template object from a module. This is used by the
        module loader to create a template object.
        .. versionadded:: 2.4
        """
        return cls._from_namespace(environment, module_dict, globals)
    @classmethod
    def _from_namespace(cls, environment, namespace, globals):
        # internal constructor: bypasses __new__ on purpose
        t = object.__new__(cls)
        t.environment = environment
        t.globals = globals
        t.name = namespace['name']
        t.filename = namespace['__file__']
        t.blocks = namespace['blocks']
        # render function and module
        t.root_render_func = namespace['root']
        t._module = None
        # debug and loader helpers
        t._debug_info = namespace['debug_info']
        t._uptodate = None
        # store the reference
        namespace['environment'] = environment
        namespace['__jinja_template__'] = t
        return t
    def render(self, *args, **kwargs):
        """This method accepts the same arguments as the `dict` constructor:
        A dict, a dict subclass or some keyword arguments. If no arguments
        are given the context will be empty. These two calls do the same::
            template.render(knights='that say nih')
            template.render({'knights': 'that say nih'})
        This will return the rendered template as unicode string.
        """
        vars = dict(*args, **kwargs)
        try:
            return concat(self.root_render_func(self.new_context(vars)))
        except Exception:
            exc_info = sys.exc_info()
        # delegated outside the except block; either re-raises or returns
        # the formatted traceback (rendered=True)
        return self.environment.handle_exception(exc_info, True)
    def stream(self, *args, **kwargs):
        """Works exactly like :meth:`generate` but returns a
        :class:`TemplateStream`.
        """
        return TemplateStream(self.generate(*args, **kwargs))
    def generate(self, *args, **kwargs):
        """For very large templates it can be useful to not render the whole
        template at once but evaluate each statement after another and yield
        piece for piece. This method basically does exactly that and returns
        a generator that yields one item after another as unicode strings.
        It accepts the same arguments as :meth:`render`.
        """
        vars = dict(*args, **kwargs)
        try:
            for event in self.root_render_func(self.new_context(vars)):
                yield event
        except Exception:
            exc_info = sys.exc_info()
        else:
            # normal completion -- nothing left to yield
            return
        # on error, handle_exception either raises or (with an exception
        # formatter configured) returns a rendered traceback which is
        # yielded as the final event
        yield self.environment.handle_exception(exc_info, True)
    def new_context(self, vars=None, shared=False, locals=None):
        """Create a new :class:`Context` for this template. The vars
        provided will be passed to the template. Per default the globals
        are added to the context. If shared is set to `True` the data
        is passed as it to the context without adding the globals.
        `locals` can be a dict of local variables for internal usage.
        """
        return new_context(self.environment, self.name, self.blocks,
                           vars, shared, self.globals, locals)
    def make_module(self, vars=None, shared=False, locals=None):
        """This method works like the :attr:`module` attribute when called
        without arguments but it will evaluate the template on every call
        rather than caching it. It's also possible to provide
        a dict which is then used as context. The arguments are the same
        as for the :meth:`new_context` method.
        """
        return TemplateModule(self, self.new_context(vars, shared, locals))
    @property
    def module(self):
        """The template as module. This is used for imports in the
        template runtime but is also useful if one wants to access
        exported template variables from the Python layer:
        >>> t = Template('{% macro foo() %}42{% endmacro %}23')
        >>> unicode(t.module)
        u'23'
        >>> t.module.foo()
        u'42'
        """
        # cached after the first evaluation
        if self._module is not None:
            return self._module
        self._module = rv = self.make_module()
        return rv
    def get_corresponding_lineno(self, lineno):
        """Return the source line number of a line number in the
        generated bytecode as they are not in sync.
        """
        # debug_info pairs are (template_line, code_line), ascending;
        # scan backwards for the closest generated line at or before it
        for template_line, code_line in reversed(self.debug_info):
            if code_line <= lineno:
                return template_line
        return 1
    @property
    def is_up_to_date(self):
        """If this variable is `False` there is a newer version available."""
        if self._uptodate is None:
            return True
        return self._uptodate()
    @property
    def debug_info(self):
        """The debug info mapping."""
        # stored as a compact '&'-joined string of 'tmpl=code' pairs
        return [tuple(imap(int, x.split('='))) for x in
                self._debug_info.split('&')]
    def __repr__(self):
        if self.name is None:
            name = 'memory:%x' % id(self)
        else:
            name = repr(self.name)
        return '<%s %s>' % (self.__class__.__name__, name)
@implements_to_string
class TemplateModule(object):
    """Represents an imported template.  All exported names of the
    template are available as attributes on this object; converting it
    into a unicode- or bytestring renders the template body.
    """
    def __init__(self, template, context):
        # render eagerly so the exported names exist afterwards
        self._body_stream = list(template.root_render_func(context))
        self.__dict__.update(context.get_exported())
        self.__name__ = template.name
    def __html__(self):
        return Markup(concat(self._body_stream))
    def __str__(self):
        return concat(self._body_stream)
    def __repr__(self):
        name = self.__name__
        if name is None:
            return '<%s memory:%x>' % (self.__class__.__name__, id(self))
        return '<%s %s>' % (self.__class__.__name__, repr(name))
class TemplateExpression(object):
    """Callable wrapper returned by
    :meth:`jinja2.Environment.compile_expression`.  It evaluates the
    wrapped template and returns the value the expression produced.
    """
    def __init__(self, template, undefined_to_none):
        self._template = template
        self._undefined_to_none = undefined_to_none
    def __call__(self, *args, **kwargs):
        context = self._template.new_context(dict(*args, **kwargs))
        consume(self._template.root_render_func(context))
        result = context.vars['result']
        # optionally normalize undefined results to None
        if isinstance(result, Undefined) and self._undefined_to_none:
            result = None
        return result
@implements_iterator
class TemplateStream(object):
    """A template stream works pretty much like an ordinary python generator
    but it can buffer multiple items to reduce the number of total iterations.
    Per default the output is unbuffered which means that for every unbuffered
    instruction in the template one unicode string is yielded.
    If buffering is enabled with a buffer size of 5, five items are combined
    into a new unicode string. This is mainly useful if you are streaming
    big templates to a client via WSGI which flushes after each iteration.
    """
    def __init__(self, gen):
        self._gen = gen
        self.disable_buffering()
    def dump(self, fp, encoding=None, errors='strict'):
        """Dump the complete stream into a file or file-like object.
        Per default unicode strings are written, if you want to encode
        before writing specify an `encoding`.
        Example usage::
            Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
        """
        close = False
        if isinstance(fp, string_types):
            # a filename was passed: open it ourselves (binary mode when
            # encoding) and remember to close it afterwards
            fp = open(fp, encoding is None and 'w' or 'wb')
            close = True
        try:
            if encoding is not None:
                iterable = (x.encode(encoding, errors) for x in self)
            else:
                iterable = self
            if hasattr(fp, 'writelines'):
                fp.writelines(iterable)
            else:
                for item in iterable:
                    fp.write(item)
        finally:
            if close:
                fp.close()
    def disable_buffering(self):
        """Disable the output buffering."""
        self._next = get_next(self._gen)
        self.buffered = False
    def enable_buffering(self, size=5):
        """Enable buffering. Buffer `size` items before yielding them."""
        if size <= 1:
            raise ValueError('buffer size too small')
        def generator(next):
            buf = []
            c_size = 0
            push = buf.append
            while 1:
                try:
                    while c_size < size:
                        c = next()
                        push(c)
                        # only non-empty chunks count towards the buffer size
                        if c:
                            c_size += 1
                except StopIteration:
                    # flush the remainder once the source is exhausted
                    if not c_size:
                        return
                yield concat(buf)
                del buf[:]
                c_size = 0
        self.buffered = True
        self._next = get_next(generator(get_next(self._gen)))
    def __iter__(self):
        return self
    def __next__(self):
        return self._next()
# Hook in the default template class.  (Custom template classes are still
# possible: assign to ``Environment.template_class`` or pass
# ``template_class`` to :meth:`Environment.from_string`.)
Environment.template_class = Template
| {
"content_hash": "05e718de575e5be35ff26e8b215b5bd2",
"timestamp": "",
"source": "github",
"line_count": 1190,
"max_line_length": 82,
"avg_line_length": 39.6781512605042,
"alnum_prop": 0.6050151428510918,
"repo_name": "amisrs/one-eighty",
"id": "fad5e237733bfd14223835cdafa3d9699f7df08a",
"size": "47241",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "angular_flask/lib/python2.7/site-packages/jinja2/environment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1745293"
},
{
"name": "CSS",
"bytes": "23378"
},
{
"name": "HTML",
"bytes": "44161"
},
{
"name": "JavaScript",
"bytes": "53246"
},
{
"name": "Python",
"bytes": "18542714"
},
{
"name": "Shell",
"bytes": "6484"
}
],
"symlink_target": ""
} |
from django_couchdb_utils.auth.models import User
from datetime import datetime
from django.http import Http404
from django_openid import signed
from django.conf import settings
from .consumer import AuthConsumer
from .forms import RegistrationFormPasswordConfirm
from .models import UserOpenidAssociation
from django_openid.registration import RegistrationConsumer as DjangoOpenIDRegistrationConsumer
from couchdbkit.exceptions import ResourceNotFound
class RegistrationConsumer(AuthConsumer, DjangoOpenIDRegistrationConsumer):
    """OpenID registration consumer that stores users in CouchDB.

    Overrides django_openid's RegistrationConsumer hooks so that user
    lookup, creation and confirmation go through django_couchdb_utils'
    CouchDB ``User`` documents instead of Django's ORM.
    """
    # Registration form variant that asks for the password twice.
    RegistrationForm = RegistrationFormPasswordConfirm
    def user_is_unconfirmed(self, user):
        """Return True when a user document exists for ``user.username``.

        NOTE(review): only existence is checked, not ``is_active`` --
        presumably any stored user reaching this point is still awaiting
        confirmation; verify against the surrounding registration flow.
        """
        count = 0
        try:
            count = User.view('%s/users_by_username' % User._meta.app_label,
                              key=user.username, include_docs=True).count()
        except ResourceNotFound:
            # View (or design document) missing: treat as not unconfirmed.
            return False
        if count:
            return True
        return False
    def mark_user_confirmed(self, user):
        """Activate the account and persist the change to CouchDB."""
        user.is_active = True
        return user.store()
    def mark_user_unconfirmed(self, user):
        """Deactivate the account and persist the change to CouchDB."""
        user.is_active = False
        return user.store()
    def create_user(self, request, data, openid=None):
        """Create and store a new CouchDB ``User`` from cleaned form data.

        When ``openid`` is given, it is linked to the new user via a
        ``UserOpenidAssociation`` document keyed by the username.
        """
        user = User(
            username = data['username'],
            first_name = data.get('first_name', ''),
            last_name = data.get('last_name', ''),
            email = data.get('email', ''),
        )
        # Set OpenID, if one has been associated
        if openid:
            uoa = UserOpenidAssociation(user_id = user.username,
                                        openid = openid,
                                        created = datetime.now())
            uoa.store()
        # Set password, if one has been specified
        password = data.get('password')
        if password:
            user.set_password(password)
        else:
            # OpenID-only account: no usable password.
            user.set_unusable_password()
        user.store()
        return user
    def suggest_nickname(self, nickname):
        "Return a suggested nickname that has not yet been taken"
        if not nickname:
            return ''
        original_nickname = nickname
        suffix = None
        username_exists = True
        while username_exists:
            try:
                username_exists = User.view('%s/users_by_username' % User._meta.app_label,
                                            key=nickname, include_docs=True).count()
            except ResourceNotFound:
                # Missing view: assume the name is free.
                username_exists = False
            if not username_exists:
                break
            # Name taken: append / increment a numeric suffix and retry.
            if suffix is None:
                suffix = 1
            else:
                suffix += 1
            nickname = original_nickname + str(suffix)
        return nickname
    def generate_confirm_code(self, user):
        """Return a signed confirmation token derived from the user id."""
        return signed.sign(str(user.id), key = (
            self.confirm_link_secret or settings.SECRET_KEY
        ) + self.confirm_link_salt)
    def do_password(self, request):
        "Allow users to set a password on their account"
        if request.user.is_anonymous():
            return self.show_error(request, 'You need to log in first')
        ChangePasswordForm = self.get_change_password_form_class(request)
        if request.method == 'POST':
            form = ChangePasswordForm(request.user, data=request.POST)
            if form.is_valid():
                u = request.user
                u.set_password(form.cleaned_data['password'])
                u.store()
                return self.show_password_has_been_set(request)
        else:
            form = ChangePasswordForm(request.user)
        # Falls through with a bound (invalid) form on failed POST.
        return self.render(request, self.set_password_template, {
            'form': form,
            'action': request.path,
        })
    def do_c(self, request, token = ''):
        """Handle the e-mailed confirmation link (``/c/<token>/``)."""
        if not token:
            # TODO: show a form where they can paste in their token?
            raise Http404
        token = token.rstrip('/').encode('utf8')
        try:
            value = signed.unsign(token, key = (
                self.confirm_link_secret or settings.SECRET_KEY
            ) + self.confirm_link_salt)
        except signed.BadSignature:
            return self.show_message(
                request, self.invalid_token_message,
                self.invalid_token_message + ': ' + token
            )
        # Only line change compared with django-openid
        user_id = value
        user = self.lookup_user_by_id(user_id)
        if not user: # Maybe the user was deleted?
            return self.show_error(request, self.r_user_not_found_message)
        # Check user is NOT active but IS in the correct group
        if self.user_is_unconfirmed(user):
            # Confirm them
            try:
                # Re-fetch the document so the latest revision is stored.
                user = User.view('%s/users_by_username' % User._meta.app_label,
                                 key=user.username, include_docs=True).first()
            except ResourceNotFound:
                user = None
            if user:
                self.mark_user_confirmed(user)
                self.log_in_user(request, user)
            return self.on_registration_complete(request)
        else:
            return self.show_error(request, self.c_already_confirmed_message)
    do_c.urlregex = '^c/([^/]+)/$'
| {
"content_hash": "aaef1b85ff88731f9d5f847d749be8af",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 95,
"avg_line_length": 38.29197080291971,
"alnum_prop": 0.5707205489897065,
"repo_name": "theju/django-couchdb-utils",
"id": "9c143849101fe85fedf65777defa880f0670c496",
"size": "5246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_couchdb_utils/openid_consumer/registration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "989"
},
{
"name": "Python",
"bytes": "36137"
}
],
"symlink_target": ""
} |
"""Top-level package for yamicache."""
__author__ = """Timothy McFadden"""
__version__ = "0.6.0"
from .yamicache import Cache, nocache, override_timeout # noqa: F401
| {
"content_hash": "bc442cc76c5111a0b99c0501fbaec0f7",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 69,
"avg_line_length": 28.166666666666668,
"alnum_prop": 0.6627218934911243,
"repo_name": "mtik00/yamicache",
"id": "05873a089352f20a14f6c9ab4aecdb07b81295b9",
"size": "194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yamicache/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46773"
}
],
"symlink_target": ""
} |
from runner.koan import *
def function():
    """Return the fixed fruit string used throughout the binding koans."""
    fruit = "pineapple"
    return fruit
def function2():
    """Return the fixed vehicle string used throughout the binding koans."""
    vehicle = "tractor"
    return vehicle
class Class(object):
    """Minimal class whose single method returns a fixed string."""
    def method(self):
        """Return the canonical parrot string."""
        bird = "parrot"
        return bird
class AboutMethodBindings(Koan):
    """Koan exercises about how Python 2 binds methods to objects.

    NOTE: relies on Python 2-only attributes (``im_self``/``im_func``)
    and ``ex[0]`` exception indexing; not runnable under Python 3.
    """
    def test_methods_are_bound_to_an_object(self):
        obj = Class()
        # A bound method remembers its instance as im_self.
        self.assertEqual(True, obj.method.im_self == obj)
    def test_methods_are_also_bound_to_a_function(self):
        obj = Class()
        self.assertEqual("parrot", obj.method())
        # im_func is the underlying plain function; pass the instance by hand.
        self.assertEqual("parrot", obj.method.im_func(obj))
    def test_functions_have_attributes(self):
        self.assertEqual(31, len(dir(function)))
        # A method's wrapped function has exactly the same attributes.
        self.assertEqual(True, dir(function) == dir(Class.method.im_func))
    def test_bound_methods_have_different_attributes(self):
        obj = Class()
        # Bound-method objects expose a smaller attribute set.
        self.assertEqual(23, len(dir(obj.method)))
    def test_setting_attributes_on_an_unbound_function(self):
        # Plain functions accept arbitrary new attributes.
        function.cherries = 3
        self.assertEqual(3, function.cherries)
    def test_setting_attributes_on_a_bound_method_directly(self):
        obj = Class()
        try:
            # Bound method objects reject new attributes...
            obj.method.cherries = 3
        except AttributeError as ex:
            self.assertMatch("'instancemethod' object has no attribute 'cherries'", ex[0])
    def test_setting_attributes_on_methods_by_accessing_the_inner_function(self):
        obj = Class()
        # ...but the wrapped function itself accepts them, and the bound
        # method proxies attribute reads through to it.
        obj.method.im_func.cherries = 3
        self.assertEqual(3, obj.method.cherries)
    def test_functions_can_have_inner_functions(self):
        function2.get_fruit = function
        self.assertEqual("pineapple", function2.get_fruit())
    def test_inner_functions_are_unbound(self):
        function2.get_fruit = function
        try:
            # A function attached to a function is not bound to anything.
            cls = function2.get_fruit.im_self
        except AttributeError as ex:
            self.assertMatch("'function' object has no attribute 'im_self'", ex[0])
    # ------------------------------------------------------------------
    class BoundClass(object):
        # Descriptor whose __get__ reports how the attribute was resolved.
        def __get__(self, obj, cls):
            return (self, obj, cls)
    binding = BoundClass()
    def test_get_descriptor_resolves_attribute_binding(self):
        bound_obj, binding_owner, owner_type = self.binding
        # Look at BoundClass.__get__():
        #     bound_obj = self
        #     binding_owner = obj
        #     owner_type = cls
        self.assertEqual("BoundClass", bound_obj.__class__.__name__)
        self.assertEqual("AboutMethodBindings", binding_owner.__class__.__name__)
        self.assertEqual(AboutMethodBindings, owner_type)
    # ------------------------------------------------------------------
    class SuperColor(object):
        # Descriptor whose __set__ intercepts attribute assignment.
        def __init__(self):
            self.choice = None
        def __set__(self, obj, val):
            self.choice = val
    color = SuperColor()
    def test_set_descriptor_changes_behavior_of_attribute_assignment(self):
        self.assertEqual(None, self.color.choice)
        # Assignment is routed into SuperColor.__set__, not instance state.
        self.color = 'purple'
        self.assertEqual("purple", self.color.choice)
| {
"content_hash": "57e12f5e84710af8087bc0d1caeffbe6",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 90,
"avg_line_length": 31.557894736842105,
"alnum_prop": 0.5973982655103403,
"repo_name": "MartinRiese/python_koans",
"id": "442b8c61c179ccd1311c6f1ed8367c34d6334393",
"size": "3045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python2/koans/about_method_bindings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "331082"
},
{
"name": "Shell",
"bytes": "1599"
}
],
"symlink_target": ""
} |
# Record one phrase from the default microphone and print Google Speech
# Recognition's transcription of it.
# NOTE: this requires PyAudio because it uses the Microphone class
import speech_recognition as sr
# SECURITY(review): a Google API key is hard-coded and committed here; it
# should be revoked and read from the environment or a config file instead.
r = sr.Recognizer(key='AIzaSyCCHEwsmjuxssqgxZL_u_i8cBc3xq5cDFc')
with sr.Microphone() as source: # use the default microphone as the audio source
    audio = r.listen(source) # listen for the first phrase and extract it into audio data
try:
    print("You said " + r.recognize(audio)) # recognize speech using Google Speech Recognition
except LookupError: # speech is unintelligible
    print("Could not understand audio")
| {
"content_hash": "86d0a8550791c70d14beeb497f570b42",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 113,
"avg_line_length": 63.3,
"alnum_prop": 0.636650868878357,
"repo_name": "growse/lcd-btctracker",
"id": "aa4bf5bba74eb25964a4c01327e5b390ae0dec3d",
"size": "655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "record.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1856"
}
],
"symlink_target": ""
} |
__author__ = 'Takashi Yahata (@paoneJP)'
__copyright__ = 'Copyright (c) 2014, Takashi Yahata'
__license__ = 'MIT License'
import sys
import os
from socketserver import ThreadingMixIn
from wsgiref.simple_server import WSGIServer
import win32service
import win32serviceutil
import win32event
SERVICE_NAME = 'SelfIop'
SERVICE_DISPLAY_NAME = 'SelfIop Service'
SERVICE_DESCRIPTION = 'OpenID Connect Self-issued OP Service by @paoneJP'
# --- fix up running environment for Windows Service
if not sys.stdout:
    # Services have no console; give print()/tracebacks a harmless sink.
    sys.stdout = open(os.devnull, 'w')
if not sys.stderr:
    sys.stderr = open(os.devnull, 'w')
if os.path.basename(sys.argv[0]).lower() == 'pythonservice.exe':
    # Running under the pywin32 service host: recover the install directory
    # from the PythonClass registry value written at service registration.
    import winreg
    k = 'System\\CurrentControlSet\\Services\\' + SERVICE_NAME + \
        '\\PythonClass'
    p = winreg.QueryValue(winreg.HKEY_LOCAL_MACHINE, k)
    os.chdir(os.path.dirname(p))
else:
    # Started directly: chdir to the script's directory so that relative
    # paths (config, templates) resolve.
    dir = os.path.dirname(sys.argv[0])
    if dir:
        os.chdir(dir)
# --- end of fixup
import config
from selfiopd import run
# Event signalled by Service.SvcStop to tell the WSGI loop to shut down.
service_stop_event = win32event.CreateEvent(None, 0, 0, None)
class XWSGIServer(ThreadingMixIn, WSGIServer):
    """Threaded WSGI server that honours the Windows service stop event."""
    def service_actions(self):
        """Request server shutdown once the service stop event is signalled."""
        super().service_actions()
        waited = win32event.WaitForSingleObject(service_stop_event, 0)
        if waited == win32event.WAIT_OBJECT_0:
            # serve_forever() checks this private flag on every poll cycle.
            self._BaseServer__shutdown_request = True
class Service(win32serviceutil.ServiceFramework):
    """pywin32 service wrapper that runs the SelfIop WSGI server."""
    _svc_name_ = SERVICE_NAME
    _svc_display_name_ = SERVICE_DISPLAY_NAME
    _svc_description_ = SERVICE_DESCRIPTION
    def SvcDoRun(self):
        # Blocks in the serve loop until XWSGIServer sees the stop event.
        run(server_class=XWSGIServer)
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)
    def SvcStop(self):
        # Signal the serve loop; SvcDoRun reports SERVICE_STOPPED on exit.
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(service_stop_event)
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(Service)
| {
"content_hash": "ec18c7101f5d1243862806817d033a01",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 73,
"avg_line_length": 25.933333333333334,
"alnum_prop": 0.6709511568123393,
"repo_name": "paoneJP/python-SelfIop",
"id": "54c6d2453ac886edcb4be8ad0cbc7c23f1ae27b0",
"size": "1970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selfiop_win.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15849"
}
],
"symlink_target": ""
} |
"""
Expose top-level symbols that are safe for import *
"""
from __future__ import print_function, division, absolute_import
import re
from . import testing, decorators
from . import errors, special, types, config
# Re-export typeof
from .special import *
from .errors import *
from .pycc.decorators import export, exportmany
# Re-export all type names
from .types import *
# Re export decorators
jit = decorators.jit
autojit = decorators.autojit
njit = decorators.njit
# Re export vectorize decorators
from .npyufunc import vectorize, guvectorize
# Re export from_dtype
from .numpy_support import from_dtype
# Re-export test entrypoint
test = testing.test
# Try to initialize cuda
from . import cuda
__all__ = """
jit
autojit
njit
vectorize
guvectorize
export
exportmany
cuda
from_dtype
""".split() + types.__all__ + special.__all__ + errors.__all__
_min_llvmlite_version = (0, 6, 0)
def _ensure_llvm():
    """
    Make sure llvmlite is operational.

    Raises ImportError if the installed llvmlite is older than
    ``_min_llvmlite_version``, then verifies that JIT execution works.
    """
    import warnings
    import llvmlite
    # Only look at the major, minor and bugfix version numbers.
    # Ignore anything else (pre-release tags, local suffixes, ...).
    # BUGFIX: the second separator was an unescaped '.', which matched any
    # character (e.g. "0.6x0" would have parsed); escape it so only a real
    # dot is accepted.
    regex = re.compile(r'(\d+)\.(\d+)\.(\d+)')
    m = regex.match(llvmlite.__version__)
    if m:
        ver = tuple(map(int, m.groups()))
        if ver < _min_llvmlite_version:
            msg = ("Numba requires at least version %d.%d.%d of llvmlite.\n"
                   "Installed version is %s.\n"
                   "Please update llvmlite." %
                   (_min_llvmlite_version + (llvmlite.__version__,)))
            raise ImportError(msg)
    else:
        # Unrecognized version string: warn but do not block the import.
        warnings.warn("llvmlite version format not recognized!")
    from llvmlite.binding import check_jit_execution
    check_jit_execution()
_ensure_llvm()
# Process initialization
# Should this be hooked into CPUContext instead?
from .targets.randomimpl import random_init
random_init()
del random_init
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| {
"content_hash": "cdb87ac6dcf47597bed07348c6d8d4b0",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 76,
"avg_line_length": 22.51685393258427,
"alnum_prop": 0.6691616766467066,
"repo_name": "pitrou/numba",
"id": "9b46bb61e7536b0669bc15244808f6a73fa4f7c7",
"size": "2004",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "numba/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2212"
},
{
"name": "C",
"bytes": "241911"
},
{
"name": "C++",
"bytes": "17024"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "HTML",
"bytes": "98846"
},
{
"name": "PowerShell",
"bytes": "3153"
},
{
"name": "Python",
"bytes": "3236740"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models
from django.dispatch import receiver, Signal
from django.utils.translation import ugettext as _
from django_enumfield import enum
from metrics import Collection
from tasks.models import Task
from tasks.models import TaskStatus
from tasks.models import task_status_changed
from common.validators import GithubRevisionValidator
test_run_status_changed = Signal(providing_args=['instance', ])
class TestRunStatus(enum.Enum):
    """Lifecycle states of a TestRun (django_enumfield enum)."""
    PENDING = 0
    IN_PROGRESS = 1
    DONE = 2
    ERROR = -1
    # Human-readable labels consumed by django_enumfield.
    labels = {
        PENDING: _('Pending'),
        IN_PROGRESS: _('In progress'),
        DONE: _('Done'),
        ERROR: _('Error'),
    }
class TestRun(models.Model):
    """A single test run executed against two repository revisions.

    Saving a brand-new instance automatically creates one pending ``Task``
    for it, and every status change emits ``test_run_status_changed``.
    """
    id = models.AutoField(primary_key=True)
    # Optional human-readable label for the run.
    name = models.CharField(
        max_length=250,
        null=True,
        blank=True
    )
    # Location of the test definition to execute.
    test_run_uri = models.URLField()
    # Git SHA of the main (application) repository.
    main_revision = models.CharField(
        max_length=40,
        validators=[GithubRevisionValidator, ]
    )
    # Git SHA of the secondary (configuration) repository.
    secondary_revision = models.CharField(
        max_length=40,
        validators=[GithubRevisionValidator, ]
    )
    retries = models.IntegerField(null=True)
    status = enum.EnumField(TestRunStatus, default=TestRunStatus.PENDING)
    created = models.DateTimeField(auto_now_add=True)
    # NOTE(review): class-level sentinel used by save() to detect status
    # changes; it is never refreshed when an instance is loaded from the
    # database, so the first save of a fetched instance always fires the
    # signal -- confirm this is intended.
    __original_status = None
    def save(self, *args, **kwargs):
        """Persist the run, emit the change signal, create the first Task."""
        is_new = self.id is None
        super(TestRun, self).save(*args, **kwargs)
        if self.__original_status != self.status:
            test_run_status_changed.send(self.__class__, instance=self)
        self.__original_status = self.status
        if is_new:
            # Every fresh TestRun starts with exactly one pending Task.
            Task(
                test_run=self,
                status=TaskStatus.PENDING
            ).save()
    def save_and_run(self, *args, **kwargs):
        """Save the run and start every task not yet submitted to a job."""
        self.save(*args, **kwargs)
        for task in self.tasks.filter(job_id=None):
            task.run()
    @property
    def app_commit(self):
        """Alias for ``main_revision``."""
        return self.main_revision
    @property
    def config_commit(self):
        """Alias for ``secondary_revision``."""
        return self.secondary_revision
    @property
    def completed(self):
        """True once the run reached a terminal state (done or error)."""
        return self.status in (TestRunStatus.DONE, TestRunStatus.ERROR)
    def get_metrics(self):
        """Return the run's first stored metrics Collection, or None."""
        results = self.results.all()[0:1]
        if len(results) == 0:
            return None
        return Collection.unserialize(results[0].results)
    def __repr__(self):
        return "{0}(#{1}): {2}@{3}".format(
            self.__class__.__name__,
            self.id,
            self.test_run_uri,
            self.main_revision
        )
    def __unicode__(self):
        return "{0} #{1}".format(self.__class__.__name__, self.id)
    __str__ = __unicode__
@receiver(task_status_changed, sender=Task)
def task_changed(sender, instance, **kwargs):
    """ Callback which synchronises TestRun status with Tasks statuses.
    Here lies simple logic which tries to synch TestRun status with all sub-Tasks
    statuses. It covers some very basic cases such as:
    * starting Task starts parent TestRun
    * any error in Task will result in setting error on parent TestRun
    * finishing all sub-Task will mark TestRun as done
    """
    test_run = instance.test_run
    if instance.status == TaskStatus.IN_PROGRESS and test_run.status == TestRunStatus.PENDING:
        # First task started: the run is now in progress.
        test_run.status = TestRunStatus.IN_PROGRESS
        test_run.save()
    elif instance.status == TaskStatus.ERROR and test_run.status != TestRunStatus.ERROR:
        # Any failing task fails the whole run.
        test_run.status = TestRunStatus.ERROR
        test_run.save()
    # BUGFIX: ``instance`` is a Task, so its status must be compared against
    # TaskStatus.DONE; the original compared it to TestRunStatus.DONE,
    # inconsistently with the two branches above.
    elif instance.status == TaskStatus.DONE and test_run.status == TestRunStatus.IN_PROGRESS:
        test_run.status = TestRunStatus.DONE
        test_run.save()
| {
"content_hash": "84bc853c5d847b3fd0e8d8931b6205fb",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 96,
"avg_line_length": 30.032258064516128,
"alnum_prop": 0.6323845327604726,
"repo_name": "harnash/sparrow",
"id": "3b653aed3b4235d888b18c6ba90213f1970ab887",
"size": "3748",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/test_runs/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "147134"
}
],
"symlink_target": ""
} |
"""Support for interfacing with the XBMC/Kodi JSON-RPC API."""
import asyncio
from collections import OrderedDict
from functools import wraps
import logging
import re
import socket
import urllib
import aiohttp
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, MEDIA_PLAYER_SCHEMA, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
DOMAIN, MEDIA_TYPE_CHANNEL, MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST, MEDIA_TYPE_TVSHOW, MEDIA_TYPE_VIDEO,
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_SHUFFLE_SET, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, CONF_PROXY_SSL,
CONF_TIMEOUT, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP, STATE_IDLE,
STATE_OFF, STATE_PAUSED, STATE_PLAYING)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers import script
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.template import Template
from homeassistant.util.yaml import dump
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
EVENT_KODI_CALL_METHOD_RESULT = 'kodi_call_method_result'
CONF_TCP_PORT = 'tcp_port'
CONF_TURN_ON_ACTION = 'turn_on_action'
CONF_TURN_OFF_ACTION = 'turn_off_action'
CONF_ENABLE_WEBSOCKET = 'enable_websocket'
DEFAULT_NAME = 'Kodi'
DEFAULT_PORT = 8080
DEFAULT_TCP_PORT = 9090
DEFAULT_TIMEOUT = 5
DEFAULT_PROXY_SSL = False
DEFAULT_ENABLE_WEBSOCKET = True
DEPRECATED_TURN_OFF_ACTIONS = {
None: None,
'quit': 'Application.Quit',
'hibernate': 'System.Hibernate',
'suspend': 'System.Suspend',
'reboot': 'System.Reboot',
'shutdown': 'System.Shutdown'
}
# https://github.com/xbmc/xbmc/blob/master/xbmc/media/MediaType.h
# Map Kodi item/player types onto Home Assistant media content types.
MEDIA_TYPES = {
    'music': MEDIA_TYPE_MUSIC,
    'artist': MEDIA_TYPE_MUSIC,
    'album': MEDIA_TYPE_MUSIC,
    'song': MEDIA_TYPE_MUSIC,
    'video': MEDIA_TYPE_VIDEO,
    'set': MEDIA_TYPE_PLAYLIST,
    'musicvideo': MEDIA_TYPE_VIDEO,
    'movie': MEDIA_TYPE_MOVIE,
    'tvshow': MEDIA_TYPE_TVSHOW,
    'season': MEDIA_TYPE_TVSHOW,
    'episode': MEDIA_TYPE_TVSHOW,
    # Type 'channel' is used for radio or tv streams from pvr
    'channel': MEDIA_TYPE_CHANNEL,
    # Type 'audio' is used for audio media that Kodi couldn't scrobble
    'audio': MEDIA_TYPE_MUSIC,
}
SUPPORT_KODI = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SEEK | \
SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_SHUFFLE_SET | \
SUPPORT_PLAY | SUPPORT_VOLUME_STEP
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_TCP_PORT, default=DEFAULT_TCP_PORT): cv.port,
vol.Optional(CONF_PROXY_SSL, default=DEFAULT_PROXY_SSL): cv.boolean,
vol.Optional(CONF_TURN_ON_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_TURN_OFF_ACTION):
vol.Any(cv.SCRIPT_SCHEMA, vol.In(DEPRECATED_TURN_OFF_ACTIONS)),
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Inclusive(CONF_USERNAME, 'auth'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'auth'): cv.string,
vol.Optional(CONF_ENABLE_WEBSOCKET, default=DEFAULT_ENABLE_WEBSOCKET):
cv.boolean,
})
SERVICE_ADD_MEDIA = 'kodi_add_to_playlist'
SERVICE_CALL_METHOD = 'kodi_call_method'
DATA_KODI = 'kodi'
ATTR_MEDIA_TYPE = 'media_type'
ATTR_MEDIA_NAME = 'media_name'
ATTR_MEDIA_ARTIST_NAME = 'artist_name'
ATTR_MEDIA_ID = 'media_id'
ATTR_METHOD = 'method'
MEDIA_PLAYER_ADD_MEDIA_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_MEDIA_TYPE): cv.string,
vol.Optional(ATTR_MEDIA_ID): cv.string,
vol.Optional(ATTR_MEDIA_NAME): cv.string,
vol.Optional(ATTR_MEDIA_ARTIST_NAME): cv.string,
})
MEDIA_PLAYER_CALL_METHOD_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_METHOD): cv.string,
}, extra=vol.ALLOW_EXTRA)
SERVICE_TO_METHOD = {
SERVICE_ADD_MEDIA: {
'method': 'async_add_media_to_playlist',
'schema': MEDIA_PLAYER_ADD_MEDIA_SCHEMA},
SERVICE_CALL_METHOD: {
'method': 'async_call_method',
'schema': MEDIA_PLAYER_CALL_METHOD_SCHEMA},
}
def _check_deprecated_turn_off(hass, turn_off_action):
    """Create an equivalent script for old turn off actions.

    A plain-string action (e.g. ``'quit'``) is translated into a one-step
    script calling ``kodi_call_method`` with the matching JSON-RPC method,
    and a deprecation warning with example YAML is logged.  Script-style
    configurations pass through unchanged.
    """
    if isinstance(turn_off_action, str):
        method = DEPRECATED_TURN_OFF_ACTIONS[turn_off_action]
        new_config = OrderedDict(
            [('service', '{}.{}'.format(DOMAIN, SERVICE_CALL_METHOD)),
             ('data_template', OrderedDict(
                 [('entity_id', '{{ entity_id }}'),
                  ('method', method)]))])
        # Render the replacement as YAML for the warning message.
        example_conf = dump(OrderedDict(
            [(CONF_TURN_OFF_ACTION, new_config)]))
        _LOGGER.warning(
            "The '%s' action for turn off Kodi is deprecated and "
            "will cease to function in a future release. You need to "
            "change it for a generic Home Assistant script sequence, "
            "which is, for this turn_off action, like this:\n%s",
            turn_off_action, example_conf)
        # Convert the data_template values into real Template objects.
        new_config['data_template'] = OrderedDict(
            [(key, Template(value, hass))
             for key, value in new_config['data_template'].items()])
        turn_off_action = [new_config]
    return turn_off_action
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the Kodi platform.

    Handles both manual YAML configuration and discovery, de-duplicates
    devices by resolved IP and by discovery UUID, and registers the
    platform-level kodi_* services once.
    """
    if DATA_KODI not in hass.data:
        hass.data[DATA_KODI] = dict()
    unique_id = None
    # Is this a manual configuration?
    if discovery_info is None:
        name = config.get(CONF_NAME)
        host = config.get(CONF_HOST)
        port = config.get(CONF_PORT)
        tcp_port = config.get(CONF_TCP_PORT)
        encryption = config.get(CONF_PROXY_SSL)
        websocket = config.get(CONF_ENABLE_WEBSOCKET)
    else:
        # Discovered device: derive a name and fall back to defaults.
        name = "{} ({})".format(DEFAULT_NAME, discovery_info.get('hostname'))
        host = discovery_info.get('host')
        port = discovery_info.get('port')
        tcp_port = DEFAULT_TCP_PORT
        encryption = DEFAULT_PROXY_SSL
        websocket = DEFAULT_ENABLE_WEBSOCKET
        properties = discovery_info.get('properties')
        if properties is not None:
            unique_id = properties.get('uuid', None)
    # Only add a device once, so discovered devices do not override manual
    # config.
    ip_addr = socket.gethostbyname(host)
    if ip_addr in hass.data[DATA_KODI]:
        return
    # If we got an unique id, check that it does not exist already.
    # This is necessary as netdisco does not deterministally return the same
    # advertisement when the service is offered over multiple IP addresses.
    if unique_id is not None:
        for device in hass.data[DATA_KODI].values():
            if device.unique_id == unique_id:
                return
    entity = KodiDevice(
        hass,
        name=name,
        host=host, port=port, tcp_port=tcp_port, encryption=encryption,
        username=config.get(CONF_USERNAME),
        password=config.get(CONF_PASSWORD),
        turn_on_action=config.get(CONF_TURN_ON_ACTION),
        turn_off_action=config.get(CONF_TURN_OFF_ACTION),
        timeout=config.get(CONF_TIMEOUT), websocket=websocket,
        unique_id=unique_id)
    hass.data[DATA_KODI][ip_addr] = entity
    async_add_entities([entity], update_before_add=True)
    async def async_service_handler(service):
        """Map services to methods on MediaPlayerDevice."""
        method = SERVICE_TO_METHOD.get(service.service)
        if not method:
            return
        params = {key: value for key, value in service.data.items()
                  if key != 'entity_id'}
        entity_ids = service.data.get('entity_id')
        if entity_ids:
            target_players = [player
                              for player in hass.data[DATA_KODI].values()
                              if player.entity_id in entity_ids]
        else:
            # No entity_id given: address every known Kodi device.
            target_players = hass.data[DATA_KODI].values()
        update_tasks = []
        for player in target_players:
            await getattr(player, method['method'])(**params)
        for player in target_players:
            if player.should_poll:
                update_coro = player.async_update_ha_state(True)
                update_tasks.append(update_coro)
        if update_tasks:
            await asyncio.wait(update_tasks)
    # Register the platform services only once.
    if hass.services.has_service(DOMAIN, SERVICE_ADD_MEDIA):
        return
    for service in SERVICE_TO_METHOD:
        schema = SERVICE_TO_METHOD[service]['schema']
        hass.services.async_register(
            DOMAIN, service, async_service_handler,
            schema=schema)
def cmd(func):
    """Catch command exceptions."""
    @wraps(func)
    async def wrapper(obj, *args, **kwargs):
        """Wrap all command methods."""
        import jsonrpc_base
        try:
            await func(obj, *args, **kwargs)
        except jsonrpc_base.jsonrpc.TransportError as exc:
            # If Kodi is off, we expect calls to fail.
            log_function = (_LOGGER.info if obj.state == STATE_OFF
                            else _LOGGER.error)
            log_function("Error calling %s on entity %s: %r",
                         func.__name__, obj.entity_id, exc)
    return wrapper
class KodiDevice(MediaPlayerDevice):
"""Representation of a XBMC/Kodi device."""
    def __init__(self, hass, name, host, port, tcp_port, encryption=False,
                 username=None, password=None,
                 turn_on_action=None, turn_off_action=None,
                 timeout=DEFAULT_TIMEOUT, websocket=True,
                 unique_id=None):
        """Initialize the Kodi device.

        Builds the HTTP JSON-RPC client (and optionally the websocket
        client with push-notification listeners) and prepares the turn
        on/off scripts.
        """
        import jsonrpc_async
        import jsonrpc_websocket
        self.hass = hass
        self._name = name
        self._unique_id = unique_id
        self._media_position_updated_at = None
        self._media_position = None
        # Shared kwargs for both the HTTP and websocket JSON-RPC clients.
        kwargs = {
            'timeout': timeout,
            'session': async_get_clientsession(hass),
        }
        if username is not None:
            kwargs['auth'] = aiohttp.BasicAuth(username, password)
            # Credentials embedded in the image URL so covers can be fetched.
            image_auth_string = "{}:{}@".format(username, password)
        else:
            image_auth_string = ""
        http_protocol = 'https' if encryption else 'http'
        ws_protocol = 'wss' if encryption else 'ws'
        self._http_url = '{}://{}:{}/jsonrpc'.format(http_protocol, host, port)
        self._image_url = '{}://{}{}:{}/image'.format(
            http_protocol, image_auth_string, host, port)
        self._ws_url = '{}://{}:{}/jsonrpc'.format(ws_protocol, host, tcp_port)
        self._http_server = jsonrpc_async.Server(self._http_url, **kwargs)
        if websocket:
            # Setup websocket connection
            self._ws_server = jsonrpc_websocket.Server(self._ws_url, **kwargs)
            # Register notification listeners
            self._ws_server.Player.OnPause = self.async_on_speed_event
            self._ws_server.Player.OnPlay = self.async_on_speed_event
            self._ws_server.Player.OnAVStart = self.async_on_speed_event
            self._ws_server.Player.OnAVChange = self.async_on_speed_event
            self._ws_server.Player.OnResume = self.async_on_speed_event
            self._ws_server.Player.OnSpeedChanged = self.async_on_speed_event
            self._ws_server.Player.OnSeek = self.async_on_speed_event
            self._ws_server.Player.OnStop = self.async_on_stop
            self._ws_server.Application.OnVolumeChanged = \
                self.async_on_volume_changed
            self._ws_server.System.OnQuit = self.async_on_quit
            self._ws_server.System.OnRestart = self.async_on_quit
            self._ws_server.System.OnSleep = self.async_on_quit
            def on_hass_stop(event):
                """Close websocket connection when hass stops."""
                self.hass.async_create_task(self._ws_server.close())
            self.hass.bus.async_listen_once(
                EVENT_HOMEASSISTANT_STOP, on_hass_stop)
        else:
            self._ws_server = None
        # Script creation for the turn on/off config options
        if turn_on_action is not None:
            turn_on_action = script.Script(
                self.hass, turn_on_action,
                "{} turn ON script".format(self.name),
                self.async_update_ha_state(True))
        if turn_off_action is not None:
            # Legacy string actions are converted to scripts here.
            turn_off_action = script.Script(
                self.hass, _check_deprecated_turn_off(hass, turn_off_action),
                "{} turn OFF script".format(self.name))
        self._turn_on_action = turn_on_action
        self._turn_off_action = turn_off_action
        self._enable_websocket = websocket
        self._players = list()
        self._properties = {}
        self._item = {}
        self._app_properties = {}
@callback
def async_on_speed_event(self, sender, data):
"""Handle player changes between playing and paused."""
self._properties['speed'] = data['player']['speed']
if not hasattr(data['item'], 'id'):
# If no item id is given, perform a full update
force_refresh = True
else:
# If a new item is playing, force a complete refresh
force_refresh = data['item']['id'] != self._item.get('id')
self.async_schedule_update_ha_state(force_refresh)
    @callback
    def async_on_stop(self, sender, data):
        """Handle the stop of the player playback."""
        # Prevent stop notifications which are sent after quit notification
        if self._players is None:
            return
        # Clear all playback state; the device stays reachable (idle).
        self._players = []
        self._properties = {}
        self._item = {}
        self._media_position_updated_at = None
        self._media_position = None
        self.async_schedule_update_ha_state()
    @callback
    def async_on_volume_changed(self, sender, data):
        """Handle the volume changes."""
        # Mirror the application-level volume/mute state pushed by Kodi.
        self._app_properties['volume'] = data['volume']
        self._app_properties['muted'] = data['muted']
        self.async_schedule_update_ha_state()
    @callback
    def async_on_quit(self, sender, data):
        """Reset the player state on quit action."""
        # _players=None marks the device as off (see the state property).
        self._players = None
        self._properties = {}
        self._item = {}
        self._app_properties = {}
        self.hass.async_create_task(self._ws_server.close())
    async def _get_players(self):
        """Return the active player objects or None.

        None signals that Kodi is unreachable (device treated as off).
        """
        import jsonrpc_base
        try:
            return await self.server.Player.GetActivePlayers()
        except jsonrpc_base.jsonrpc.TransportError:
            # Log only on the transition from reachable to unreachable.
            if self._players is not None:
                _LOGGER.info("Unable to fetch kodi data")
                _LOGGER.debug("Unable to fetch kodi data", exc_info=True)
            return None
    @property
    def unique_id(self):
        """Return the unique id of the device."""
        # UUID from discovery properties; None for manual configuration.
        return self._unique_id
@property
def state(self):
"""Return the state of the device."""
if self._players is None:
return STATE_OFF
if not self._players:
return STATE_IDLE
if self._properties['speed'] == 0:
return STATE_PAUSED
return STATE_PLAYING
    async def async_ws_connect(self):
        """Connect to Kodi via websocket protocol.

        Returns silently when the connection cannot be established; the
        next poll cycle will retry.
        """
        import jsonrpc_base
        try:
            ws_loop_future = await self._ws_server.ws_connect()
        except jsonrpc_base.jsonrpc.TransportError:
            _LOGGER.info("Unable to connect to Kodi via websocket")
            _LOGGER.debug(
                "Unable to connect to Kodi via websocket", exc_info=True)
            return
        async def ws_loop_wrapper():
            """Catch exceptions from the websocket loop task."""
            try:
                await ws_loop_future
            except jsonrpc_base.TransportError:
                # Kodi abruptly ends ws connection when exiting. We will try
                # to reconnect on the next poll.
                pass
            # Update HA state after Kodi disconnects
            self.async_schedule_update_ha_state()
        # Create a task instead of adding a tracking job, since this task will
        # run until the websocket connection is closed.
        self.hass.loop.create_task(ws_loop_wrapper())
    async def async_update(self):
        """Retrieve latest state."""
        self._players = await self._get_players()
        if self._players is None:
            # Kodi unreachable: drop all cached state (device reads as off).
            self._properties = {}
            self._item = {}
            self._app_properties = {}
            return
        if self._enable_websocket and not self._ws_server.connected:
            # Opportunistically (re)establish the push channel.
            self.hass.async_create_task(self.async_ws_connect())
        self._app_properties = \
            await self.server.Application.GetProperties(
                ['volume', 'muted']
            )
        if self._players:
            player_id = self._players[0]['playerid']
            assert isinstance(player_id, int)
            self._properties = await self.server.Player.GetProperties(
                player_id,
                ['time', 'totaltime', 'speed', 'live']
            )
            position = self._properties['time']
            if self._media_position != position:
                # Track when the reported position last changed.
                self._media_position_updated_at = dt_util.utcnow()
                self._media_position = position
            self._item = (await self.server.Player.GetItem(
                player_id,
                ['title', 'file', 'uniqueid', 'thumbnail', 'artist',
                 'albumartist', 'showtitle', 'album', 'season', 'episode']
            ))['item']
        else:
            # Idle: clear playback state.
            # NOTE(review): this also discards the app properties fetched
            # just above, losing volume/mute while idle -- confirm intended.
            self._properties = {}
            self._item = {}
            self._app_properties = {}
            self._media_position = None
            self._media_position_updated_at = None
    @property
    def server(self):
        """Active server for json-rpc requests.

        Prefers the websocket transport when connected, otherwise HTTP.
        """
        if self._enable_websocket and self._ws_server.connected:
            return self._ws_server
        return self._http_server
    @property
    def name(self):
        """Return the name of the device."""
        # Configured name, or the discovery-derived "Kodi (hostname)".
        return self._name
    @property
    def should_poll(self):
        """Return True if entity has to be polled for state.

        Polling is unnecessary while the websocket push channel is up.
        """
        return not (self._enable_websocket and self._ws_server.connected)
    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        # Kodi reports 0-100; implicitly returns None when unknown.
        if 'volume' in self._app_properties:
            return self._app_properties['volume'] / 100.0
    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        # None until the application properties have been fetched.
        return self._app_properties.get('muted')
    @property
    def media_content_id(self):
        """Content ID of current playing media."""
        # 'uniqueid' field from Player.GetItem; None when nothing plays.
        return self._item.get('uniqueid', None)
@property
def media_content_type(self):
"""Content type of current playing media.
If the media type cannot be detected, the player type is used.
"""
if MEDIA_TYPES.get(self._item.get('type')) is None and self._players:
return MEDIA_TYPES.get(self._players[0]['type'])
return MEDIA_TYPES.get(self._item.get('type'))
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._properties.get('live'):
return None
total_time = self._properties.get('totaltime')
if total_time is None:
return None
return (
total_time['hours'] * 3600 +
total_time['minutes'] * 60 +
total_time['seconds'])
@property
def media_position(self):
"""Position of current playing media in seconds."""
time = self._properties.get('time')
if time is None:
return None
return (
time['hours'] * 3600 +
time['minutes'] * 60 +
time['seconds'])
@property
def media_position_updated_at(self):
"""Last valid time of media position."""
return self._media_position_updated_at
@property
def media_image_url(self):
"""Image url of current playing media."""
thumbnail = self._item.get('thumbnail')
if thumbnail is None:
return None
url_components = urllib.parse.urlparse(thumbnail)
if url_components.scheme == 'image':
return '{}/{}'.format(
self._image_url,
urllib.parse.quote_plus(thumbnail))
@property
def media_title(self):
"""Title of current playing media."""
# find a string we can use as a title
item = self._item
return item.get('title') or item.get('label') or item.get('file')
@property
def media_series_title(self):
"""Title of series of current playing media, TV show only."""
return self._item.get('showtitle')
@property
def media_season(self):
"""Season of current playing media, TV show only."""
return self._item.get('season')
@property
def media_episode(self):
"""Episode of current playing media, TV show only."""
return self._item.get('episode')
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._item.get('album')
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
artists = self._item.get('artist', [])
if artists:
return artists[0]
return None
@property
def media_album_artist(self):
"""Album artist of current playing media, music track only."""
artists = self._item.get('albumartist', [])
if artists:
return artists[0]
return None
@property
def supported_features(self):
"""Flag media player features that are supported."""
supported_features = SUPPORT_KODI
if self._turn_on_action is not None:
supported_features |= SUPPORT_TURN_ON
if self._turn_off_action is not None:
supported_features |= SUPPORT_TURN_OFF
return supported_features
@cmd
async def async_turn_on(self):
"""Execute turn_on_action to turn on media player."""
if self._turn_on_action is not None:
await self._turn_on_action.async_run(
variables={"entity_id": self.entity_id})
else:
_LOGGER.warning("turn_on requested but turn_on_action is none")
@cmd
async def async_turn_off(self):
"""Execute turn_off_action to turn off media player."""
if self._turn_off_action is not None:
await self._turn_off_action.async_run(
variables={"entity_id": self.entity_id})
else:
_LOGGER.warning("turn_off requested but turn_off_action is none")
@cmd
async def async_volume_up(self):
"""Volume up the media player."""
assert (
await self.server.Input.ExecuteAction('volumeup')) == 'OK'
@cmd
async def async_volume_down(self):
"""Volume down the media player."""
assert (
await self.server.Input.ExecuteAction('volumedown')) == 'OK'
@cmd
def async_set_volume_level(self, volume):
"""Set volume level, range 0..1.
This method must be run in the event loop and returns a coroutine.
"""
return self.server.Application.SetVolume(int(volume * 100))
@cmd
def async_mute_volume(self, mute):
"""Mute (true) or unmute (false) media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.server.Application.SetMute(mute)
async def async_set_play_state(self, state):
"""Handle play/pause/toggle."""
players = await self._get_players()
if players is not None and players:
await self.server.Player.PlayPause(
players[0]['playerid'], state)
@cmd
def async_media_play_pause(self):
"""Pause media on media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_set_play_state('toggle')
@cmd
def async_media_play(self):
"""Play media.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_set_play_state(True)
@cmd
def async_media_pause(self):
"""Pause the media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_set_play_state(False)
@cmd
async def async_media_stop(self):
"""Stop the media player."""
players = await self._get_players()
if players:
await self.server.Player.Stop(players[0]['playerid'])
async def _goto(self, direction):
"""Handle for previous/next track."""
players = await self._get_players()
if players:
if direction == 'previous':
# First seek to position 0. Kodi goes to the beginning of the
# current track if the current track is not at the beginning.
await self.server.Player.Seek(players[0]['playerid'], 0)
await self.server.Player.GoTo(
players[0]['playerid'], direction)
@cmd
def async_media_next_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
return self._goto('next')
@cmd
def async_media_previous_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
return self._goto('previous')
@cmd
async def async_media_seek(self, position):
"""Send seek command."""
players = await self._get_players()
time = {}
time['milliseconds'] = int((position % 1) * 1000)
position = int(position)
time['seconds'] = int(position % 60)
position /= 60
time['minutes'] = int(position % 60)
position /= 60
time['hours'] = int(position)
if players:
await self.server.Player.Seek(players[0]['playerid'], time)
@cmd
def async_play_media(self, media_type, media_id, **kwargs):
"""Send the play_media command to the media player.
This method must be run in the event loop and returns a coroutine.
"""
if media_type == "CHANNEL":
return self.server.Player.Open(
{"item": {"channelid": int(media_id)}})
if media_type == "PLAYLIST":
return self.server.Player.Open(
{"item": {"playlistid": int(media_id)}})
return self.server.Player.Open(
{"item": {"file": str(media_id)}})
async def async_set_shuffle(self, shuffle):
"""Set shuffle mode, for the first player."""
if not self._players:
raise RuntimeError("Error: No active player.")
await self.server.Player.SetShuffle(
{"playerid": self._players[0]['playerid'], "shuffle": shuffle})
async def async_call_method(self, method, **kwargs):
"""Run Kodi JSONRPC API method with params."""
import jsonrpc_base
_LOGGER.debug("Run API method %s, kwargs=%s", method, kwargs)
result_ok = False
try:
result = await getattr(self.server, method)(**kwargs)
result_ok = True
except jsonrpc_base.jsonrpc.ProtocolError as exc:
result = exc.args[2]['error']
_LOGGER.error("Run API method %s.%s(%s) error: %s",
self.entity_id, method, kwargs, result)
except jsonrpc_base.jsonrpc.TransportError:
result = None
_LOGGER.warning("TransportError trying to run API method "
"%s.%s(%s)", self.entity_id, method, kwargs)
if isinstance(result, dict):
event_data = {'entity_id': self.entity_id,
'result': result,
'result_ok': result_ok,
'input': {'method': method, 'params': kwargs}}
_LOGGER.debug("EVENT kodi_call_method_result: %s", event_data)
self.hass.bus.async_fire(EVENT_KODI_CALL_METHOD_RESULT,
event_data=event_data)
return result
    async def async_add_media_to_playlist(
            self, media_type, media_id=None, media_name='ALL', artist_name=''):
        """Add a media to default playlist (i.e. playlistid=0).

        First the media type must be selected, then
        the media can be specified in terms of id or
        name and optionally artist name.
        All the albums of an artist can be added with
        media_name="ALL"

        :param media_type: "SONG" or "ALBUM" (anything else raises)
        :param media_id: Kodi library id; looked up by name when None
        :param media_name: name to search for when media_id is None
        :param artist_name: optional artist name to narrow the search
        """
        import jsonrpc_base
        params = {"playlistid": 0}
        if media_type == "SONG":
            if media_id is None:
                media_id = await self.async_find_song(
                    media_name, artist_name)
            if media_id:
                params["item"] = {"songid": int(media_id)}
        elif media_type == "ALBUM":
            if media_id is None:
                if media_name == "ALL":
                    # Special case: queue every album of the artist and stop.
                    await self.async_add_all_albums(artist_name)
                    return
                media_id = await self.async_find_album(
                    media_name, artist_name)
            if media_id:
                params["item"] = {"albumid": int(media_id)}
        else:
            raise RuntimeError("Unrecognized media type.")
        if media_id is not None:
            try:
                await self.server.Playlist.Add(params)
            except jsonrpc_base.jsonrpc.ProtocolError as exc:
                # Server-side JSON-RPC error: log its payload.
                result = exc.args[2]['error']
                _LOGGER.error("Run API method %s.Playlist.Add(%s) error: %s",
                              self.entity_id, media_type, result)
            except jsonrpc_base.jsonrpc.TransportError:
                _LOGGER.warning("TransportError trying to add playlist to %s",
                                self.entity_id)
        else:
            # Name lookup found nothing; nothing was queued.
            _LOGGER.warning("No media detected for Playlist.Add")
async def async_add_all_albums(self, artist_name):
"""Add all albums of an artist to default playlist (i.e. playlistid=0).
The artist is specified in terms of name.
"""
artist_id = await self.async_find_artist(artist_name)
albums = await self.async_get_albums(artist_id)
for alb in albums['albums']:
await self.server.Playlist.Add(
{"playlistid": 0, "item": {"albumid": int(alb['albumid'])}})
async def async_clear_playlist(self):
"""Clear default playlist (i.e. playlistid=0)."""
return self.server.Playlist.Clear({"playlistid": 0})
async def async_get_artists(self):
"""Get artists list."""
return await self.server.AudioLibrary.GetArtists()
async def async_get_albums(self, artist_id=None):
"""Get albums list."""
if artist_id is None:
return await self.server.AudioLibrary.GetAlbums()
return (await self.server.AudioLibrary.GetAlbums(
{"filter": {"artistid": int(artist_id)}}))
async def async_find_artist(self, artist_name):
"""Find artist by name."""
artists = await self.async_get_artists()
try:
out = self._find(
artist_name, [a['artist'] for a in artists['artists']])
return artists['artists'][out[0][0]]['artistid']
except KeyError:
_LOGGER.warning("No artists were found: %s", artist_name)
return None
async def async_get_songs(self, artist_id=None):
"""Get songs list."""
if artist_id is None:
return await self.server.AudioLibrary.GetSongs()
return (await self.server.AudioLibrary.GetSongs(
{"filter": {"artistid": int(artist_id)}}))
async def async_find_song(self, song_name, artist_name=''):
"""Find song by name and optionally artist name."""
artist_id = None
if artist_name != '':
artist_id = await self.async_find_artist(artist_name)
songs = await self.async_get_songs(artist_id)
if songs['limits']['total'] == 0:
return None
out = self._find(song_name, [a['label'] for a in songs['songs']])
return songs['songs'][out[0][0]]['songid']
async def async_find_album(self, album_name, artist_name=''):
"""Find album by name and optionally artist name."""
artist_id = None
if artist_name != '':
artist_id = await self.async_find_artist(artist_name)
albums = await self.async_get_albums(artist_id)
try:
out = self._find(
album_name, [a['label'] for a in albums['albums']])
return albums['albums'][out[0][0]]['albumid']
except KeyError:
_LOGGER.warning("No albums were found with artist: %s, album: %s",
artist_name, album_name)
return None
@staticmethod
def _find(key_word, words):
key_word = key_word.split(' ')
patt = [re.compile(
'(^| )' + k + '( |$)', re.IGNORECASE) for k in key_word]
out = [[i, 0] for i in range(len(words))]
for i in range(len(words)):
mtc = [p.search(words[i]) for p in patt]
rate = [m is not None for m in mtc].count(True)
out[i][1] = rate
return sorted(out, key=lambda out: out[1], reverse=True)
| {
"content_hash": "948d3acc14ef36d89a9315b408d6e068",
"timestamp": "",
"source": "github",
"line_count": 963,
"max_line_length": 79,
"avg_line_length": 35.63032191069574,
"alnum_prop": 0.5892399160643507,
"repo_name": "aequitas/home-assistant",
"id": "661ebd86187720ba1b880e08c7aac97dcc42ae27",
"size": "34312",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/kodi/media_player.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15601734"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from future.builtins import bytes, str
from unittest import skipUnless
import warnings
from django.conf import settings as django_settings
from mezzanine.conf import settings, registry, register_setting
from mezzanine.conf.models import Setting
from mezzanine.utils.tests import TestCase
class ConfTests(TestCase):
    """Tests for mezzanine's editable-settings machinery: DB overrides,
    type round-tripping, caching, and settings.py/database conflicts."""

    @skipUnless(False, "Only run manually - see Github issue #1126")
    def test_threading_race(self):
        import multiprocessing.pool
        import random
        from django.db import connections

        # Per-type mutators used to produce a non-default value.
        type_modifiers = {int: lambda s: s + 1,
                          float: lambda s: s + 1.0,
                          bool: lambda s: not s,
                          str: lambda s: s + u"test",
                          bytes: lambda s: s + b"test"}
        # Store a non-default value for every editable setting in the database
        editable_settings = {}
        for setting in registry.values():
            if setting["editable"]:
                modified = type_modifiers[setting["type"]](setting["default"])
                Setting.objects.create(name=setting["name"], value=modified)
                editable_settings[setting["name"]] = modified
        # Make our child threads use this thread's connections. Recent SQLite
        # do support access from multiple threads for in-memory databases, but
        # Django doesn't support it currently - so we have to resort to this
        # workaround, taken from Django's LiveServerTestCase.
        # See Django ticket #12118 for discussion.
        connections_override = {}
        for conn in connections.all():
            # If using in-memory sqlite databases, pass the connections to
            # the server thread.
            if (conn.vendor == 'sqlite' and
                    conn.settings_dict['NAME'] == ':memory:'):
                # Explicitly enable thread-shareability for this connection
                conn._old_allow_thread_sharing = conn.allow_thread_sharing
                conn.allow_thread_sharing = True
                connections_override[conn.alias] = conn

        def initialise_thread():
            for alias, connection in connections_override.items():
                connections[alias] = connection

        thread_pool = multiprocessing.pool.ThreadPool(8, initialise_thread)

        def retrieve_setting(setting_name):
            return setting_name, getattr(settings, setting_name)

        def choose_random_setting(length=5000):
            choices = list(editable_settings)
            for _ in range(length):
                yield random.choice(choices)

        try:
            for setting in thread_pool.imap_unordered(retrieve_setting,
                                                      choose_random_setting()):
                name, retrieved_value = setting
                self.assertEqual(retrieved_value, editable_settings[name])
        finally:
            # Restore thread-sharing flags and clean the DB state.
            for conn in connections_override.values():
                conn.allow_thread_sharing = conn._old_allow_thread_sharing
                del conn._old_allow_thread_sharing
            Setting.objects.all().delete()

    def test_settings(self):
        """
        Test that an editable setting can be overridden with a DB
        value and that the data type is preserved when the value is
        returned back out of the DB. Also checks to ensure no
        unsupported types are defined for editable settings.
        """
        settings.clear_cache()
        # Find an editable setting for each supported type.
        names_by_type = {}
        for setting in registry.values():
            if setting["editable"] and setting["type"] not in names_by_type:
                names_by_type[setting["type"]] = setting["name"]
        # Create a modified value for each setting and save it.
        values_by_name = {}
        for (setting_type, setting_name) in names_by_type.items():
            setting_value = registry[setting_name]["default"]
            if setting_type in (int, float):
                setting_value += 1
            elif setting_type is bool:
                setting_value = not setting_value
            elif setting_type is str:
                setting_value += u"test"
            elif setting_type is bytes:
                setting_value += b"test"
            else:
                setting = "%s: %s" % (setting_name, setting_type)
                self.fail("Unsupported setting type for %s" % setting)
            values_by_name[setting_name] = setting_value
            Setting.objects.create(name=setting_name, value=setting_value)
        # Load the settings and make sure the DB values have persisted.
        for (name, value) in values_by_name.items():
            self.assertEqual(getattr(settings, name), value)

    def test_editable_override(self):
        """
        Test that an editable setting is always overridden by a settings.py
        setting of the same name.
        """
        settings.clear_cache()
        Setting.objects.all().delete()
        django_settings.FOO = "Set in settings.py"
        Setting.objects.create(name="FOO", value="Set in database")
        first_value = settings.FOO
        # Accessing another editable setting loads the whole settings cache;
        # FOO must still resolve to the settings.py value afterwards.
        settings.SITE_TITLE
        second_value = settings.FOO
        self.assertEqual(first_value, second_value)

    def test_bytes_conversion(self):
        # A unicode DB value for a bytes-typed setting is coerced to bytes.
        settings.clear_cache()
        register_setting(name="BYTES_TEST_SETTING", editable=True, default=b"")
        Setting.objects.create(name="BYTES_TEST_SETTING",
                               value="A unicode value")
        self.assertEqual(settings.BYTES_TEST_SETTING, b"A unicode value")

    def test_invalid_value_warning(self):
        """
        Test that a warning is raised when a database setting has an invalid
        value, i.e. one that can't be converted to the correct Python type.
        """
        settings.clear_cache()
        register_setting(name="INVALID_INT_SETTING", editable=True, default=0)
        Setting.objects.create(name="INVALID_INT_SETTING", value='zero')
        with warnings.catch_warnings():
            warning_re = r'The setting \w+ should be of type'
            warnings.filterwarnings('error', warning_re, UserWarning)
            with self.assertRaises(UserWarning):
                settings.INVALID_INT_SETTING
        # The invalid value is discarded in favour of the default.
        self.assertEqual(settings.INVALID_INT_SETTING, 0)

    def test_unregistered_setting(self):
        """
        Test that accessing any editable setting will delete all Settings
        with no corresponding registered setting from the database.
        """
        settings.clear_cache()
        register_setting(name="REGISTERED_SETTING", editable=True, default="")
        Setting.objects.create(name="UNREGISTERED_SETTING", value='')
        with self.assertRaises(AttributeError):
            settings.UNREGISTERED_SETTING
        qs = Setting.objects.filter(name="UNREGISTERED_SETTING")
        self.assertEqual(qs.count(), 1)
        # This triggers Settings._load(), which deletes unregistered Settings
        settings.REGISTERED_SETTING
        self.assertEqual(qs.count(), 0)

    def test_conflicting_setting(self):
        """
        Test that conflicting settings raise a warning and use the settings.py
        value instead of the value from the database.
        """
        settings.clear_cache()
        register_setting(name="CONFLICTING_SETTING", editable=True, default=1)
        Setting.objects.create(name="CONFLICTING_SETTING", value=2)
        settings.CONFLICTING_SETTING = 3
        with warnings.catch_warnings():
            # Raw strings: the previous non-raw "settings\.py" relied on an
            # invalid escape sequence (DeprecationWarning, future
            # SyntaxError). The runtime pattern is unchanged.
            warning_re = (r"These settings are defined in both "
                          r"settings\.py and the database")
            warnings.filterwarnings('error', warning_re, UserWarning)
            with self.assertRaises(UserWarning):
                settings.CONFLICTING_SETTING
        self.assertEqual(settings.CONFLICTING_SETTING, 3)
        del settings.CONFLICTING_SETTING

    def test_modeltranslation_configuration(self):
        """
        Test that modeltranslation is properly configured in settings.
        """
        if settings.USE_MODELTRANSLATION:
            # modeltranslation requires Django's i18n machinery.
            self.assertTrue(settings.USE_I18N)

    def test_editable_caching(self):
        """
        Test the editable setting caching behavior.
        """
        # Ensure usage with no current request does not break caching
        from mezzanine.core.request import _thread_local
        del _thread_local.request
        setting = Setting.objects.create(name='SITE_TITLE', value="Mezzanine")
        original_site_title = settings.SITE_TITLE
        setting.value = "Foobar"
        setting.save()
        new_site_title = settings.SITE_TITLE
        setting.delete()
        # The cache must have been refreshed after the save.
        self.assertNotEqual(original_site_title, new_site_title)
| {
"content_hash": "40826fe2e7b680a91f883f949e35345b",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 79,
"avg_line_length": 39.60538116591928,
"alnum_prop": 0.6166213768115942,
"repo_name": "ryneeverett/mezzanine",
"id": "8e3b426479461b09e6b5f525e1342c820972ec8e",
"size": "8832",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mezzanine/conf/tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "60312"
},
{
"name": "HTML",
"bytes": "90044"
},
{
"name": "JavaScript",
"bytes": "453175"
},
{
"name": "Python",
"bytes": "690921"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
    # Import Django lazily so merely importing this module stays cheap.
    # (Fix: dropped a duplicate `import os`; os is already imported at
    # the top of this file.)
    from django.core.management import execute_from_command_line

    # Point Django at this demo project's settings before dispatching.
    # NOTE(review): stock manage.py uses os.environ.setdefault() here;
    # this unconditional assignment overrides any value set by the
    # caller - confirm that is intended.
    os.environ["DJANGO_SETTINGS_MODULE"] = "lino_book.projects.workflows.settings"
    execute_from_command_line(sys.argv)
| {
"content_hash": "76886df4a6059803f076b334abf08cab",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 82,
"avg_line_length": 25.3,
"alnum_prop": 0.7035573122529645,
"repo_name": "lino-framework/book",
"id": "a517409a8164911511d8848adb84721ebd1f99b1",
"size": "275",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lino_book/projects/workflows/manage.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3668"
},
{
"name": "JavaScript",
"bytes": "7140"
},
{
"name": "Python",
"bytes": "991438"
},
{
"name": "Shell",
"bytes": "989"
}
],
"symlink_target": ""
} |
import json
import os
import pgpm.lib.utils.db
import psycopg2
import psycopg2.extras
class GlobalConfiguration(object):
    """
    Stores global pgpm configuration, merged from the default config file
    (~/.pgpmconfig by default) and an optional extra config file, and
    flattens every entry of 'connection_sets' into a list of connection
    dictionaries (RESDB entries are fetched from a database, LIST entries
    are taken verbatim).
    """

    description = ""
    license = ""
    owner_role = ""
    user_roles = []

    def __init__(self, default_config_path='~/.pgpmconfig', extra_config_path=None):
        """
        Populates properties with config data.

        :param default_config_path: path to the default JSON config file
        :param extra_config_path: optional extra JSON config file whose
            keys override the default's on conflict
        """
        default_config = self._load_json_config(default_config_path)
        extra_config = self._load_json_config(extra_config_path)

        if default_config and extra_config:
            # The extra config wins on key collisions.
            global_config_dict = dict(default_config, **extra_config)
        else:
            global_config_dict = default_config or extra_config
        self.global_config_dict = global_config_dict

        self.connection_sets = []
        if self.global_config_dict and 'connection_sets' in self.global_config_dict:
            for item in self.global_config_dict['connection_sets']:
                if item['type'] == 'RESDB':
                    # Connection definitions live in a DB table; run the
                    # configured query and collect the rows as dicts.
                    # try/finally ensures the connection is not leaked if
                    # the query fails (the original leaked it).
                    conn = psycopg2.connect(
                        item['connection_string'],
                        connection_factory=pgpm.lib.utils.db.MegaConnection)
                    try:
                        cur = conn.cursor(
                            cursor_factory=psycopg2.extras.RealDictCursor)
                        cur.execute(item['payload'])
                        self.connection_sets += cur.fetchall()
                        cur.close()
                    finally:
                        conn.close()
                if item['type'] == 'LIST':
                    # Inline list of connection dicts.
                    self.connection_sets += item['payload']

    @staticmethod
    def _load_json_config(path):
        """Load a JSON file after ~/relative-path expansion.

        :return: parsed content, or None when *path* is falsy or not a file
        """
        if not path:
            return None
        full_path = os.path.abspath(os.path.expanduser(path))
        if not os.path.isfile(full_path):
            return None
        # 'with' guarantees the handle is closed even if parsing fails
        # (the original left it open on a json.load error).
        with open(full_path) as config_file:
            return json.load(config_file)

    def get_list_connections(self, environment, product, unique_name_list=None, is_except=False):
        """
        Gets list of connections that satisfy the filter by environment,
        product and (optionally) unique DB names.

        :param environment: Environment name
        :param product: Product name
        :param unique_name_list: list of unique db aliases; falsy means
            "no alias filter"
        :param is_except: if True, keep connections whose alias is NOT in
            ``unique_name_list``; if False (default), keep those that ARE
        :return: list of dictionaries with connections
        """
        matches = []
        for item in self.connection_sets:
            if item['environment'] != environment or item['product'] != product:
                continue
            if not unique_name_list:
                # No alias filter: every environment/product match counts.
                matches.append(item)
            elif item['unique_name'] and \
                    (item['unique_name'] in unique_name_list) != is_except:
                # XOR of membership and is_except reproduces the original
                # include/exclude branches; falsy aliases are always skipped
                # when a filter is given.
                matches.append(item)
        return matches
| {
"content_hash": "7096b5e6badd59ac8654bf41abad9524",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 113,
"avg_line_length": 44.18390804597701,
"alnum_prop": 0.559573361082206,
"repo_name": "affinitas/pgpm",
"id": "80778f36c8f5b3f3bacfd678821e04043ab42f4c",
"size": "3844",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pgpm/utils/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "34178"
},
{
"name": "Python",
"bytes": "113485"
}
],
"symlink_target": ""
} |
"""
module for accessing a USB HID YubiKey NEO
"""
# Copyright (c) 2012 Yubico AB
# See the file COPYING for licence statement.
# Public API of this module.
__all__ = [
    # constants
    'uri_identifiers',
    # functions
    # classes
    'YubiKeyNEO_USBHID',
    'YubiKeyNEO_USBHIDError'
]

import struct
import binascii

from .yubico_version import __version__
from .yubikey_defs import SLOT, MODE
from . import yubikey_usb_hid
from . import yubikey_base
from . import yubikey_frame
from . import yubico_exception
from . import yubico_util

# commands from ykdef.h
_ACC_CODE_SIZE = 6 # Size of access code to re-program device
_NDEF_DATA_SIZE = 54  # bytes available for the NDEF payload in the config struct

# from nfcdef.h
_NDEF_URI_TYPE = ord('U')   # NDEF record type byte: URI record
_NDEF_TEXT_TYPE = ord('T')  # NDEF record type byte: text record

# From nfcforum-ts-rtd-uri-1.0.pdf
# (identifier-code, prefix) pairs: well-known URI prefixes that the NDEF
# URI record abbreviates to a single leading byte.
uri_identifiers = [
    (0x01, "http://www.",),
    (0x02, "https://www.",),
    (0x03, "http://",),
    (0x04, "https://",),
    (0x05, "tel:",),
    (0x06, "mailto:",),
    (0x07, "ftp://anonymous:anonymous@",),
    (0x08, "ftp://ftp.",),
    (0x09, "ftps://",),
    (0x0a, "sftp://",),
    (0x0b, "smb://",),
    (0x0c, "nfs://",),
    (0x0d, "ftp://",),
    (0x0e, "dav://",),
    (0x0f, "news:",),
    (0x10, "telnet://",),
    (0x11, "imap:",),
    (0x12, "rtsp://",),
    (0x13, "urn:",),
    (0x14, "pop:",),
    (0x15, "sip:",),
    (0x16, "sips:",),
    (0x17, "tftp:",),
    (0x18, "btspp://",),
    (0x19, "btl2cap://",),
    (0x1a, "btgoep://",),
    (0x1b, "tcpobex://",),
    (0x1c, "irdaobex://",),
    (0x1d, "file://",),
    (0x1e, "urn:epc:id:",),
    (0x1f, "urn:epc:tag:",),
    (0x20, "urn:epc:pat:",),
    (0x21, "urn:epc:raw:",),
    (0x22, "urn:epc:",),
    (0x23, "urn:nfc:",),
]

# Maps the logical NDEF slot number (1 or 2) to its slot command byte.
_NDEF_SLOTS = {
    1: SLOT.NDEF,
    2: SLOT.NDEF2
}
class YubiKeyNEO_USBHIDError(yubico_exception.YubicoError):
    """ Exception raised for errors with the NEO USB HID communication. """
    # Also raised by the NDEF helpers below for bad/oversized payloads.
class YubiKeyNEO_USBHIDCapabilities(yubikey_usb_hid.YubiKeyUSBHIDCapabilities):
    """
    Capabilities of the current YubiKey NEO, keyed off its firmware version.
    """

    def have_challenge_response(self, mode):
        # Challenge-response arrived with the 3.0.0 NEO firmware.
        return self.version >= (3, 0, 0)

    def have_configuration_slot(self, slot):
        # Pre-3.0.0 firmware only exposes slot 1.
        if self.version < (3, 0, 0):
            return slot == 1
        return slot in (1, 2)

    def have_nfc_ndef(self, slot=1):
        # A second NDEF slot exists from firmware 3.0.0 on.
        if self.version < (3, 0, 0):
            return slot == 1
        return slot in (1, 2)

    def have_scanmap(self):
        return self.version >= (3, 0, 0)

    def have_device_config(self):
        return self.version >= (3, 0, 0)

    def have_usb_mode(self, mode):
        if not self.have_device_config():
            return False
        # Strip the eject flag before validating the base mode number.
        mode &= ~MODE.FLAG_EJECT
        return mode in range(7)
class YubiKeyNEO_USBHID(yubikey_usb_hid.YubiKeyUSBHID):
    """
    Class for accessing a YubiKey NEO over USB HID.

    The NEO is very similar to the original YubiKey (YubiKeyUSBHID)
    but does add the NDEF "slot".

    The NDEF is the tag the YubiKey emits over its NFC interface.
    """
    model = 'YubiKey NEO'
    description = 'YubiKey NEO'
    _capabilities_cls = YubiKeyNEO_USBHIDCapabilities

    def __init__(self, debug=False, skip=0, hid_device=None):
        """
        Find and connect to a YubiKey NEO (USB HID).

        Attributes :
            skip -- number of YubiKeys to skip
            debug -- True or False

        Raises YubiKeyVersionError when the connected device's firmware is
        not a NEO (i.e. not 2.1.4-2.1.9 BETA and below 3.0.0).
        """
        super(YubiKeyNEO_USBHID, self).__init__(debug, skip, hid_device)
        # Firmware 2.1.4 - 2.1.9 identifies NEO BETA devices; any other
        # firmware below 3.0.0 is not a NEO at all.
        if self.version_num() >= (2, 1, 4,) and \
           self.version_num() <= (2, 1, 9,):
            self.description = 'YubiKey NEO BETA'
        elif self.version_num() < (3, 0, 0):
            raise yubikey_base.YubiKeyVersionError("Incorrect version for %s" % self)

    def write_ndef(self, ndef, slot=1):
        """
        Write an NDEF tag configuration to the YubiKey NEO.

        :param ndef: a YubiKeyNEO_NDEF instance
        :param slot: NDEF slot number (2 requires firmware >= 3.0.0)
        """
        if not self.capabilities.have_nfc_ndef(slot):
            raise yubikey_base.YubiKeyVersionError("NDEF slot %i unsupported in %s" % (slot, self))
        return self._device._write_config(ndef, _NDEF_SLOTS[slot])

    def init_device_config(self, **kwargs):
        # Factory for a NEO device-config object (USB mode / timeouts).
        return YubiKeyNEO_DEVICE_CONFIG(**kwargs)

    def write_device_config(self, device_config):
        """
        Write a DEVICE_CONFIG to the YubiKey NEO.
        """
        if not self.capabilities.have_usb_mode(device_config._mode):
            raise yubikey_base.YubiKeyVersionError("USB mode: %02x not supported for %s" % (device_config._mode, self))
        return self._device._write_config(device_config, SLOT.DEVICE_CONFIG)

    def write_scan_map(self, scanmap=None):
        # Program a custom keyboard scan-code map (firmware >= 3.0.0 only).
        if not self.capabilities.have_scanmap():
            raise yubikey_base.YubiKeyVersionError("Scanmap not supported in %s" % self)
        return self._device._write_config(YubiKeyNEO_SCAN_MAP(scanmap), SLOT.SCAN_MAP)
class YubiKeyNEO_NDEF(object):
    """
    Class allowing programming of a YubiKey NEO NDEF.

    The payload is encoded either as an NDEF URI record (well-known
    prefixes are compressed to a single identifier byte) or as an NDEF
    TEXT record, then packed into the fixed-size YKNDEF config struct.
    """
    ndef_type = _NDEF_URI_TYPE
    ndef_str = None
    access_code = yubico_util.chr_byte(0x0) * _ACC_CODE_SIZE
    # For _NDEF_URI_TYPE
    ndef_uri_rt = 0x0  # No prepending
    # For _NDEF_TEXT_TYPE
    ndef_text_lang = b'en'
    ndef_text_enc = 'UTF-8'

    def __init__(self, data, access_code=None):
        """
        :param data: the NDEF payload (bytes)
        :param access_code: optional 6-byte access code protecting the slot
        """
        self.ndef_str = data
        if access_code is not None:
            self.access_code = access_code

    def text(self, encoding='UTF-8', language='en'):
        """
        Configure parameters for NDEF type TEXT.

        @param encoding: The encoding used. Should be either 'UTF-8' or 'UTF16'.
        @param language: ISO/IANA language code (see RFC 3066).
        """
        self.ndef_type = _NDEF_TEXT_TYPE
        if isinstance(language, str):
            # Bug fix: the language code is concatenated with the bytes
            # payload in _encode_ndef_text_params, so a str argument (the
            # default!) must be normalized to bytes to avoid a TypeError
            # under Python 3. Bytes arguments pass through unchanged.
            language = language.encode('ascii')
        self.ndef_text_lang = language
        self.ndef_text_enc = encoding
        return self

    def type(self, url=False, text=False, other=None):
        """
        Change the NDEF type.

        Exactly one of url / text / other (an int type code) may be given.
        """
        if (url, text, other) == (True, False, None):
            self.ndef_type = _NDEF_URI_TYPE
        elif (url, text, other) == (False, True, None):
            self.ndef_type = _NDEF_TEXT_TYPE
        elif (url, text, type(other)) == (False, False, int):
            self.ndef_type = other
        else:
            raise YubiKeyNEO_USBHIDError("Bad or conflicting NDEF type specified")
        return self

    def to_string(self):
        """
        Return the current NDEF as a string (always 64 bytes).

        Raises YubiKeyNEO_USBHIDError when the encoded payload exceeds
        the 54-byte NDEF data area.
        """
        data = self.ndef_str
        if self.ndef_type == _NDEF_URI_TYPE:
            data = self._encode_ndef_uri_type(data)
        elif self.ndef_type == _NDEF_TEXT_TYPE:
            data = self._encode_ndef_text_params(data)
        if len(data) > _NDEF_DATA_SIZE:
            raise YubiKeyNEO_USBHIDError("NDEF payload too long")
        # typedef struct {
        #   unsigned char len;                       // Payload length
        #   unsigned char type;                      // NDEF type specifier
        #   unsigned char data[NDEF_DATA_SIZE];      // Payload size
        #   unsigned char curAccCode[ACC_CODE_SIZE]; // Access code
        # } YKNDEF;
        fmt = '< B B %ss %ss' % (_NDEF_DATA_SIZE, _ACC_CODE_SIZE)
        first = struct.pack(fmt,
                            len(data),
                            self.ndef_type,
                            data.ljust(_NDEF_DATA_SIZE, b'\0'),
                            self.access_code,
                            )
        return first

    def to_frame(self, slot=SLOT.NDEF):
        """
        Return the current configuration as a YubiKeyFrame object.
        """
        data = self.to_string()
        payload = data.ljust(64, b'\0')
        return yubikey_frame.YubiKeyFrame(command=slot, payload=payload)

    def _encode_ndef_uri_type(self, data):
        """
        Implement NDEF URI Identifier Code.

        This is a small hack to replace some well known prefixes (such as
        http://) with a one byte code. If the prefix is not known, 0x00 is
        used.
        """
        t = 0x0
        for (code, prefix) in uri_identifiers:
            if data[:len(prefix)].decode('latin-1').lower() == prefix:
                t = code
                data = data[len(prefix):]
                break
        data = yubico_util.chr_byte(t) + data
        return data

    def _encode_ndef_text_params(self, data):
        """
        Prepend language and encoding information to data, according to
        nfcforum-ts-rtd-text-1-0.pdf
        """
        # Status byte layout per the NFC Forum Text RTD:
        # bit 7 = UTF-16 flag, bits 0..5 = language code length.
        status = len(self.ndef_text_lang)
        if self.ndef_text_enc == 'UTF16':
            # Bug fix: the UTF-16 flag must be OR'ed in. The previous
            # 'status = status & 0b10000000' zeroed the language length
            # AND never set the flag (len < 128 has bit 7 clear).
            status = status | 0b10000000
        return yubico_util.chr_byte(status) + self.ndef_text_lang + data
class YubiKeyNEO_DEVICE_CONFIG(object):
    """
    Class allowing programming of a YubiKey NEO DEVICE_CONFIG
    (USB mode, challenge-response timeout, auto-eject time).
    """
    _mode = MODE.OTP
    _cr_timeout = 0
    _auto_eject_time = 0

    def __init__(self, mode=MODE.OTP):
        self._mode = mode

    def cr_timeout(self, timeout=0):
        """
        Configure the challenge-response timeout in seconds.
        """
        self._cr_timeout = timeout
        return self

    def auto_eject_time(self, auto_eject_time=0):
        """
        Configure the auto eject time in 10x seconds.
        """
        self._auto_eject_time = auto_eject_time
        return self

    def to_string(self):
        """
        Return the current DEVICE_CONFIG as a string (always 4 bytes).
        """
        # mode and timeout are single bytes, eject time a little-endian u16.
        packed = struct.pack('<BBH',
                             self._mode,
                             self._cr_timeout,
                             self._auto_eject_time)
        return packed

    def to_frame(self, slot=SLOT.DEVICE_CONFIG):
        """
        Return the current configuration as a YubiKeyFrame object.
        """
        payload = self.to_string().ljust(64, b'\0')
        return yubikey_frame.YubiKeyFrame(command=slot, payload=payload)
class YubiKeyNEO_SCAN_MAP(object):
    """
    Class allowing programming of a YubiKey NEO scan map.
    """
    def __init__(self, scanmap=None):
        if scanmap:
            # A leading 'h:' marks a hex-encoded scan map.
            if scanmap.startswith(b'h:'):
                scanmap = binascii.unhexlify(scanmap[2:])
            if len(scanmap) != 45:
                raise yubico_exception.InputError('Scan map must be exactly 45 bytes')
        self.scanmap = scanmap

    def to_frame(self, slot=SLOT.SCAN_MAP):
        """
        Return the current configuration as a YubiKeyFrame object.
        """
        # Zero-pad the 45-byte map up to the 64-byte frame payload.
        padded = self.scanmap.ljust(64, b'\0')
        return yubikey_frame.YubiKeyFrame(command=slot, payload=padded)
| {
"content_hash": "e9d35285e6319e12ef57380577ee0c82",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 119,
"avg_line_length": 30.662857142857142,
"alnum_prop": 0.5639209839731644,
"repo_name": "Yubico/python-yubico-dpkg",
"id": "0f4334a70a7f28b8b32fe0524d5b4c9439db45c1",
"size": "10732",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "yubico/yubikey_neo_usb_hid.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Groff",
"bytes": "3704"
},
{
"name": "Python",
"bytes": "121622"
}
],
"symlink_target": ""
} |
# Config
# Backend used to read/write user records. Valid values:
# mudbjson, sspanelv2, sspanelv3, sspanelv3ssr, glzjinmod,
# legendsockssr, muapiv2 (not supported).
API_INTERFACE = 'mudbjson'
# Refresh interval — presumably seconds between user-database syncs;
# confirm against the consumer of this setting.
UPDATE_TIME = 60
SERVER_PUB_ADDR = '127.0.0.1'  # mujson_mgr needs this to generate the ssr link
# mudb: local JSON user-database file (used by the mudbjson interface).
MUDB_FILE = 'mudb.json'
# Mysql: path to the MySQL connection settings file.
MYSQL_CONFIG = 'usermysql.json'
# API: path to the MU API settings file.
MUAPI_CONFIG = 'usermuapi.json'
| {
"content_hash": "1786746e6c0900dad1d9b90404ce33ac",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 120,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.7283582089552239,
"repo_name": "xuwei95/shadowsocksr",
"id": "2dab5a3d2f8c79e580de62ad70969321b8e09e9b",
"size": "337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apiconfig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "216"
},
{
"name": "CSS",
"bytes": "2354"
},
{
"name": "HTML",
"bytes": "16018"
},
{
"name": "Python",
"bytes": "397788"
},
{
"name": "Shell",
"bytes": "15699"
}
],
"symlink_target": ""
} |
"""Extend the basic Accessory and Bridge functions."""
from __future__ import annotations
import logging
from pyhap.accessory import Accessory, Bridge
from pyhap.accessory_driver import AccessoryDriver
from pyhap.const import CATEGORY_OTHER
from pyhap.util import callback as pyhap_callback
from homeassistant.components import cover
from homeassistant.components.media_player import MediaPlayerDeviceClass
from homeassistant.components.remote import SUPPORT_ACTIVITY
from homeassistant.components.sensor import SensorDeviceClass
from homeassistant.const import (
ATTR_BATTERY_CHARGING,
ATTR_BATTERY_LEVEL,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_HW_VERSION,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_SERVICE,
ATTR_SUPPORTED_FEATURES,
ATTR_SW_VERSION,
ATTR_UNIT_OF_MEASUREMENT,
CONF_NAME,
CONF_TYPE,
LIGHT_LUX,
PERCENTAGE,
STATE_ON,
STATE_UNAVAILABLE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
__version__,
)
from homeassistant.core import Context, callback as ha_callback, split_entity_id
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.util.decorator import Registry
from .const import (
ATTR_DISPLAY_NAME,
ATTR_INTEGRATION,
ATTR_VALUE,
BRIDGE_MODEL,
BRIDGE_SERIAL_NUMBER,
CHAR_BATTERY_LEVEL,
CHAR_CHARGING_STATE,
CHAR_HARDWARE_REVISION,
CHAR_STATUS_LOW_BATTERY,
CONF_FEATURE_LIST,
CONF_LINKED_BATTERY_CHARGING_SENSOR,
CONF_LINKED_BATTERY_SENSOR,
CONF_LOW_BATTERY_THRESHOLD,
DEFAULT_LOW_BATTERY_THRESHOLD,
DOMAIN,
EVENT_HOMEKIT_CHANGED,
HK_CHARGING,
HK_NOT_CHARGABLE,
HK_NOT_CHARGING,
MANUFACTURER,
MAX_MANUFACTURER_LENGTH,
MAX_MODEL_LENGTH,
MAX_SERIAL_LENGTH,
MAX_VERSION_LENGTH,
SERV_ACCESSORY_INFO,
SERV_BATTERY_SERVICE,
SERVICE_HOMEKIT_RESET_ACCESSORY,
TYPE_FAUCET,
TYPE_OUTLET,
TYPE_SHOWER,
TYPE_SPRINKLER,
TYPE_SWITCH,
TYPE_VALVE,
)
from .util import (
accessory_friendly_name,
async_dismiss_setup_message,
async_show_setup_message,
cleanup_name_for_homekit,
convert_to_float,
format_version,
validate_media_player_features,
)
_LOGGER = logging.getLogger(__name__)
# Maps the configured switch "type" option to the name of the HomeKit
# accessory class that should represent it (a key into TYPES below).
SWITCH_TYPES = {
    TYPE_FAUCET: "Valve",
    TYPE_OUTLET: "Outlet",
    TYPE_SHOWER: "Valve",
    TYPE_SPRINKLER: "Valve",
    TYPE_SWITCH: "Switch",
    TYPE_VALVE: "Valve",
}
# Registry mapping accessory class names to HomeAccessory subclasses.
# NOTE(review): presumably populated by the per-type accessory modules
# via a register decorator — confirm where entries are added.
TYPES: Registry[str, type[HomeAccessory]] = Registry()
def get_accessory(hass, driver, state, aid, config):  # noqa: C901
    """Take state and return an accessory object if supported.

    Returns None when the entity has no valid aid or no accessory type
    matches its domain/attributes. NOTE: the elif ordering below is
    load-bearing — e.g. "remote" with activity support must be matched
    before the generic switch-like domain list.
    """
    if not aid:
        _LOGGER.warning(
            'The entity "%s" is not supported, since it '
            "generates an invalid aid, please change it",
            state.entity_id,
        )
        return None
    a_type = None
    name = config.get(CONF_NAME, state.name)
    features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
    if state.domain == "alarm_control_panel":
        a_type = "SecuritySystem"
    elif state.domain in ("binary_sensor", "device_tracker", "person"):
        a_type = "BinarySensor"
    elif state.domain == "climate":
        a_type = "Thermostat"
    elif state.domain == "cover":
        # Covers are classified by device class first, then by which
        # position/open/close/tilt features they support.
        device_class = state.attributes.get(ATTR_DEVICE_CLASS)
        if device_class in (
            cover.CoverDeviceClass.GARAGE,
            cover.CoverDeviceClass.GATE,
        ) and features & (cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE):
            a_type = "GarageDoorOpener"
        elif (
            device_class == cover.CoverDeviceClass.WINDOW
            and features & cover.SUPPORT_SET_POSITION
        ):
            a_type = "Window"
        elif features & cover.SUPPORT_SET_POSITION:
            a_type = "WindowCovering"
        elif features & (cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE):
            a_type = "WindowCoveringBasic"
        elif features & cover.SUPPORT_SET_TILT_POSITION:
            # WindowCovering and WindowCoveringBasic both support tilt
            # only WindowCovering can handle the covers that are missing
            # SUPPORT_SET_POSITION, SUPPORT_OPEN, and SUPPORT_CLOSE
            a_type = "WindowCovering"
    elif state.domain == "fan":
        a_type = "Fan"
    elif state.domain == "humidifier":
        a_type = "HumidifierDehumidifier"
    elif state.domain == "light":
        a_type = "Light"
    elif state.domain == "lock":
        a_type = "Lock"
    elif state.domain == "media_player":
        device_class = state.attributes.get(ATTR_DEVICE_CLASS)
        feature_list = config.get(CONF_FEATURE_LIST, [])
        if device_class == MediaPlayerDeviceClass.TV:
            a_type = "TelevisionMediaPlayer"
        elif validate_media_player_features(state, feature_list):
            a_type = "MediaPlayer"
    elif state.domain == "sensor":
        # Sensors match on device class or unit; several branches also
        # fall back to a substring match on the entity_id.
        device_class = state.attributes.get(ATTR_DEVICE_CLASS)
        unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        if device_class == SensorDeviceClass.TEMPERATURE or unit in (
            TEMP_CELSIUS,
            TEMP_FAHRENHEIT,
        ):
            a_type = "TemperatureSensor"
        elif device_class == SensorDeviceClass.HUMIDITY and unit == PERCENTAGE:
            a_type = "HumiditySensor"
        elif (
            device_class == SensorDeviceClass.PM10
            or SensorDeviceClass.PM10 in state.entity_id
        ):
            a_type = "PM10Sensor"
        elif (
            device_class == SensorDeviceClass.PM25
            or SensorDeviceClass.PM25 in state.entity_id
        ):
            a_type = "PM25Sensor"
        elif (
            device_class == SensorDeviceClass.GAS
            or SensorDeviceClass.GAS in state.entity_id
        ):
            a_type = "AirQualitySensor"
        elif device_class == SensorDeviceClass.CO:
            a_type = "CarbonMonoxideSensor"
        elif device_class == SensorDeviceClass.CO2 or "co2" in state.entity_id:
            a_type = "CarbonDioxideSensor"
        elif device_class == SensorDeviceClass.ILLUMINANCE or unit in ("lm", LIGHT_LUX):
            a_type = "LightSensor"
    elif state.domain == "switch":
        switch_type = config.get(CONF_TYPE, TYPE_SWITCH)
        a_type = SWITCH_TYPES[switch_type]
    elif state.domain == "vacuum":
        a_type = "Vacuum"
    elif state.domain == "remote" and features & SUPPORT_ACTIVITY:
        a_type = "ActivityRemote"
    elif state.domain in (
        "automation",
        "button",
        "input_boolean",
        "input_button",
        "remote",
        "scene",
        "script",
    ):
        a_type = "Switch"
    elif state.domain in ("input_select", "select"):
        a_type = "SelectSwitch"
    elif state.domain == "water_heater":
        a_type = "WaterHeater"
    elif state.domain == "camera":
        a_type = "Camera"
    if a_type is None:
        return None
    _LOGGER.debug('Add "%s" as "%s"', state.entity_id, a_type)
    # Instantiate the registered accessory class for the resolved type.
    return TYPES[a_type](hass, driver, name, state.entity_id, aid, config)
class HomeAccessory(Accessory):
    """Adapter class for Accessory.

    Bridges a single Home Assistant entity (or device) to a pyhap
    Accessory: fills in the HomeKit accessory-information service,
    optionally attaches a battery service, and keeps HomeKit
    characteristics in sync with entity state changes.
    """
    def __init__(
        self,
        hass,
        driver,
        name,
        entity_id,
        aid,
        config,
        *args,
        category=CATEGORY_OTHER,
        device_id=None,
        **kwargs,
    ):
        """Initialize a Accessory object."""
        super().__init__(
            driver=driver,
            display_name=cleanup_name_for_homekit(name),
            aid=aid,
            *args,
            **kwargs,
        )
        self.config = config or {}
        # Device-backed accessories use the device id as serial number;
        # entity-backed ones use the entity id and derive a human-readable
        # domain label for manufacturer/model fallbacks.
        if device_id:
            self.device_id = device_id
            serial_number = device_id
            domain = None
        else:
            self.device_id = None
            serial_number = entity_id
            domain = split_entity_id(entity_id)[0].replace("_", " ")
        if self.config.get(ATTR_MANUFACTURER) is not None:
            manufacturer = str(self.config[ATTR_MANUFACTURER])
        elif self.config.get(ATTR_INTEGRATION) is not None:
            manufacturer = self.config[ATTR_INTEGRATION].replace("_", " ").title()
        elif domain:
            manufacturer = f"{MANUFACTURER} {domain}".title()
        else:
            manufacturer = MANUFACTURER
        if self.config.get(ATTR_MODEL) is not None:
            model = str(self.config[ATTR_MODEL])
        elif domain:
            model = domain.title()
        else:
            model = MANUFACTURER
        sw_version = None
        if self.config.get(ATTR_SW_VERSION) is not None:
            sw_version = format_version(self.config[ATTR_SW_VERSION])
        if sw_version is None:
            # Fall back to the Home Assistant version string.
            sw_version = format_version(__version__)
        hw_version = None
        if self.config.get(ATTR_HW_VERSION) is not None:
            hw_version = format_version(self.config[ATTR_HW_VERSION])
        # HomeKit enforces maximum string lengths on the info service.
        self.set_info_service(
            manufacturer=manufacturer[:MAX_MANUFACTURER_LENGTH],
            model=model[:MAX_MODEL_LENGTH],
            serial_number=serial_number[:MAX_SERIAL_LENGTH],
            firmware_revision=sw_version[:MAX_VERSION_LENGTH],
        )
        if hw_version:
            # Hardware revision is not part of the default info service;
            # add the characteristic and wire it up manually.
            serv_info = self.get_service(SERV_ACCESSORY_INFO)
            char = self.driver.loader.get_char(CHAR_HARDWARE_REVISION)
            serv_info.add_characteristic(char)
            serv_info.configure_char(
                CHAR_HARDWARE_REVISION, value=hw_version[:MAX_VERSION_LENGTH]
            )
            self.iid_manager.assign(char)
            char.broker = self
        self.category = category
        self.entity_id = entity_id
        self.hass = hass
        self._subscriptions = []
        # Device-backed accessories have no entity state to track, so the
        # battery service setup below does not apply to them.
        if device_id:
            return
        self._char_battery = None
        self._char_charging = None
        self._char_low_battery = None
        self.linked_battery_sensor = self.config.get(CONF_LINKED_BATTERY_SENSOR)
        self.linked_battery_charging_sensor = self.config.get(
            CONF_LINKED_BATTERY_CHARGING_SENSOR
        )
        self.low_battery_threshold = self.config.get(
            CONF_LOW_BATTERY_THRESHOLD, DEFAULT_LOW_BATTERY_THRESHOLD
        )
        # Add battery service if available
        entity_attributes = self.hass.states.get(self.entity_id).attributes
        battery_found = entity_attributes.get(ATTR_BATTERY_LEVEL)
        if self.linked_battery_sensor:
            state = self.hass.states.get(self.linked_battery_sensor)
            if state is not None:
                battery_found = state.state
            else:
                # Linked sensor is missing; drop the link and fall back to
                # the entity's own battery attribute (if any).
                self.linked_battery_sensor = None
                _LOGGER.warning(
                    "%s: Battery sensor state missing: %s",
                    self.entity_id,
                    self.linked_battery_sensor,
                )
        if not battery_found:
            return
        _LOGGER.debug("%s: Found battery level", self.entity_id)
        if self.linked_battery_charging_sensor:
            state = self.hass.states.get(self.linked_battery_charging_sensor)
            if state is None:
                self.linked_battery_charging_sensor = None
                _LOGGER.warning(
                    "%s: Battery charging binary_sensor state missing: %s",
                    self.entity_id,
                    self.linked_battery_charging_sensor,
                )
            else:
                _LOGGER.debug("%s: Found battery charging", self.entity_id)
        serv_battery = self.add_preload_service(SERV_BATTERY_SERVICE)
        self._char_battery = serv_battery.configure_char(CHAR_BATTERY_LEVEL, value=0)
        self._char_charging = serv_battery.configure_char(
            CHAR_CHARGING_STATE, value=HK_NOT_CHARGABLE
        )
        self._char_low_battery = serv_battery.configure_char(
            CHAR_STATUS_LOW_BATTERY, value=0
        )
    @property
    def available(self):
        """Return if accessory is available."""
        state = self.hass.states.get(self.entity_id)
        return state is not None and state.state != STATE_UNAVAILABLE
    async def run(self):
        """Handle accessory driver started event.

        Pushes the current entity state to HomeKit and registers state
        change listeners (entity, linked battery, linked charging).
        """
        state = self.hass.states.get(self.entity_id)
        self.async_update_state_callback(state)
        self._subscriptions.append(
            async_track_state_change_event(
                self.hass, [self.entity_id], self.async_update_event_state_callback
            )
        )
        battery_charging_state = None
        battery_state = None
        if self.linked_battery_sensor:
            linked_battery_sensor_state = self.hass.states.get(
                self.linked_battery_sensor
            )
            battery_state = linked_battery_sensor_state.state
            battery_charging_state = linked_battery_sensor_state.attributes.get(
                ATTR_BATTERY_CHARGING
            )
            self._subscriptions.append(
                async_track_state_change_event(
                    self.hass,
                    [self.linked_battery_sensor],
                    self.async_update_linked_battery_callback,
                )
            )
        elif state is not None:
            battery_state = state.attributes.get(ATTR_BATTERY_LEVEL)
        if self.linked_battery_charging_sensor:
            # NOTE: `state` is rebound to the charging sensor state here.
            state = self.hass.states.get(self.linked_battery_charging_sensor)
            battery_charging_state = state and state.state == STATE_ON
            self._subscriptions.append(
                async_track_state_change_event(
                    self.hass,
                    [self.linked_battery_charging_sensor],
                    self.async_update_linked_battery_charging_callback,
                )
            )
        elif battery_charging_state is None and state is not None:
            battery_charging_state = state.attributes.get(ATTR_BATTERY_CHARGING)
        if battery_state is not None or battery_charging_state is not None:
            self.async_update_battery(battery_state, battery_charging_state)
    @ha_callback
    def async_update_event_state_callback(self, event):
        """Handle state change event listener callback."""
        self.async_update_state_callback(event.data.get("new_state"))
    @ha_callback
    def async_update_state_callback(self, new_state):
        """Handle state change listener callback."""
        _LOGGER.debug("New_state: %s", new_state)
        if new_state is None:
            return
        battery_state = None
        battery_charging_state = None
        # Only use the entity's own battery attributes when no dedicated
        # linked sensors are configured (those have their own callbacks).
        if (
            not self.linked_battery_sensor
            and ATTR_BATTERY_LEVEL in new_state.attributes
        ):
            battery_state = new_state.attributes.get(ATTR_BATTERY_LEVEL)
        if (
            not self.linked_battery_charging_sensor
            and ATTR_BATTERY_CHARGING in new_state.attributes
        ):
            battery_charging_state = new_state.attributes.get(ATTR_BATTERY_CHARGING)
        if battery_state is not None or battery_charging_state is not None:
            self.async_update_battery(battery_state, battery_charging_state)
        self.async_update_state(new_state)
    @ha_callback
    def async_update_linked_battery_callback(self, event):
        """Handle linked battery sensor state change listener callback."""
        if (new_state := event.data.get("new_state")) is None:
            return
        if self.linked_battery_charging_sensor:
            # A dedicated charging sensor exists; don't override it here.
            battery_charging_state = None
        else:
            battery_charging_state = new_state.attributes.get(ATTR_BATTERY_CHARGING)
        self.async_update_battery(new_state.state, battery_charging_state)
    @ha_callback
    def async_update_linked_battery_charging_callback(self, event):
        """Handle linked battery charging sensor state change listener callback."""
        if (new_state := event.data.get("new_state")) is None:
            return
        self.async_update_battery(None, new_state.state == STATE_ON)
    @ha_callback
    def async_update_battery(self, battery_level, battery_charging):
        """Update battery service if available.
        Only call this function if self._support_battery_level is True.
        """
        if not self._char_battery:
            # Battery appeared after homekit was started
            return
        battery_level = convert_to_float(battery_level)
        if battery_level is not None:
            if self._char_battery.value != battery_level:
                self._char_battery.set_value(battery_level)
            is_low_battery = 1 if battery_level < self.low_battery_threshold else 0
            if self._char_low_battery.value != is_low_battery:
                self._char_low_battery.set_value(is_low_battery)
                _LOGGER.debug(
                    "%s: Updated battery level to %d", self.entity_id, battery_level
                )
        # Charging state can appear after homekit was started
        if battery_charging is None or not self._char_charging:
            return
        hk_charging = HK_CHARGING if battery_charging else HK_NOT_CHARGING
        if self._char_charging.value != hk_charging:
            self._char_charging.set_value(hk_charging)
            _LOGGER.debug(
                "%s: Updated battery charging to %d", self.entity_id, hk_charging
            )
    @ha_callback
    def async_update_state(self, new_state):
        """Handle state change to update HomeKit value.
        Overridden by accessory types.
        """
        raise NotImplementedError()
    @ha_callback
    def async_call_service(self, domain, service, service_data, value=None):
        """Fire event and call service for changes from HomeKit."""
        event_data = {
            ATTR_ENTITY_ID: self.entity_id,
            ATTR_DISPLAY_NAME: self.display_name,
            ATTR_SERVICE: service,
            ATTR_VALUE: value,
        }
        # Share one context so the event and the service call are linked.
        context = Context()
        self.hass.bus.async_fire(EVENT_HOMEKIT_CHANGED, event_data, context=context)
        self.hass.async_create_task(
            self.hass.services.async_call(
                domain, service, service_data, context=context
            )
        )
    @ha_callback
    def async_reset(self):
        """Reset and recreate an accessory."""
        self.hass.async_create_task(
            self.hass.services.async_call(
                DOMAIN,
                SERVICE_HOMEKIT_RESET_ACCESSORY,
                {ATTR_ENTITY_ID: self.entity_id},
            )
        )
    async def stop(self):
        """Cancel any subscriptions when the bridge is stopped."""
        # Each subscription entry is an unsubscribe callable.
        while self._subscriptions:
            self._subscriptions.pop(0)()
class HomeBridge(Bridge):
    """Adapter class for Bridge."""

    def __init__(self, hass, driver, name):
        """Initialize a Bridge object."""
        super().__init__(driver, name)
        # Populate the HomeKit accessory-information service for the bridge.
        self.set_info_service(
            firmware_revision=format_version(__version__),
            manufacturer=MANUFACTURER,
            model=BRIDGE_MODEL,
            serial_number=BRIDGE_SERIAL_NUMBER,
        )
        self.hass = hass

    def setup_message(self):
        """Prevent print of pyhap setup message to terminal."""

    async def async_get_snapshot(self, info):
        """Get snapshot from accessory if supported."""
        accessory = self.accessories.get(info["aid"])
        if accessory is None:
            raise ValueError("Requested snapshot for missing accessory")
        if not hasattr(accessory, "async_get_snapshot"):
            raise ValueError(
                "Got a request for snapshot, but the Accessory "
                'does not define a "async_get_snapshot" method'
            )
        return await accessory.async_get_snapshot(info)
class HomeDriver(AccessoryDriver):
    """Adapter class for AccessoryDriver."""

    def __init__(self, hass, entry_id, bridge_name, entry_title, **kwargs):
        """Initialize a AccessoryDriver object."""
        super().__init__(**kwargs)
        self.hass = hass
        self._entry_id = entry_id
        self._bridge_name = bridge_name
        self._entry_title = entry_title

    @pyhap_callback
    def pair(self, client_uuid, client_public, client_permissions):
        """Override super function to dismiss setup message if paired."""
        paired_ok = super().pair(client_uuid, client_public, client_permissions)
        if paired_ok:
            async_dismiss_setup_message(self.hass, self._entry_id)
        return paired_ok

    @pyhap_callback
    def unpair(self, client_uuid):
        """Override super function to show setup message if unpaired."""
        super().unpair(client_uuid)
        # Only re-show the setup card once no controllers remain paired.
        if not self.state.paired:
            async_show_setup_message(
                self.hass,
                self._entry_id,
                accessory_friendly_name(self._entry_title, self.accessory),
                self.state.pincode,
                self.accessory.xhm_uri(),
            )
| {
"content_hash": "2e8f7fab1398ff268dab3c9f2ab752d9",
"timestamp": "",
"source": "github",
"line_count": 602,
"max_line_length": 88,
"avg_line_length": 34.61295681063123,
"alnum_prop": 0.600758266545088,
"repo_name": "GenericStudent/home-assistant",
"id": "c77fa96a5322f46e9fe9561ba7a956ef94178f0d",
"size": "20837",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homekit/accessories.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
from bioagents.tra import kappa_client
__all__ = ['TRA', 'get_ltl_from_pattern', 'apply_condition',
'get_create_observable', 'pysb_to_kappa', 'get_sim_result',
'get_all_patterns', 'TemporalPattern', 'TimeInterval',
'InvalidTemporalPatternError', 'InvalidTimeIntervalError',
'MolecularCondition', 'MolecularQuantity',
'MolecularQuantityReference', 'InvalidMolecularConditionError',
'InvalidMolecularQuantityError',
'InvalidMolecularQuantityRefError', 'SimulatorError']
import os
import numpy
import logging
from time import sleep
from typing import List
from copy import deepcopy
from datetime import datetime
import sympy.physics.units as units
import indra.statements as ist
import indra.assemblers.pysb.assembler as pa
from indra.assemblers.english import assembler as english_assembler
from pysb import Observable
from pysb.integrate import Solver
from pysb.export.kappa import KappaExporter
from pysb.core import ComponentDuplicateNameError
import bioagents.tra.model_checker as mc
import matplotlib
from bioagents import BioagentException, get_img_path
from .model_checker import HypothesisTester
# Select the non-interactive Agg backend so figures can be rendered
# without a display; this must run before pyplot is imported.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
logger = logging.getLogger('TRA')
class TRA(object):
    """Temporal Reasoning Agent: simulates PySB models (via Kappa or ODEs)
    and checks temporal (LTL) properties against the simulated traces."""
    def __init__(self, use_kappa=True, use_kappa_rest=False):
        kappa_mode_label = 'rest' if use_kappa_rest else 'standard'
        if not use_kappa:
            self.ode_mode = True
            logger.info('Using ODE mode in TRA.')
        else:
            self.ode_mode = False
            try:
                self.kappa = kappa_client.KappaRuntime('TRA_simulations',
                                                      use_rest=use_kappa_rest)
                logger.info('Using kappa %s.' % kappa_mode_label)
            except Exception as e:
                # Fall back to ODE simulation if the Kappa runtime is
                # unavailable rather than failing outright.
                logger.error('Could not use kappa %s.' % kappa_mode_label)
                logger.exception(e)
                self.ode_mode = True
        return
    def check_property(self, model: "pysb.Model",
                       pattern: "TemporalPattern",
                       conditions: "List[MolecularCondition]" = None,
                       max_time: float = None,
                       num_times: int = None,
                       num_sim: int = 2,
                       hypothesis_tester: HypothesisTester = None):
        """Simulate the model and check a temporal pattern against it.

        Returns (sat_rate, num_sim, kappa_pattern, pattern_obj, fig_path);
        the pattern entries are None unless a suggestion is made. Returns
        None implicitly if no alternative pattern reaches a 0.5 sat rate.
        """
        # TODO: handle multiple entities (observables) in pattern
        # TODO: set max_time based on some model property if not given
        # NOTE: pattern.time_limit.ub takes precedence over max_time
        # Make an observable for the simulations
        logger.info('Trying to make an observable for: %s',
                    pattern.entities[0])
        obs = get_create_observable(model, pattern.entities[0])
        # Make pattern
        fstr = get_ltl_from_pattern(pattern, obs)
        given_pattern = (fstr is not None)
        # Set the time limit for the simulations
        if pattern.time_limit is not None and pattern.time_limit.ub > 0:
            # Convert sympy.Float to regular float
            max_time = float(pattern.time_limit.get_ub_seconds())
        elif not max_time:
            max_time = 20000
        if not num_times:
            num_times = 100
        # The period at which the output is sampled
        plot_period = int(1.0*max_time / num_times)
        if pattern.time_limit and pattern.time_limit.lb > 0:
            min_time = pattern.time_limit.get_lb_seconds()
            min_time_idx = int(num_times * (1.0*min_time / max_time))
        else:
            min_time_idx = 0
        # If we have a specific pattern to test, and also a hypothesis tester
        # passed in, then we do adaptive sample size model checking to
        # determine if the given property is satisfied.
        if given_pattern and hypothesis_tester:
            # We have to create these lists since they are required
            # downstream for plotting and reporting.
            yobs_list = []
            results = []
            thresholds = []
            truths = []
            # We simulate and run model checking until the hypothesis
            # tester tells us to stop.
            while True:
                # This runs a single simulation
                result = self.run_simulations(model, conditions, 1,
                                              min_time_idx, max_time, plot_period)[0]
                results.append(result)
                yobs = deepcopy(result[1])
                threshold = self.discretize_obs(model, yobs, obs.name)
                MC = mc.ModelChecker(fstr, yobs)
                logger.info('Main property %s' % MC.truth)
                truths.append(MC.truth)
                thresholds.append(threshold)
                yobs_list.append(yobs)
                # We run the hypothesis tester here on the list of true/false
                # values collected so far and if we get 1 or -1, we can stop.
                ht_result = hypothesis_tester.test(truths)
                if ht_result is not None:
                    break
            # We now calculate some statistics needed below
            num_sim = len(results)
            results_copy = deepcopy(results)
            sat_rate = numpy.count_nonzero(truths) / (1.0 * num_sim)
            make_suggestion = (sat_rate < 0.3)
            if make_suggestion:
                logger.info('MAKING SUGGESTION with sat rate %.2f.' % sat_rate)
        # In this case, we run simulation with a fixed num_sim and don't
        # use hypothesis testing.
        else:
            results = self.run_simulations(model, conditions, num_sim,
                                           min_time_idx, max_time,
                                           plot_period)
            results_copy = deepcopy(results)
            yobs_list = [yobs for _, yobs in results]
            # Discretize observations
            # WARNING: yobs is changed by discretize_obs in place
            thresholds = [self.discretize_obs(model, yobs, obs.name)
                          for yobs in yobs_list]
            # We check for the given pattern
            if given_pattern:
                truths = []
                for yobs in yobs_list:
                    # Run model checker on the given pattern
                    MC = mc.ModelChecker(fstr, yobs)
                    logger.info('Main property %s' % MC.truth)
                    truths.append(MC.truth)
                sat_rate = numpy.count_nonzero(truths) / (1.0*num_sim)
                make_suggestion = (sat_rate < 0.3)
                if make_suggestion:
                    logger.info('MAKING SUGGESTION with sat rate %.2f.' % sat_rate)
            else:
                make_suggestion = True
        fig_path = self.plot_results(results_copy, pattern.entities[0],
                                     obs.name, thresholds[0])
        # If no suggestion is to be made, we return
        if not make_suggestion:
            return sat_rate, num_sim, None, None, fig_path
        # Run model checker on all patterns
        all_patterns = get_all_patterns(obs.name)
        for fs, kpat, pat_obj in all_patterns:
            logger.info('Testing pattern: %s' % kpat)
            truths = []
            for yobs in yobs_list:
                MC = mc.ModelChecker(fs, yobs)
                logger.info('Property %s' % MC.truth)
                truths.append(MC.truth)
            sat_rate_new = numpy.count_nonzero(truths) / (1.0*num_sim)
            if sat_rate_new > 0.5:
                if not given_pattern:
                    return sat_rate_new, num_sim, kpat, pat_obj, fig_path
                else:
                    return sat_rate, num_sim, kpat, pat_obj, fig_path
    def compare_conditions(self, model, condition_agent, target_agent, up_dn,
                           max_time=None, num_times=101):
        """Compare the target observable with and without a condition.

        Simulates at condition multipliers 0 and 100 and classifies the
        average difference as yes/no increase/decrease or no_change.
        """
        if not max_time:
            max_time = 20000
        if not num_times:
            num_times = 101
        obs = get_create_observable(model, target_agent)
        cond_quant = MolecularQuantityReference('total', condition_agent)
        all_results = []
        plot_period = max_time / (num_times - 1)
        ts = numpy.linspace(0, max_time, num_times)
        mults = [0.0, 100.0]
        for mult in mults:
            condition = MolecularCondition('multiple', cond_quant, mult)
            results = self.run_simulations(model, [condition], 1, 0,
                                           max_time, plot_period)
            obs_values = results[0][1][obs.name]
            all_results.append(obs_values)
        # Plotting
        fig_path = self.plot_compare_conditions(ts, all_results, target_agent,
                                                obs.name)
        # Mean per-sample difference between the two trajectories.
        diff = numpy.sum(all_results[-1][:len(ts)] -
                         all_results[0][:len(ts)]) / len(ts)
        logger.info('TRA condition difference: %.2f' % diff)
        # If there is a decrease in the observable, we return True
        if abs(diff) < 0.01:
            res = 'no_change'
        elif up_dn == 'dn':
            res = 'yes_decrease' if (diff < 0) else 'no_increase'
        else:
            res = 'no_decrease' if (diff < 0) else 'yes_increase'
        return res, fig_path
    def plot_compare_conditions(self, ts, results, agent, obs_name):
        """Plot the with/without-condition trajectories; return the file path."""
        max_val_lim = max((numpy.max(results[0]) + 0.25*numpy.max(results[0])),
                          (numpy.max(results[1]) + 0.25*numpy.max(results[1])),
                          101.0)
        plt.figure()
        plt.ion()
        plt.plot(ts, results[0][:len(ts)], label='Without condition')
        plt.plot(ts, results[-1][:len(ts)], label='With condition')
        plt.ylim(-5, max_val_lim)
        plt.xlabel('Time (s)')
        plt.ylabel('Amount (molecules)')
        agent_str = english_assembler._assemble_agent_str(agent).agent_str
        plt.title('Simulation results for %s' % agent_str)
        plt.legend()
        fig_path = get_img_path(obs_name + '.png')
        plt.savefig(fig_path)
        return fig_path
    def plot_results(self, results, agent, obs_name, thresh=50):
        """Plot all simulated trajectories with low/high shading; return path."""
        plt.figure()
        plt.ion()
        max_val_lim = max(max((numpy.max(results[0][1][obs_name]) + 0.25*numpy.max(results[0][1][obs_name])), 101.0),
                          thresh)
        max_time = max([result[0][-1] for result in results])
        # Shade the regions below/above the discretization threshold.
        lr = matplotlib.patches.Rectangle((0, 0), max_time, thresh, color='red',
                                          alpha=0.1)
        hr = matplotlib.patches.Rectangle((0, thresh), max_time,
                                          max_val_lim-thresh,
                                          color='green', alpha=0.1)
        ax = plt.gca()
        ax.add_patch(lr)
        ax.add_patch(hr)
        if thresh + 0.05*max_val_lim < max_val_lim:
            plt.text(10, thresh + 0.05*max_val_lim, 'High', fontsize=10)
        plt.text(10, thresh - 0.05*max_val_lim, 'Low')
        for tspan, yobs in results:
            plt.plot(tspan, yobs[obs_name])
        plt.ylim(-5, max_val_lim)
        plt.xlim(-max_time/100, max_time+max_time/100)
        plt.xlabel('Time (s)')
        plt.ylabel('Amount (molecules)')
        agent_str = english_assembler._assemble_agent_str(agent).agent_str
        plt.title('Simulation results for %s' % agent_str)
        fig_path = get_img_path(obs_name + '.png')
        plt.savefig(fig_path)
        return fig_path
    def run_simulations(self, model, conditions, num_sim, min_time_idx,
                        max_time, plot_period):
        """Run num_sim simulations; return a list of (tspan, yobs) pairs,
        each truncated to start at min_time_idx."""
        logger.info('Running %d simulations with time limit of %d and plot '
                    'period of %d.' % (num_sim, max_time, plot_period))
        self.sol = None
        results = []
        for i in range(num_sim):
            # Apply molecular condition to model
            try:
                model_sim = self.condition_model(model, conditions)
            except MissingMonomerError as e:
                raise e
            except Exception as e:
                logger.exception(e)
                msg = 'Applying molecular condition failed.'
                raise InvalidMolecularConditionError(msg)
            # Run a simulation
            logger.info('Starting simulation %d' % (i+1))
            if not self.ode_mode:
                try:
                    tspan, yobs = self.simulate_kappa(model_sim, max_time,
                                                     plot_period)
                except Exception as e:
                    logger.exception(e)
                    raise SimulatorError('Kappa simulation failed.')
            else:
                tspan, yobs = self.simulate_odes(model_sim, max_time,
                                                 plot_period)
            # Get and plot observable
            start_idx = min(min_time_idx, len(yobs))
            yobs_from_min = yobs[start_idx:]
            tspan = tspan[start_idx:]
            results.append((tspan, yobs_from_min))
        return results
    def discretize_obs(self, model, yobs, obs_name):
        """Discretize yobs[obs_name] in place to 0/1 around a threshold;
        return the threshold used. WARNING: mutates yobs."""
        # TODO: This needs to be done in a model/observable-dependent way
        default_total_val = 100
        start_val = yobs[obs_name][0]
        max_val = numpy.max(yobs[obs_name])
        min_val = numpy.min(yobs[obs_name])
        # If starts low, discretize wrt total value
        if start_val < 1e-5:
            thresh = 0.3 * default_total_val
        # If starts high, discretize wrt range with a certain minimum
        else:
            thresh = start_val + max(0.5*(max_val - min_val),
                                     default_total_val * 0.10)
        for i, v in enumerate(yobs[obs_name]):
            yobs[obs_name][i] = 1 if v > thresh else 0
        return thresh
    def condition_model(self, model, conditions):
        """Return a (deep-copied) model with conditions applied, or the
        original model when there are no conditions."""
        # Set up simulation conditions
        if conditions:
            model_sim = deepcopy(model)
            for condition in conditions:
                apply_condition(model_sim, condition)
        else:
            model_sim = model
        return model_sim
    def simulate_kappa(self, model_sim, max_time, plot_period):
        """Simulate the model on the Kappa runtime; return (tspan, yobs)."""
        # Export kappa model
        kappa_model = pysb_to_kappa(model_sim)
        # Start simulation
        self.kappa.compile(code_list=[kappa_model])
        self.kappa.start_sim(plot_period=plot_period,
                             pause_condition="[T] > %d" % max_time)
        while True:
            sleep(0.2)
            status_json = self.kappa.sim_status()['simulation_info_progress']
            is_running = status_json.get('simulation_progress_is_running')
            if not is_running:
                break
            else:
                # NOTE(review): the key checked here ('time_percentage')
                # differs from the key logged below
                # ('simulation_progress_time_percentage') — confirm against
                # the kappa client's status schema.
                if status_json.get('time_percentage') is not None:
                    logger.info(
                        'Sim time percentage: %d' %
                        status_json.get('simulation_progress_time_percentage')
                        )
        tspan, yobs = get_sim_result(self.kappa.sim_plot())
        self.kappa.reset_project()
        return tspan, yobs
    def simulate_odes(self, model_sim, max_time, plot_period):
        """Simulate the model by ODE integration; return (ts, yobs).
        The Solver instance is cached on self.sol across calls."""
        ts = numpy.linspace(0, max_time, int(1.0*max_time/plot_period) + 1)
        if self.sol is None:
            self.sol = Solver(model_sim, ts)
        self.sol.run()
        return ts, self.sol.yobs
def get_ltl_from_pattern(pattern, obs):
    """Translate a TemporalPattern into an LTL formula string over obs.

    Returns None when no pattern_type is set; raises
    InvalidTemporalPatternError for unsupported patterns or values.
    """
    if not pattern.pattern_type:
        return None

    def qualitative_value(label):
        # Shared validation for patterns carrying a qualitative value;
        # `label` is interpolated into the error messages.
        if not pattern.value.quant_type == 'qualitative':
            msg = 'Cannot handle %s value of "%s" type.' % \
                (label, pattern.value.quant_type)
            raise InvalidTemporalPatternError(msg)
        if pattern.value.value == 'low':
            return 0
        if pattern.value.value == 'high':
            return 1
        msg = 'Cannot handle %s value of "%s".' % (label, pattern.value.value)
        raise InvalidTemporalPatternError(msg)

    ptype = pattern.pattern_type
    if ptype == 'transient':
        return mc.transient_formula(obs.name)
    if ptype == 'sustained':
        return mc.sustained_formula(obs.name)
    if ptype in ('no_change', 'always_value'):
        # A missing value means "no activity change" rather than a
        # specific always-low/always-high property.
        if not hasattr(pattern, 'value') or pattern.value is None:
            return mc.noact_formula(obs.name)
        return mc.always_formula(obs.name, qualitative_value('always'))
    if ptype == 'eventual_value':
        return mc.eventual_formula(obs.name, qualitative_value('eventual'))
    if ptype == 'sometime_value':
        return mc.sometime_formula(obs.name, qualitative_value('sometime'))
    raise InvalidTemporalPatternError('Unknown pattern %s' % ptype)
def apply_condition(model, condition):
    """Apply a MolecularCondition to the initial conditions of a PySB model.

    Mutates *model* in place: the initial-condition parameter for the
    condition's target monomer is set ('exact'), scaled ('multiple'),
    or nudged by +/-10% ('increase'/'decrease').

    Raises MissingMonomerError if the agent is not in the model.
    """
    agent = condition.quantity.entity
    try:
        monomer = model.monomers[pa._n(agent.name)]
    except KeyError:
        raise MissingMonomerError('%s is not in the model ' % agent.name,
                                  agent)
    site_pattern = pa.get_site_pattern(agent)
    # TODO: handle modified patterns
    if site_pattern:
        # Site/state information on the agent is ignored for now.
        logger.warning('Cannot handle initial conditions on' +
                       ' modified monomers.')
    if condition.condition_type == 'exact':
        ic_name = monomer.name + '_0'
        if condition.value.quant_type == 'number':
            pa.set_base_initial_condition(model, monomer,
                                          condition.value.value)
        else:
            logger.warning('Cannot handle non-number initial conditions')
    elif condition.condition_type == 'multiple':
        # TODO: refer to annotations for the IC name
        ic_name = monomer.name + '_0'
        model.parameters[ic_name].value *= condition.value
    elif condition.condition_type == 'decrease':
        ic_name = monomer.name + '_0'
        model.parameters[ic_name].value *= 0.9
    elif condition.condition_type == 'increase':
        ic_name = monomer.name + '_0'
        model.parameters[ic_name].value *= 1.1
    # NOTE(review): assumes the '<monomer>_0' parameter exists on the model
    # by this point -- would raise KeyError otherwise; confirm with callers.
    logger.info('New initial condition: %s' % model.parameters[ic_name])
def get_create_observable(model, agent):
    """Get or create a PySB Observable for *agent*'s state on *model*.

    Raises MissingMonomerError if the agent's monomer is not in the model,
    and MissingMonomerSiteError if its site pattern is invalid for that
    monomer. If an observable with the same name already exists on the
    model, the duplicate is silently ignored.
    """
    site_pattern = pa.get_site_pattern(agent)
    obs_name = pa.get_agent_rule_str(agent) + '_obs'
    try:
        monomer = model.monomers[pa._n(agent.name)]
    except KeyError:
        raise MissingMonomerError('%s is not in the model ' % agent.name,
                                  agent)
    try:
        monomer_state = monomer(site_pattern)
    except Exception:
        msg = 'Site pattern %s invalid for monomer %s' % \
            (site_pattern, monomer.name)
        raise MissingMonomerSiteError(msg)
    # Reuse the MonomerPattern validated above instead of rebuilding it
    # (previously monomer(site_pattern) was evaluated a second time).
    obs = Observable(obs_name, monomer_state)
    try:
        model.add_component(obs)
    except ComponentDuplicateNameError:
        # An observable with this name already exists; keep going.
        pass
    return obs
def pysb_to_kappa(model):
    """Export a PySB model as a Kappa model string."""
    return KappaExporter(model).export()
def get_sim_result(kappa_plot):
    """Convert a Kappa plot dict into a (tspan, yobs) simulation result.

    *kappa_plot* has a 'legend' list of column names (including '[T]' for
    time) and a 'series' list of rows. Returns the sorted time points and
    a numpy structured array with one field per non-time observable.

    Note: sorts kappa_plot['series'] in place, as the original did.
    """
    series = kappa_plot['series']
    time_col = kappa_plot['legend'].index('[T]')
    series.sort(key=lambda row: row[time_col])
    # Map column index -> observable name, skipping the time column.
    obs_columns = {
        col: name for col, name in enumerate(kappa_plot['legend'])
        if name != '[T]'
    }
    dtype = list(zip(obs_columns.values(), [float] * len(obs_columns)))
    yobs = numpy.ndarray(len(series), dtype)
    tspan = []
    for row_ix, row in enumerate(series):
        tspan.append(row[time_col])
        for col, name in obs_columns.items():
            yobs[name][row_ix] = row[col]
    return (tspan, yobs)
def get_all_patterns(obs_name):
    """Enumerate all supported temporal patterns for one observable.

    Returns a list of (formula_string, kqml_pattern_string, TemporalPattern)
    triples covering: always low/high, eventually low/high, transient,
    sustained, sometime low/high, and no-change.
    """
    patterns = []
    # Always high/low
    for val_num, val_str in zip((0, 1), ('low', 'high')):
        fstr = mc.always_formula(obs_name, val_num)
        kpattern = (
            '(:type "no_change" '
            ':value (:type "qualitative" :value "%s"))' % val_str
            )
        pattern = TemporalPattern('no_change', [], None,
                                  value=MolecularQuantity('qualitative',
                                                          '%s' % val_str))
        patterns.append((fstr, kpattern, pattern))
    # Eventually high/low
    for val_num, val_str in zip((0, 1), ('low', 'high')):
        fstr = mc.eventual_formula(obs_name, val_num)
        kpattern = (
            '(:type "eventual_value" '
            ':value (:type "qualitative" :value "%s"))' % val_str
            )
        pattern = TemporalPattern('eventual_value', [], None,
                                  value=MolecularQuantity('qualitative',
                                                          '%s' % val_str))
        patterns.append((fstr, kpattern, pattern))
    # Transient
    fstr = mc.transient_formula(obs_name)
    kpattern = '(:type "transient")'
    pattern = TemporalPattern('transient', [], None)
    patterns.append((fstr, kpattern, pattern))
    # Sustained
    fstr = mc.sustained_formula(obs_name)
    kpattern = '(:type "sustained")'
    pattern = TemporalPattern('sustained', [], None)
    patterns.append((fstr, kpattern, pattern))
    # Sometime high/low
    for val_num, val_str in zip((0, 1), ('low', 'high')):
        fstr = mc.sometime_formula(obs_name, val_num)
        kpattern = (
            '(:type "sometime_value" '
            ':value (:type "qualitative" :value "%s"))' % val_str
            )
        pattern = TemporalPattern('sometime_value', [], None,
                                  value=MolecularQuantity('qualitative',
                                                          '%s' % val_str))
        patterns.append((fstr, kpattern, pattern))
    # No change any value
    fstr = mc.noact_formula(obs_name)
    kpattern = '(:type "no_change")'
    pattern = TemporalPattern('no_change', [], None)
    patterns.append((fstr, kpattern, pattern))
    return patterns
# #############################################################
# Classes for representing time intervals and temporal patterns
# #############################################################
class TemporalPattern(object):
    """A temporal pattern over molecular entities.

    Parameters
    ----------
    pattern_type : str
        One of 'transient', 'sustained', 'no_change', 'always_value',
        'eventual_value', 'sometime_value'.
    entities : List[ist.Agent]
        The Agents the pattern refers to.
    time_limit :
        Optional time restriction for the pattern (presumably a
        TimeInterval -- confirm with callers).
    value : MolecularQuantity, keyword-only
        Required for value-carrying pattern types; optional for
        'no_change'.
    """
    def __init__(self, pattern_type: str, entities: List[ist.Agent], time_limit, **kwargs):
        self.pattern_type = pattern_type
        self.entities = entities
        self.time_limit = time_limit
        # TODO: handle extra arguments by pattern type
        if self.pattern_type in \
            ('always_value', 'no_change', 'eventual_value', 'sometime_value'):
            value = kwargs.get('value')
            if value is None:
                # Value is optional for no_change
                if self.pattern_type != 'no_change':
                    msg = 'Missing molecular quantity'
                    raise InvalidTemporalPatternError(msg)
            self.value = value
class InvalidTemporalPatternError(BioagentException):
    """Raised when a temporal pattern cannot be constructed or interpreted."""
    pass
class TimeInterval(object):
    """A time interval with optional lower/upper bounds as sympy quantities.

    Bounds of None mean "unbounded" on that side; numeric bounds are
    combined with the given unit ('day', 'hour', 'minute' or 'second').
    Raises InvalidTimeIntervalError on an unknown unit or non-numeric
    bound.
    """
    def __init__(self, lb, ub, unit: str):
        unit_map = {
            'day': units.day,
            'hour': units.hour,
            'minute': units.minute,
            'second': units.second,
        }
        try:
            sym_unit = unit_map[unit]
        except KeyError:
            raise InvalidTimeIntervalError('Invalid unit %s' % unit)
        self.lb = self._to_quantity(lb, sym_unit)
        self.ub = self._to_quantity(ub, sym_unit)

    @staticmethod
    def _to_quantity(bound, sym_unit):
        """Turn a numeric bound into a sympy quantity; None passes through."""
        if bound is None:
            return None
        try:
            return float(bound) * sym_unit
        except ValueError:
            raise InvalidTimeIntervalError('Bad bound %s' % bound)

    def _convert_to_sec(self, val):
        """Express *val* in seconds (None stays None)."""
        if val is None:
            return None
        try:
            # sympy >= 1.1 provides convert_to
            return units.convert_to(val, units.seconds).args[0]
        except Exception:
            # sympy < 1.1: divide by the seconds unit directly
            return val / units.seconds

    def get_lb_seconds(self):
        """Lower bound in seconds, or None if unbounded."""
        return self._convert_to_sec(self.lb)

    def get_ub_seconds(self):
        """Upper bound in seconds, or None if unbounded."""
        return self._convert_to_sec(self.ub)
class InvalidTimeIntervalError(BioagentException):
    """Raised for unknown time units or non-numeric interval bounds."""
    pass
# ############################################################
# Classes for representing molecular quantities and conditions
# ############################################################
class MolecularQuantityReference(object):
    """Reference to a quantity ('total' or 'initial') of a molecular Agent.

    Raises InvalidMolecularQuantityRefError for unknown quantity types or
    non-Agent entities.
    """
    def __init__(self, quant_type: str, entity: ist.Agent):
        if quant_type not in ('total', 'initial'):
            raise InvalidMolecularQuantityRefError(
                'Unknown quantity type %s' % quant_type)
        self.quant_type = quant_type
        if not isinstance(entity, ist.Agent):
            raise InvalidMolecularQuantityRefError('Invalid molecular Agent')
        self.entity = entity
class MolecularQuantity(object):
    """A concrete molecular quantity.

    Supports three quantity types:
    - 'concentration': numeric value with unit mM/uM/nM/pM, stored as a
      sympy quantity in mol/liter;
    - 'number': non-negative integer copy number;
    - 'qualitative': the string 'low' or 'high'.

    Raises InvalidMolecularQuantityError for anything else.
    """
    def __init__(self, quant_type: str, value: str, unit: str = None):
        if quant_type == 'concentration':
            self.value = self._parse_concentration(value, unit)
        elif quant_type == 'number':
            self.value = self._parse_number(value)
        elif quant_type == 'qualitative':
            self.value = self._parse_qualitative(value)
        else:
            raise InvalidMolecularQuantityError('Invalid quantity type %s' %
                                                quant_type)
        self.quant_type = quant_type

    @staticmethod
    def _parse_concentration(value, unit):
        """Parse a concentration value/unit pair into a sympy quantity."""
        try:
            val = float(value)
        except ValueError:
            msg = 'Invalid concentration value %s' % value
            raise InvalidMolecularQuantityError(msg)
        prefixes = {'mM': units.milli, 'uM': units.micro,
                    'nM': units.nano, 'pM': units.pico}
        if unit not in prefixes:
            raise InvalidMolecularQuantityError('Invalid unit %s' % unit)
        return val * prefixes[unit] * units.mol / units.liter

    @staticmethod
    def _parse_number(value):
        """Parse a non-negative integer copy number."""
        try:
            val = int(value)
            if val < 0:
                raise ValueError
        except ValueError:
            msg = 'Invalid molecule number value %s' % value
            raise InvalidMolecularQuantityError(msg)
        return val

    @staticmethod
    def _parse_qualitative(value):
        """Validate a qualitative value ('low' or 'high')."""
        if value not in ('low', 'high'):
            msg = 'Invalid qualitative quantity value %s' % value
            raise InvalidMolecularQuantityError(msg)
        return value
class MolecularCondition(object):
    """A condition on a referenced molecular quantity.

    condition_type is one of:
    - 'exact': set to the given MolecularQuantity *value*;
    - 'multiple': scale by a non-negative float *value*;
    - 'increase' / 'decrease': nudge with no explicit value.

    Raises InvalidMolecularConditionError for bad inputs.
    """
    def __init__(self, condition_type: str,
                 quantity: MolecularQuantityReference,
                 value: MolecularQuantity = None):
        if not isinstance(quantity, MolecularQuantityReference):
            raise InvalidMolecularConditionError(
                'Invalid molecular quantity reference')
        self.quantity = quantity
        if condition_type == 'exact':
            if not isinstance(value, MolecularQuantity):
                raise InvalidMolecularConditionError(
                    'Invalid molecular condition value')
            self.value = value
        elif condition_type == 'multiple':
            try:
                fold = float(value)
                if fold < 0:
                    raise ValueError('Negative molecular quantity not allowed')
            except ValueError as err:
                raise InvalidMolecularConditionError(err)
            self.value = fold
        elif condition_type in ('increase', 'decrease'):
            self.value = None
        else:
            raise InvalidMolecularConditionError(
                'Unknown condition type: %s' % condition_type)
        self.condition_type = condition_type
class InvalidMolecularQuantityError(BioagentException):
    """Raised when a MolecularQuantity cannot be constructed."""
    pass
class InvalidMolecularQuantityRefError(BioagentException):
    """Raised when a MolecularQuantityReference cannot be constructed."""
    pass
class InvalidMolecularEntityError(BioagentException):
    """Raised for invalid molecular entities."""
    pass
class InvalidMolecularConditionError(BioagentException):
    """Raised when a MolecularCondition cannot be constructed."""
    pass
class MissingMonomerError(BioagentException):
    """Raised when an agent's monomer is not present in the PySB model.

    Carries the offending monomer/agent for callers to report on.
    """
    def __init__(self, message, monomer):
        super().__init__(message)
        # The agent whose monomer could not be found in the model.
        self.monomer = monomer
class MissingMonomerSiteError(BioagentException):
    """Raised when a site pattern is invalid for a model monomer."""
    pass
class SimulatorError(BioagentException):
    """Raised for failures in the underlying simulators."""
    pass
| {
"content_hash": "051aaf6c62670d57ad8f0c07bb1d3890",
"timestamp": "",
"source": "github",
"line_count": 743,
"max_line_length": 117,
"avg_line_length": 39.6514131897712,
"alnum_prop": 0.5487933199823496,
"repo_name": "bgyori/bioagents",
"id": "611f656c1414df4d959727682c92979dfd99a1b2",
"size": "29461",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bioagents/tra/tra.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "511890"
}
],
"symlink_target": ""
} |
from django.utils import simplejson
import taskforce
from taskforce.base import *
def spit_errors(fn):
    """Decorator: run *fn* and, on any exception, return a JSON error
    payload ({"error": "<message>"}) instead of propagating.

    NOTE: this file uses Python 2 syntax (``except Exception, e``).
    """
    def new_fn(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception, e:
            return simplejson.dumps({
                'error':str(e),
            })
    return new_fn
@spit_errors
def task_new(force, task_type, task_id, run_args, run_kwargs):
    """Create and enqueue a task on *force*; return JSON with its id.

    run_args / run_kwargs arrive JSON-encoded and are decoded before the
    task is created.
    """
    run_args = simplejson.loads(run_args)
    run_kwargs = simplejson.loads(run_kwargs)
    t = force.create_task(task_type, task_id, run_args, run_kwargs)
    force.add_task(t)
    return simplejson.dumps({
        'task_id':task_id,
    })
@spit_errors
def task_status(force, id):
    """Return JSON with the status and progress of task *id*.

    NOTE(review): the parameter name ``id`` shadows the builtin; kept as-is
    since callers may pass it by keyword.
    """
    status = force.get_status(task_id = id)
    progress = force.get_progress(task_id = id)
    return simplejson.dumps({
        'status':status,
        'progress':progress,
    })
@spit_errors
def task_results(force, id):
    """Return JSON with the results of task *id*."""
    results = force.get_results(task_id = id)
    return simplejson.dumps({
        'results':results
    })
| {
"content_hash": "60f39f2ba7d4156b95638b0afff83547",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 67,
"avg_line_length": 24.853658536585368,
"alnum_prop": 0.6123650637880275,
"repo_name": "mallipeddi/django-taskforce",
"id": "b239a8c796a325c4769c95f1e8efea42b9c3e969",
"size": "1019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "http/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "17657"
}
],
"symlink_target": ""
} |
"""
instabot example
Workflow:
Repost best photos from users to your account
By default bot checks username_database.txt
The file should contain one username per line!
"""
import argparse
import os
import sys
import random
from tqdm import tqdm
sys.path.append(os.path.join(sys.path[0], "../"))
from instabot import Bot, utils # noqa: E402
# Default file with one username per line to draw repost candidates from.
USERNAME_DATABASE = "username_database.txt"
# File recording media ids that have already been reposted.
POSTED_MEDIAS = "posted_medias.txt"
def repost_best_photos(bot, users, amount=1):
    """Repost the top *amount* not-yet-used photos from *users*."""
    candidates = get_not_used_medias_from_users(bot, users)
    best = sort_best_medias(bot, candidates, amount)
    for media_id in tqdm(best, desc="Reposting photos"):
        repost_photo(bot, media_id)
def sort_best_medias(bot, media_ids, amount=1):
    """Return ids of the top *amount* medias, ranked by likes then comments."""
    infos = [
        bot.get_media_info(media_id)[0]
        for media_id in tqdm(media_ids, desc="Getting media info")
    ]
    infos.sort(key=lambda info: (info["like_count"], info["comment_count"]),
               reverse=True)
    return [info["id"] for info in infos[:amount]]
def get_not_used_medias_from_users(bot, users=None, users_path=USERNAME_DATABASE):
    """Return media ids of a random user that were not yet reposted.

    If *users* is empty, the username list is loaded from *users_path*.
    Exits the process if the database file is missing or empty.

    Bug fixes: the existence check now runs BEFORE os.stat (previously a
    missing database raised FileNotFoundError instead of the intended
    warning), and *users_path* is now honored consistently (it used to be
    ignored in favor of the hard-coded constant for the stat/exists checks).
    """
    if not users:
        if not os.path.exists(users_path):
            bot.logger.warning("No username database")
            sys.exit()
        if os.stat(users_path).st_size == 0:
            bot.logger.warning("No username(s) in the database")
            sys.exit()
        users = utils.file(users_path).list
    total_medias = []
    user = random.choice(users)
    medias = bot.get_user_medias(user, filtration=False)
    # Keep only medias we have not reposted before.
    medias = [media for media in medias if not exists_in_posted_medias(media)]
    total_medias.extend(medias)
    return total_medias
def exists_in_posted_medias(new_media_id, path=POSTED_MEDIAS):
    """True if *new_media_id* is already recorded in the posted-medias file."""
    return str(new_media_id) in utils.file(path).list
def update_posted_medias(new_media_id, path=POSTED_MEDIAS):
    """Record *new_media_id* in the posted-medias file; always returns True."""
    utils.file(path).append(str(new_media_id))
    return True
def repost_photo(bot, new_media_id, path=POSTED_MEDIAS):
    """Download a photo with its caption and re-upload it.

    Returns True on a successful upload (and records the media id in
    *path*), False otherwise.
    """
    if exists_in_posted_medias(new_media_id, path):
        bot.logger.warning("Media {} was uploaded earlier".format(new_media_id))
        return False
    photo_path = bot.download_photo(new_media_id, save_description=True)
    if not photo_path or not isinstance(photo_path, str):
        # photo_path could be True, False, or a file path.
        return False
    # The caption was saved next to the photo; try both naming schemes.
    try:
        with open(photo_path[:-3] + "txt", "r") as f:
            text = "".join(f.readlines())
    except FileNotFoundError:
        try:
            with open(photo_path[:-6] + ".txt", "r") as f:
                text = "".join(f.readlines())
        except FileNotFoundError:
            bot.logger.warning("Cannot find the photo that is downloaded")
            # Bug fix: previously fell through with `text` unbound, which
            # raised NameError at the upload_photo call below.
            return False
    if bot.upload_photo(photo_path, text):
        update_posted_medias(new_media_id, path)
        bot.logger.info("Media_id {} is saved in {}".format(new_media_id, path))
        return True
    return False
# Command-line interface: credentials, optional proxy, a users file or an
# explicit list of usernames, and how many photos to repost.
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument("-u", type=str, help="username")
parser.add_argument("-p", type=str, help="password")
parser.add_argument("-proxy", type=str, help="proxy")
parser.add_argument("-file", type=str, help="users filename")
parser.add_argument("-amount", type=int, help="amount", default=1)
parser.add_argument("users", type=str, nargs="*", help="users")
args = parser.parse_args()
bot = Bot()
bot.login(username=args.u, password=args.p, proxy=args.proxy)
# Positional usernames take precedence over a users file; with neither,
# repost_best_photos falls back to the default username database.
users = None
if args.users:
    users = args.users
elif args.file:
    users = utils.file(args.file).list
repost_best_photos(bot, users, args.amount)
| {
"content_hash": "b755ac9fd305497503574d5773be0456",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 86,
"avg_line_length": 32.14912280701754,
"alnum_prop": 0.6567530695770805,
"repo_name": "instagrambot/instabot",
"id": "f9267df2f9209c3d2b6b5584ab07f8b2ebcf7af1",
"size": "3665",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/repost_best_photos_from_users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "207"
},
{
"name": "Python",
"bytes": "436518"
},
{
"name": "Shell",
"bytes": "561"
}
],
"symlink_target": ""
} |
from restclients_core import models
class Evaluation(models.Model):
    """Course-evaluation window for a section (IASystem)."""
    section_sln = models.IntegerField()
    eval_open_date = models.DateTimeField()
    eval_close_date = models.DateTimeField()
    is_completed = models.NullBooleanField()
    eval_status = models.CharField(max_length=7)
    eval_url = models.URLField()

    def __init__(self, *args, **kwargs):
        super(Evaluation, self).__init__(*args, **kwargs)
        # Populated by callers; not a model field.
        self.instructor_ids = []

    def __str__(self):
        pairs = [
            ("sln", "%d" % self.section_sln),
            ("eval_open_date", "%s" % self.eval_open_date),
            ("eval_close_date", "%s" % self.eval_close_date),
            ("eval_url", "%s" % self.eval_url),
            ("is_completed", "%s" % self.is_completed),
        ]
        return "{%s}" % ", ".join("%s: %s" % pair for pair in pairs)
| {
"content_hash": "3a4bf9984f12c0eb23e405a8fb72e8a3",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 61,
"avg_line_length": 35.09090909090909,
"alnum_prop": 0.5867875647668394,
"repo_name": "uw-it-aca/uw-restclients",
"id": "abaaaa5f1420b8ac053318b285a0345ca4d00388",
"size": "772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restclients/models/iasystem.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "38842"
},
{
"name": "Python",
"bytes": "664277"
},
{
"name": "Roff",
"bytes": "9566"
}
],
"symlink_target": ""
} |
from typing import Dict, List
import Levenshtein_search
from .core import Enumerator
from .index import Index
class LevenshteinIndex(Index):
    """Index supporting edit-distance lookups via the Levenshtein_search
    C extension.

    Documents are mapped to integer ids; search() returns the ids of
    documents within a given edit distance of the query.
    """
    _doc_to_id: Dict[str, int]  # type: ignore[assignment]
    def __init__(self) -> None:
        self.index_key = Levenshtein_search.populate_wordset(-1, [])
        # Enumerator hands out a fresh id on first lookup of a new key
        # (defaultdict-style) -- see .core.Enumerator.
        self._doc_to_id = Enumerator(start=1)
    def index(self, doc: str) -> None:  # type: ignore[override]
        if doc not in self._doc_to_id:
            # Looking the doc up assigns it the next id as a side effect.
            self._doc_to_id[doc]
            Levenshtein_search.add_string(self.index_key, doc)
    def unindex(self, doc: str) -> None:  # type: ignore[override]
        # The extension has no single-string removal, so the wordset is
        # rebuilt from the remaining documents.
        del self._doc_to_id[doc]
        Levenshtein_search.clear_wordset(self.index_key)
        self.index_key = Levenshtein_search.populate_wordset(-1, list(self._doc_to_id))
    def initSearch(self) -> None:
        # No preparation needed for this backend.
        pass
    def search(self, doc: str, threshold: int = 0) -> List[int]:  # type: ignore[override]
        """Return ids of indexed docs within *threshold* edits of *doc*."""
        matching_docs = Levenshtein_search.lookup(self.index_key, doc, threshold)
        if matching_docs:
            return [self._doc_to_id[match] for match, _, _ in matching_docs]
        else:
            return []
    def __del__(self) -> None:
        # Free the C-side wordset when the index is garbage collected.
        Levenshtein_search.clear_wordset(self.index_key)
| {
"content_hash": "01d7af9134ec81004f20907526b4dd67",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 90,
"avg_line_length": 33.75675675675676,
"alnum_prop": 0.6253002401921537,
"repo_name": "dedupeio/dedupe",
"id": "d49d74807ef108135e1a37204e5454bd341364c0",
"size": "1249",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dedupe/levenshtein.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "1477"
},
{
"name": "Python",
"bytes": "228051"
},
{
"name": "Shell",
"bytes": "194"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import sure
from .. import Chain, NoApiKeyId, NoApiKeySecret, Webhook, update_webhook
from .mock_http_adapter import *
def test_update_webhook():
    """update_webhook with explicit credentials returns the parsed Webhook."""
    update_webhook(webhook_id=webhook_id, webhook_url=webhook_url,
                   api_key_id=api_key_id, api_key_secret=api_key_secret,
                   http_adapter=http_adapter) \
        .should.equal(webhook)
def test_update_webhook_using_class():
    """The Chain client class method produces the same Webhook result."""
    Chain(api_key_id=api_key_id, api_key_secret=api_key_secret,
          http_adapter=http_adapter) \
        .update_webhook(webhook_id=webhook_id, webhook_url=webhook_url) \
        .should.equal(webhook)
def test_update_webhook_without_api_key_id():
    """Omitting the api key id raises NoApiKeyId before any HTTP call."""
    (lambda: update_webhook(webhook_id=webhook_id, webhook_url=webhook_url,
                            http_adapter=no_http())) \
        .should.throw(NoApiKeyId)
def test_update_webhook_without_api_key_secret():
    """Omitting the api key secret raises NoApiKeySecret before any HTTP call."""
    (lambda: update_webhook(webhook_id=webhook_id, webhook_url=webhook_url,
                            api_key_id=api_key_id, http_adapter=no_http())) \
        .should.throw(NoApiKeySecret)
# Shared fixtures for the update_webhook tests.
api_key_id = 'DEMO-4a5e1e4'
# Bug fix: a trailing comma made this a 1-tuple ('DEMO-f8aef80',) instead of
# a plain string like every other credential fixture.
api_key_secret = 'DEMO-f8aef80'
webhook_id = 'FFA21991-5669-4728-8C83-74DEC4C93A4A'
webhook_url = 'https://your-updated-url.com'
url = 'https://api.chain.com/v1/webhooks/FFA21991-5669-4728-8C83-74DEC4C93A4A'
request_json = """
{"url": "https://your-updated-url.com"}
"""
response_body = """
{
    "id": "FFA21991-5669-4728-8C83-74DEC4C93A4A",
    "url": "https://your-updated-url.com"
}
"""
# Expected parsed result of the mocked PUT response above.
webhook = Webhook(
    id=webhook_id,
    url=webhook_url,
)
http_adapter = mock_put_json(url, request_json, response_body)
| {
"content_hash": "366c3aa898dcad8fb70212591ba8ba89",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 78,
"avg_line_length": 27.716666666666665,
"alnum_prop": 0.6578472639807577,
"repo_name": "cardforcoin/chain-bitcoin-python",
"id": "23dbd91c509155e5dd5ff81d43f69cf85464dfaf",
"size": "1663",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chain_bitcoin/tests/test_update_webhook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "112820"
}
],
"symlink_target": ""
} |
__author__ = 'Sebastian Wernicke'
from ScrabbleBot import ScrabbleAI
from ScrabbleBot import ScrabbleGame
from ScrabbleBot import ScrabbleUtils
import random
class ScrabbleMatch:
    """Runs repeated Scrabble games between a fixed set of AI players,
    accumulating total scores and per-game wins."""
    def __init__(self, dictionary_file: str, player_list: list):
        """Validate players, zero the score tables and load the dictionary.

        Raises TypeError for non-ScrabbleAI players and ValueError for
        duplicate player names.
        """
        self._total_scores = dict()
        self._total_matchwins = dict()
        self._player_names = set()
        self._players = []
        for p in player_list:
            if type(p) is not ScrabbleAI:
                # Bug fix: the exception was constructed but never raised.
                raise TypeError("Player list may only contain ScrabbleAI objects")
            if p.name in self._player_names:
                # Bug fix: was never raised, and names were never added to
                # the set, so the duplicate check could never trigger.
                raise ValueError("A player by the name '" + p.name + "' already exists. Each player must have a unique name")
            self._player_names.add(p.name)
            self._players.append(p)
            self._total_scores[p.name] = 0
            self._total_matchwins[p.name] = 0
        print("Welcome to a new Scrabble match!")
        # Load the set of allowed words
        self._legal_words = ScrabbleUtils.load_word_set(dictionary_file)
        # Build the Word Signatures, assuming 2 blanks
        # (since we only build this once, this is the most efficient)
        self._word_signatures = ScrabbleUtils.build_word_signatures(self._legal_words, 2)

    def play_match(self, num_rounds: int, randomize_order: bool, verbosity: int):
        """Play *num_rounds* games; *randomize_order* shuffles the player
        order before each round. Ties count as a win for every player with
        the maximum score."""
        if verbosity > 0:
            print("Match is starting!")
        # Play the rounds
        for i in range(0, num_rounds):
            if randomize_order:
                random.shuffle(self._players)
            # Initialize game
            game = ScrabbleGame.ScrabbleGame(self._players, self._legal_words, self._word_signatures)
            # Play game
            tmp_result = game.play_until_finished(verbosity)
            # Keep score
            max_score = max(tmp_result)
            for j in range(0, len(tmp_result)):
                player_name = self._players[j].name
                self._total_scores[player_name] += tmp_result[j]
                if tmp_result[j] == max_score:
                    if verbosity > 0:
                        print("Player '" + player_name + "' (" + str(j) + ") wins!")
                    self._total_matchwins[player_name] += 1
            print("Round " + str(i) + " | game score: " + str(tmp_result) +
                  " total matchwins:" + str(self._total_matchwins))

    def get_total_scores(self):
        """Return the dict of cumulative scores keyed by player name."""
        return self._total_scores
def get_total_matchwins(self):
return self._total_matchwins | {
"content_hash": "3bf854968451fa49bc64888b67990532",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 119,
"avg_line_length": 35.810810810810814,
"alnum_prop": 0.5867924528301887,
"repo_name": "wernicke/ScrabbleBot",
"id": "11d1cd5d7313d422b54b1c638b5e4acbbeee3b65",
"size": "2898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ScrabbleBot/ScrabbleMatch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55688"
}
],
"symlink_target": ""
} |
"""Tests for initializers in init_ops_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InitializersTest(test.TestCase):
  """Shared assertion helpers for the initializer test cases below."""

  def _identical_test(self,
                      init1,
                      init2,
                      assertion,
                      shape=None,
                      dtype=dtypes.float32):
    """Assert two initializers do (or don't) produce identical tensors."""
    if shape is None:
      shape = [100]
    t1 = self.evaluate(init1(shape, dtype))
    t2 = self.evaluate(init2(shape, dtype))
    self.assertEqual(tensor_shape.as_shape(shape), t1.shape)
    self.assertEqual(tensor_shape.as_shape(shape), t2.shape)
    self.assertEqual(assertion, np.allclose(t1, t2, rtol=1e-15, atol=1e-15))

  def _duplicated_test(self,
                       init,
                       shape=None,
                       dtype=dtypes.float32):
    """Assert two draws from the same unseeded initializer differ."""
    if shape is None:
      shape = [100]
    t1 = self.evaluate(init(shape, dtype))
    t2 = self.evaluate(init(shape, dtype))
    self.assertEqual(tensor_shape.as_shape(shape), t1.shape)
    self.assertEqual(tensor_shape.as_shape(shape), t2.shape)
    self.assertFalse(np.allclose(t1, t2, rtol=1e-15, atol=1e-15))

  def _range_test(self,
                  init,
                  shape,
                  target_mean=None,
                  target_std=None,
                  target_max=None,
                  target_min=None):
    """Assert sample statistics of one draw are within 3e-2 of targets."""
    output = self.evaluate(init(shape))
    self.assertEqual(output.shape, shape)
    # Tolerance for each sample statistic against its target value.
    lim = 3e-2
    if target_std is not None:
      self.assertGreater(lim, abs(output.std() - target_std))
    if target_mean is not None:
      self.assertGreater(lim, abs(output.mean() - target_mean))
    if target_max is not None:
      self.assertGreater(lim, abs(output.max() - target_max))
    if target_min is not None:
      self.assertGreater(lim, abs(output.min() - target_min))

  def _partition_test(self, init):
    """Assert a partitioned call returns the matching slice of the full value."""
    full_shape = (4, 2)
    partition_shape = (2, 2)
    partition_offset = (0, 0)
    full_value = self.evaluate(init(full_shape, dtype=dtypes.float32))
    got = self.evaluate(
        init(
            full_shape,
            dtype=dtypes.float32,
            partition_shape=partition_shape,
            partition_offset=partition_offset))
    self.assertEqual(got.shape, partition_shape)
    self.assertAllClose(
        got, array_ops.slice(full_value, partition_offset, partition_shape))
class ConstantInitializersTest(InitializersTest):
  """Tests for the Zeros, Ones and Constant initializers."""

  @test_util.run_in_graph_and_eager_modes
  def testZeros(self):
    self._range_test(init_ops_v2.Zeros(), shape=(4, 5),
                     target_mean=0., target_max=0.)

  @test_util.run_in_graph_and_eager_modes
  def testZerosPartition(self):
    init = init_ops_v2.Zeros()
    self._partition_test(init)

  @test_util.run_in_graph_and_eager_modes
  def testZerosInvalidKwargs(self):
    init = init_ops_v2.Zeros()
    # "dtpye" is an intentionally misspelled kwarg to exercise the error.
    with self.assertRaisesWithLiteralMatch(TypeError,
                                           r"Unknown keyword arguments: dtpye"):
      init((2, 2), dtpye=dtypes.float32)

  @test_util.run_in_graph_and_eager_modes
  def testOnes(self):
    self._range_test(init_ops_v2.Ones(), shape=(4, 5),
                     target_mean=1., target_max=1.)

  @test_util.run_in_graph_and_eager_modes
  def testOnesPartition(self):
    init = init_ops_v2.Ones()
    self._partition_test(init)

  @test_util.run_in_graph_and_eager_modes
  def testConstantInt(self):
    self._range_test(
        init_ops_v2.Constant(2),
        shape=(5, 6, 4),
        target_mean=2,
        target_max=2,
        target_min=2)

  @test_util.run_in_graph_and_eager_modes
  def testConstantPartition(self):
    init = init_ops_v2.Constant([1, 2, 3, 4])
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        r"Constant initializer doesn't support partition-related arguments"):
      init((4, 2), dtype=dtypes.float32, partition_shape=(2, 2))

  @test_util.run_in_graph_and_eager_modes
  def testConstantTuple(self):
    init = init_ops_v2.constant_initializer((10, 20, 30))
    tensor = init(shape=[3])
    self.assertAllEqual(self.evaluate(tensor), [10, 20, 30])
    self.assertEqual(tensor.shape, [3])

  @test_util.run_in_graph_and_eager_modes
  def testConstantInvalidValue(self):
    # Tensors and Variables are rejected as constant initializer values.
    c = constant_op.constant([1.0, 2.0, 3.0])
    with self.assertRaisesRegex(TypeError,
                                r"Invalid type for initial value: .*Tensor.*"):
      init_ops_v2.constant_initializer(c)
    v = variables.Variable([3.0, 2.0, 1.0])
    with self.assertRaisesRegex(
        TypeError, r"Invalid type for initial value: .*Variable.*"):
      init_ops_v2.constant_initializer(v)

  def _testNDimConstantInitializer(self, value, shape, expected):
    """Check element-wise that init(shape) flattens to *expected*."""
    with test_util.use_gpu():
      init = init_ops_v2.constant_initializer(value)
      x = init(shape)
      actual = self.evaluate(array_ops.reshape(x, [-1]))
      self.assertEqual(len(actual), len(expected))
      for a, e in zip(actual, expected):
        self.assertEqual(a, e)

  @test_util.run_in_graph_and_eager_modes
  def testNDimConstantInitializer(self):
    value = [0, 1, 2, 3, 4, 5]
    shape = [2, 3]
    expected = list(value)
    self._testNDimConstantInitializer(value, shape, expected)
    self._testNDimConstantInitializer(np.asarray(value), shape, expected)
    self._testNDimConstantInitializer(np.asarray(value).reshape(tuple(shape)),
                                      shape, expected)

  def _testNDimConstantInitializerIncorrectNumberValues(self, value, shape):
    """Check that a value/shape size mismatch raises TypeError."""
    with test_util.use_gpu():
      init = init_ops_v2.constant_initializer(value)
      self.assertRaises(TypeError,
                        init,
                        shape=shape)

  @test_util.run_in_graph_and_eager_modes
  def testNDimConstantInitializerIncorrectNumberValues(self):
    value = [0, 1, 2, 3, 4, 5]
    for shape in [[2, 4], [2, 2]]:
      self._testNDimConstantInitializerIncorrectNumberValues(value, shape)
      self._testNDimConstantInitializerIncorrectNumberValues(np.asarray(value),
                                                             shape)
      self._testNDimConstantInitializerIncorrectNumberValues(
          np.asarray(value).reshape(tuple([2, 3])), shape)
class RandomUniformInitializerTest(InitializersTest):
  """Tests for the RandomUniform initializer."""

  @test_util.run_in_graph_and_eager_modes
  def testRangeInitializer(self):
    shape = (20, 6, 7)
    self._range_test(
        init_ops_v2.RandomUniform(minval=-1, maxval=1, seed=124),
        shape,
        target_mean=0.,
        target_max=1,
        target_min=-1)

  @test_util.run_in_graph_and_eager_modes
  def testInitializerIdentical(self):
    self.skipTest("Doesn't work without the graphs")
    init1 = init_ops_v2.RandomUniform(0, 7, seed=1)
    init2 = init_ops_v2.RandomUniform(0, 7, seed=1)
    self._identical_test(init1, init2, True)

  @test_util.run_in_graph_and_eager_modes
  def testInitializerDifferent(self):
    # Different seeds must produce different tensors.
    init1 = init_ops_v2.RandomUniform(0, 7, seed=1)
    init2 = init_ops_v2.RandomUniform(0, 7, seed=2)
    self._identical_test(init1, init2, False)

  @test_util.run_in_graph_and_eager_modes
  def testDuplicatedInitializer(self):
    init = init_ops_v2.RandomUniform(0.0, 1.0)
    self._duplicated_test(init)

  @test_util.run_in_graph_and_eager_modes
  def testInitializePartition(self):
    init = init_ops_v2.RandomUniform(0, 7, seed=1)
    self._partition_test(init)
class RandomNormalInitializerTest(InitializersTest):
  """Tests for the RandomNormal initializer."""

  @test_util.run_in_graph_and_eager_modes
  def testRangeInitializer(self):
    self._range_test(
        init_ops_v2.RandomNormal(mean=0, stddev=1, seed=153),
        shape=(8, 12, 99),
        target_mean=0.,
        target_std=1)

  @test_util.run_in_graph_and_eager_modes
  def testInitializerIdentical(self):
    self.skipTest("Doesn't work without the graphs")
    init1 = init_ops_v2.RandomNormal(0, 7, seed=1)
    init2 = init_ops_v2.RandomNormal(0, 7, seed=1)
    self._identical_test(init1, init2, True)

  @test_util.run_in_graph_and_eager_modes
  def testInitializerDifferent(self):
    # Different seeds must produce different tensors.
    init1 = init_ops_v2.RandomNormal(0, 7, seed=1)
    init2 = init_ops_v2.RandomNormal(0, 7, seed=2)
    self._identical_test(init1, init2, False)

  @test_util.run_in_graph_and_eager_modes
  def testDuplicatedInitializer(self):
    init = init_ops_v2.RandomNormal(0.0, 1.0)
    self._duplicated_test(init)

  @test_util.run_in_graph_and_eager_modes
  def testInitializePartition(self):
    if test_util.is_xla_enabled():
      self.skipTest(
          "XLA ignores seeds for RandomNormal, skip xla-enabled test.")
    init = init_ops_v2.RandomNormal(0, 7, seed=1)
    self._partition_test(init)
class TruncatedNormalInitializerTest(InitializersTest):
  """Tests for the TruncatedNormal initializer."""

  @test_util.run_in_graph_and_eager_modes
  def testRangeInitializer(self):
    # Truncation clips samples to within two standard deviations.
    self._range_test(
        init_ops_v2.TruncatedNormal(mean=0, stddev=1, seed=126),
        shape=(12, 99, 7),
        target_mean=0.,
        target_max=2,
        target_min=-2)

  @test_util.run_in_graph_and_eager_modes
  def testInitializerIdentical(self):
    self.skipTest("Not seeming to work in Eager mode")
    init1 = init_ops_v2.TruncatedNormal(0.0, 1.0, seed=1)
    init2 = init_ops_v2.TruncatedNormal(0.0, 1.0, seed=1)
    self._identical_test(init1, init2, True)

  @test_util.run_in_graph_and_eager_modes
  def testInitializerDifferent(self):
    # Different seeds must produce different tensors.
    init1 = init_ops_v2.TruncatedNormal(0.0, 1.0, seed=1)
    init2 = init_ops_v2.TruncatedNormal(0.0, 1.0, seed=2)
    self._identical_test(init1, init2, False)

  @test_util.run_in_graph_and_eager_modes
  def testDuplicatedInitializer(self):
    init = init_ops_v2.TruncatedNormal(0.0, 1.0)
    self._duplicated_test(init)

  @test_util.run_in_graph_and_eager_modes
  def testInitializePartition(self):
    init = init_ops_v2.TruncatedNormal(0.0, 1.0, seed=1)
    self._partition_test(init)

  @test_util.run_in_graph_and_eager_modes
  def testInvalidDataType(self):
    # Integer dtypes are rejected for normal-distribution sampling.
    init = init_ops_v2.TruncatedNormal(0.0, 1.0)
    with self.assertRaises(ValueError):
      init([1], dtype=dtypes.int32)
class VarianceScalingInitializerTest(InitializersTest):
  """Tests for the VarianceScaling initializer's distribution modes."""

  @test_util.run_in_graph_and_eager_modes
  def testTruncatedNormalDistribution(self):
    shape = [100, 100]
    # fan_in scaling: variance of the samples should be ~1/fan_in.
    expect_mean = 0.
    expect_var = 1. / shape[0]
    init = init_ops_v2.VarianceScaling(distribution="truncated_normal")
    with test_util.use_gpu(), test.mock.patch.object(
        random_ops, "truncated_normal",
        wraps=random_ops.truncated_normal) as mock_truncated_normal:
      x = self.evaluate(init(shape))
      self.assertTrue(mock_truncated_normal.called)
    self.assertNear(np.mean(x), expect_mean, err=1e-2)
    self.assertNear(np.var(x), expect_var, err=1e-2)

  @test_util.run_in_graph_and_eager_modes
  def testNormalDistribution(self):
    # NOTE(review): identical to testTruncatedNormalDistribution above;
    # presumably intended to exercise the "normal" distribution alias --
    # confirm upstream before changing.
    shape = [100, 100]
    expect_mean = 0.
    expect_var = 1. / shape[0]
    init = init_ops_v2.VarianceScaling(distribution="truncated_normal")
    with test_util.use_gpu(), test.mock.patch.object(
        random_ops, "truncated_normal",
        wraps=random_ops.truncated_normal) as mock_truncated_normal:
      x = self.evaluate(init(shape))
      self.assertTrue(mock_truncated_normal.called)
    self.assertNear(np.mean(x), expect_mean, err=1e-2)
    self.assertNear(np.var(x), expect_var, err=1e-2)

  @test_util.run_in_graph_and_eager_modes
  def testUntruncatedNormalDistribution(self):
    shape = [100, 100]
    expect_mean = 0.
    expect_var = 1. / shape[0]
    init = init_ops_v2.VarianceScaling(
        distribution="untruncated_normal")
    with test_util.use_gpu(), test.mock.patch.object(
        random_ops, "random_normal",
        wraps=random_ops.random_normal) as mock_random_normal:
      x = self.evaluate(init(shape))
      self.assertTrue(mock_random_normal.called)
    self.assertNear(np.mean(x), expect_mean, err=1e-2)
    self.assertNear(np.var(x), expect_var, err=1e-2)

  @test_util.run_in_graph_and_eager_modes
  def testUniformDistribution(self):
    shape = [100, 100]
    expect_mean = 0.
    expect_var = 1. / shape[0]
    init = init_ops_v2.VarianceScaling(distribution="uniform")
    with test_util.use_gpu():
      x = self.evaluate(init(shape))
    self.assertNear(np.mean(x), expect_mean, err=1e-2)
    self.assertNear(np.var(x), expect_var, err=1e-2)

  @test_util.run_in_graph_and_eager_modes
  def testInitializePartition(self):
    partition_shape = (100, 100)
    shape = [1000, 100]
    expect_mean = 0.
    expect_var = 1. / shape[0]
    init = init_ops_v2.VarianceScaling(distribution="untruncated_normal")
    with test_util.use_gpu(), test.mock.patch.object(
        random_ops, "random_normal",
        wraps=random_ops.random_normal) as mock_random_normal:
      x = self.evaluate(init(shape, partition_shape=partition_shape))
      self.assertTrue(mock_random_normal.called)
    self.assertEqual(x.shape, partition_shape)
    self.assertNear(np.mean(x), expect_mean, err=1e-3)
    self.assertNear(np.var(x), expect_var, err=1e-3)
class OrthogonalInitializerTest(InitializersTest):
  """Tests for init_ops_v2.Orthogonal (orthogonality, gain, shape checks)."""
  @test_util.run_in_graph_and_eager_modes
  def testRangeInitializer(self):
    self._range_test(init_ops_v2.Orthogonal(seed=123), shape=(20, 20),
                     target_mean=0.)
  @test_util.run_in_graph_and_eager_modes
  def testInitializerIdentical(self):
    # NOTE(review): skipped; per the message below this comparison needs
    # graph mode -- confirm before re-enabling.
    self.skipTest("Doesn't work without the graphs")
    init1 = init_ops_v2.Orthogonal(seed=1)
    init2 = init_ops_v2.Orthogonal(seed=1)
    self._identical_test(init1, init2, True, (10, 10))
  @test_util.run_in_graph_and_eager_modes
  def testInitializerDifferent(self):
    init1 = init_ops_v2.Orthogonal(seed=1)
    init2 = init_ops_v2.Orthogonal(seed=2)
    self._identical_test(init1, init2, False, (10, 10))
  @test_util.run_in_graph_and_eager_modes
  def testDuplicatedInitializer(self):
    init = init_ops_v2.Orthogonal()
    self._duplicated_test(init, (10, 10))
  @test_util.run_in_graph_and_eager_modes
  def testInvalidDataType(self):
    # Non-floating dtypes (here string) must be rejected.
    init = init_ops_v2.Orthogonal()
    self.assertRaises(ValueError, init, shape=(10, 10), dtype=dtypes.string)
  @test_util.run_in_graph_and_eager_modes
  def testInvalidShape(self):
    # Rank-1 shapes cannot form an orthogonal matrix.
    init = init_ops_v2.Orthogonal()
    with test_util.use_gpu():
      self.assertRaises(ValueError, init, shape=[5])
  @test_util.run_in_graph_and_eager_modes
  def testGain(self):
    # NOTE(review): skipped; compares two seeded draws, which per the
    # message below only line up in graph mode.
    self.skipTest("Doesn't work without the graphs")
    init1 = init_ops_v2.Orthogonal(seed=1)
    init2 = init_ops_v2.Orthogonal(gain=3.14, seed=1)
    with test_util.use_gpu():
      t1 = self.evaluate(init1(shape=(10, 10)))
      t2 = self.evaluate(init2(shape=(10, 10)))
      # gain should be a pure scale factor on the same orthogonal matrix.
      self.assertAllClose(t1, t2 / 3.14)
  @test_util.run_in_graph_and_eager_modes
  def testShapesValues(self):
    if test.is_built_with_rocm():
      self.skipTest("Disable subtest on ROCm due to missing QR op support")
    for shape in [(10, 10), (10, 9, 8), (100, 5, 5), (50, 40), (40, 50)]:
      init = init_ops_v2.Orthogonal()
      tol = 1e-5
      with test_util.use_gpu():
        # Check the shape
        t = self.evaluate(init(shape))
        self.assertAllEqual(shape, t.shape)
        # Check orthogonality by computing the inner product after
        # flattening all leading dimensions into the rows.
        t = t.reshape((np.prod(t.shape[:-1]), t.shape[-1]))
        if t.shape[0] > t.shape[1]:
          self.assertAllClose(
              np.dot(t.T, t), np.eye(t.shape[1]), rtol=tol, atol=tol)
        else:
          self.assertAllClose(
              np.dot(t, t.T), np.eye(t.shape[0]), rtol=tol, atol=tol)
  @test_util.run_in_graph_and_eager_modes
  def testPartition(self):
    init = init_ops_v2.Orthogonal(seed=1)
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        r"Orthogonal initializer doesn't support partition-related arguments"):
      init((4, 2), dtype=dtypes.float32, partition_shape=(2, 2))
class IdentityInitializerTest(InitializersTest):
  """Tests for init_ops_v2.Identity (rank-2 only, non-square, gain)."""
  @test_util.run_in_graph_and_eager_modes
  def testRange(self):
    # A 3-D shape must raise; only the rank-2 request below succeeds.
    with self.assertRaises(ValueError):
      shape = (3, 4, 5)
      self._range_test(
          init_ops_v2.Identity(),
          shape=shape,
          target_mean=1. / shape[0],
          target_max=1.)
    shape = (3, 3)
    self._range_test(
        init_ops_v2.Identity(),
        shape=shape,
        target_mean=1. / shape[0],
        target_max=1.)
  @test_util.run_in_graph_and_eager_modes
  def testInvalidDataType(self):
    init = init_ops_v2.Identity()
    self.assertRaises(ValueError, init, shape=[10, 5], dtype=dtypes.int32)
  @test_util.run_in_graph_and_eager_modes
  def testInvalidShape(self):
    # Ranks 3, 1 and 0 are all invalid for an identity matrix.
    init = init_ops_v2.Identity()
    with test_util.use_gpu():
      self.assertRaises(ValueError, init, shape=[5, 7, 7])
      self.assertRaises(ValueError, init, shape=[5])
      self.assertRaises(ValueError, init, shape=[])
  @test_util.run_in_graph_and_eager_modes
  def testNonSquare(self):
    # Non-square shapes should match numpy's rectangular eye().
    init = init_ops_v2.Identity()
    shape = (10, 5)
    with test_util.use_gpu():
      self.assertAllClose(self.evaluate(init(shape)), np.eye(*shape))
  @test_util.run_in_graph_and_eager_modes
  def testGain(self):
    # gain scales the whole matrix: default is eye, gain=0.9 is 0.9*eye.
    shape = (10, 10)
    for dtype in [dtypes.float32, dtypes.float64]:
      init_default = init_ops_v2.Identity()
      init_custom = init_ops_v2.Identity(gain=0.9)
      with test_util.use_gpu():
        self.assertAllClose(self.evaluate(init_default(shape, dtype=dtype)),
                            np.eye(*shape))
      with test_util.use_gpu():
        self.assertAllClose(self.evaluate(init_custom(shape, dtype=dtype)),
                            np.eye(*shape) * 0.9)
  @test_util.run_in_graph_and_eager_modes
  def testPartition(self):
    init = init_ops_v2.Identity()
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        r"Identity initializer doesn't support partition-related arguments"):
      init((4, 2), dtype=dtypes.float32, partition_shape=(2, 2))
class GlorotInitializersTest(InitializersTest):
  """Tests for the Glorot (Xavier) uniform and normal initializers.

  Both are expected to produce zero-mean samples with standard deviation
  sqrt(2 / (fan_in + fan_out)), which is what _range_test verifies below.
  """

  @test_util.run_in_graph_and_eager_modes
  def testGlorotUniform(self):
    shape = (5, 6, 4, 2)
    fan_in, fan_out = init_ops_v2._compute_fans(shape)
    std = np.sqrt(2. / (fan_in + fan_out))
    self._range_test(
        init_ops_v2.GlorotUniform(seed=123),
        shape,
        target_mean=0.,
        target_std=std)

  @test_util.run_in_graph_and_eager_modes
  def testGlorotNormal(self):
    # Renamed from test_GlorotNormal: every other test method in this file
    # uses camelCase testX naming; unittest discovery still matches the
    # "test" prefix, so no caller is affected.
    shape = (5, 6, 4, 2)
    fan_in, fan_out = init_ops_v2._compute_fans(shape)
    std = np.sqrt(2. / (fan_in + fan_out))
    self._range_test(
        init_ops_v2.GlorotNormal(seed=123),
        shape,
        target_mean=0.,
        target_std=std)
class MethodInitializers(InitializersTest):
  """Tests for the lecun_*/he_* convenience initializers.

  Expected standard deviations asserted below: sqrt(1/fan_in) for the
  lecun_* variants and sqrt(2/fan_in) for the he_* variants.
  """
  @test_util.run_in_graph_and_eager_modes
  def testLecunUniform(self):
    shape = (5, 6, 4, 2)
    fan_in, _ = init_ops_v2._compute_fans(shape)
    std = np.sqrt(1. / fan_in)
    self._range_test(
        init_ops_v2.lecun_uniform(seed=123),
        shape,
        target_mean=0.,
        target_std=std)
  @test_util.run_in_graph_and_eager_modes
  def testHeUniform(self):
    shape = (5, 6, 4, 2)
    fan_in, _ = init_ops_v2._compute_fans(shape)
    std = np.sqrt(2. / fan_in)
    self._range_test(
        init_ops_v2.he_uniform(seed=123),
        shape,
        target_mean=0.,
        target_std=std)
  @test_util.run_in_graph_and_eager_modes
  def testLecunNormal(self):
    shape = (5, 6, 4, 2)
    fan_in, _ = init_ops_v2._compute_fans(shape)
    std = np.sqrt(1. / fan_in)
    self._range_test(
        init_ops_v2.lecun_normal(seed=123),
        shape,
        target_mean=0.,
        target_std=std)
  @test_util.run_in_graph_and_eager_modes
  def testHeNormal(self):
    shape = (5, 6, 4, 2)
    fan_in, _ = init_ops_v2._compute_fans(shape)
    std = np.sqrt(2. / fan_in)
    self._range_test(
        init_ops_v2.he_normal(seed=123),
        shape,
        target_mean=0.,
        target_std=std)
if __name__ == "__main__":
  # Discover and run all test cases in this module.
  test.main()
| {
"content_hash": "1dae57087c60a5d39c3fbbe229af76d0",
"timestamp": "",
"source": "github",
"line_count": 595,
"max_line_length": 80,
"avg_line_length": 34.023529411764706,
"alnum_prop": 0.6488342224856748,
"repo_name": "freedomtan/tensorflow",
"id": "d524f1e1fc3cc4e6fb3b9cc280efed7bbbb95d02",
"size": "20933",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/init_ops_v2_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "32479"
},
{
"name": "Batchfile",
"bytes": "38366"
},
{
"name": "C",
"bytes": "1035837"
},
{
"name": "C#",
"bytes": "13395"
},
{
"name": "C++",
"bytes": "99324075"
},
{
"name": "CMake",
"bytes": "107781"
},
{
"name": "Dockerfile",
"bytes": "283435"
},
{
"name": "Go",
"bytes": "2013128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "928595"
},
{
"name": "Jupyter Notebook",
"bytes": "981916"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "4489624"
},
{
"name": "Makefile",
"bytes": "97500"
},
{
"name": "NASL",
"bytes": "8048"
},
{
"name": "Objective-C",
"bytes": "141623"
},
{
"name": "Objective-C++",
"bytes": "360423"
},
{
"name": "PHP",
"bytes": "20570"
},
{
"name": "Pawn",
"bytes": "32277"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42762396"
},
{
"name": "RobotFramework",
"bytes": "2661"
},
{
"name": "Roff",
"bytes": "2515"
},
{
"name": "Ruby",
"bytes": "6723"
},
{
"name": "Shell",
"bytes": "647623"
},
{
"name": "Smarty",
"bytes": "52687"
},
{
"name": "Starlark",
"bytes": "4632847"
},
{
"name": "Swift",
"bytes": "56924"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from .compat import *
import binascii
#This code is shared with tackpy (somewhat), so I'd rather make minimal
#changes, and preserve the use of a2b_base64 throughout.
def dePem(s, name):
    """Decode a PEM string into its base64-decoded payload.

    The input must contain an appropriate PEM prefix and postfix
    based on the input name string, e.g. for name="CERTIFICATE"::

        -----BEGIN CERTIFICATE-----
        MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
        ...
        KoZIhvcNAQEFBQADAwA5kw==
        -----END CERTIFICATE-----

    The first such PEM block in the input will be found, and its
    payload will be base64 decoded and returned.

    Raises SyntaxError if the prefix or postfix is missing, or if the
    payload is not valid base64.
    """
    prefix = "-----BEGIN %s-----" % name
    postfix = "-----END %s-----" % name
    start = s.find(prefix)
    if start == -1:
        raise SyntaxError("Missing PEM prefix")
    end = s.find(postfix, start+len(prefix))
    if end == -1:
        raise SyntaxError("Missing PEM postfix")
    # Reuse the already-computed prefix instead of rebuilding the marker
    # string a second time (the original recomputed "-----BEGIN %s-----").
    s = s[start+len(prefix) : end]
    retBytes = a2b_base64(s)  # May raise SyntaxError
    return retBytes
def dePemList(s, name):
    """Decode a sequence of PEM blocks into a list of decoded payloads.

    The input may contain any number of PEM blocks, each delimited by the
    prefix/postfix markers derived from *name* (e.g. name="TACK BREAK SIG").
    Arbitrary text may surround the blocks; it is ignored. Every block found
    is base64 decoded, and the payloads are returned in document order. An
    input without any PEM block yields an empty list.

    Raises SyntaxError if a block has a BEGIN marker without a matching END
    marker, or if a payload is not valid base64.
    """
    prefix = "-----BEGIN %s-----" % name
    postfix = "-----END %s-----" % name
    payloads = []
    remaining = s
    while True:
        begin = remaining.find(prefix)
        if begin == -1:
            break
        end = remaining.find(postfix, begin + len(prefix))
        if end == -1:
            raise SyntaxError("Missing PEM postfix")
        body = remaining[begin + len(prefix):end]
        payloads.append(a2b_base64(body))  # May raise SyntaxError
        # Continue scanning after the block just consumed.
        remaining = remaining[end + len(postfix):]
    return payloads
def pem(b, name):
    """Encode a payload bytearray into a PEM string.

    The input will be base64 encoded, then wrapped in a PEM prefix/postfix
    based on the name string, e.g. for name="CERTIFICATE"::

        -----BEGIN CERTIFICATE-----
        MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
        ...
        KoZIhvcNAQEFBQADAwA5kw==
        -----END CERTIFICATE-----
    """
    s1 = b2a_base64(b)[:-1]  # remove terminating \n
    # Robustness fix: binascii.b2a_base64 returns bytes on Python 3; the
    # original assumed text and crashed on bytes + str concatenation below.
    # Normalize to text so both the compat wrapper and binascii work.
    if isinstance(s1, bytes):
        s1 = s1.decode("ascii")
    s2 = ""
    while s1:
        # Wrap the base64 payload at the conventional 64 columns.
        s2 += s1[:64] + "\n"
        s1 = s1[64:]
    s = ("-----BEGIN %s-----\n" % name) + s2 + \
        ("-----END %s-----\n" % name)
    return s
def pemSniff(inStr, name):
    """Return True when *inStr* contains a PEM BEGIN marker for *name*."""
    return ("-----BEGIN %s-----" % name) in inStr
| {
"content_hash": "4f56fe20539f85a6e176f3f3fea8216d",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 78,
"avg_line_length": 37.177083333333336,
"alnum_prop": 0.6251050714485851,
"repo_name": "rysson/filmkodi",
"id": "548bc34d248bdb43edccd7049b54785bd29caab3",
"size": "3667",
"binary": false,
"copies": "93",
"ref": "refs/heads/master",
"path": "plugin.video.fanfilm/resources/lib/libraries/f4mproxy/utils/pem.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7510"
},
{
"name": "Python",
"bytes": "8058464"
},
{
"name": "Shell",
"bytes": "18531"
}
],
"symlink_target": ""
} |
from mox3 import mox
from neutronclient.v2_0 import client
from oslotest import base
from kuryr.lib import binding
from kuryr.lib import constants as lib_const
from kuryr_libnetwork import app
from kuryr_libnetwork import constants as const
from kuryr_libnetwork import controllers
from kuryr_libnetwork import utils
TOKEN = 'testtoken'  # placeholder auth token handed to the Neutron client in setUp
ENDURL = 'localurl'  # placeholder Neutron endpoint URL; requests are stubbed in tests
class TestCase(base.BaseTestCase):
    """Test case base class for all unit tests."""
    def setUp(self):
        # Put the Flask app into debug/testing mode so failures surface
        # with full tracebacks instead of generic 500 responses.
        super(TestCase, self).setUp()
        app.config['DEBUG'] = True
        app.config['TESTING'] = True
        self.app = app.test_client()
        # Attach a Neutron client using the placeholder TOKEN/ENDURL above.
        self.app.neutron = client.Client(token=TOKEN, endpoint_url=ENDURL)
        # NOTE(review): enables tag-based behavior on the app -- confirm
        # the exact semantics against the kuryr_libnetwork app module.
        app.tag = True
class TestKuryrBase(TestCase):
    """Base class for all Kuryr unittests.

    Provides mox-based stubbing of the binding layer and the Neutron client,
    plus helpers that fabricate Neutron-style response payloads.
    """
    def setUp(self):
        super(TestKuryrBase, self).setUp()
        self.mox = mox.Mox()
        # Make sure a Neutron client exists on the app before tests stub it.
        controllers.neutron_client()
        self.app.neutron.format = 'json'
        self.addCleanup(self.mox.VerifyAll)
        self.addCleanup(self.mox.UnsetStubs)
        # Drop any cached default pool ids so each test starts clean.
        if hasattr(app, 'DEFAULT_POOL_IDS'):
            del app.DEFAULT_POOL_IDS
    def _mock_out_binding(self, endpoint_id, neutron_port,
                          neutron_subnets, neutron_network=None):
        """Stub binding.port_bind to return a canned veth-pair response."""
        self.mox.StubOutWithMock(binding, 'port_bind')
        fake_binding_response = (
            'fake-veth', 'fake-veth_c', ('fake stdout', ''))
        binding.port_bind(endpoint_id, neutron_port,
                          neutron_subnets,
                          neutron_network).AndReturn(
            fake_binding_response)
        self.mox.ReplayAll()
        return fake_binding_response
    def _mock_out_unbinding(self, endpoint_id, neutron_port):
        """Stub binding.port_unbind to return a canned stdout/stderr pair."""
        self.mox.StubOutWithMock(binding, 'port_unbind')
        fake_unbinding_response = ('fake stdout', '')
        binding.port_unbind(endpoint_id, neutron_port).AndReturn(
            fake_unbinding_response)
        self.mox.ReplayAll()
        return fake_unbinding_response
    def _mock_out_network(self, neutron_network_id, docker_network_id,
                          check_existing=False):
        """Stub app.neutron.list_networks for the given docker network tag.

        When check_existing is True, the lookup for a pre-existing Neutron
        network (KURYR_EXISTING_NEUTRON_NET tag) is answered first with an
        empty result.
        """
        no_networks_response = {
            "networks": []
        }
        fake_list_response = {
            "networks": [{
                "status": "ACTIVE",
                "subnets": [],
                "admin_state_up": True,
                "tenant_id": "9bacb3c5d39d41a79512987f338cf177",
                "router:external": False,
                "segments": [],
                "shared": False,
                "id": neutron_network_id
            }]
        }
        self.mox.StubOutWithMock(app.neutron, 'list_networks')
        t = utils.make_net_tags(docker_network_id)
        if check_existing:
            te = t + ',' + const.KURYR_EXISTING_NEUTRON_NET
            app.neutron.list_networks(tags=te).AndReturn(
                no_networks_response)
        app.neutron.list_networks(tags=t).AndReturn(fake_list_response)
        self.mox.ReplayAll()
        return fake_list_response
    @staticmethod
    def _get_fake_v4_subnetpools(subnetpool_id, prefixes=["192.168.1.0/24"],
                                 name="kuryr"):
        """Build a fake IPv4 subnet pool listing."""
        # The following fake response is retrieved from the Neutron doc:
        #   http://developer.openstack.org/api-ref-networking-v2-ext.html#listSubnetPools # noqa
        v4_subnetpools = {
            "subnetpools": [{
                "min_prefixlen": "24",
                "address_scope_id": None,
                "default_prefixlen": "24",
                "id": subnetpool_id,
                "max_prefixlen": "24",
                "name": name,
                "default_quota": None,
                "tenant_id": "9fadcee8aa7c40cdb2114fff7d569c08",
                "prefixes": prefixes,
                "ip_version": 4,
                "shared": False
            }]
        }
        return v4_subnetpools
    @staticmethod
    def _get_fake_v6_subnetpools(subnetpool_id, prefixes=['fe80::/64']):
        """Build a fake IPv6 subnet pool listing."""
        # The following fake response is retrieved from the Neutron doc:
        #   http://developer.openstack.org/api-ref-networking-v2-ext.html#listSubnetPools # noqa
        v6_subnetpools = {
            "subnetpools": [{
                "min_prefixlen": "64",
                "address_scope_id": None,
                "default_prefixlen": "64",
                "id": subnetpool_id,
                "max_prefixlen": "64",
                "name": "kuryr6",
                "default_quota": None,
                "tenant_id": "9fadcee8aa7c40cdb2114fff7d569c08",
                "prefixes": prefixes,
                "ip_version": 6,
                "shared": False
            }]
        }
        return v6_subnetpools
    @staticmethod
    def _get_fake_subnets(docker_endpoint_id, neutron_network_id,
                          fake_neutron_subnet_v4_id,
                          fake_neutron_subnet_v6_id):
        """Build a fake listing with one v4 and one v6 subnet."""
        # The following fake response is retrieved from the Neutron doc:
        #   http://developer.openstack.org/api-ref-networking-v2.html#createSubnet # noqa
        fake_subnet_response = {
            "subnets": [
                {"name": '-'.join([docker_endpoint_id, '192.168.1.0']),
                 "network_id": neutron_network_id,
                 "tenant_id": "c1210485b2424d48804aad5d39c61b8f",
                 "allocation_pools": [{"start": "192.168.1.2",
                                       "end": "192.168.1.254"}],
                 "gateway_ip": "192.168.1.1",
                 "ip_version": 4,
                 "cidr": "192.168.1.0/24",
                 "id": fake_neutron_subnet_v4_id,
                 "enable_dhcp": True,
                 "subnetpool_id": ''},
                {"name": '-'.join([docker_endpoint_id, 'fe80::']),
                 "network_id": neutron_network_id,
                 "tenant_id": "c1210485b2424d48804aad5d39c61b8f",
                 "allocation_pools": [{"start": "fe80::f816:3eff:fe20:57c4",
                                       "end": "fe80::ffff:ffff:ffff:ffff"}],
                 "gateway_ip": "fe80::f816:3eff:fe20:57c3",
                 "ip_version": 6,
                 "cidr": "fe80::/64",
                 "id": fake_neutron_subnet_v6_id,
                 "enable_dhcp": True,
                 "subnetpool_id": ''}
            ]
        }
        return fake_subnet_response
    @staticmethod
    def _get_fake_port(docker_endpoint_id, neutron_network_id,
                       neutron_port_id,
                       neutron_port_status=lib_const.PORT_STATUS_DOWN,
                       neutron_subnet_v4_id=None,
                       neutron_subnet_v6_id=None,
                       neutron_subnet_v4_address="192.168.1.2",
                       neutron_subnet_v6_address="fe80::f816:3eff:fe20:57c4"):
        """Build a fake Neutron port; fixed_ips reflect the subnet ids given."""
        # The following fake response is retrieved from the Neutron doc:
        #   http://developer.openstack.org/api-ref-networking-v2.html#createPort # noqa
        fake_port = {
            'port': {
                "status": neutron_port_status,
                "name": utils.get_neutron_port_name(docker_endpoint_id),
                "allowed_address_pairs": [],
                "admin_state_up": True,
                "network_id": neutron_network_id,
                "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa",
                "device_owner": "",
                "mac_address": "fa:16:3e:20:57:c3",
                "fixed_ips": [],
                "id": neutron_port_id,
                "security_groups": [],
                "device_id": ""
            }
        }
        if neutron_subnet_v4_id:
            fake_port['port']['fixed_ips'].append({
                "subnet_id": neutron_subnet_v4_id,
                "ip_address": neutron_subnet_v4_address
            })
        if neutron_subnet_v6_id:
            fake_port['port']['fixed_ips'].append({
                "subnet_id": neutron_subnet_v6_id,
                "ip_address": neutron_subnet_v6_address
            })
        return fake_port
    @classmethod
    def _get_fake_ports(cls, docker_endpoint_id, neutron_network_id,
                        fake_neutron_port_id, neutron_port_status,
                        fake_neutron_subnet_v4_id, fake_neutron_subnet_v6_id):
        """Wrap a single fake port in a Neutron list-ports response."""
        fake_port = cls._get_fake_port(
            docker_endpoint_id, neutron_network_id,
            fake_neutron_port_id, neutron_port_status,
            fake_neutron_subnet_v4_id, fake_neutron_subnet_v6_id)
        fake_port = fake_port['port']
        fake_ports = {
            'ports': [
                fake_port
            ]
        }
        return fake_ports
    @staticmethod
    def _get_fake_v4_subnet(neutron_network_id, docker_endpoint_id=None,
                            subnet_v4_id=None, subnetpool_id=None,
                            cidr='192.168.1.0/24',
                            name=None):
        """Build a fake Neutron IPv4 subnet response.

        Bug fix: the ``cidr`` argument is now used for the "cidr" field;
        previously it was silently ignored and '192.168.1.0/24' was always
        returned regardless of the value passed in.
        NOTE(review): the fallback ``name`` and the allocation pool are
        still hard-coded to the 192.168.1.0/24 range -- confirm whether
        they should be derived from ``cidr`` as well.
        """
        if not name:
            name = str('-'.join([docker_endpoint_id,
                                 '192.168.1.0']))
        fake_v4_subnet = {
            'subnet': {
                "name": name,
                "network_id": neutron_network_id,
                "tenant_id": "c1210485b2424d48804aad5d39c61b8f",
                "allocation_pools": [{
                    "start": "192.168.1.2",
                    "end": "192.168.1.254"
                }],
                "gateway_ip": "192.168.1.1",
                "ip_version": 4,
                "cidr": cidr,
                "id": subnet_v4_id,
                "enable_dhcp": True,
                "subnetpool_id": ''
            }
        }
        if subnetpool_id:
            fake_v4_subnet['subnet'].update(subnetpool_id=subnetpool_id)
        return fake_v4_subnet
    @staticmethod
    def _get_fake_v6_subnet(docker_network_id, docker_endpoint_id,
                            subnet_v6_id, subnetpool_id=None):
        """Build a fake Neutron IPv6 subnet response (fe80::/64)."""
        fake_v6_subnet = {
            'subnet': {
                "name": '-'.join([docker_endpoint_id,
                                  'fe80::']),
                "network_id": docker_network_id,
                "tenant_id": "c1210485b2424d48804aad5d39c61b8f",
                "allocation_pools": [{
                    "start": "fe80::f816:3eff:fe20:57c4",
                    "end": "fe80::ffff:ffff:ffff:ffff"
                }],
                "gateway_ip": "fe80::f816:3eff:fe20:57c3",
                "ip_version": 6,
                "cidr": 'fe80::/64',
                "id": subnet_v6_id,
                "enable_dhcp": True
            }
        }
        if subnetpool_id:
            fake_v6_subnet['subnet'].update(subnetpool_id=subnetpool_id)
        return fake_v6_subnet
class TestKuryrFailures(TestKuryrBase):
    """Unit tests checking that Kuryr handles failures appropriately."""
| {
"content_hash": "d686b1ce9691b986da67a4533b2cc976",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 97,
"avg_line_length": 38.195035460992905,
"alnum_prop": 0.5080308235075666,
"repo_name": "celebdor/kuryr-libnetwork",
"id": "b63e759c0668e31be03d86b2b70dbc10fd9c85cb",
"size": "11317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kuryr_libnetwork/tests/unit/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "246922"
},
{
"name": "Ruby",
"bytes": "1448"
},
{
"name": "Shell",
"bytes": "13770"
}
],
"symlink_target": ""
} |
"""Contains the global :class:`.EventManager` instance."""
__all__ = ['eventMgr']
from . import EventManager
#: The global event manager.
eventMgr = EventManager.EventManager()
| {
"content_hash": "5b48ac6c804ec96db19fe4d399f4a1ec",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 58,
"avg_line_length": 22.5,
"alnum_prop": 0.7111111111111111,
"repo_name": "chandler14362/panda3d",
"id": "73a35b8ba52e719d2421918b8c0cf8c16bc9b942",
"size": "180",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "direct/src/showbase/EventManagerGlobal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4004"
},
{
"name": "C",
"bytes": "5288285"
},
{
"name": "C++",
"bytes": "27114399"
},
{
"name": "Emacs Lisp",
"bytes": "229264"
},
{
"name": "HTML",
"bytes": "8081"
},
{
"name": "Java",
"bytes": "3113"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Logos",
"bytes": "5504"
},
{
"name": "MAXScript",
"bytes": "1745"
},
{
"name": "NSIS",
"bytes": "61448"
},
{
"name": "Nemerle",
"bytes": "3001"
},
{
"name": "Objective-C",
"bytes": "27625"
},
{
"name": "Objective-C++",
"bytes": "258129"
},
{
"name": "Perl",
"bytes": "206982"
},
{
"name": "Perl 6",
"bytes": "27055"
},
{
"name": "Puppet",
"bytes": "2627"
},
{
"name": "Python",
"bytes": "5568942"
},
{
"name": "R",
"bytes": "421"
},
{
"name": "Roff",
"bytes": "3432"
},
{
"name": "Shell",
"bytes": "55940"
},
{
"name": "Visual Basic",
"bytes": "136"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import errno
import hashlib
import os
import shutil
import tempfile
from muninn._compat import string_types as basestring
from muninn._compat import path_utf8, encode, decode
class TemporaryDirectory(object):
    """Context manager that creates a temporary directory on entry and
    removes it (recursively) on exit.

    All constructor arguments are forwarded verbatim to tempfile.mkdtemp().
    """
    def __init__(self, *args, **kwargs):
        self._mkdtemp_args = args
        self._mkdtemp_kwargs = kwargs

    def __enter__(self):
        self._path = tempfile.mkdtemp(*self._mkdtemp_args,
                                      **self._mkdtemp_kwargs)
        return self._path

    def __exit__(self, exc_type, exc_value, traceback):
        shutil.rmtree(self._path)
        # Never suppress exceptions raised inside the with-block.
        return False
def split_path(path):
    """Split a path into a sequence of path segments via os.path.split().

    Example:
        list(split_path("/a/b/c/d.txt")) == [os.sep, "a", "b", "c", "d.txt"]
    """
    head, tail = os.path.split(path)
    if not tail:
        # The path ended in a separator; drop it and split once more.
        head, tail = os.path.split(head)
    # Collect segments leaf-first, then yield them root-first.
    collected = [tail] if tail else []
    while head and not head.endswith(os.sep):
        head, part = os.path.split(head)
        if part:
            collected.append(part)
    if head.endswith(os.sep):
        # Whatever remains is the filesystem root.
        collected.append(os.sep)
    for segment in reversed(collected):
        yield segment
def is_sub_path(sub_path, path, allow_equal=False):
    """Determine if a path is a sub path of (is contained in) another path.

    The 'allow_equal' flag determines whether two equal paths count as sub
    paths of each other.

    Both paths are compared segment by segment (see split_path()). This
    avoids the problem where "/a/bb/c" would be considered a sub path of
    "/a/b" by a naive str.startswith() or os.path.commonprefix() check.
    """
    sub_segments = [segment for segment in split_path(sub_path)]
    base_segments = [segment for segment in split_path(path)]
    # The candidate must have at least as many segments as the base
    # (strictly more unless equality is allowed).
    required = len(base_segments) if allow_equal else len(base_segments) + 1
    if len(sub_segments) < required:
        return False
    # Every base segment must match the corresponding candidate segment.
    for candidate, base in zip(sub_segments, base_segments):
        if candidate != base:
            return False
    return True
def make_path(path, mode=0o777):
    """Try to create the specified path, creating parent directories where needed. If the path already exists, this is
    _not_ considered an error. This is similar to "mkdir -p" and in contrast to os.makedirs(). The latter raises an
    exception if the leaf directory exists.

    Keyword arguments:
    mode -- On some systems, mode is ignored. Where it is used, the current umask value is first masked out.
            The default mode is 0777 (octal). See also the documentation of os.mkdir().
    """
    try:
        # Bug fix: pass mode through to os.makedirs(); previously the
        # documented keyword argument was silently dropped.
        os.makedirs(path, mode)
    except EnvironmentError as _error:
        # If the leaf directory cannot be created because an entity with the same name already exists, do not consider
        # this an error _if_ this entity is (a symbolic link to) a directory, instead of e.g. a file.
        if _error.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def copy_path(source, target, resolve_root=False, resolve_links=False):
    """Recursively copy the source path to the destination path. The destination path should not exist. Directories are
    copied as (newly created) directories with the same names, files are copied by copying their contents
    (using shutil.copy()).

    Keyword arguments:
    resolve_root -- If set to True and if the top-level file/directory for the source tree is a symbolic link then the
                    source tree is copied by copying the linked target to the destination tree.
                    If set to False, the top-level link in the source tree is copied based on the setting of
                    the resolve_links parameter.
    resolve_links -- If set to True, all symbolic links in the source tree are copied by recursively copying the linked
                     targets to the destination tree. Note that this could lead to infinite recursion.
                     If set to False, symbolic links in the source tree are copied as (newly created) symbolic links
                     in the destination tree.
    """
    def _copy_path_rec(source, target, resolve_root, resolve_links):
        # Copy a single entry; handles (in order) symlinks, directories,
        # and regular files.
        assert os.path.exists(source) or os.path.islink(source)
        # Refuse to copy into a dangling symlink.
        if os.path.islink(target) and not os.path.exists(target):
            raise IOError("target is a dangling symlink: %s" % target)
        if os.path.islink(source) and not (resolve_links or resolve_root):
            if os.path.exists(target):
                # This will fail if target is a directory, which is the intended behaviour.
                os.remove(target)
            # If target is a dangling symlink, os.path.exists() returns False and the creation of the symlink below
            # will fail, which is the intended behaviour.
            os.symlink(os.readlink(source), target)
        elif os.path.isdir(source):
            if not os.path.exists(target):
                # This will fail if target exists, which is the intended behaviour. Note that if the target is a
                # dangling symlink, the creation of the directory below will fail as well, which is intended.
                os.mkdir(target)
            for basename in os.listdir(source):
                source_path = os.path.join(source, basename)
                target_path = os.path.join(target, basename)
                # The resolve_root option should only have an effect during the initial call to _copy_path_rec().
                _copy_path_rec(source_path, target_path, False, resolve_links)
        else:
            # Regular file: copy contents, then permission/time metadata.
            shutil.copyfile(source, target)
            shutil.copystat(source, target)
    # If the source ends in a path separator and it is a symlink to a directory, then the symlink will be resolved even
    # if resolve_root is set to False. Disallow a root that refers to a file and has a trailing path separator.
    if source.endswith(os.path.sep):
        if not os.path.isdir(os.path.dirname(source)):
            raise IOError("not a directory: %s" % source)
        else:
            resolve_root = True
    # If the target is a directory, copy the source _into_ the target, unless the source path ends in a path separator.
    if os.path.isdir(target) and not source.endswith(os.path.sep):
        target = os.path.join(target, os.path.basename(source))
    # Perform the recursive copy.
    _copy_path_rec(source, target, resolve_root, resolve_links)
def remove_path(path):
    """Delete *path*: real directories recursively, everything else
    (files and symlinks, including symlinks to directories) via os.remove().
    """
    is_real_directory = os.path.isdir(path) and not os.path.islink(path)
    if is_real_directory:
        shutil.rmtree(path)
    else:
        os.remove(path)
def hash_string(string, hash_func):
    """Return the encoded hex digest of *string* computed with *hash_func*."""
    hasher = hash_func()
    hasher.update(string)
    digest = hasher.hexdigest()
    return encode(digest)
def hash_file(path, block_size, hash_func):
    """Return the encoded hex digest of the file at *path*, streaming its
    contents in chunks of *block_size* bytes to bound memory use.
    """
    hasher = hash_func()
    with open(path, "rb") as stream:
        # iter() with a sentinel stops as soon as read() returns b"" (EOF).
        for chunk in iter(lambda: stream.read(block_size), b""):
            hasher.update(chunk)
    return encode(hasher.hexdigest())
# NB. os.path.islink() can be True even if neither os.path.isdir() nor os.path.isfile() is True.
# NB. os.path.exists() is False for a dangling symbolic link, even if the symbolic link itself does exist.
def product_hash(roots, resolve_root=True, resolve_links=False, force_encapsulation=False,
                 block_size=65536, hash_type=None):
    """Compute a "<algorithm>:<hexdigest>" fingerprint of one or more paths.

    Files are hashed by content, symlinks (when not resolved) by their link
    target, and directories by a stable combination of each entry's name,
    type tag (l/d/f) and recursive hash.

    Keyword arguments:
    roots -- a single path or a list of paths.
    resolve_root/resolve_links -- control whether symlinks are followed at
        the top level / everywhere (mirrors copy_path()).
    force_encapsulation -- treat even a single root as a multi-root set.
    block_size -- read size used when hashing file contents.
    hash_type -- hashlib algorithm name; defaults to 'sha1'.
    """
    # Bug fix: normalize hash_type up front. It is embedded in the returned
    # "<algorithm>:<digest>" string, so leaving it None made both return
    # statements below fail with a TypeError under the default arguments.
    hash_type = hash_type or 'sha1'
    hash_func = getattr(hashlib, hash_type)
    def _product_hash_rec(root, resolve_root, resolve_links, hash_func, block_size):
        if os.path.islink(root) and not (resolve_root or resolve_links):
            # Hash link _contents_.
            return hash_string(path_utf8(os.readlink(root)), hash_func)
        elif os.path.isfile(root):
            # Hash file contents.
            return hash_file(root, block_size, hash_func)
        elif os.path.isdir(root):
            # Create a fingerprint of the directory by computing the hash of (for each entry in the directory) the hash
            # of the entry name, the type of entry (link, file, or directory), and the hash of the contents of the
            # entry.
            hash = hash_func()
            for basename in sorted(os.listdir(root)):
                hash.update(hash_string(path_utf8(basename), hash_func))
                path = os.path.join(root, basename)
                if os.path.islink(path) and not (resolve_root or resolve_links):
                    hash.update(b"l")
                elif os.path.isdir(path):
                    hash.update(b"d")
                else:
                    hash.update(b"f")
                hash.update(_product_hash_rec(path, False, resolve_links, hash_func, block_size))
            return encode(hash.hexdigest())
        else:
            raise IOError("path does not refer to a regular file or directory: %s" % root)
    if isinstance(roots, basestring):
        roots = [roots]
    if len(roots) == 1 and not force_encapsulation:
        return hash_type + ':' + decode(_product_hash_rec(roots[0], resolve_root, resolve_links, hash_func, block_size))
    # Multiple roots: combine the per-root hashes (sorted for stability).
    hash = hash_func()
    for root in sorted(roots):
        hash.update(hash_string(path_utf8(os.path.basename(root)), hash_func))
        if os.path.islink(root) and not (resolve_root or resolve_links):
            hash.update(b"l")
        elif os.path.isdir(root):
            hash.update(b"d")
        else:
            hash.update(b"f")
        hash.update(_product_hash_rec(root, resolve_root, resolve_links, hash_func, block_size))
    return hash_type + ':' + hash.hexdigest()
def product_size(roots, resolve_root=True, resolve_links=False):
    """Return the total size in bytes of the given path(s).

    Symlinks that are not resolved contribute the size of the link itself;
    directories contribute the recursive sum of their entries.
    """
    def _size_of(root, resolve, follow_links):
        if os.path.islink(root) and not (resolve or follow_links):
            # Use size of the symbolic link itself (not its target).
            return os.lstat(root).st_size
        if os.path.isfile(root):
            return os.stat(root).st_size
        if os.path.isdir(root):
            total = 0
            for entry in os.listdir(root):
                # resolve applies to the top level only, so pass False down.
                total += _size_of(os.path.join(root, entry), False, follow_links)
            return total
        raise IOError("path does not refer to a regular file or directory: %s" % root)
    if isinstance(roots, basestring):
        roots = [roots]
    return sum(_size_of(root, resolve_root, resolve_links) for root in roots)
def quoted_list(lst, quote_text='"', join_text=", "):
    """Return the items of *lst* (stringified) each wrapped in *quote_text*
    and joined together with *join_text*."""
    quoted = (quote_text + str(item) + quote_text for item in lst)
    return join_text.join(quoted)
| {
"content_hash": "e236e8149b18f073889b147d7877a49e",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 120,
"avg_line_length": 39.985074626865675,
"alnum_prop": 0.6364315042926465,
"repo_name": "stcorp/muninn",
"id": "c9f09691a0f924fb34a4138fed3d88b283069abb",
"size": "10771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "muninn/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "458802"
},
{
"name": "Shell",
"bytes": "75"
}
],
"symlink_target": ""
} |
"""Utilities for identifying local IP addresses."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import re
import socket
import subprocess
from subprocess import Popen, PIPE
from warnings import warn
# Caches populated lazily (once) by _load_ips() below.
LOCAL_IPS = []   # every address that refers to this machine, incl. loopback and the '0.0.0.0'/'' aliases
PUBLIC_IPS = []  # addresses visible to other machines (non-loopback)
LOCALHOST = ''   # preferred loopback address, usually '127.0.0.1'
def _uniq_stable(elems):
"""uniq_stable(elems) -> list
Return from an iterable, a list of all the unique elements in the input,
maintaining the order in which they first appear.
From ipython_genutils.data
"""
seen = set()
return [x for x in elems if x not in seen and not seen.add(x)]
def _get_output(cmd):
    """Run *cmd* and return its stdout decoded as UTF-8.

    Raises IOError (including the command's stderr text) when the process
    exits with a nonzero status.
    """
    if os.name == 'nt':
        # Keep a console window from flashing up on Windows.
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        startupinfo = None
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE, startupinfo=startupinfo)
    out, err = proc.communicate()
    if proc.returncode:
        raise IOError("Failed to run %s: %s" % (cmd, err.decode('utf8', 'replace')))
    return out.decode('utf8', 'replace')
def _only_once(f):
"""decorator to only run a function once"""
f.called = False
def wrapped(**kwargs):
if f.called:
return
ret = f(**kwargs)
f.called = True
return ret
return wrapped
def _requires_ips(f):
    """Decorator that populates the machine's IP tables before calling *f*."""
    def ips_loaded(*args, **kwargs):
        _load_ips()  # no-op after the first successful load (@_only_once)
        return f(*args, **kwargs)

    return ips_loaded
# subprocess-parsing ip finders
class NoIPAddresses(Exception):
    """Raised by the subprocess-output parsers when no IPv4 address could
    be extracted from the command output."""
    pass
def _populate_from_list(addrs):
    """Fill LOCAL_IPS / PUBLIC_IPS / LOCALHOST from a flat list of IPv4
    address strings; raises NoIPAddresses when the list is empty."""
    if not addrs:
        raise NoIPAddresses
    global LOCALHOST
    local_ips = []
    public_ips = []
    for addr in addrs:
        local_ips.append(addr)
        if addr.startswith('127.'):
            # First loopback address seen becomes LOCALHOST.
            if not LOCALHOST:
                LOCALHOST = addr
        else:
            public_ips.append(addr)
    if not LOCALHOST:
        LOCALHOST = '127.0.0.1'
    # LOCALHOST first, then everything, then the all-interface aliases.
    local_ips.insert(0, LOCALHOST)
    local_ips.extend(['0.0.0.0', ''])
    LOCAL_IPS[:] = _uniq_stable(local_ips)
    PUBLIC_IPS[:] = _uniq_stable(public_ips)
# Matches the IPv4 address on an "inet ..." line of ifconfig output.
_ifconfig_ipv4_pat = re.compile(r'inet\b.*?(\d+\.\d+\.\d+\.\d+)', re.IGNORECASE)
def _load_ips_ifconfig():
    """Populate the IP tables by parsing `ifconfig` output (POSIX)."""
    try:
        out = _get_output('ifconfig')
    except (IOError, OSError):
        # ifconfig often lives in /sbin, which is not on everyone's PATH.
        out = _get_output('/sbin/ifconfig')
    hits = (_ifconfig_ipv4_pat.match(line.strip()) for line in out.splitlines())
    addrs = [m.group(1) for m in hits if m]
    _populate_from_list(addrs)
def _load_ips_ip():
    """Populate the IP tables by parsing `ip addr` output (Linux)."""
    out = _get_output(['ip', '-f', 'inet', 'addr'])
    addrs = []
    for line in out.splitlines():
        fields = line.lower().split()
        # Address lines look like: "inet 192.168.0.2/24 brd ...".
        if len(fields) >= 2 and fields[0] == 'inet':
            addrs.append(fields[1].split('/')[0])
    _populate_from_list(addrs)
# Matches the IPv4 address at the end of an "IPv4 Address..." line.
_ipconfig_ipv4_pat = re.compile(r'ipv4.*?(\d+\.\d+\.\d+\.\d+)$', re.IGNORECASE)
def _load_ips_ipconfig():
    """Populate the IP tables by parsing `ipconfig` output (Windows)."""
    out = _get_output('ipconfig')
    hits = (_ipconfig_ipv4_pat.match(line.strip()) for line in out.splitlines())
    addrs = [m.group(1) for m in hits if m]
    _populate_from_list(addrs)
def _load_ips_netifaces():
    """load ip addresses with netifaces

    Preferred strategy: enumerate every interface via the third-party
    ``netifaces`` package and classify each IPv4 address as loopback or
    public.  Raises ImportError when netifaces is unavailable, which the
    caller (_load_ips) treats as "try the next strategy".
    """
    import netifaces
    global LOCALHOST
    local_ips = []
    public_ips = []
    # list of iface names, 'lo0', 'eth0', etc.
    for iface in netifaces.interfaces():
        # list of ipv4 addrinfo dicts
        ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])
        for entry in ipv4s:
            addr = entry.get('addr')
            if not addr:
                continue
            # Loopback if the interface looks like 'lo*' or the address
            # is in 127/8; the first loopback seen becomes LOCALHOST.
            if not (iface.startswith('lo') or addr.startswith('127.')):
                public_ips.append(addr)
            elif not LOCALHOST:
                LOCALHOST = addr
            local_ips.append(addr)
    if not LOCALHOST:
        # we never found a loopback interface (can this ever happen?), assume common default
        LOCALHOST = '127.0.0.1'
        local_ips.insert(0, LOCALHOST)
    local_ips.extend(['0.0.0.0', ''])
    LOCAL_IPS[:] = _uniq_stable(local_ips)
    PUBLIC_IPS[:] = _uniq_stable(public_ips)
def _load_ips_gethostbyname():
    """load ip addresses with socket.gethostbyname_ex
    This can be slow.

    Last-resort strategy: DNS/hosts-file lookups of 'localhost' and the
    machine's hostname.  Mutates LOCAL_IPS/PUBLIC_IPS/LOCALHOST in place.
    """
    global LOCALHOST
    try:
        LOCAL_IPS[:] = socket.gethostbyname_ex('localhost')[2]
    except socket.error:
        # assume common default
        LOCAL_IPS[:] = ['127.0.0.1']
    try:
        hostname = socket.gethostname()
        PUBLIC_IPS[:] = socket.gethostbyname_ex(hostname)[2]
        # try hostname.local, in case hostname has been short-circuited to loopback
        if not hostname.endswith('.local') and all(ip.startswith('127') for ip in PUBLIC_IPS):
            PUBLIC_IPS[:] = socket.gethostbyname_ex(socket.gethostname() + '.local')[2]
    except socket.error:
        pass
    finally:
        # Runs whether or not the hostname lookup succeeded: dedupe the
        # public list and fold it into the local list.
        PUBLIC_IPS[:] = _uniq_stable(PUBLIC_IPS)
        LOCAL_IPS.extend(PUBLIC_IPS)
    # include all-interface aliases: 0.0.0.0 and ''
    LOCAL_IPS.extend(['0.0.0.0', ''])
    LOCAL_IPS[:] = _uniq_stable(LOCAL_IPS)
    LOCALHOST = LOCAL_IPS[0]
def _load_ips_dumb():
    """Last-resort fallback: hard-coded loopback-only defaults."""
    global LOCALHOST
    LOCALHOST = '127.0.0.1'
    # Loopback plus the all-interface aliases; nothing publicly visible.
    PUBLIC_IPS[:] = []
    LOCAL_IPS[:] = [LOCALHOST, '0.0.0.0', '']
@_only_once
def _load_ips(suppress_exceptions=True):
    """load the IPs that point to this machine
    This function will only ever be called once.
    It will use netifaces to do it quickly if available.
    Then it will fallback on parsing the output of ifconfig / ip addr / ipconfig, as appropriate.
    Finally, it will fallback on socket.gethostbyname_ex, which can be slow.

    :param suppress_exceptions: when True (default), any unexpected error
        is reported as a warning and dumb defaults are loaded instead of
        propagating the exception.
    """
    try:
        # first priority, use netifaces
        try:
            return _load_ips_netifaces()
        except ImportError:
            pass
        # second priority, parse subprocess output (how reliable is this?)
        if os.name == 'nt':
            try:
                return _load_ips_ipconfig()
            except (IOError, NoIPAddresses):
                pass
        else:
            try:
                return _load_ips_ip()
            except (IOError, OSError, NoIPAddresses):
                pass
            try:
                return _load_ips_ifconfig()
            except (IOError, OSError, NoIPAddresses):
                pass
        # lowest priority, use gethostbyname
        return _load_ips_gethostbyname()
    except Exception as e:
        if not suppress_exceptions:
            raise
        # unexpected error shouldn't crash, load dumb default values instead.
        warn("Unexpected error discovering local network interfaces: %s" % e)
        _load_ips_dumb()
# Public accessors: each is wrapped with @_requires_ips so the (one-time)
# interface discovery runs before the cached tables are consulted.
@_requires_ips
def local_ips():
    """return the IP addresses that point to this machine"""
    return LOCAL_IPS
@_requires_ips
def public_ips():
    """return the IP addresses for this machine that are visible to other machines"""
    return PUBLIC_IPS
@_requires_ips
def localhost():
    """return ip for localhost (almost always 127.0.0.1)"""
    return LOCALHOST
@_requires_ips
def is_local_ip(ip):
    """does `ip` point to this machine?"""
    return ip in LOCAL_IPS
@_requires_ips
def is_public_ip(ip):
    """is `ip` a publicly visible address?"""
    return ip in PUBLIC_IPS
| {
"content_hash": "fdec0419e4130a2421e4e8b3e1edc4d3",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 97,
"avg_line_length": 28.72992700729927,
"alnum_prop": 0.5932418699186992,
"repo_name": "lancezlin/ml_template_py",
"id": "b0ffdb0491e81dafb8d8f5cee8d2933a8d9560f8",
"size": "7872",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/jupyter_client/localinterfaces.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "326933"
},
{
"name": "C++",
"bytes": "14430"
},
{
"name": "CSS",
"bytes": "7806"
},
{
"name": "FORTRAN",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "596861"
},
{
"name": "JavaScript",
"bytes": "4020233"
},
{
"name": "Jupyter Notebook",
"bytes": "517957"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "41191064"
},
{
"name": "Shell",
"bytes": "3373"
},
{
"name": "Smarty",
"bytes": "26298"
}
],
"symlink_target": ""
} |
from automaton import exceptions as automaton_exceptions
from automaton import machines
import six
"""State machine modelling.
This is being used in the implementation of:
http://specs.openstack.org/openstack/ironic-specs/specs/kilo/new-ironic-state-machine.html
"""
from ironic.common import exception as excp
from ironic.common.i18n import _
def _translate_excp(func):
    """Decorator to translate automaton exceptions into ironic exceptions.

    Most automaton errors become ironic's InvalidState; Duplicate maps to
    ironic's Duplicate.  The exception message text is preserved.
    """
    @six.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (automaton_exceptions.InvalidState,
                automaton_exceptions.NotInitialized,
                automaton_exceptions.FrozenMachine,
                automaton_exceptions.NotFound) as e:
            raise excp.InvalidState(six.text_type(e))
        except automaton_exceptions.Duplicate as e:
            raise excp.Duplicate(six.text_type(e))
    return wrapper
class FSM(machines.FiniteMachine):
    """An ironic state-machine class with some ironic specific additions.

    Adds the notion of a 'target' state: a stable state that the machine
    is working towards, tracked in ``_target_state`` and updated after
    every processed event.
    """
    def __init__(self):
        super(FSM, self).__init__()
        # The stable state this machine is currently progressing towards,
        # or None when no target is set / the target has been reached.
        self._target_state = None
    # For now make these raise ironic state machine exceptions until
    # a later period where these should(?) be using the raised automaton
    # exceptions directly.
    add_transition = _translate_excp(machines.FiniteMachine.add_transition)
    process_event = _translate_excp(machines.FiniteMachine.process_event)
    @property
    def target_state(self):
        # Read-only view of the current target state (may be None).
        return self._target_state
    @_translate_excp
    def add_state(self, state, on_enter=None, on_exit=None,
                  target=None, terminal=None, stable=False):
        """Adds a given state to the state machine.
        :param stable: Use this to specify that this state is a stable/passive
                       state. A state must have been previously defined as
                       'stable' before it can be used as a 'target'
        :param target: The target state for 'state' to go to.  Before a state
                       can be used as a target it must have been previously
                       added and specified as 'stable'
        Further arguments are interpreted as for parent method ``add_state``.
        """
        if target is not None:
            # A target must already exist and be flagged stable.
            if target not in self._states:
                raise excp.InvalidState(
                    _("Target state '%s' does not exist") % target)
            if not self._states[target]['stable']:
                raise excp.InvalidState(
                    _("Target state '%s' is not a 'stable' state") % target)
        super(FSM, self).add_state(state, terminal=terminal,
                                   on_enter=on_enter, on_exit=on_exit)
        self._states[state].update({
            'stable': stable,
            'target': target,
        })
    def _post_process_event(self, event, result):
        """Maintain ``_target_state`` after an event has been processed."""
        # Clear '_target_state' if we've reached it
        if (self._target_state is not None and
                self._target_state == self._current.name):
            self._target_state = None
        # If new state has a different target, update the '_target_state'
        if self._states[self._current.name]['target'] is not None:
            self._target_state = self._states[self._current.name]['target']
    def is_valid_event(self, event):
        """Check whether the event is actionable in the current state."""
        # TODO(harlowja): remove this when
        # https://review.openstack.org/191955 merges and is made available.
        current = self._current
        if current is None:
            return False
        if self._states[current.name]['terminal']:
            return False
        if event not in self._transitions[current.name]:
            return False
        return True
    @_translate_excp
    def initialize(self, start_state=None):
        # Delegate to the parent, then seed the target from the start state.
        super(FSM, self).initialize(start_state=start_state)
        current_state = self._current.name
        self._target_state = self._states[current_state]['target']
| {
"content_hash": "828a9cabd042130d996d63a68f22e7dc",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 90,
"avg_line_length": 37.638888888888886,
"alnum_prop": 0.6201722017220173,
"repo_name": "Tan0/ironic",
"id": "4076f8241d921e2d391ca0b2f680322dd3ffd21d",
"size": "4722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/common/fsm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "3345381"
}
],
"symlink_target": ""
} |
import asyncio
from itertools import chain
from typing import Any, List
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.routing
import tornado.template
import tornado.web
from tornado.options import define, options
from bgmi.config import FRONT_STATIC_PATH, IS_WINDOWS, SAVE_PATH, TORNADO_SERVE_STATIC_FILES
from bgmi.front.admin import API_MAP_GET, API_MAP_POST, AdminApiHandler, UpdateHandler
from bgmi.front.index import BangumiListHandler, IndexHandler
from bgmi.front.resources import BangumiHandler, CalendarHandler, RssHandler
# Command-line options (override with --port / --address).
define("port", default=8888, help="listen on the port", type=int)
define("address", default="0.0.0.0", help="binding at given address", type=str)
def make_app() -> tornado.web.Application:
    """Build the tornado Application serving the BGmi API and, optionally,
    the static front-end and downloaded bangumi files."""
    settings = {
        "autoreload": True,
        "gzip": True,
        "debug": True,
    }
    api_actions = "|".join(chain(API_MAP_GET.keys(), API_MAP_POST.keys()))
    handlers: List[Any] = [
        (r"^/api/(old|index)", BangumiListHandler),
        (r"^/resource/feed.xml$", RssHandler),
        (r"^/resource/calendar.ics$", CalendarHandler),
        (r"^/api/update", UpdateHandler),
        (rf"^/api/(?P<action>{api_actions})", AdminApiHandler),
    ]
    if TORNADO_SERVE_STATIC_FILES != "0":
        # Tornado itself serves the downloads and the built front-end.
        handlers.append(
            (r"/bangumi/(.*)", tornado.web.StaticFileHandler, {"path": SAVE_PATH})
        )
        handlers.append(
            (
                r"^/(.*)$",
                tornado.web.StaticFileHandler,
                {"path": FRONT_STATIC_PATH, "default_filename": "index.html"},
            )
        )
    else:
        # Static content is expected to come from an external web server.
        handlers.append((r"^/bangumi/?(.*)", BangumiHandler))
        handlers.append((r"^/(.*)$", IndexHandler))
    return tornado.web.Application(handlers, **settings)  # type: ignore
def main() -> None:
    """Parse command-line options, start the HTTP server and block."""
    if IS_WINDOWS:
        # Use the selector event loop; tornado needs add_reader support,
        # which the Windows proactor loop lacks.
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    tornado.options.parse_command_line()
    print(f"BGmi HTTP Server listening on {options.address}:{options.port:d}")
    server = tornado.httpserver.HTTPServer(make_app())
    server.listen(options.port, address=options.address)
    tornado.ioloop.IOLoop.current().start()
# Script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "6fd920c2205b45782719370a01c4206a",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 92,
"avg_line_length": 32.263888888888886,
"alnum_prop": 0.6198880757640981,
"repo_name": "BGmi/BGmi",
"id": "7057fb430345bea8d7312da27072fc9e1cd6c775",
"size": "2323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bgmi/front/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "177992"
},
{
"name": "Shell",
"bytes": "4874"
},
{
"name": "VBScript",
"bytes": "163"
}
],
"symlink_target": ""
} |
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: pil.py 163 2006-11-13 04:15:46Z Alex.Holkner $'
import sys
from ctypes import *
from pyglet.gl import *
from pyglet.image import *
from pyglet.image.codecs import *
from pyglet.window.carbon import carbon, quicktime, _oscheck
from pyglet.window.carbon.constants import _name
from pyglet.window.carbon.types import *
# ctypes aliases for the Carbon/QuickTime C types used below.
Handle = POINTER(POINTER(c_byte))  # classic Mac OS Handle (pointer to relocatable block)
GWorldPtr = c_void_p  # opaque QuickDraw offscreen graphics world
carbon.NewHandle.restype = Handle
# Four-character data-handler subtypes and data-ref extension keys.
HandleDataHandlerSubType = _name('hndl')
PointerDataHandlerSubType = _name('ptr ')
kDataHCanRead = 1
kDataRefExtensionFileName = _name('fnam')
kDataRefExtensionMIMEType = _name('mime')
ComponentInstance = c_void_p
# QuickDraw pixel-format constants (bit depth / channel layout).
k1MonochromePixelFormat = 0x00000001
k2IndexedPixelFormat = 0x00000002
k4IndexedPixelFormat = 0x00000004
k8IndexedPixelFormat = 0x00000008
k16BE555PixelFormat = 0x00000010
k24RGBPixelFormat = 0x00000018
k32ARGBPixelFormat = 0x00000020
k32BGRAPixelFormat = _name('BGRA')
k1IndexedGrayPixelFormat = 0x00000021
k2IndexedGrayPixelFormat = 0x00000022
k4IndexedGrayPixelFormat = 0x00000024
k8IndexedGrayPixelFormat = 0x00000028
kNativeEndianPixMap = 1 << 8
# Movie Toolbox constants.
newMovieActive = 1
noErr = 0
movieTrackMediaType = 1 << 0
movieTrackCharacteristic = 1 << 1
movieTrackEnabledOnly = 1 << 2
VisualMediaCharacteristic = _name('eyes')
nextTimeMediaSample = 1
class PointerDataRefRecord(Structure):
    """ctypes mirror of QuickTime's PointerDataRefRecord: a raw pointer to
    in-memory data plus its length, used to feed QuickTime from a buffer."""
    _fields_ = [
        ('data', c_void_p),
        ('dataLength', c_long)
    ]
def Str255(value):
    """Return *value* as a Pascal-style Str255 buffer: one length byte
    followed by the character data (Python 2 byte-string assumed; values
    longer than 255 characters would produce an invalid length byte)."""
    return create_string_buffer(chr(len(value)) + value)
class QuickTimeImageDecoder(ImageDecoder):
    """Image/animation decoder backed by Apple QuickTime (Carbon, Mac OS).

    Still images go through the Graphics Importer API; animated GIFs go
    through the Movie Toolbox, sampling each frame of the visual track.
    """
    def get_file_extensions(self):
        # Only most common ones shown here
        return ['.bmp', '.cur', '.gif', '.ico', '.jpg', '.jpeg', '.pcx', '.png',
                '.tga', '.tif', '.tiff', '.xbm', '.xpm']
    def get_animation_file_extensions(self):
        return ['.gif']
    def _get_data_ref(self, file, filename):
        """Wrap the file's bytes in a QuickTime pointer data reference.

        The raw bytes and the Str255 filename are held on self
        (_data_hold / _filename_hold) so the ctypes buffers stay alive
        while QuickTime references them.
        """
        self._data_hold = data = create_string_buffer(file.read())
        dataref = carbon.NewHandle(sizeof(PointerDataRefRecord))
        datarec = cast(dataref,
            POINTER(POINTER(PointerDataRefRecord))).contents.contents
        datarec.data = addressof(data)
        datarec.dataLength = len(data)
        self._data_handler_holder = data_handler = ComponentInstance()
        r = quicktime.OpenADataHandler(dataref, PointerDataHandlerSubType,
            None, 0, None, kDataHCanRead, byref(data_handler))
        _oscheck(r)
        extension_handle = Handle()
        # Attach the filename so QuickTime can guess the format from it.
        self._filename_hold = filename = Str255(filename)
        r = carbon.PtrToHand(filename, byref(extension_handle), len(filename))
        r = quicktime.DataHSetDataRefExtension(data_handler, extension_handle,
            kDataRefExtensionFileName)
        _oscheck(r)
        quicktime.DisposeHandle(extension_handle)
        quicktime.DisposeHandle(dataref)
        dataref = c_void_p()
        r = quicktime.DataHGetDataRef(data_handler, byref(dataref))
        _oscheck(r)
        quicktime.CloseComponent(data_handler)
        return dataref
    def _get_formats(self):
        """Return (pyglet format string, QuickTime pixel format constant)
        matching the host byte order."""
        # TODO choose 24 bit where appropriate.
        if sys.byteorder == 'big':
            format = 'ARGB'
            qtformat = k32ARGBPixelFormat
        else:
            format = 'BGRA'
            qtformat = k32BGRAPixelFormat
        return format, qtformat
    def decode(self, file, filename):
        """Decode a still image: import, draw into an offscreen GWorld
        backed by our own buffer, and wrap the buffer in ImageData.
        Raises ImageDecodeException when QuickTime cannot handle it."""
        dataref = self._get_data_ref(file, filename)
        importer = ComponentInstance()
        quicktime.GetGraphicsImporterForDataRef(dataref,
            PointerDataHandlerSubType, byref(importer))
        if not importer:
            raise ImageDecodeException(filename or file)
        rect = Rect()
        quicktime.GraphicsImportGetNaturalBounds(importer, byref(rect))
        width = rect.right
        height = rect.bottom
        format, qtformat = self._get_formats()
        buffer = (c_byte * (width * height * len(format)))()
        world = GWorldPtr()
        quicktime.QTNewGWorldFromPtr(byref(world), qtformat,
            byref(rect), c_void_p(), c_void_p(), 0, buffer,
            len(format) * width)
        quicktime.GraphicsImportSetGWorld(importer, world, c_void_p())
        result = quicktime.GraphicsImportDraw(importer)
        quicktime.DisposeGWorld(world)
        quicktime.CloseComponent(importer)
        if result != 0:
            raise ImageDecodeException(filename or file)
        pitch = len(format) * width
        # Negative pitch: QuickTime draws top-down, pyglet stores bottom-up.
        return ImageData(width, height, format, buffer, -pitch)
    def decode_animation(self, file, filename):
        """Decode an animation (GIF) via the Movie Toolbox, stepping the
        visual track sample by sample and copying each rendered frame."""
        # TODO: Stop playing chicken with the GC
        # TODO: Cleanup in errors
        quicktime.EnterMovies()
        data_ref = self._get_data_ref(file, filename)
        if not data_ref:
            raise ImageDecodeException(filename or file)
        movie = c_void_p()
        id = c_short()
        result = quicktime.NewMovieFromDataRef(byref(movie),
                                               newMovieActive,
                                               0,
                                               data_ref,
                                               PointerDataHandlerSubType)
        if not movie:
            #_oscheck(result)
            raise ImageDecodeException(filename or file)
        quicktime.GoToBeginningOfMovie(movie)
        time_scale = float(quicktime.GetMovieTimeScale(movie))
        format, qtformat = self._get_formats()
        # Get movie width and height
        rect = Rect()
        quicktime.GetMovieBox(movie, byref(rect))
        width = rect.right
        height = rect.bottom
        pitch = len(format) * width
        # Set gworld
        buffer = (c_byte * (width * height * len(format)))()
        world = GWorldPtr()
        quicktime.QTNewGWorldFromPtr(byref(world), qtformat,
            byref(rect), c_void_p(), c_void_p(), 0, buffer,
            len(format) * width)
        quicktime.SetGWorld(world, 0)
        quicktime.SetMovieGWorld(movie, world, 0)
        visual = quicktime.GetMovieIndTrackType(movie, 1,
                                                VisualMediaCharacteristic,
                                                movieTrackCharacteristic)
        if not visual:
            raise ImageDecodeException('No video track')
        time = 0
        interesting_time = c_int()
        quicktime.GetTrackNextInterestingTime(
            visual,
            nextTimeMediaSample,
            time,
            1,
            byref(interesting_time),
            None)
        duration = interesting_time.value / time_scale
        frames = []
        while time >= 0:
            result = quicktime.GetMoviesError()
            if result == noErr:
                # force redraw
                result = quicktime.UpdateMovie(movie)
            if result == noErr:
                # process movie
                quicktime.MoviesTask(movie, 0)
                result = quicktime.GetMoviesError()
            _oscheck(result)
            # Snapshot the GWorld buffer for this frame before advancing.
            buffer_copy = (c_byte * len(buffer))()
            memmove(buffer_copy, buffer, len(buffer))
            image = ImageData(width, height, format, buffer_copy, -pitch)
            frames.append(AnimationFrame(image, duration))
            interesting_time = c_int()
            duration = c_int()
            quicktime.GetTrackNextInterestingTime(
                visual,
                nextTimeMediaSample,
                time,
                1,
                byref(interesting_time),
                byref(duration))
            quicktime.SetMovieTimeValue(movie, interesting_time)
            time = interesting_time.value
            duration = duration.value / time_scale
            # Guard against zero/negative frame durations from the file.
            if duration <= 0.01:
                duration = 0.1
        quicktime.DisposeMovie(movie)
        carbon.DisposeHandle(data_ref)
        quicktime.ExitMovies()
        return Animation(frames)
def get_decoders():
    """Codec module hook: return the image decoders this module provides."""
    return [QuickTimeImageDecoder()]
def get_encoders():
    """Codec module hook: this codec is decode-only, so no encoders."""
    return []
| {
"content_hash": "38b47a030a527ea59ea0c6281035cf27",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 80,
"avg_line_length": 32.003937007874015,
"alnum_prop": 0.5936769590355517,
"repo_name": "Codlydodly/python-client",
"id": "7fb39e84f59f824fe893276c1abedfc47937dee0",
"size": "9845",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/pyglet/image/codecs/quicktime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4631454"
},
{
"name": "Shell",
"bytes": "3753"
}
],
"symlink_target": ""
} |
import datetime
import time
import re
class AmbiguousLookup(ValueError):
    """Raised by complete() when a prefix matches more than one candidate;
    args carry the message and the sorted list of matching candidates."""
    pass
class NoMatch(ValueError):
    """Raised by complete() when no candidate matches the lookup string."""
    pass
def complete(it, lookup, key_desc):
    """Resolve *lookup* against the candidates in *it*.

    An exact match wins immediately; otherwise a unique prefix match is
    returned.  Raises AmbiguousLookup on a second prefix hit and NoMatch
    when nothing matches (*key_desc* names the kind of key in messages).
    """
    prefix_hit = None
    for candidate in it:
        if candidate == lookup:
            return candidate
        if candidate.startswith(lookup):
            if prefix_hit is not None:
                matches = sorted(c for c in it if c.startswith(lookup))
                raise AmbiguousLookup('ambiguous %s "%s":' %
                                      (key_desc, lookup), matches)
            prefix_hit = candidate
    if prefix_hit is None:
        raise NoMatch('no such %s "%s".' % (key_desc, lookup))
    return prefix_hit
def pprint_table(table, footer_row=False):
    """Print *table* (a list of rows, each a list of strings) in aligned
    columns padded with at least three spaces.

    When *footer_row* is true the last row is excluded from the width
    calculation (e.g. a totals row allowed to be wider).

    NOTE: Python 2 only (``print`` statement, ``xrange``).
    """
    if footer_row:
        check = table[:-1]
    else:
        check = table
    # Column width = widest cell in that column plus 3 spaces of padding.
    widths = [3 + max(len(row[col]) for row in check) for col
              in xrange(len(table[0]))]
    for row in table:
        # Don't pad the final column
        first_cols = [cell + ' ' * (spacing - len(cell))
                      for (cell, spacing) in zip(row[:-1], widths[:-1])]
        print ''.join(first_cols + [row[-1]])
# Accepted input forms for parse_date_time(): each entry is
# (compiled_regex, text_prepended, text_appended) so that every form is
# normalised to the full "%Y-%m-%d %H:%M:%S" layout before strptime.
# today_str is captured once at import time — TODO confirm that a process
# running past midnight accepting bare "HH:MM" inputs is acceptable.
today_str = time.strftime("%Y-%m-%d", datetime.datetime.now().timetuple())
matches = [(re.compile(r'^\d+:\d+$'), today_str + " ", ":00"),
           (re.compile(r'^\d+:\d+:\d+$'), today_str + " ", ""),
           (re.compile(r'^\d+-\d+-\d+$'), "", " 00:00:00"),
           (re.compile(r'^\d+-\d+-\d+\s+\d+:\d+$'), "", ":00"),
           (re.compile(r'^\d+-\d+-\d+\s+\d+:\d+:\d+$'), "", ""),
           ]
# Canonical layout every normalised input is parsed with.
fmt = "%Y-%m-%d %H:%M:%S"
def parse_date_time(dt_str):
    """Parse a date/time string into a Unix timestamp (int, local time).

    Accepted forms (see the module-level ``matches`` table): 'HH:MM' and
    'HH:MM:SS' (today's date assumed), 'YYYY-MM-DD' (midnight assumed),
    'YYYY-MM-DD HH:MM' and 'YYYY-MM-DD HH:MM:SS'.

    :raises ValueError: for any other format.

    Fix: the old ``raise ValueError, "msg"`` statement is Python-2-only
    syntax; the call form used here is valid in both Python 2 and 3.
    """
    for (patt, prepend, postpend) in matches:
        if patt.match(dt_str):
            res = time.strptime(prepend + dt_str + postpend, fmt)
            return int(time.mktime(res))
    raise ValueError("%s is not in a valid time format" % dt_str)
def parse_date_time_or_now(dt_str):
    """Like parse_date_time(), but an empty/None value means "right now"."""
    if not dt_str:
        return int(time.time())
    return parse_date_time(dt_str)
def timedelta_hms_display(timedelta):
    """Format a datetime.timedelta as a zero-padded 'HH:MM:SS' string,
    with days folded into the hour count (e.g. 1 day 1h -> '25:00:00').

    Negative deltas are not handled specially — timedelta normalisation
    makes .days negative and .seconds positive, so callers should pass
    non-negative durations.

    Fix: use floor division (``//``) — ``/`` relied on Python 2 integer
    division; ``//`` is identical there and explicit/correct on Python 3.
    """
    hours = timedelta.days * 24 + timedelta.seconds // 3600
    minutes = timedelta.seconds // 60 % 60
    seconds = timedelta.seconds % 60
    return '%02d:%02d:%02d' % (hours, minutes, seconds)
| {
"content_hash": "f5286b9db725ca15565cd3167d65b4a2",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 74,
"avg_line_length": 33.54545454545455,
"alnum_prop": 0.5365853658536586,
"repo_name": "vogdan/timebook",
"id": "dbe33a3677a846a1e78fbe3b33ff54f922b213a6",
"size": "3328",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "timebook/cmdutil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38498"
}
],
"symlink_target": ""
} |
from streamsx.topology.topology import *
import streamsx.topology.context
from streamsx.topology.schema import *
import sys
import pytest_funcs
def main():
    """Build and submit a topology that subscribes to JSON tuples, maps
    them through pytest_funcs.json_add, and republishes the results.

    Expects the toolkit directory as the first command-line argument.
    """
    toolkit_dir = sys.argv[1]
    topo = Topology("json_map_json")
    stream = topo.subscribe("pytest/json/map", schema=CommonSchema.Json)
    stream = stream.map(pytest_funcs.json_add)
    stream.publish("pytest/json/map/result", schema=CommonSchema.Json)
    config = {'topology.toolkitDir': toolkit_dir}
    streamsx.topology.context.submit("TOOLKIT", topo.graph, config)
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "a0d768f6bf6a995c00eb0c5872632ccc",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 66,
"avg_line_length": 21.96,
"alnum_prop": 0.7085610200364298,
"repo_name": "IBMStreams/streamsx.topology",
"id": "feddbd1b508f64c0c17d20c94d52b0be5a037f36",
"size": "615",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "test/python/pubsub/json_map_json.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15862"
},
{
"name": "C++",
"bytes": "189639"
},
{
"name": "HTML",
"bytes": "11074"
},
{
"name": "Java",
"bytes": "2253833"
},
{
"name": "Makefile",
"bytes": "10174"
},
{
"name": "Perl",
"bytes": "2563"
},
{
"name": "Python",
"bytes": "1949128"
},
{
"name": "Raku",
"bytes": "37043"
},
{
"name": "Scala",
"bytes": "11007"
},
{
"name": "Shell",
"bytes": "16265"
}
],
"symlink_target": ""
} |
from django.utils.html import strip_tags
class Highlighter(object):
    """Wrap occurrences of query words in a text block with an HTML tag.

    highlight() strips tags from the input, locates every occurrence of
    each (case-insensitive, non-negated) query word, picks the densest
    window of at most ``max_length`` characters, and renders that window
    with each hit wrapped in ``<html_tag class="css_class">...</html_tag>``.
    """
    # Class-level defaults; overridable per instance via __init__ kwargs.
    css_class = 'highlighted'
    html_tag = 'span'
    max_length = 200
    text_block = ''
    def __init__(self, query, **kwargs):
        """Store the query and any max_length/html_tag/css_class overrides."""
        self.query = query
        if 'max_length' in kwargs:
            self.max_length = int(kwargs['max_length'])
        if 'html_tag' in kwargs:
            self.html_tag = kwargs['html_tag']
        if 'css_class' in kwargs:
            self.css_class = kwargs['css_class']
        # Lower-cased query terms; words starting with '-' (negated) are skipped.
        self.query_words = set([word.lower() for word in self.query.split() if not word.startswith('-')])
    def highlight(self, text_block):
        """Return the highlighted excerpt of *text_block* (tags stripped)."""
        self.text_block = strip_tags(text_block)
        highlight_locations = self.find_highlightable_words()
        start_offset, end_offset = self.find_window(highlight_locations)
        return self.render_html(highlight_locations, start_offset, end_offset)
    def find_highlightable_words(self):
        """Map each query word to the list of its offsets in the text
        (case-insensitive, non-overlapping, left to right)."""
        # Use a set so we only do this once per unique word.
        word_positions = {}
        # Pre-compute the length.
        end_offset = len(self.text_block)
        lower_text_block = self.text_block.lower()
        for word in self.query_words:
            if not word in word_positions:
                word_positions[word] = []
            start_offset = 0
            while start_offset < end_offset:
                next_offset = lower_text_block.find(word, start_offset, end_offset)
                # If we get a -1 out of find, it wasn't found. Bomb out and
                # start the next word.
                if next_offset == -1:
                    break
                word_positions[word].append(next_offset)
                start_offset = next_offset + len(word)
        return word_positions
    def find_window(self, highlight_locations):
        """Choose the (start, end) character window of width max_length
        containing the densest cluster of word hits; defaults to the
        document start when there are no hits."""
        best_start = 0
        best_end = self.max_length
        # First, make sure we have words.
        if not len(highlight_locations):
            return (best_start, best_end)
        words_found = []
        # Next, make sure we found any words at all.
        for word, offset_list in highlight_locations.items():
            if len(offset_list):
                # Add all of the locations to the list.
                words_found.extend(offset_list)
        if not len(words_found):
            return (best_start, best_end)
        if len(words_found) == 1:
            return (words_found[0], words_found[0] + self.max_length)
        # Sort the list so it's in ascending order.
        words_found = sorted(words_found)
        # We now have a denormalized list of all positions were a word was
        # found. We'll iterate through and find the densest window we can by
        # counting the number of found offsets (-1 to fit in the window).
        highest_density = 0
        if words_found[:-1][0] > self.max_length:
            best_start = words_found[:-1][0]
            best_end = best_start + self.max_length
        for count, start in enumerate(words_found[:-1]):
            current_density = 1
            for end in words_found[count + 1:]:
                if end - start < self.max_length:
                    current_density += 1
                else:
                    current_density = 0
                # Only replace if we have a bigger (not equal density) so we
                # give deference to windows earlier in the document.
                if current_density > highest_density:
                    best_start = start
                    best_end = start + self.max_length
                    highest_density = current_density
        return (best_start, best_end)
    def render_html(self, highlight_locations=None, start_offset=None, end_offset=None):
        """Render the window [start_offset:end_offset], wrapping each
        verified hit in the highlight tag and adding '...' ellipses when
        the window is clipped at either end."""
        # Start by chopping the block down to the proper window.
        text = self.text_block[start_offset:end_offset]
        # Invert highlight_locations to a location -> term list
        term_list = []
        for term, locations in highlight_locations.items():
            term_list += [(loc - start_offset, term) for loc in locations]
        loc_to_term = sorted(term_list)
        # Prepare the highlight template
        if self.css_class:
            hl_start = '<%s class="%s">' % (self.html_tag, self.css_class)
        else:
            hl_start = '<%s>' % (self.html_tag)
        hl_end = '</%s>' % self.html_tag
        # Copy the part from the start of the string to the first match,
        # and there replace the match with a highlighted version.
        highlighted_chunk = ""
        matched_so_far = 0
        prev = 0
        prev_str = ""
        for cur, cur_str in loc_to_term:
            # This can be in a different case than cur_str
            actual_term = text[cur:cur + len(cur_str)]
            # Handle incorrect highlight_locations by first checking for the term
            if actual_term.lower() == cur_str:
                if cur < prev + len(prev_str):
                    continue
                highlighted_chunk += text[prev + len(prev_str):cur] + hl_start + actual_term + hl_end
                prev = cur
                prev_str = cur_str
                # Keep track of how far we've copied so far, for the last step
                matched_so_far = cur + len(actual_term)
        # Don't forget the chunk after the last term
        highlighted_chunk += text[matched_so_far:]
        if start_offset > 0:
            highlighted_chunk = '...%s' % highlighted_chunk
        if end_offset < len(self.text_block):
            highlighted_chunk = '%s...' % highlighted_chunk
        return highlighted_chunk
| {
"content_hash": "385b40ffa9699542f19286408d0e24bb",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 105,
"avg_line_length": 36.98136645962733,
"alnum_prop": 0.5290560967416863,
"repo_name": "hellhovnd/django-haystack",
"id": "1ede8036ae9b2d74f3db9c4219d8309c644fafab",
"size": "5954",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "haystack/utils/highlighting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1431"
},
{
"name": "Python",
"bytes": "672800"
},
{
"name": "Shell",
"bytes": "1583"
}
],
"symlink_target": ""
} |
from .lims_calibratorsAndMixes_io import lims_calibratorsAndMixes_io
class lims_calibratorsAndMixes_execute(lims_calibratorsAndMixes_io):
pass; | {
"content_hash": "37d3adcf276454e169bf56a63e0ee564",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 68,
"avg_line_length": 37,
"alnum_prop": 0.8378378378378378,
"repo_name": "dmccloskey/SBaaS_LIMS",
"id": "833f7f46d0039b17a4e65e0056b2bc9576256aa8",
"size": "149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SBaaS_LIMS/lims_calibratorsAndMixes_execute.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "349505"
}
],
"symlink_target": ""
} |
import enum
class SeverityNumber(enum.Enum):
    """Numerical value of severity.
    Smaller numerical values correspond to less severe events
    (such as debug events), larger numerical values correspond
    to more severe events (such as errors and critical events).
    See the `Log Data Model`_ spec for more info and how to map the
    severity from source format to OTLP Model.
    .. _Log Data Model: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-severitynumber
    """
    # Each named band (TRACE/DEBUG/INFO/WARN/ERROR/FATAL) spans four
    # consecutive values; the bare name is the least severe of its band.
    UNSPECIFIED = 0
    TRACE = 1
    TRACE2 = 2
    TRACE3 = 3
    TRACE4 = 4
    DEBUG = 5
    DEBUG2 = 6
    DEBUG3 = 7
    DEBUG4 = 8
    INFO = 9
    INFO2 = 10
    INFO3 = 11
    INFO4 = 12
    WARN = 13
    WARN2 = 14
    WARN3 = 15
    WARN4 = 16
    ERROR = 17
    ERROR2 = 18
    ERROR3 = 19
    ERROR4 = 20
    FATAL = 21
    FATAL2 = 22
    FATAL3 = 23
    FATAL4 = 24
# Maps stdlib logging levelno values 10..53 to OTLP severities.  Standard
# levels (DEBUG=10, INFO=20, WARNING=30, ERROR=40, CRITICAL=50) map to a
# band's base value, the next three integers map to the band's finer
# variants 2-4, and the remaining offsets saturate at the band's most
# severe variant.  Values outside 10..53 are handled by std_to_otlp().
_STD_TO_OTLP = {
    10: SeverityNumber.DEBUG,
    11: SeverityNumber.DEBUG2,
    12: SeverityNumber.DEBUG3,
    13: SeverityNumber.DEBUG4,
    14: SeverityNumber.DEBUG4,
    15: SeverityNumber.DEBUG4,
    16: SeverityNumber.DEBUG4,
    17: SeverityNumber.DEBUG4,
    18: SeverityNumber.DEBUG4,
    19: SeverityNumber.DEBUG4,
    20: SeverityNumber.INFO,
    21: SeverityNumber.INFO2,
    22: SeverityNumber.INFO3,
    23: SeverityNumber.INFO4,
    24: SeverityNumber.INFO4,
    25: SeverityNumber.INFO4,
    26: SeverityNumber.INFO4,
    27: SeverityNumber.INFO4,
    28: SeverityNumber.INFO4,
    29: SeverityNumber.INFO4,
    30: SeverityNumber.WARN,
    31: SeverityNumber.WARN2,
    32: SeverityNumber.WARN3,
    33: SeverityNumber.WARN4,
    34: SeverityNumber.WARN4,
    35: SeverityNumber.WARN4,
    36: SeverityNumber.WARN4,
    37: SeverityNumber.WARN4,
    38: SeverityNumber.WARN4,
    39: SeverityNumber.WARN4,
    40: SeverityNumber.ERROR,
    41: SeverityNumber.ERROR2,
    42: SeverityNumber.ERROR3,
    43: SeverityNumber.ERROR4,
    44: SeverityNumber.ERROR4,
    45: SeverityNumber.ERROR4,
    46: SeverityNumber.ERROR4,
    47: SeverityNumber.ERROR4,
    48: SeverityNumber.ERROR4,
    49: SeverityNumber.ERROR4,
    50: SeverityNumber.FATAL,
    51: SeverityNumber.FATAL2,
    52: SeverityNumber.FATAL3,
    53: SeverityNumber.FATAL4,
}
def std_to_otlp(levelno: int) -> SeverityNumber:
    """
    Map python log levelno as defined in https://docs.python.org/3/library/logging.html#logging-levels
    to OTLP log severity number.
    """
    # below the known range -> UNSPECIFIED; above it -> clamp onto FATAL4 (53)
    if levelno < 10:
        return SeverityNumber.UNSPECIFIED
    return _STD_TO_OTLP[min(levelno, 53)]
| {
"content_hash": "a5b3171608e18de7df74d0fbd00f4a04",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 149,
"avg_line_length": 26.207920792079207,
"alnum_prop": 0.6728371741594258,
"repo_name": "open-telemetry/opentelemetry-python",
"id": "25703759909483b60e871fd05d15a5bedeedef0e",
"size": "3232",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "opentelemetry-sdk/src/opentelemetry/sdk/_logs/severity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "1683"
},
{
"name": "Python",
"bytes": "1788131"
},
{
"name": "Shell",
"bytes": "6950"
},
{
"name": "Thrift",
"bytes": "17840"
}
],
"symlink_target": ""
} |
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
"""example_dasch_methods.py.
"""
import numpy as np
import abel
import matplotlib.pyplot as plt
# Dribinski sample image, 501x501 pixels
n = 501
sample = abel.tools.analytical.sample_image(n)

# reference speed distribution from one quadrant of the original image,
# normalized to its own peak value
sample_q = abel.tools.symmetry.get_image_quadrants(sample)
ref_speed = abel.tools.vmi.angular_integration(sample_q[0], origin=(0,0))
scale_factor = ref_speed[1].max()
plt.plot(ref_speed[0], ref_speed[1]/scale_factor, linestyle='dashed',
         label="Dribinski sample")

# forward Abel projection of the sample, then split into quadrants
projected = abel.Transform(sample, direction="forward",
                           method="hansenlaw").transform
proj_q = abel.tools.symmetry.get_image_quadrants(projected)

# the three Dasch inverse-Abel methods under comparison
dasch_transform = {
    "two_point": abel.dasch.two_point_transform,
    "three_point": abel.dasch.three_point_transform,
    "onion_peeling": abel.dasch.onion_peeling_transform,
}

for method, transform in dasch_transform.items():
    # inverse Abel transform of a fresh copy of the projected quadrant
    inverted = transform(proj_q[0].copy())
    # speed distribution, rescaled to match the reference at radius index 14
    speed = abel.tools.vmi.angular_integration(inverted, origin=(0,0))
    plt.plot(speed[0],
             speed[1]*ref_speed[1][14]/speed[1][14]/scale_factor,
             label=method)

plt.title("Dasch methods for Dribinski sample image $n={:d}$".format(n))
plt.axis(xmax=250, ymin=-0.1)
plt.legend(loc=0, frameon=False, labelspacing=0.1, fontsize='small')
plt.savefig("example_dasch_methods.png",dpi=100)
plt.show()
| {
"content_hash": "b612a123c8baa29ba8bc6d1e2cc9ec24",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 77,
"avg_line_length": 29.78846153846154,
"alnum_prop": 0.7256294383473209,
"repo_name": "rth/PyAbel",
"id": "0d75ae16e1e05beab69e70e59968522445a47f9b",
"size": "1573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/example_dasch_methods.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187936"
}
],
"symlink_target": ""
} |
'''
Simple RPC
Copyright (c) 2012-2013, LastSeal S.A.
'''
from simplerpc.common.python3 import portable_getargspec
from simplerpc.expose_api.javascript.base import JsTranslatorBase
from simplerpc.base.SimpleRpcLogicBase import SimpleRpcLogicBase
from simplerpc.expose_api.javascript.data_model import TranslationAstNode, \
AutoTemplateAstNode
from simplerpc.expose_api.decorators import getDecoratorsList
from simplerpc.context.SimpleRpcContext import SimpleRpcContext
class ClassToJs(JsTranslatorBase):
    '''
    Translate a Python class into an exposed class in Javascript.
    Builds a translation AST describing the class and its RPC methods.
    '''
    def translateClass(self, class_):
        '''Return the translation AST node for *class_*.'''
        namespace = self._getJsNamespace(class_)
        class_node = TranslationAstNode('classes.ExposedClass')
        class_node.translate(CLASS_NAME=class_.__name__,
                             NAMESPACE='/'.join(namespace.split('.')))
        methods_node = AutoTemplateAstNode()
        # one sub-node per decorator type, but only when that decorator
        # actually exposes methods on the class
        for decorator_class in getDecoratorsList():
            type_node = self._getMethodTypeNode(class_, namespace, decorator_class)
            if type_node:
                methods_node.translate(decorator_class.__name__, type_node)
        class_node.translate(METHODS=methods_node)
        return class_node
    def _getArgsJs(self, method_args):
        '''Render positional arguments for the JS side ("a ,b," style).'''
        if not method_args:
            return ''
        return ' ,'.join(method_args) + ','
    def _getKwargsJs(self, method_args):
        '''Render the JS kwargs object body ("'a':a," style).'''
        return ''.join('%r:%s,' % (arg, arg) for arg in method_args)
    def _getClassInstance(self, class_):
        # SimpleRpcLogicBase subclasses take the rpc context at construction
        if issubclass(class_, SimpleRpcLogicBase):
            return class_(self.context)
        return class_()
    def _getMethodTypeNode(self, class_, class_namespace, decorator_class):
        '''Return an AST node for all methods exposed via *decorator_class*,
        or None when the class exposes no such methods.'''
        instance = self._getClassInstance(class_)
        exposed = instance.exposedMethods(decorator_class)
        if not exposed:
            return None
        methods_node = AutoTemplateAstNode()
        for method_name in exposed:
            node = TranslationAstNode('methods.%s' % decorator_class.__name__)
            method = getattr(instance, method_name)
            if hasattr(method, 'method'): #TODO should look for getDecoratedMethod
                method = method.method
            # drop the implicit self argument
            args = portable_getargspec(method)[0][1:]
            node.translate(METHOD_NAME=method_name,
                           ARGS=self._getArgsJs(args),
                           KWARGS=self._getKwargsJs(args),
                           RPC_METHOD=class_namespace + '.' + method_name,)
            methods_node.translate(method_name, node)
        return methods_node
def smokeTestModule():
    '''Smoke test: translate a sample exposed class and log the generated JS.'''
    ctx = SimpleRpcContext('smoketest')
    translator = ClassToJs(ctx)
    from example_rpc.exposed_api.images.ImagesBrowser import ImagesBrowser
    tree = translator.translateClass(ImagesBrowser)
    ctx.log(tree)
    from simplerpc.expose_api.javascript.TemplatesCollector import TemplatesCollector
    templates = TemplatesCollector(ctx).collectBuiltIn()
    ctx.log(tree.getString(templates))
if __name__ == "__main__":
    smokeTestModule()
| {
"content_hash": "ae23d783843ff8a941eb1eb0ab6e6793",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 86,
"avg_line_length": 39.53488372093023,
"alnum_prop": 0.6402941176470588,
"repo_name": "joaduo/python-simplerpc",
"id": "84ee38c754a4cb8196b9986cca415f5135dd19ea",
"size": "3424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simplerpc/expose_api/javascript/ClassToJs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "37891"
},
{
"name": "Python",
"bytes": "96545"
}
],
"symlink_target": ""
} |
'''
The MIT License (MIT)
Copyright (c) 2007 ~ 2015, Hong-She Liang <starofrainnight@gmail.com>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
'''
Bootstrap rabird setup environment
@date 2015-08-20
@author Hong-She Liang <starofrainnight@gmail.com>
'''
import os
import os.path
import platform
import subprocess
def _clean_check(cmd, target):
"""
Run the command to download target. If the command fails, clean up before
re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
    """
    Download *url* to *target* using Powershell (which validates the TLS
    trust chain). Raise an exception if the command cannot complete.
    """
    # absolutize first: the generated script embeds the target path
    target = os.path.abspath(target)
    ps_script = "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars()
    _clean_check(['powershell', '-Command', ps_script], target)
def has_powershell():
    """Return True when a working ``powershell`` binary is available.

    Only possible on Windows; on other platforms this is False without
    probing.
    """
    if platform.system() != 'Windows':
        return False
    cmd = ['powershell', '-Command', 'echo test']
    # 'with' guarantees the devnull handle is closed on every path;
    # 'except Exception' (not a bare except) so Ctrl-C still propagates
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except Exception:
            return False
    return True
# availability probe consulted by get_best_downloader()
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
    """Fetch *url* into *target* with the curl command-line tool."""
    _clean_check(['curl', url, '--silent', '--output', target], target)
def has_curl():
    """Return True when the ``curl`` command can be executed on this host."""
    cmd = ['curl', '--version']
    # 'with' guarantees the devnull handle is closed on every path;
    # 'except Exception' (not a bare except) so Ctrl-C still propagates
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except Exception:
            return False
    return True
# availability probe consulted by get_best_downloader()
download_file_curl.viable = has_curl
def download_file_wget(url, target):
    """Fetch *url* into *target* with the wget command-line tool."""
    _clean_check(['wget', url, '--quiet', '--output-document', target], target)
def has_wget():
    """Return True when the ``wget`` command can be executed on this host."""
    cmd = ['wget', '--version']
    # 'with' guarantees the devnull handle is closed on every path;
    # 'except Exception' (not a bare except) so Ctrl-C still propagates
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except Exception:
            return False
    return True
# availability probe consulted by get_best_downloader()
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
    """
    Download the file with plain Python, even though the connection
    cannot be authenticated. Last-resort fallback.
    """
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    src = dst = None
    try:
        src = urlopen(url)
        # one read/write round trip, so an interrupted transfer never
        # leaves a half-written file behind
        payload = src.read()
        dst = open(target, "wb")
        dst.write(payload)
    finally:
        for handle in (src, dst):
            if handle:
                handle.close()
# last-resort fallback: always available
download_file_insecure.viable = lambda: True
def get_best_downloader():
    """Return the first viable downloader, most trustworthy first (or None)."""
    candidates = (
        download_file_powershell,
        download_file_curl,
        download_file_wget,
        download_file_insecure,
    )
    return next((dl for dl in candidates if dl.viable()), None)
def download(url):
    """Download *url* into the current directory, keeping its basename."""
    best = get_best_downloader()
    best(url, os.path.basename(url))
def use_pip():
    """Ensure pip is importable, bootstrapping it via get-pip.py when missing.

    Narrowed the original bare ``except:`` to ``except ImportError`` so that
    unrelated failures (including KeyboardInterrupt) are no longer swallowed
    and mistaken for "pip is missing".
    """
    try:
        import pip
    except ImportError:
        import os
        import sys
        # No pip available: fetch and run the official installer, which
        # also installs setuptools and wheel.
        url = "https://bootstrap.pypa.io/get-pip.py"
        filename = os.path.basename(url)
        download(url)
        os.system("%s %s" % (sys.executable, filename))
def use_rabird():
    """Ensure ``rabird.core`` is importable, installing it via pip if needed.

    Narrowed the original bare ``except:`` to ``except ImportError`` so that
    genuine errors raised while importing rabird.core are not silently
    treated as "package missing".
    """
    try:
        import rabird.core
    except ImportError:
        use_pip()
        import pip
        pip.main(["install", "rabird.core"])
| {
"content_hash": "8c8a7f19c57ebe406d4d2b139305e594",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 87,
"avg_line_length": 27.931818181818183,
"alnum_prop": 0.6362896663954435,
"repo_name": "starofrainnight/u8gettext",
"id": "7f386e0579c448f67b4ce4a024115dfd7cfd9ba5",
"size": "4916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rabird_bootstrap.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "17046"
}
],
"symlink_target": ""
} |
import time
import yaml
import os
import sys
import requests
import re
import json
#define access levels, as per the gitlab documentation
access_levels = {}
access_levels['GUEST'] = 10
access_levels['REPORTER'] = 20
access_levels['DEVELOPER'] = 30
access_levels['MASTER'] = 40
access_levels['OWNER'] = 50
# Load the YAML configuration pointed to by GITLAB_CONFIG_PATH; bail out
# on any failure since nothing below can run without it.
# NOTE(review): yaml.load without an explicit Loader can construct arbitrary
# Python objects; use yaml.safe_load if the config file is not fully trusted.
try:
    config_file = os.environ['GITLAB_CONFIG_PATH'] + "/config.yml"
    with open(config_file,'r') as configuration_file:
        config = yaml.load(configuration_file)
        if config is None:
            print "no config"
            exit(-1)
except Exception as e:
    # If the config file cannot be imported as a dictionary, bail!
    print e
    sys.exit(-1)
#setup proxy settings
# Mirror whichever proxy values exist into both lower- and upper-case
# environment variables; each of http/https falls back to the other.
if 'http_proxy' in config or 'https_proxy' in config:
    if 'http_proxy' in config:
        os.environ['http_proxy'] = config['http_proxy']
        os.environ['HTTP_PROXY'] = config['http_proxy']
    else:
        os.environ['http_proxy'] = config['https_proxy']
        os.environ['HTTP_PROXY'] = config['https_proxy']
    if 'https_proxy' in config:
        os.environ['https_proxy'] = config['https_proxy']
        os.environ['HTTPS_PROXY'] = config['https_proxy']
    else:
        os.environ['https_proxy'] = config['http_proxy']
        os.environ['HTTPS_PROXY'] = config['http_proxy']
    if 'no_proxy' in config:
        os.environ['no_proxy'] = config['no_proxy']
        os.environ['NO_PROXY'] = config['no_proxy']
    else:
        os.environ['no_proxy'] = "127.0.0.1,localhost"
        os.environ['NO_PROXY'] = "127.0.0.1,localhost"
if not 'collector_address' in config:
    print "[Error]: Collector address not found in configuration file"
    sys.exit(-1)
# Write the collector address where the verigreen hook expects it.
with open(os.environ['VG_HOOK']+"/hook.properties",'w') as f:
    f.write("collector.address="+ config['collector_address'])
limit = 600 #10 minute timeout by default
total_time =0
wait_time = 10
url = "http://localhost"
preg = re.compile('/sign_in\Z')
success = False
# Poll the local GitLab instance until its root URL redirects to the
# sign-in page (i.e. the service is fully up), up to `limit` seconds.
while total_time < limit:
    try:
        r = requests.get(url)
        if preg.search(r.url):
            success = True
            break
        if r:
            print r.url
    except Exception as e:
        pass
    total_time+=wait_time
    time.sleep(wait_time)
if not success:
    print "gitlab failed to start"
    sys.exit(-1)
print "Gitlab is up, starting setup"
# Obtain a private API token for subsequent requests.
# NOTE(review): '5iveL!fe' is GitLab's well-known initial root password for
# this image; it is rotated further below when config['admin'] is present.
api = "http://localhost/api/v3/"
com = "session"
req = api + com
auth = {'login':'root',
        'password':'5iveL!fe'
       }
try:
    r = requests.post(req,
                      data = auth
                      )
    token = r.json()['private_token']
    print "got token"
except Exception as e:
    print "failure"
    print e
    print r.text
    sys.exit(-1)
header = {'PRIVATE-TOKEN':token}
# Modify root
# Rename root and replace the default password when the config asks for it.
if 'admin' in config and 'password' in config['admin']:
    #first get root id(i'm guessing zero....but who knows...)
    com = "/users?search=root"
    req = api + com
    try:
        r = requests.get(req, headers = header)
        root_id = r.json()[0]['id']
    except Exception as e:
        print e
    if 200 == r.status_code:
        com = "users/"+str(root_id)
        req = api + com
        try:
            root_data = {'name':config['admin']['name'],
                         'password':config['admin']['password'],
                         'admin':'true'
                         }
            r = requests.put(req,root_data,headers = header)
        except Exception as e:
            print e
# Create the users
# One POST per configured user; remember the returned ids so projects and
# team memberships can reference them later.
com = "users"
req = api + com
user_ids = {}
for user in config['users']:
    user_data = {'email':user['email'],
                 'password':user['password'],
                 'username':user['username'],
                 'name':user['name'],
                 'confirm':'false'}
    try:
        r = requests.post(req,data = user_data, headers = header)
        if 201 == r.status_code:
            print "User successfully created"
            user_ids[user['username']] = r.json()['id']
        else:
            print "Unable to create user:"
            print r.text
            print r.json()['id']
    except Exception as e:
        print e
    #add user ssh key(if available)
    if 'ssh_key' in user:
        if os.path.isfile('/var/gitlab/config/'+user['ssh_key']):
            with open('/var/gitlab/config/'+user['ssh_key'],"r") as myfile:
                ssh_key_command = "users/"+str(user_ids[user['username']])+"/keys"
                ssh_key_req = api + ssh_key_command
                data=myfile.read().replace('\n','')
                ssh_key_data = {'id':user_ids[user['username']],
                                'title':user['username'],
                                'key':data
                                }
                r = requests.post(ssh_key_req,data = ssh_key_data,headers = header)
                if 201 == r.status_code:
                    print "SSH key successfully added"
                else:
                    print "Unable to add ssh key:"
                    print r.text
# Create projects
# Each project is created in its owner's namespace, then the configured
# team members are attached with their access levels.
com = "projects"
req = api + com
for project in config['projects']:
    print project
    com = "projects"
    req = api + com
    project_data = {'name':project['name'],
                    'namespace_id':user_ids[project['owner']]
                    }
    try:
        r = requests.post(req, data = project_data, headers = header)
        if 201 == r.status_code:
            print "Project successfully created"
            project_id = r.json()['id']
        else:
            print "Unable to create project:"
            print r.text
    except Exception as e:
        print e
    #add team members, if any, with their respective levels of access
    # valid levels of access are: Master, Developer, Reporter, Guest
    for member in project['team_members']:
        com = "projects/"+str(project_id)+"/members"
        req = api + com
        #need to find the user id based on the username
        member_data = {'id':str(project_id),
                       'user_id':str(user_ids[member['username']]),
                       'access_level':access_levels[member['access']]
                       }
        try:
            r = requests.post(req, data = member_data, headers = header)
            if 201 == r.status_code:
                print "Member successfully added"
            else:
                print "Unable to add member:"
                print r.status_code
                print r.text
        except Exception as e:
            print e
print "setup complete"
#curl http://localhost/api/v3/session --data-urlencode 'login=root' --data-urlencode 'password=verigreen' | jq --raw-output .private_token
| {
"content_hash": "61c1666d57d7df8744ebcd6471c04d19",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 138,
"avg_line_length": 30.941747572815533,
"alnum_prop": 0.5723250705993097,
"repo_name": "Verigreen/demo",
"id": "c1afd9d14496db65a53682edc347917250299d0b",
"size": "6427",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gitlab/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "567"
},
{
"name": "Python",
"bytes": "6427"
},
{
"name": "Shell",
"bytes": "36817"
}
],
"symlink_target": ""
} |
import sys, os, signal, time, shutil, cgi
import commands, re
import urllib
import json
import traceback
from xml.dom import minidom
from xml.dom.minidom import Document
from xml.dom.minidom import parse, parseString
# Initialize the configuration singleton
import environment
# global configuration/environment object read by the helpers in this module
env = environment.set_environment()
from processes import killProcesses
# exit code returned when the pilot fails outright
EC_Failed = 255
# NOTE(review): the bare excepts below presumably guard very old Python
# installations where these imports can fail -- confirm before narrowing
try:
    import datetime
except:
    pass
try:
    from PilotErrors import PilotErrors
except:
    pass
# Functions to serialize ARGO messages
def serialize(obj):
    """Serialize *obj* to a pretty-printed, key-sorted JSON string."""
    return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))
def deserialize(text):
    """Parse the JSON document *text* into the corresponding Python object."""
    return json.loads(text)
def convert_unicode_string(unicode_string):
    """Coerce *unicode_string* to a plain string; None passes through."""
    return None if unicode_string is None else str(unicode_string)
# all files that need to be copied to the workdir
#fileList = commands.getoutput('ls *.py').split()
def getFileList(path_dir=None):
    """ Return the pilot files/directories that must be copied to the workdir.

    path_dir defaults to the pilot init directory from the env singleton;
    returns [] when that configuration key is missing.
    """
    try:
        if path_dir is None:
            path_dir = env['pilot_initdir']
        # list comprehension instead of filter(): on Python 3 filter()
        # returns a lazy object without append(), which would crash below
        file_list = [name for name in os.listdir(path_dir) if name.endswith('.py')]
        file_list.extend(['PILOTVERSION', 'saga', 'radical', 'HPC', 'movers'])
        tolog("Copying: %s" % str(file_list))
        return file_list
    except KeyError:
        return []
# default pilot log files
pilotlogFilename = "pilotlog.txt"
essentialPilotlogFilename = "pilotlog-essential.txt"
pilotstderrFilename = "pilot.stderr"

def setPilotlogFilename(filename):
    """ Set the pilot log file name and refresh the essential-log name """
    global pilotlogFilename, essentialPilotlogFilename
    if len(filename) > 0:
        pilotlogFilename = filename
    # derive e.g. pilotlog.txt -> pilotlog-essential.txt from the current name
    stem = pilotlogFilename[:pilotlogFilename.find('.')]
    essentialPilotlogFilename = pilotlogFilename.replace(stem, stem + '-essential')

def getPilotlogFilename():
    """ Return the pilot log file name """
    return pilotlogFilename

def setPilotstderrFilename(filename):
    """ Set the pilot stderr file name """
    global pilotstderrFilename
    if len(filename) > 0:
        pilotstderrFilename = filename

def getPilotstderrFilename():
    """ Return the pilot stderr file name """
    return pilotstderrFilename
def tolog_file(msg):
    """ write date+msg to pilot log only (never to stdout) """
    now = time.strftime("%d %b %Y %H:%M:%S", time.gmtime(time.time()))
    appendToLog("%s| %s\n" % (now, msg))
def appendToLog(txt):
    """ Append *txt* to the current pilot log file.

    A missing log file is silently ignored (expected early in the pilot's
    life); any other failure is reported to stdout.
    """
    try:
        f = open(pilotlogFilename, 'a')
        f.write(txt)
        f.close()
    except Exception as e:
        # bug fix: the original tested '"No such file" in e', which checks
        # membership against the exception's args tuple (and raises
        # TypeError on Python 3) instead of the intended substring match
        if "No such file" in str(e):
            pass
        else:
            print("WARNING: Exception caught: %s" % e)
def tologNew(msg, tofile=True, label='INFO', essential=False):
    """ Write message to pilot log and to stdout """
    # backquotes and double quotes upset batch submission of the pilot
    # (they can appear in OS error messages), so neutralize them
    msg = msg.replace("`", "'")
    msg = msg.replace('"', '\\"')

    import logging
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
    from Logger import Logger
    log = Logger(filename=essentialPilotlogFilename if essential else pilotlogFilename)

    if tofile:
        writer = {'INFO': log.info,
                  'WARNING': log.warning,
                  'DEBUG': log.debug,
                  'ERROR': log.error,
                  'CRITICAL': log.critical}.get(label)
        if writer is not None:
            writer(msg)
        else:
            log.warning('Unknown label: %s' % (label))
            log.info(msg)
    else:
        print(msg)

    # serious messages are duplicated on stderr
    if label == 'ERROR' or label == 'CRITICAL':
        sys.stderr.write(msg + "\n")
def tolog(msg, tofile=True, label='INFO', essential=False):
    """ Write date+msg to pilot log and to stdout.

    Never raises: any internal failure is reported as a !!WARNING!! line.
    """
    try:
        import inspect
        MAXLENGTH = 12

        # name of the module that invoked tolog(), cut/padded to MAXLENGTH
        try:
            module_name = os.path.basename(inspect.stack()[1][1])
        except Exception:
            module_name = "unknown"
        module_name_cut = module_name[0:MAXLENGTH].ljust(MAXLENGTH)

        msg = "%i|%s| %s" % (os.getpid(), module_name_cut, msg)
        t = timeStampUTC(format='%Y-%m-%d %H:%M:%S')
        if tofile:
            appendToLog("%s|%s\n" % (t, msg))

        # remove backquotes from the msg since they cause problems with batch
        # submission of pilot (might be present in error messages from the OS)
        msg = msg.replace("`", "'")
        msg = msg.replace('"', '\\"')
        print("%s| %s" % (t, msg))

        # write any FAILED messages to stderr
        if "!!FAILED!!" in msg:
            try:
                sys.stderr.write("%s| %s\n" % (t, msg))
            except Exception:
                # bug fix: the original fallback used "...: %s" % (t, msg),
                # a single placeholder with a two-item tuple, which itself
                # raised a TypeError inside this handler
                print("Failed to print to sys.stderr: %s| %s" % (t, msg))
    except Exception:
        print("!!WARNING!!4000!! %s" % traceback.format_exc())
def tolog_err(msg):
    """ write error string to log """
    tolog("!!WARNING!!4000!! " + str(msg))
def tolog_warn(msg):
    """ write warning string to log """
    tolog("!!WARNING!!4000!! " + str(msg))
def makeHTTPUpdate(state, node, port, url='pandaserver.cern.ch', path=None):
    """ make http connection to jobdispatcher """
    # Final job states get up to 10 attempts separated by 2-minute delays,
    # because the final heartbeat must not be lost; any other state gets a
    # single best-effort attempt.
    if state == 'finished' or state == 'failed' or state == 'holding':
        tolog("Preparing for final Panda server update")
        trial = 1
        max_trials = 10
        delay = 2*60 # seconds
        tolog("Max number of trials: %d, separated by delays of %d seconds" % (max_trials, delay))
    else:
        # standard non final update
        trial = 1
        max_trials = 1
        delay = None
    # make http connection to jobdispatcher
    # NOTE(review): 'delay' doubles as the "this is a final update" flag below
    while trial <= max_trials:
        # draw a random server URL
        _url = '%s:%s/server/panda' % (url, port)
        tolog("HTTP connect using server: %s" % (_url))
        ret = httpConnect(node, _url, path=path)
        # ret[0] is the exit code; non-zero means the update failed
        if ret[0] and trial == max_trials: # non-zero exit code
            if delay: # final update
                tolog("!!FAILED!!4000!! [Trial %d/%d] Could not update Panda server (putting job in holding state if possible)" %\
                      (trial, max_trials))
                # state change will take place in postJobTask
                # (the internal pilot state will not be holding but lostheartbeat)
            else:
                tolog("!!WARNING!!4000!! [Trial %d/%d] Could not update Panda server, EC = %d" %\
                      (trial, max_trials, ret[0]))
            break
        elif ret[0]: # non-zero exit code
            tolog("!!WARNING!!4000!! [Trial %d/%d] Could not update Panda server, EC = %d" %\
                  (trial, max_trials, ret[0]))
            if delay: # final update
                tolog("Can not miss the final update. Will take a nap for %d seconds and then try again.." % (delay))
                trial += 1
                time.sleep(delay)
            else: # try again later
                tolog("Panda server update postponed..")
                break
        else:
            # update succeeded
            break
    return ret
def httpConnect(data, url, mode="UPDATE", sendproxy=False, path=None, experiment=""):
    """ Handle the http connection to the job dispatcher.

    Maps *mode* to a dispatcher command and forwards *data* via toServer().
    Return format: status, parsed data, response.
    (sendproxy is part of the call signature but unused here.)
    """
    # mode -> dispatcher command; anything unrecognised is a job update
    commands = {
        "GETJOB": 'getJob',
        "ISTHEREANALYSISJOB": 'isThereAnalysisJob',
        "GETSTATUS": 'getStatus',
        "GETEVENTRANGES": 'getEventRanges',
        "UPDATEEVENTRANGE": 'updateEventRange',
        "UPDATEEVENTRANGES": 'updateEventRanges',
        "GETKEYPAIR": 'getKeyPair',
    }
    cmd = commands.get(mode, 'updateJob')

    # only send attemptNr with updateJob ('in' replaces dict.has_key, which
    # was removed in Python 3)
    if cmd != 'updateJob' and 'attemptNr' in data:
        tolog("Removing attemptNr from node structure since it is not needed for command %s" % (cmd))
        del data['attemptNr']
    else:
        if 'attemptNr' in data:
            tolog("Sending attemptNr=%d for cmd=%s" % (data['attemptNr'], cmd))
        else:
            tolog("Will not send attemptNr for cmd=%s" % (cmd))

    # send the data dictionary to the dispatcher using command cmd
    # return format: status, parsed data, response
    return toServer(url, cmd, data, path, experiment)
def returnLogMsg(logf=None, linenum=20):
    ''' return the last N lines of log files into a string'''
    if not logf:
        return ''
    if not os.path.isfile(logf):
        return "\n- No log file %s found -" % (logf)
    header = "\n- Log from %s -" % (logf)
    with open(logf) as handle:
        lines = handle.readlines()
    tail = min(linenum, len(lines))
    # lines keep their trailing newlines, so a plain join reproduces the tail
    return header + ''.join(lines[len(lines) - tail:])
def findGuid(analJob, metadata_filename, filename):
    """ find guid in alternative file or generate it """
    guid = None
    if os.path.exists(metadata_filename):
        # grab the guid for this file from the preprocessed metadata
        found = getGuidsFromXML(os.path.dirname(metadata_filename),
                                filename=filename, metadata=metadata_filename)
        if found and found[0] != "":
            tolog("Found guid %s in %s (missing in PFC)" % (found[0], metadata_filename))
            guid = found[0]
    else:
        tolog("Could not locate file: %s" % (metadata_filename))
    if guid:
        tolog("Guid identified")
    elif analJob:
        # analysis jobs may generate a fresh guid
        guid = getGUID()
        tolog("Generated guid: %s" % (guid))
    else:
        tolog("Guid missing for file: %s (b)" % (filename))
        guid = None
    return guid
def preprocessMetadata(filename):
    """ remove META tags from metadata since they can contain value that minidom can not chandle """
    status = True
    try:
        f = open(filename, "r")
    except Exception as e:
        tolog("!!WARNING!!2999!! Could not open file: %s (%s)" % (filename, e))
        status = False
    else:
        lines = f.readlines()
        f.close()
        # drop every line carrying a META or metadata tag
        new_lines = ''.join(line for line in lines
                            if "<META" not in line and "<metadata" not in line)
        # recreate the file from scratch with the filtered content
        try:
            os.remove(filename)
        except Exception as e:
            tolog("!!WARNING!!2999!! Could not remove file: %s (%s)" % (filename, e))
            status = False
        else:
            try:
                f = open(filename, "w")
            except Exception as e:
                tolog("!!WARNING!!2999!! Could not recreate file: %s (%s)" % (filename, e))
                status = False
            else:
                f.writelines(new_lines)
                f.close()
                tolog("New temporary metadata file:\n" + new_lines)
    return status
def prepareMetadata(metadata_filename):
    """ prepare the metadata for potential guid rescue """
    original_name = metadata_filename
    backup_name = metadata_filename + ".BAK"
    if not os.path.exists(metadata_filename):
        tolog("Nothing for prepareMetadata() to do (file %s does not exist)" % (metadata_filename))
        return metadata_filename
    # work on a copy so the original metadata survives intact
    try:
        shutil.copy2(metadata_filename, backup_name)
    except Exception as e:
        tolog("!!WARNING!!2999!! Could not copy metadata: %s" % (e))
        return original_name
    metadata_filename = backup_name
    tolog("Created file: %s" % (metadata_filename))
    # strip junk metadata from the copy; fall back to the original name
    # whenever the preprocessing fails
    try:
        status = preprocessMetadata(metadata_filename)
    except Exception as e:
        tolog("!!WARNING!!2999!! Could not preprocess metadata: %s" % (e))
        return original_name
    if status:
        tolog("Successfully updated %s" % (metadata_filename))
        return metadata_filename
    tolog("Could not update %s" % (metadata_filename))
    return original_name
def PFCxml(experiment, fname, fnlist=[], fguids=[], fntag=None, alog=None, alogguid=None, fsize=[], checksum=[], analJob=False, jr=False, additionalOutputFile=None, additionalOutputFileGuid=None, logToOS=False, archive=None):
    """ Create a PFC style XML file """
    # fnlist = output file list
    # fguids = output file guid list
    # fntag = pfn/lfn identifier
    # alog = name of log file
    # alogguid = guid of log file
    # fsize = file size list
    # checksum = checksum list
    # analJob = analysis job
    # jr = job recovery mode, guid generation by pilot not allowed
    # fntag = lfn is used for the metadata-<jobId>.xml that is sent to the server
    # fntag = pfn is used for OutPutFileCatalog.xml that is used by the mover for the stage out
    # The SURL will be added to the metadata file for fntag = lfn to allow for server side LFC registration
    # NOTE(review): fnlist/fguids/fsize/checksum are mutable default arguments
    # and ARE mutated below (insert/assignment); state can leak between calls
    # that rely on the defaults -- confirm before changing, callers may depend
    # on reading back the filled-in guid/size lists.
    # NOTE(review): the 'archive' parameter is unused in this body.
    status = True
    flist = []
    glist = []
    from SiteMover import SiteMover
    # get the experiment object
    thisExperiment = getExperiment(experiment)
    # for metadata.xml prepare the file for potential guid grabbing
    if "metadata" in fname and None in fguids:
        metadata_filename = prepareMetadata(fname + ".PAYLOAD")
    else:
        metadata_filename = fname
    # add log file
    if alog:
        flist.append(alog)
        if not alogguid:
            if not jr:
                alogguid = getGUID()
                tolog("Generated log guid: %s" % (alogguid))
            else:
                tolog("!!WARNING!!2999!! Log guid generation not allowed in recovery mode")
                alogguid = ''
                status = False
        glist.append(alogguid)
    # add additional output files (only for CERNVM, not NG or any other sites)
    if additionalOutputFile:
        flist.append(additionalOutputFile)
        if not additionalOutputFileGuid:
            additionalOutputFileGuid = getGUID()
        glist.append(additionalOutputFileGuid)
    if fnlist:
        flist = flist + fnlist
        tolog("fnlist = %s" % str(fnlist))
        tolog("fguids = %s" % str(fguids))
        # make sure every output file has a guid; rescue it from the
        # preprocessed metadata, or generate one (not allowed in recovery)
        for i in range(0, len(fnlist)):
            # check for guid
            try:
                _dummy = fguids[i]
                del _dummy
            except IndexError, e:
                # no guid at this index yet: find or generate one, then insert
                guid = findGuid(analJob, metadata_filename, fnlist[i])
                if guid and guid != "":
                    tolog("Found guid for file: %s (%s)" % (fnlist[i], guid))
                else:
                    if not jr:
                        guid = getGUID()
                        tolog("Generated guid for file (%d): %s (%s)" % (i, fnlist[i], guid))
                    else:
                        tolog("!!WARNING!!2999!! Guid generation not allowed in recovery mode (file: %s)" % (fnlist[i]))
                        guid = ''
                        status = False
                fguids.insert(i, guid)
            else:
                # index exists but may hold an empty/None guid: replace it
                if not fguids[i]:
                    guid = findGuid(analJob, metadata_filename, fnlist[i])
                    if guid and guid != "":
                        tolog("Found guid for file: %s (%s)" % (fnlist[i], guid))
                    else:
                        if not jr:
                            guid = getGUID()
                            tolog("Generated guid for file (%d): %s (%s)" % (i, fnlist[i], guid))
                        else:
                            tolog("!!WARNING!!2999!! Guid generation not allowed in recovery mode (file: %s)" % (fnlist[i]))
                            guid = ''
                            status = False
                    try:
                        fguids[i] = guid
                    except:
                        fguids.insert(i, guid)
            if fntag == "lfn":
                # pad fsize/checksum with empty strings so indices line up
                # check for file size
                try:
                    _dummy = fsize[i]
                    del _dummy
                except IndexError, e:
                    #print "This item doesn't exist"
                    fsize.insert(i, "")
                # check for checksum
                try:
                    _dummy = checksum[i]
                    del _dummy
                except IndexError, e:
                    #print "This item doesn't exist"
                    checksum.insert(i, "")
        glist = glist + fguids
    if fntag == "pfn":
        #create the PoolFileCatalog.xml-like file in the workdir
        fd = open(fname, "w")
        fd.write('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n')
        fd.write("<!-- Edited By POOL -->\n")
        fd.write('<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">\n')
        fd.write("<POOLFILECATALOG>\n")
        for i in range(0, len(flist)): # there's only one file in flist if it is for the object store
            fd.write(' <File ID="%s">\n' % (glist[i]))
            fd.write("  <physical>\n")
            fd.write('   <pfn filetype="ROOT_All" name="%s"/>\n' % (flist[i]))
            fd.write("  </physical>\n")
            fd.write(" </File>\n")
        fd.write("</POOLFILECATALOG>\n")
        fd.close()
    elif fntag == "lfn":
        # create the metadata.xml-like file that's needed by dispatcher jobs
        fd=open(fname, "w")
        fd.write('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n')
        fd.write("<!-- ATLAS file meta-data catalog -->\n")
        fd.write('<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">\n')
        fd.write("<POOLFILECATALOG>\n")
        for i in range(0, len(flist)):
            fd.write(' <File ID="%s">\n' % (glist[i]))
            fd.write("  <logical>\n")
            fd.write('   <lfn name="%s"/>\n' % (flist[i]))
            fd.write("  </logical>\n")
            # if the log is to be transferred to an OS, add an endpoint tag
            if logToOS and alog == flist[i]:
                fd.write('  <endpoint>%s-ddmendpoint_tobeset</endpoint>\n' % (alog))
            # add SURL metadata (not known yet) for server LFC registration
            # use the GUID as identifier (the string "<GUID>-surltobeset" will later be replaced with the SURL)
            if thisExperiment:
                special_xml = thisExperiment.getMetadataForRegistration(glist[i])
                if special_xml != "":
                    fd.write(special_xml)
            # add log file metadata later (not known yet)
            if flist[i] == alog:
                fd.write('  <metadata att_name="fsize" att_value=""/>\n')
                fd.write('  <metadata att_name="csumtypetobeset" att_value=""/>\n')
            elif (additionalOutputFile and flist[i] == additionalOutputFile):
                if ".xml" in additionalOutputFile:
                    fd.write('  <metadata att_name="fsizeXML" att_value=""/>\n')
                    fd.write('  <metadata att_name="csumtypetobesetXML" att_value=""/>\n')
                else:
                    fd.write('  <metadata att_name="fsizeAdditional" att_value=""/>\n')
                    fd.write('  <metadata att_name="csumtypetobesetAdditional" att_value=""/>\n')
            else:
                if len(fsize) != 0:
                    fd.write('  <metadata att_name="fsize" att_value="%s"/>\n' % (fsize[i]))
                if len(checksum) != 0:
                    fd.write('  <metadata att_name="%s" att_value="%s"/>\n' %\
                             (SiteMover.getChecksumType(checksum[i]), checksum[i]))
            fd.write(" </File>\n")
        fd.write("</POOLFILECATALOG>\n")
        fd.close()
    else:
        tolog("!!WARNING!!1234!! fntag is neither lfn nor pfn, did not manage to create the XML file for output files")
    # dump the file to the log
    dumpFile(fname, topilotlog=True)
    return status
def stageInPyModules(initdir, workdir):
    """ copy pilot python modules into pilot workdir from condor initial dir """
    tolog('initdir is %s '%initdir)
    tolog('workdir is %s '%workdir)
    status = True
    if workdir and initdir:
        for entry in getFileList():
            src = "%s/%s" % (initdir, entry)
            if os.path.isfile(src):
                try:
                    shutil.copy2(src, workdir)
                except Exception as e:
                    tolog("!!WARNING!!2999!! stageInPyModules failed to copy file %s/%s to %s: %s" % (initdir, entry, workdir, e))
                    status = False
                    break
            elif os.path.isdir(src):
                try:
                    shutil.copytree(src, "%s/%s" % (workdir, entry))
                except Exception as e:
                    tolog("!!WARNING!!2999!! stageInPyModules failed to copy directory %s/%s to %s: %s" % (initdir, entry, workdir, e))
                    status = False
                    break
            else:
                tolog("!!WARNING!!2999!! File missing during stage in: %s/%s" % (initdir, entry))
    if status:
        tolog("Pilot modules have been copied to %s" % (workdir))
        return 0
    # any copy failure maps to a generic pilot error code
    error = PilotErrors()
    return error.ERR_GENERALERROR
def removePyModules(_dir):
    """ Remove pilot python modules from workdir """
    if not _dir:
        return
    for entry in getFileList(path_dir=_dir):
        # runargs files are deliberately kept
        if "runargs" in entry:
            continue
        try:
            os.system("rm -rf %s/%s*" % (_dir, entry))
        except:
            pass
def timeStamp():
    """ return ISO-8601 compliant date/time format """
    # time.timezone is seconds WEST of UTC, hence the inverted sign character
    offset = time.timezone
    signstr = '-' if offset > 0 else '+'
    hours = int(offset/3600)
    minutes = int(offset/60 - hours*60)
    local = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())
    return str("%s%s%02d%02d" % (local, signstr, hours, minutes))
def timeStampUTC(t=None, format="%d %b %H:%M:%S"):
    """ return UTC time stamp """
    # note: t=0 is falsy and therefore also replaced by the current time
    return time.strftime(format, time.gmtime(t if t else time.time()))
def getJobStatus(jobId, pshttpurl, psport, path):
    """
    Return the current status of job <jobId> from the dispatcher
    typical dispatcher response: 'status=finished&StatusCode=0'
    StatusCode 0: succeeded
    10: time-out
    20: general error
    30: failed
    In the case of time-out, the dispatcher will be asked one more time after 10s

    Returns the tuple (status string, attempt number, StatusCode int).
    """
    status = 'unknown'
    StatusCode = -1
    nod = {}
    nod['ids'] = jobId
    url = "%s:%s/server/panda/getStatus" % (pshttpurl, repr(psport))
    # ask dispatcher about lost job status; retry once on time-out (StatusCode 10)
    trial = 1
    max_trials = 2
    while trial <= max_trials:
        try:
            # open connection
            ret = httpConnect(nod, url, path=path, mode="GETSTATUS")
            response = ret[1]
            tolog("response: %s" % str(response))
            if response:
                try:
                    # decode the response
                    # eg. var = ['status=notfound', 'attemptNr=0', 'StatusCode=0']
                    # create a dictionary of the response (protects against future updates)
                    # eg. dic = {'status': 'activated', 'attemptNr': '0', 'StatusCode': '0'}
                    status = response['status']            # e.g. 'holding'
                    attemptNr = int(response['attemptNr']) # e.g. '0'
                    StatusCode = int(response['StatusCode']) # e.g. '0'
                except Exception, e:
                    tolog("!!WARNING!!2997!! Exception: Dispatcher did not return allowed values: %s, %s" % (str(ret), e))
                    status = "unknown"
                    attemptNr = -1
                    StatusCode = 20
            else:
                tolog("!!WARNING!!2998!! Dispatcher did not return allowed values: %s" % str(ret))
                status = "unknown"
                attemptNr = -1
                StatusCode = 20
        except Exception,e:
            # NOTE(review): if httpConnect itself raises, 'response' (and later
            # 'attemptNr' in the return) may be unbound here -- the tolog below could
            # raise a NameError; verify against the httpConnect failure modes
            tolog("Could not interpret job status from dispatcher: %s, %s" % (response, e))
            status = 'unknown'
            attemptNr = -1
            StatusCode = -1
            break
        else:
            if StatusCode == 0: # success
                break
            elif StatusCode == 10: # time-out
                trial = trial + 1
                time.sleep(10)
                continue
            elif StatusCode == 20: # other error
                if ret[0] == 13056 or ret[0] == '13056':
                    tolog("Wrong certificate used with curl operation? (encountered error 13056)")
                break
            else: # general error
                break
    return status, attemptNr, StatusCode
def getExitCode(path, filename):
    """ Try to read the exit code from the pilot stdout log

    Scans the tail of <path>/<filename> for the pilot's
    "job ended with (trf,pilot) exit code of (N,N)" line.
    Note: the original code never updates ec from the parsed line, so -1 is
    always returned; only the presence/absence of the line is logged.
    """
    ec = -1
    # first create a tmp file with only the last few lines of the status file to avoid
    # reading a potentially long status file
    tmp_file_name = "tmp-tail-dump-file"
    # BUGFIX: the tail redirection writes to <path>/<tmp_file_name>, but the original
    # opened and removed the bare filename relative to the current working directory
    tmp_path = "%s/%s" % (path, tmp_file_name)
    try:
        os.system("tail %s/%s >%s" % (path, filename, tmp_path))
    except Exception as e:
        tolog("Job Recovery could not create tmp file %s: %s" % (tmp_file_name, e))
    else:
        # open the tmp file and look for the pilot exit info
        try:
            tmp_file = open(tmp_path, 'r')
        except IOError:
            tolog("Job Recovery could not open tmp file")
        else:
            # BUGFIX: default all_lines so a failed readlines() does not leave it unbound
            all_lines = []
            try:
                all_lines = tmp_file.readlines()
            except Exception as e:
                tolog("Job Recovery could not read tmp file %s: %s" % (tmp_file_name, e))
            tmp_file.close()
            # remove the tmp file
            try:
                os.remove(tmp_path)
            except OSError:
                tolog("Job Recovery could not remove tmp file: %s" % tmp_file)
            # now check the pilot exit info, if is has an exit code - remove the directory
            exit_info = re.compile(r"job ended with \(trf,pilot\) exit code of \(\d+,\d+\)")
            exitinfo_has_exit_code = False
            for line in all_lines:
                if re.findall(exit_info, line):
                    exitinfo_has_exit_code = True
            if exitinfo_has_exit_code:
                tolog("Job had an exit code")
                # ...
            else:
                tolog("Job had no exit code")
    return ec
def getRemainingOutputFiles(outFiles):
    """
    Return list of files if there are remaining output files in the lost job data directory
    """
    return [file_name for file_name in outFiles if os.path.exists(file_name)]
def remove(entries):
    """
    Remove files and directories
    entries should be a list
    """
    # protect against wrong usage
    if type(entries) != list:
        tolog("Argument has wrong type, expected list: %s" % str(type(entries)))
        return False
    status = True
    for entry in entries:
        try:
            os.system("rm -rf %s" %entry)
        except OSError:
            tolog("Could not remove %s" % entry)
            status = False
    return status
def getCPUmodel():
    """ Get cpu model and cache size from /proc/cpuinfo """
    # model name : Intel(R) Xeon(TM) CPU 2.40GHz
    # cache size : 512 KB
    # gives the return string "Intel(R) Xeon(TM) CPU 2.40GHz 512 KB"
    cpumodel = ''
    cpucache = ''
    modelstring = ''
    try:
        f = open('/proc/cpuinfo', 'r')
    except Exception, e:
        tolog("Could not open /proc/cpuinfo: %s" % e)
    else:
        re_model = re.compile('^model name\s+:\s+(\w.+)')
        re_cache = re.compile('^cache size\s+:\s+(\d+ KB)')
        # loop over all lines in cpuinfo
        for line in f.readlines():
            # try to grab cpumodel from current line
            model = re_model.search(line)
            if model:
                # found cpu model
                cpumodel = model.group(1)
            # try to grab cache size from current line
            cache = re_cache.search(line)
            if cache:
                # found cache size
                cpucache = cache.group(1)
            # stop after 1st pair found - can be multiple cpus
            if cpumodel and cpucache:
                # create return string
                modelstring = cpumodel + " " + cpucache
                break
        f.close()
    # default return string if no info was found
    if not modelstring:
        modelstring = "UNKNOWN"
    return modelstring
def getExeErrors(startdir, fileName):
    """ Extract exeErrorCode and exeErrorDiag from jobInfo.xml """
    exeErrorCode = 0
    exeErrorDiag = ""
    # first check if the xml file exists (e.g. it doesn't exist for a test job)
    findFlag = False
    line = ""
    # try to locate the file with a shell find (may match in any subdirectory)
    out = commands.getoutput("find %s -name %s" % (startdir, fileName))
    if out != "":
        for line in out.split('\n'):
            tolog("Found trf error file at: %s" % (line))
            findFlag = True
            break # just in case, but there should only be one trf error file
    if findFlag:
        # 'line' now holds the first matching path from the find output
        if os.path.isfile(line):
            # import the xml functions
            from xml.sax import make_parser
            from xml.sax.handler import feature_namespaces
            import JobInfoXML
            # Create a parser
            parser = make_parser()
            # Tell the parser we are not interested in XML namespaces
            parser.setFeature(feature_namespaces, 0)
            # Create the handler
            dh = JobInfoXML.JobInfoXML()
            # Tell the parser to use our handler
            parser.setContentHandler(dh)
            # Parse the input
            parser.parse(line)
            # get the error code and the error message(s)
            exeErrorCode = dh.getCode()
            exeErrorDiag = dh.getMessage()
        else:
            tolog("Could not read trf error file: %s" % (line))
    else:
        tolog("Could not find trf error file %s in search path %s" % (fileName, startdir))
    # only return a maximum of 250 characters in the error message (as defined in PandaDB)
    return exeErrorCode, exeErrorDiag[:250]
def debugInfo(_str, tofile=True):
    """ print the debug string to stdout"""
    # prefix with DEBUG: so the messages are easy to grep from the pilot log
    tolog("DEBUG: %s" % (_str), tofile=tofile)
def isBuildJob(outFiles):
    """
    Check if the job is a build job
    (i.e. check if the job only has one output file that is a lib file)
    """
    # outFiles only contains a single file for build jobs, the lib file,
    # e.g. user.paulnilsson.lxplus001.lib._001.log.tgz
    # note: find() > 0 (not >= 0) matches the original behaviour exactly
    return len(outFiles) == 1 and outFiles[0].find(".lib.") > 0
def OSBitsCheck():
    """ determine whether the platform is a 32 or 64-bit OS """
    b = -1
    try:
        # look for the x86_64 token in the kernel identification string
        a = commands.getoutput('uname -a')
        b = a.find('x86_64')
    except:
        return 32 # default
    else:
        if b == -1 : # 32 bit OS
            return 32
        else: # 64 bits OS
            return 64
def uniqueList(input_list):
    """
    return a list of unique entries
    input_list = ['1', '1', '2'] -> ['1', '2']
    """
    # dict keys are unique by construction
    return dict.fromkeys(input_list).keys()
def diffLists(list1, list2):
    """
    compare the input lists (len(list1) must be > len(list2))
    and return the difference
    """
    # start from the entries of list1, then discard anything present in list2
    remaining = dict.fromkeys(list1)
    for item in list2:
        if item in remaining:
            del remaining[item]
    return remaining.keys()
def getOutputFileInfo(outputFiles, checksum_cmd, skiplog=False, logFile=""):
    """
    Return lists with file sizes and checksums for the given output files

    Returns (ec, pilotErrorDiag, fsize list, checksum list); the two lists are
    positionally aligned with outputFiles (logFile first when given).
    """
    tolog("getOutputFileInfo")
    ec = 0
    fsize = []
    checksum = []
    # get error handler
    error = PilotErrors()
    pilotErrorDiag = ""
    # add the log file if necessary (when this function is called from RunJob)
    # WARNING: temporary redundancy. fsize and checksum is checked again in mover code, merge later
    # NOTE(review): this temporarily mutates the caller's outputFiles list
    # (insert here, remove again at the end)
    if logFile != "":
        outputFiles.insert(0, logFile)
    for filename in outputFiles:
        # add "" for the log metadata since it has not been created yet
        if filename == logFile and skiplog:
            # ec=-1 marks the "skip the log" case; handled specially below
            ec = -1
        else:
            from SiteMover import SiteMover
            ec, pilotErrorDiag, _fsize, _checksum = SiteMover.getLocalFileInfo(filename, csumtype=checksum_cmd)
            tolog("Adding %s,%s for file %s using %s" % (_fsize, _checksum, filename, checksum_cmd))
        if ec == 0:
            fsize.append(_fsize)
            checksum.append(_checksum)
        else:
            if ec == error.ERR_FAILEDMD5LOCAL or ec == error.ERR_FAILEDADLOCAL:
                # size was readable even though the checksum failed
                fsize.append(_fsize)
                checksum.append("")
            else:
                fsize.append("")
                checksum.append("")
            if ec != -1: # skip error message for log
                tolog("!!WARNING!!4000!! getFileInfo received an error from getLocalFileInfo for file: %s" % (filename))
                tolog("!!WARNING!!4000!! ec = %d, pilotErrorDiag = %s, fsize = %s, checksum = %s" %\
                    (ec, pilotErrorDiag, str(_fsize), str(_checksum)))
            else:
                tolog("setting ec=0 (%d)" % (ec))
                # do not return -1 as en error message since it only applies to log files
                ec = 0
    if logFile != "":
        outputFiles.remove(logFile)
    #tolog("Going to return %d,%s,%s,%s" % (ec, pilotErrorDiag, fsize, checksum))
    return ec, pilotErrorDiag, fsize, checksum
def updateMetadata(fname, fsize, checksum, format=None, fsizeXML=None, checksumXML=None, fsizeAdditional=None, checksumAdditional=None):
    """
    Add the fsize and checksum values for the log (left empty until this point)
    Return exit code and xml
    If format = NG, then the NorduGrid format of the metadata will be assumed
    fsizeXML and checksumXML are extra attributes needed for CERNVM xml file handling
    """
    ec = 0
    lines = ""
    try:
        f = open(fname, 'r')
    except Exception as e:
        tolog("Failed to open metadata file: %s" % e)
        ec = -1
    else:
        # build the placeholder -> replacement pairs; unused placeholders are set
        # to the sentinel 'notused' so they can never match a real line
        if format == 'NG':
            metadata1 = '<size></size>'
            new_metadata1 = '<size>%s</size>' % (fsize)
        else:
            metadata1 = '<metadata att_name="fsize" att_value=""/>'
            new_metadata1 = '<metadata att_name="fsize" att_value="%s"/>' % (fsize)
        # find out if checksum or adler32 should be added
        from SiteMover import SiteMover
        csumtype = SiteMover.getChecksumType(checksum)
        # special handling for CERNVM metadata
        if checksumXML:
            metadata4 = '<metadata att_name="csumtypetobesetXML" att_value=""/>'
        else:
            metadata4 = 'notused'
        if fsizeXML:
            metadata5 = '<metadata att_name="fsizeXML" att_value=""/>'
            new_metadata5 = '<metadata att_name="fsize" att_value="%s"/>' % (fsizeXML)
        else:
            metadata5 = 'notused'
            new_metadata5 = ''
        if checksumAdditional:
            # NOTE(review): this placeholder is identical to metadata4
            # (csumtypetobesetXML) -- possibly meant to be csumtypetobesetAdditional;
            # left unchanged to preserve behaviour, but worth verifying
            metadata6 = '<metadata att_name="csumtypetobesetXML" att_value=""/>'
            if csumtype == "adler32":
                new_metadata6 = '<ad32>%s</ad32>' % (checksumAdditional)
            else:
                new_metadata6 = '<md5sum>%s</md5sum>' % (checksumAdditional)
        else:
            metadata6 = 'notused'
            new_metadata6 = ''
        if fsizeAdditional:
            metadata7 = '<metadata att_name="fsizeAdditional" att_value=""/>'
            new_metadata7 = '<metadata att_name="fsize" att_value="%s"/>' % (fsizeAdditional)
        else:
            metadata7 = 'notused'
            new_metadata7 = ''
        # for NG and CERNVM
        if format == 'NG':
            if csumtype == "adler32":
                metadata2 = '<ad32></ad32>'
                new_metadata2 = '<ad32>%s</ad32>' % (checksum)
            else:
                metadata2 = '<md5sum></md5sum>'
                new_metadata2 = '<md5sum>%s</md5sum>' % (checksum)
        else:
            if csumtype == "adler32":
                metadata2 = '<metadata att_name="adler32" att_value=""/>'
                new_metadata2 = '<metadata att_name="adler32" att_value="%s"/>' % (checksum)
            else:
                metadata2 = '<metadata att_name="md5sum" att_value=""/>'
                new_metadata2 = '<metadata att_name="md5sum" att_value="%s"/>' % (checksum)
        metadata3 = '<metadata att_name="csumtypetobeset" att_value=""/>'
        # rewrite the file content line by line, substituting placeholders
        for line in f.readlines():
            if line.find(metadata1) != -1:
                lines += line.replace(metadata1, new_metadata1)
            elif line.find(metadata2) != -1:
                lines += line.replace(metadata2, new_metadata2)
            elif line.find(metadata3) != -1:
                lines += line.replace(metadata3, new_metadata2)
            elif line.find(metadata4) != -1:
                lines += line.replace(metadata4, new_metadata2)
            elif line.find(metadata5) != -1:
                lines += line.replace(metadata5, new_metadata5)
            elif line.find(metadata6) != -1:
                lines += line.replace(metadata6, new_metadata6)
            elif line.find(metadata7) != -1:
                lines += line.replace(metadata7, new_metadata7)
            elif line.find('csumtypetobeset') != -1:
                # BUGFIX: the original called line.replace() with no arguments
                # (a guaranteed TypeError) and dropped the line from the output;
                # keep any leftover placeholder line unchanged instead
                lines += line
            else:
                lines += line
        f.close()
        try:
            f = open(fname, 'w')
            f.write(lines)
            f.close()
        except Exception as e:
            tolog("Failed to write new metadata for log: %s" % e)
            ec = -1
    return ec, lines
def removeFiles(dir, _fileList):
    """
    Remove files from the work dir
    """
    ec = 0
    found = 0
    for _file in _fileList:
        target = "%s/%s" % (dir, _file)
        # silently skip entries that are not regular files
        if not os.path.isfile(target):
            continue
        try:
            os.remove(target)
        except Exception as e:
            tolog("Failed to remove file: %s/%s, %s" % (dir, _file, e))
            ec = 1
        else:
            tolog("Removed file: %s/%s" % (dir, _file))
            found += 1
    if found > 0:
        tolog("Removed %d/%d file(s)" % (found, len(_fileList)))
    return ec
def createPoolFileCatalog(file_dictionary, lfns, pfc_name="PoolFileCatalog.xml", forceLogical=False):
    """
    Create the PoolFileCatalog.xml
    file_dictionary = { guid1 : sfn1, ... }
    Adapted from R. Walker's code
    Note: the logical tag can cause problems in some cases (for the payload).
    Returns the PFC text (empty string when file_dictionary is empty).
    """
    outxml = ''
    if len(file_dictionary) == 0:
        tolog('No input files so no PFC created')
    else:
        # a DOM document is built in parallel but never serialized; the plain-text
        # pfc_text below is what is actually written out
        dom = minidom.getDOMImplementation()
        doctype = dom.createDocumentType("POOLFILECATALOG","","InMemory")
        doc = dom.createDocument(None, "POOLFILECATALOG", doctype)
        root = doc.documentElement
        doc.appendChild(root)
        # Prepare plain text as can`t trust minidom on python <2.3
        pfc_text = '<?xml version="1.0" ?>\n'
        pfc_text += '<!-- Edited By the PanDA Pilot -->\n'
        pfc_text += '<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">\n'
        pfc_text += '<POOLFILECATALOG>\n'
        # Strip .N because stagein makes soft link, and params have no .N
        for guid in file_dictionary.keys():
            sfn = file_dictionary[guid]
            ftype = 'ROOT_All'
            _file = doc.createElement("File")
            _file.setAttribute('ID', guid)
            root.appendChild(_file)
            # physical element - file in local directory without .N extension
            physical = doc.createElement("physical")
            _file.appendChild(physical)
            pfn = doc.createElement('pfn')
            pfn.setAttribute('filetype', ftype)
            pfn.setAttribute('name', sfn)
            physical.appendChild(pfn)
            # forceLogical is set for TURL based PFCs. In this case, the LFN must not contain any legacy __DQ2-parts
            if forceLogical:
                logical = doc.createElement('logical')
                logical.setAttribute('name', os.path.basename(sfn))
                _file.appendChild(logical)
                # remove any legacy __DQ2 substring from the LFN if necessary
                _lfn = getLFN(sfn, lfns) #os.path.basename(sfn)
                if "__DQ2" in _lfn:
                    _lfn = stripDQ2FromLFN(_lfn)
                # remove any rucio :-separator if present
                if ":" in _lfn:
                    _lfn = _lfn.split(":")[1]
                pfc_text += ' <File ID="%s">\n <physical>\n <pfn filetype="%s" name="%s"/>\n </physical>\n <logical>\n <lfn name="%s"/>\n </logical>\n </File>\n' % (guid, ftype, sfn, _lfn)
            else:
                logical = doc.createElement('logical')
                _file.appendChild(logical)
                pfc_text += ' <File ID="%s">\n <physical>\n <pfn filetype="%s" name="%s"/>\n </physical>\n <logical/>\n </File>\n' %\
                    (guid, ftype, sfn)
        pfc_text += '</POOLFILECATALOG>\n'
        # add escape character for & (needed for google turls)
        # BUGFIX: the original replaced '&' with '&' -- a no-op (apparently a
        # garbled '&amp;'); escape it properly so the produced XML is well-formed
        if '&' in pfc_text:
            pfc_text = pfc_text.replace('&', '&amp;')
        tolog(pfc_text)
        try:
            tolog("Writing XML to %s" % (pfc_name))
            f = open(pfc_name, 'w')
            f.write(pfc_text)
        except Exception as e:
            tolog("!!WARNING!!2999!! Could not create XML file: %s" % (e))
        else:
            tolog("Created PFC XML")
            f.close()
        outxml = pfc_text
    return outxml
def replace(filename, stext, rtext):
    """ replace string stext with rtext in file filename

    Rewrites the file via a <filename>_tmp sidecar and an atomic rename.
    Returns True on success, False on any open/rename failure.
    """
    status = True
    try:
        _input = open(filename, "r")
    except Exception as e:
        tolog("!!WARNING!!4000!! Open failed with %s" % e)
        status = False
    else:
        try:
            output = open(filename + "_tmp", "w")
        except Exception as e:
            tolog("!!WARNING!!4000!! Open failed with %s" % e)
            status = False
            _input.close()
        else:
            # xreadlines() is deprecated/removed -- iterating the file object is equivalent
            for s in _input:
                output.write(s.replace(stext, rtext))
            _input.close()
            # BUGFIX: close (and thereby flush) the tmp file BEFORE renaming it over
            # the original; the original code renamed first and closed afterwards,
            # risking truncated output while data was still buffered
            output.close()
            # rename tmp file and overwrite original file
            try:
                os.rename(filename + "_tmp", filename)
            except Exception as e:
                tolog("!!WARNING!!4000!! Rename failed with %s" % e)
                status = False
    return status
def getDirectAccessDic(qdata):
    """ return the directAccess dictionary in case the site supports direct access / file stager """
    # task: create structure
    # directAccess = {
    #     'oldPrefix' : 'gsiftp://osgserv04.slac.stanford.edu/xrootd/atlas',
    #     'newPrefix' : 'root://atl-xrdr.slac.stanford.edu:1094//atlas/xrootd',
    #     'useCopyTool' : False,
    #     'directIn' : True
    #     'useFileStager' : True
    # }
    # from queuedata variable copysetup
    # copysetup = setup_string^oldPrefix^newPrefix^useFileStager^directIn
    # example:
    # copysetup=^gsiftp://osgserv04.slac.stanford.edu/xrootd/atlas^root://atl-xrdr.slac.stanford.edu:1094//atlas/xrootd^False^True
    # (setup_string not used)
    # For TURL PFC creation, the copysetup has the following structure
    # copysetup = setup_string^useFileStager^directIn
    directAccess = None
    if qdata.find('^') > -1:
        n = qdata.count('^')
        # i walks through the ^-separated fields; data[0] (the setup string) is skipped
        i = 0
        # protect against a forgotten inital ^ in case the setup_string is empty!
        if n >= 2 and n <= 5:
            # read data
            data = qdata.split('^')
            # get the setup file (actually not used here)
            # _setup = data[0]
            if n != 2:
                # get file transfer prefices
                i += 1
                oldPrefix = data[i]
                i += 1
                newPrefix = data[i]
            else:
                # n == 2 is the short TURL-PFC form without prefixes
                oldPrefix = ""
                newPrefix = ""
            # get file stager mode
            i += 1
            if data[i].lower() == 'true':
                useFileStager = True
                useCopyTool = False
            else:
                useFileStager = False
                useCopyTool = True
            # get direct access mode
            i += 1
            if data[i].lower() == 'true':
                directIn = True
                useCopyTool = False
            else:
                directIn = False
                # copy tool is only needed when neither direct access nor file stager is on
                if not useFileStager:
                    useCopyTool = True
                else:
                    useCopyTool = False
            # in case copysetup contains a third boolean (old)
            if n == 5:
                tolog("!!WARNING!!2999!! Update schedconfig to use new direct access format: copysetup = setup_string^oldPrefix^newPrefix^useFileStager^directIn")
                # the legacy trailing boolean overrides all three flags
                if data[n].lower() == 'true':
                    useFileStager = True
                    directIn = True
                    useCopyTool = False
                else:
                    useFileStager = False
                    directIn = False
                    useCopyTool = True
            # create structure
            directAccess = {
                'oldPrefix': oldPrefix,
                'newPrefix': newPrefix,
                'useCopyTool': useCopyTool,
                'directIn': directIn,
                'useFileStager': useFileStager
            }
        else:
            tolog("!!WARNING!!4000!! copysetup has wrong format: %s" % (qdata))
    else:
        # do nothing, don't care about the copysetup right now (only later in Mover)
        pass
    return directAccess
def getErrors(filename):
    """ get all !!TEXT!!NUMBER!!... errors from file """
    ret = ""
    try:
        f = open(filename)
        file_lines = f.readlines()
        f.close()
    except Exception as e:
        tolog("!!WARNING!!4000!! could not open/read file: %s" % e)
    else:
        # match the pilot error marker, e.g. !!WARNING!!1234!!
        pattern = re.compile(r"!!(\S+)!!\d+!!")
        for line in file_lines:
            if re.findall(pattern, line):
                ret += line
    return ret
def getLFN(pfn, lfns):
    """ Identify the LFN from the list of LFNs corresponding to a PFN """
    # Note: important since the basename of the PFN can contain additional characters,
    # e.g. PFN = /../data15_cos.00259101.physics_IDCosmic.merge.RAW._lb0116._SFO-ALL._0001.1_1427497374
    # but LFN = data15_cos.00259101.physics_IDCosmic.merge.RAW._lb0116._SFO-ALL._0001.1
    lfn = ""
    base = os.path.basename(pfn)
    for candidate in lfns:
        if candidate in base:
            # Handle scopes in case they are present
            if ":" in base:
                lfn = base.split(":")[1]
            else:
                lfn = candidate
    if lfn == "":
        tolog("!!WARNING!!2323!! Correct LFN could not be identified: pfn=%s, lfns=%s (assume basename of PFN)" % (pfn, str(lfns)))
        lfn = os.path.basename(pfn)
    return lfn
def makeTransRegReport(all_transferred, some_transferred, latereg, nr_transferred, nr_files, fail, ec, ret, fields):
    """ make the transfer and registration report """
    # purely informational: writes a human-readable summary block to the pilot log
    error = PilotErrors()
    tolog("")
    tolog("..Transfer and registration report.........................................................................")
    tolog(". Mover has finished")
    if all_transferred and not latereg:
        if nr_files > 1:
            tolog(". All (%d) files have been transferred and registered" % (nr_files))
        else:
            tolog(". The single file has been transferred and registered")
    elif all_transferred and latereg:
        if nr_files > 1:
            tolog(". All (%d) files have been transferred but not registered" % (nr_files))
            tolog(". The files will be registrered by a later pilot if job recovery is supported,")
        else:
            tolog(". The single file has been transferred but not registered")
            tolog(". The file will be registrered by a later pilot if job recovery is supported,")
        tolog(". otherwise this job will fail")
    elif some_transferred and latereg:
        tolog(". Some files (%d/%d) were transferred but no file was registered" % (nr_transferred, nr_files))
        tolog(". The remaining files will be transferred and registrered by a later pilot if job recovery is supported,")
        tolog(". otherwise this job will fail")
    elif some_transferred and not latereg:
        tolog(". Some files (%d/%d) were transferred and registered" % (nr_transferred, nr_files))
        tolog(". The remaining files will be transferred and registrered by a later pilot if job recovery is supported,")
        tolog(". otherwise this job will fail")
    elif not some_transferred:
        tolog(". No files (%d/%d) were transferred or registered" % (nr_transferred, nr_files))
        if nr_files > 1:
            tolog(". The files will be transferred and registrered by a later pilot if job recovery is supported,")
        else:
            tolog(". The file will be transferred and registrered by a later pilot if job recovery is supported,")
        tolog(". otherwise this job will fail")
    else:
        tolog(". Mover has finished")
    if fail != 0:
        tolog(". File transfer exit code : (%d, %s)" % (fail, error.getErrorStr(fail)))
    else:
        tolog(". File transfer exit code : (%d, <no error>)" % (fail))
    if some_transferred:
        tolog(". File registration return values : (%d, %s, %s)" %\
            (ec, error.getErrorStr(ec), str(ret)))
        tolog(". Put function will return fields : %s" % str(fields))
    tolog(". Transfer and registration report produced at : %s" % timeStamp())
    tolog("...........................................................................................................")
    tolog("")
def hasBeenTransferred(fields):
    """ determine whether files were successfully transferred """
    # the fields will all be empty if no files were transferred
    return sum(len(field) for field in fields) > 0
def removeSRMInfo(f0):
    """ remove any SRM info from the f0 string

    f0 is a '+'-separated list of PFNs; each entry is stripped of port/version
    info via SiteMover.stripPortAndVersion. Falls back to the unmodified input
    when stripping produces an empty result.
    """
    from SiteMover import SiteMover
    fields0 = ""
    for pfns in f0.split("+"):
        if pfns != "":
            fields0 += "%s+" % (SiteMover.stripPortAndVersion(pfns))
    # remove any trailing +-sign
    # BUGFIX: the original indexed fields0[-1], which raises IndexError when the
    # input contains no non-empty entries (e.g. f0 == ""); endswith is safe
    if fields0.endswith("+"):
        fields0 = fields0[:-1]
    if fields0 == "":
        fields0 = f0
    if f0 != fields0:
        tolog("removeSRMInfo() has updated %s to %s" % (f0, fields0))
    return fields0
def isAnalysisJob(trf):
    """ Determine whether the job is an analysis job or not """
    # analysis transforms are fetched over http(s); anything else is production
    return trf.startswith(('https://', 'http://'))
def timedCommand(cmd, timeout=300):
    """ Protect cmd with timed_command """
    tolog("Executing command: %s (protected by timed_command, timeout: %d s)" % (cmd, timeout))
    t0 = os.times()
    try:
        from TimerCommand import TimerCommand
        exitcode, output = TimerCommand(cmd).run(timeout=timeout)
    except Exception as e:
        pilotErrorDiag = 'TimedCommand() threw an exception: %s' % e
        tolog("!!WARNING!!2220!! %s" % pilotErrorDiag)
        exitcode = 1
        output = str(e)
    else:
        if exitcode != 0:
            tolog("!!WARNING!!2220!! Timed command returned: %s" % (output))
    # os.times()[4] is elapsed wall-clock time
    t1 = os.times()
    telapsed = int(round(t1[4] - t0[4]))
    tolog("Elapsed time: %d" % (telapsed))
    if telapsed >= timeout:
        tolog("!!WARNING!!2220!! Command timed out")
        output += " (timed out)"
    # timed_command adds a trailing \n, remove it
    if output.endswith('\n'):
        output = output[:-1]
    return exitcode, output
def stringToFields(jobFields):
    """ Convert a jobState string to a fields array """
    # strip the list syntax characters, then split and trim each entry
    cleaned = jobFields.replace('[', '').replace(']', '').replace("\'", "")
    return [part.strip() for part in cleaned.split(',')]
def readpar(parameter, alt=False, version=0, queuename=None):
    """ Read 'parameter' from queuedata via SiteInformation class """
    # thin delegation wrapper kept for backward compatibility with older call sites
    from SiteInformation import SiteInformation
    si = SiteInformation()
    return si.readpar(parameter, alt=alt, version=version, queuename=queuename)
def getBatchSystemJobID():
    """ return the batch system job id (will be reported to the server) """
    # the order below is significant: the first matching environment variable wins
    # BQS (e.g. LYON)
    if "QSUB_REQNAME" in os.environ:
        return "BQS", os.environ["QSUB_REQNAME"]
    # BQS alternative
    if "BQSCLUSTER" in os.environ:
        return "BQS", os.environ["BQSCLUSTER"]
    # Torque
    if "PBS_JOBID" in os.environ:
        return "Torque", os.environ["PBS_JOBID"]
    # LSF
    if "LSB_JOBID" in os.environ:
        return "LSF", os.environ["LSB_JOBID"]
    # Sun's Grid Engine
    if "JOB_ID" in os.environ:
        return "Grid Engine", os.environ["JOB_ID"]
    # Condor (variable sent through job submit file)
    if "clusterid" in os.environ:
        return "Condor", os.environ["clusterid"]
    # Condor (get jobid from classad file)
    if "_CONDOR_JOB_AD" in os.environ:
        return "Condor", commands.getoutput('sed -n "s/GlobalJobId.*\\"\\(.*\\)\\".*/\\1/p" %s' % os.environ["_CONDOR_JOB_AD"])
    # SLURM
    if "SLURM_JOB_ID" in os.environ:
        return "SLURM", os.environ["SLURM_JOB_ID"]
    return None, ""
def touch(filename):
    """ touch a file """
    # no-op when the file already exists
    if os.path.isfile(filename):
        return
    try:
        os.system("touch %s" % (filename))
    except Exception as e:
        tolog("!!WARNING!!1000!! Failed to touch file: %s" % e)
    else:
        tolog("Lock file created: %s" % (filename))
def createLockFile(jobrec, workdir, lockfile="LOCKFILE"):
    """
    Site workdir protection to prevent the work dir from being deleted by the cleanup
    function if pilot fails to register the log
    """
    # only try to create a lock file if it doesn't exist already
    # do not bother to create it if the site doesn't allow for job recovery
    f = "%s/%s" % (workdir, lockfile)
    # NOTE(review): the source indentation was ambiguous here; reconstructed per the
    # comment above: the recovery LOCKFILE is only created when job recovery (jobrec)
    # is enabled, while any other named lockfile is always created -- verify upstream
    if lockfile == "LOCKFILE":
        if jobrec:
            touch(f)
    else:
        touch(f)
def checkLockFile(workdir, lockfile):
    """checks if a lockfile exists in path
    workdir/lockfile
    """
    return os.path.isfile("%s/%s" % (workdir, lockfile))
def verifyTransfer(workdir, verbose=True):
    """ verify that all files were transferred by checking the existance of the ALLFILESTRANSFERRED lockfile """
    fname = "%s/ALLFILESTRANSFERRED" % (workdir)
    status = os.path.exists(fname)
    if verbose:
        if status:
            tolog("Verified: %s" % (fname))
        else:
            tolog("Transfer verification failed: %s (file does not exist)" % (fname))
    return status
def removeLEDuplicates(logMsg):
    """ identify duplicated messages in the log extracts and remove them """
    # Messages that differ only by their leading time stamp are duplicates, e.g.:
    # 31 Mar 2008 01:32:37| !!WARNING!!1999!! Could not read modification time of X
    # 31 Mar 2008 02:03:08| !!WARNING!!1999!! Could not read modification time of X
    # should only be printed once (first occurrence, with its original time stamp).
    ts_pattern = re.compile(r"(\d+ [A-Za-z]+ \d+ \d+:\d+:\d+\|)")
    original_lines = logMsg.split('\n')
    seen = []
    kept = []
    for line in original_lines:
        # strip the (first) time stamp before comparing
        stamps = re.findall(ts_pattern, line)
        stripped = line.replace(stamps[0], '') if stamps else line
        if stripped not in seen:
            seen.append(stripped)
            kept.append(line)
    return "\n".join(kept)
def writeTimeStampToFile(path="", filename="", overwrite=True):
    """ Write the current time stamp to file """
    # defaults: current directory / START_TIME
    target = os.path.join(path or os.getcwd(), filename or "START_TIME")
    # only skip the write when overwrite is disabled and the file already exists
    if overwrite or not os.path.exists(target):
        writeToFile(target, timeStampUTC(format='%Y-%m-%d %H:%M:%S'))
def writeToFile(filename, s):
    """ Write string s to file """
    # Ignore write status (the original bound it to an unused local)
    writeToFileWithStatus(filename, s)
def writeToFileWithStatus(filename, s, attribute="w"):
    """ Write string s to file with status return """
    try:
        handle = open(filename, attribute)
    except Exception as e:
        tolog("!!WARNING!!2990!! Could not open: %s, %s" % (filename, e))
        return False
    handle.write("%s" % (s))
    handle.close()
    tolog('Wrote string "%s" to file: %s' % (s.replace('\n',''), filename))
    return True
def readCodeFromFile(filename):
    """ Read exit code from file <workdir>/EXITCODE """
    ec = 0
    if os.path.exists(filename):
        try:
            f = open(filename, "r")
        except Exception, e:
            tolog("Failed to open %s: %s" % (filename, e))
        else:
            # NOTE(review): int(f.read()) raises ValueError on empty or non-numeric
            # content, leaving the file open -- verify callers guarantee valid content
            ec = int(f.read())
            tolog("Found code %d in file %s" % (ec, filename))
            f.close()
    else:
        tolog("No code to report (file %s does not exist)" % (filename))
    return ec
def readStringFromFile(filename):
    """ Read a string from the given file; return "" if the file is absent """
    s = ""
    if os.path.exists(filename):
        try:
            f = open(filename, "r")
        except Exception as e:
            tolog("Failed to open %s: %s" % (filename, e))
        else:
            try:
                s = f.read()
            finally:
                # close unconditionally (the original leaked the handle if read() raised)
                f.close()
            tolog("Found string %s in file %s" % (s, filename))
    else:
        tolog("No string to report (file %s does not exist)" % (filename))
    return s
def verifyQueuedata(queuename, filename, _i, _N, url):
    """ Check if the downloaded queuedata has the proper format.

    queuename -- queue the data was requested for (used in diagnostics)
    filename  -- path of the downloaded queuedata file
    _i, _N    -- current attempt number and max attempts (diagnostics only)
    url       -- config DB server the data came from (diagnostics only)
    Returns True when the file contains the expected queuedata fields;
    an invalid file is removed so a later attempt can re-download it.
    """
    hasQueuedata = False
    try:
        f = open(filename, "r")
    except Exception, e:
        tolog("!!WARNING!!1999!! Open failed with %s" % e)
    else:
        output = f.read()
        f.close()
        # a minimal sanity check: real queuedata always mentions these fields
        if not ('appdir' in output and 'copytool' in output):
            if len(output) == 0:
                tolog("!!WARNING!!1999!! curl command returned empty queuedata (wrong queuename %s?)" % (queuename))
            else:
                tolog("!!WARNING!!1999!! Attempt %d/%d: curl command did not return valid queuedata from config DB server %s" %\
                      (_i, _N, url))
                # strip whitespace so the dump below fits on one log line
                output = output.replace('\n', '')
                output = output.replace(' ', '')
                tolog("!!WARNING!!1999!! Output begins with: %s" % (output[:64]))
            # remove the bad file so the caller can retry the download cleanly
            try:
                os.remove(filename)
            except Exception, e:
                tolog("!!WARNING!!1999!! Failed to remove file %s: %s" % (filename, e))
        else:
            # found valid queuedata info, break the for-loop
            tolog("schedconfigDB returned: %s" % (output))
            hasQueuedata = True
    return hasQueuedata
def isSameType(trf, userflag):
    """ Is the lost job of the same type (analysis/production) as this pilot? """
    # treat userflag 'self' as an alias for 'user'
    if userflag == 'self':
        userflag = 'user'
    analysisJob = isAnalysisJob(trf)
    analysisPilot = (userflag == 'user')
    # same type when both are analysis or both are production
    sametype = (analysisJob == analysisPilot)
    if sametype:
        if analysisPilot:
            tolog("Lost job is of same type as current pilot (analysis pilot, lost analysis job trf: %s)" % (trf))
        else:
            tolog("Lost job is of same type as current pilot (production pilot, lost production job trf: %s)" % (trf))
    else:
        if analysisPilot:
            tolog("Lost job is not of same type as current pilot (analysis pilot, lost production job trf: %s)" % (trf))
        else:
            tolog("Lost job is not of same type as current pilot (production pilot, lost analysis job trf: %s)" % (trf))
    return sametype
def getGuidsFromXML(dir, id=None, filename=None, metadata=""):
    """ Extract the guid matching the filename from the xml, or all guids if filename is not set.

    The metadata file is chosen as: explicit 'metadata' path if given,
    otherwise <dir>/metadata-<id>.xml in recovery mode (id set),
    otherwise <dir>/metadata.xml.
    """
    if metadata != "":
        metadata_filename = metadata
    elif id:
        # recovery mode
        metadata_filename = "%s/metadata-%s.xml" % (dir, id)
    else:
        metadata_filename = "%s/metadata.xml" % (dir)
    xmldoc = minidom.parse(metadata_filename)
    guids = []
    for fileElement in xmldoc.getElementsByTagName("File"):
        lfn = str(fileElement.getElementsByTagName("lfn")[0].getAttribute("name"))
        # no filename given -> collect every guid; otherwise only the match
        if not filename or lfn == filename:
            guids.append(str(fileElement.getAttribute("ID")))
    return guids
def addToSkipped(lfn, guid):
    """ Append metadata for a skipped file to skipped.xml.

    Returns 0 on success, -1 on failure.
    """
    ec = 0
    try:
        # append to skipped.xml file; the with-statement closes the handle
        # even when a write fails (the original leaked it in that case)
        with open("skipped.xml", "a") as fd:
            fd.write(' <File ID="%s">\n' % (guid))
            fd.write("  <logical>\n")
            fd.write('   <lfn name="%s"/>\n' % (lfn))
            fd.write("  </logical>\n")
            fd.write(" </File>\n")
    except Exception as e:
        tolog("!!WARNING!!2999!! Exception caught: %s" % e)
        ec = -1
    return ec
def addSkippedToPFC(fname, skippedfname):
    """ Add skipped input file info to metadata.xml.

    fname        -- path of the PFC (metadata.xml) to update
    skippedfname -- path of the skipped.xml fragment to splice in
    The skipped <File> entries are inserted before the closing
    </POOLFILECATALOG> tag; the old PFC is kept as <fname>.BAK2.
    Returns 0 on success, -1 on any failure.
    """
    ec = 0
    try:
        fd = open(skippedfname, "r")
    except Exception, e:
        tolog("!!WARNING!!2999!! %s" % e)
        ec = -1
    else:
        skippedXML = fd.read()
        fd.close()
        try:
            fdPFC = open(fname, "r")
        except Exception, e:
            tolog("!!WARNING!!2999!! %s" % e)
            ec = -1
        else:
            PFCXML = fdPFC.read()
            fdPFC.close()
    # only proceed when both files were read successfully
    if ec == 0:
        # add the skipped file info to the end of the PFC
        # (the closing tag is consumed by the replace and re-appended below)
        PFCXML = PFCXML.replace("</POOLFILECATALOG>", skippedXML)
        PFCXML += "</POOLFILECATALOG>\n"
        # move the old PFC and create a new PFC
        try:
            os.system("mv %s %s.BAK2" % (fname, fname))
        except Exception, e:
            tolog("!!WARNING!!2999!! %s" % e)
            ec = -1
        else:
            try:
                fdNEW = open(fname, "w")
            except Exception, e:
                tolog("!!WARNING!!2999!! %s" % e)
                ec = -1
            else:
                fdNEW.write(PFCXML)
                fdNEW.close()
                tolog("Wrote updated XML with skipped file info:\n%s" % (PFCXML))
    return ec
def verifyReleaseString(release):
    """ Verify that the release (or homepackage) string is set.

    Returns the upper-cased release string, with None/empty mapped to 'NULL'.
    """
    normalized = "" if release is None else release.upper()
    if normalized == "":
        normalized = "NULL"
    if normalized == "NULL":
        tolog("Detected unset (NULL) release/homepackage string")
    return normalized
class _Curl:
    """ curl class

    Thin wrapper around the curl command line tool, used for all HTTP(S)
    exchanges with the dispatcher/monitor servers. Request data is written
    to a curl --config file and the command is run via commands.getstatusoutput.
    """
    # constructor
    def __init__(self):
        # path to curl
        self.path = 'curl'
        # verification of the host certificate
        self._verifyHost = True
        # modified for Titan test
        if ('HPC_Titan' in readpar("catchall")) or ('ORNL_Titan_install' in readpar("nickname")):
            self._verifyHost = False
        # request a compressed response
        self.compress = True
        # SSL cert/key
        from SiteInformation import SiteInformation
        si = SiteInformation()
        self.sslCert = si.getSSLCertificate()
        # key defaults to the certificate file (presumably a proxy holding both -- confirm)
        self.sslKey = self.sslCert
        # CA cert dir
        self.sslCertDir = si.getSSLCertificatesDirectory()
    # GET method
    def get(self, url, data, path):
        """ Execute a GET request against url; 'data' key/values are written
        to a curl --config file under 'path'. Returns the (exit status,
        output) tuple from commands.getstatusoutput (or [-1, exception]). """
        # make command
        com = '%s --silent --get' % self.path
        if "HPC_HPC" in readpar('catchall'):
            com += ' --tlsv1'
        com += ' --connect-timeout 100 --max-time 120'
        if not self._verifyHost:
            com += ' --insecure'
        if self.compress:
            com += ' --compressed'
        if self.sslCertDir != '':
            com += ' --capath %s' % self.sslCertDir
        if self.sslCert != '':
            com += ' --cert %s --cacert %s' % (self.sslCert, self.sslCert)
        if self.sslKey != '':
            com += ' --key %s' % self.sslKey
        #com += ' --verbose'
        # data
        if 'nJobs' in data:
            com += ' --header "Accept: application/json"'
        strData = ''
        for key in data.keys():
            strData += 'data="%s"\n' % urllib.urlencode({key:data[key]})
        jobId = ''
        if 'jobId' in data.keys():
            jobId = '_%s' % data['jobId']
        # write data to temporary config file
        # tmpName = commands.getoutput('uuidgen 2> /dev/null')
        tmpName = '%s/curl_%s%s.config' % (path, os.path.basename(url), jobId)
        try:
            tmpFile = open(tmpName, 'w')
            tmpFile.write(strData)
            tmpFile.close()
        except IOError, e:
            tolog("!!WARNING!!2999!! %s" % e)
        if os.path.exists(tmpName):
            com += ' --config %s' % tmpName
        else:
            tolog("!!WARNING!!2999!! Can not set --config option since %s could not be created, curl will fail" % tmpName)
        com += ' %s' % url
        # execute
        tolog("Executing command: %s" % (com))
        try:
            ret = commands.getstatusoutput(com)
        except Exception, e:
            tolog("!!WARNING!!1111!! Caught exception from curl command: %s" % (e))
            ret = [-1, e]
        # remove temporary file
        #os.remove(tmpName)
        return ret
    # POST method
    def post(self, url, data, path):
        """ Execute a POST request against url; 'data' key/values are written
        to a per-job curl --config file under 'path'. Returns the (exit
        status, output) tuple from commands.getstatusoutput (or [-1, exception]). """
        # make command
        com = '%s --silent --show-error' % self.path
        if "HPC_HPC" in readpar('catchall'):
            com += ' --tlsv1'
        com += ' --connect-timeout 100 --max-time 120'
        if not self._verifyHost:
            com += ' --insecure'
        if self.compress:
            com += ' --compressed'
        if self.sslCertDir != '':
            com += ' --capath %s' % self.sslCertDir
        if self.sslCert != '':
            com += ' --cert %s --cacert %s' % (self.sslCert, self.sslCert)
        if self.sslKey != '':
            com += ' --key %s' % self.sslKey
        #com += ' --verbose'
        # data
        if 'nJobs' in data:
            com += ' --header "Accept: application/json"'
        strData = ''
        for key in data.keys():
            strData += 'data="%s"\n' % urllib.urlencode({key:data[key]})
        curl_config = '%s/curl.config' % path
        try:
            # derive a job-specific config file name from jobId or from the
            # first event range id so parallel requests do not clobber each other
            jobId = ''
            if 'jobId' in data.keys():
                jobId = '_%s' % data['jobId']
            elif 'eventRanges' in data.keys():
                eventRanges = json.loads(data['eventRanges'])
                if 'eventRanges' in eventRanges[0]:
                    jobId = '_%s' % eventRanges[0]['eventRanges'][0]['eventRangeID'].split('-')[1]
                else:
                    jobId = '_%s' % eventRanges[0]['eventRangeID'].split('-')[1]
            curl_config = '%s/curl_%s%s.config' % (path, os.path.basename(url), jobId)
        except:
            tolog("%s" % traceback.format_exc())
        # write data to temporary config file
        tmpName = curl_config
        try:
            tmpFile = open(tmpName,'w')
            tmpFile.write(strData)
            tmpFile.close()
        except IOError, e:
            tolog("!!WARNING!!2999!! %s" % e)
        if os.path.exists(tmpName):
            com += ' --config %s' % tmpName
        else:
            tolog("!!WARNING!!2999!! Can not set --config option since curl.config could not be created, curl will fail")
        com += ' %s' % url
        # execute
        tolog("Executing command: %s" % (com))
        try:
            ret = commands.getstatusoutput(com)
        except Exception, e:
            tolog("!!WARNING!!1111!! Caught exception from curl command: %s" % (e))
            ret = [-1, e]
        # remove temporary file
        #os.remove(tmpName)
        return ret
    # PUT method
    def put(self, url, data):
        """ Emulate a PUT by sending each data entry as a multipart file
        upload (-F "key=@path"). Returns the (exit status, output) tuple. """
        # make command
        com = '%s --silent' % self.path
        if "HPC_HPC" in readpar('catchall'):
            com += ' --tlsv1'
        if not self._verifyHost:
            com += ' --insecure'
        if self.compress:
            com += ' --compressed'
        if self.sslCertDir != '':
            com += ' --capath %s' % self.sslCertDir
        if self.sslCert != '':
            com += ' --cert %s --cacert %s' % (self.sslCert, self.sslCert)
        if self.sslKey != '':
            com += ' --key %s' % self.sslKey
        #com += ' --verbose'
        # emulate PUT
        for key in data.keys():
            com += ' -F "%s=@%s"' % (key,data[key])
        com += ' %s' % url
        # execute
        tolog("Executing command: %s" % (com))
        try:
            ret = commands.getstatusoutput(com)
        except Exception, e:
            tolog("!!WARNING!!1111!! Caught exception from curl command: %s" % (e))
            ret = [-1, e]
        return ret
    def verifyHost(self, verify):
        """ Enable/disable host certificate verification for later requests """
        # set _verifyHost
        self._verifyHost = verify
# send message to pandaLogger
def toPandaLogger(data):
    """ Send 'data' to the panda monitor log handler and parse the response.

    Returns (status, data, response): (0, parsed data, raw response) on
    success, (curl status, None, None) on a curl failure, or
    (EC_Failed, None, None) on HTTP/parse errors.
    """
    try:
        tpre = datetime.datetime.utcnow()
    except:
        pass
    tolog("toPandaLogger: len(data) = %d" % len(data))
    tolog("data = %s" % str(data))
    response = None
    # instantiate curl
    curl = _Curl()
    # bug fix: keep the directory handed to curl.get() in a local so the
    # error branch below can reference it (the original used an undefined
    # 'path' there, raising NameError instead of reporting the curl error)
    path = os.getcwd()
    url = 'http://pandamon.cern.ch/system/loghandler'
    curlstat, response = curl.get(url, data, path)
    try:
        tpost = datetime.datetime.utcnow()
        tolog("Elapsed seconds: %d" % ((tpost-tpre).seconds))
    except:
        pass
    try:
        if curlstat == 0:
            # parse response message
            outtxt = response.lower()
            if outtxt.find('<html>') > 0:
                if outtxt.find('read timeout') > 0:
                    tolog("!!WARNING!!2999!! Timeout on dispatcher exchange")
                else:
                    tolog("!!WARNING!!2999!! HTTP error on dispatcher exchange")
                tolog("HTTP output: %s" % (response))
                return EC_Failed, None, None
            # create the parameter list from the dispatcher response
            data, response = parseDispatcherResponse(response)
        else:
            tolog("!!WARNING!!2999!! Dispatcher message curl error: %d " % (curlstat))
            tolog("Response = %s" % (response))
            tolog("Dumping curl.config file:")
            dumpFile('%s/curl.config' % (path), topilotlog=True)
            return curlstat, None, None
        return 0, data, response
    except:
        _type, value, _traceback = sys.exc_info()
        tolog("ERROR : %s %s" % ( _type, traceback.format_exc()))
        return EC_Failed, None, None
def verifyJobState(state):
    """ Make sure the state is a job state value allowed by the server.

    Internal pilot states ('setup', 'stagein', 'stageout') are mapped to
    'running'; any other unknown state is mapped to 'failed'.
    """
    allowed_values = ['running', 'failed', 'finished', 'holding', 'starting', 'transferring']
    if state in allowed_values:
        tolog("Job state \'%s\' is an allowed job state value" % (state))
        return state
    tolog("!!WARNING!!3333!! Job state \'%s\' is not an allowed server job state value, job can fail" % (state))
    if state in ('setup', 'stagein', 'stageout'):
        tolog("Switched to running state for server update")
        return 'running'
    return 'failed'
# send message to dispatcher
def toServer(baseURL, cmd, data, path, experiment):
    """ Sends 'data' using command 'cmd' to the dispatcher.

    baseURL    -- dispatcher base URL; 'cmd' is appended unless cmd == "getStatus"
    data       -- dictionary POSTed to the server (its 'state' is normalized first)
    path       -- directory for the per-request curl config file
    experiment -- non-empty only for GETJOB; triggers the ES-merge data update
    Returns (status, data, response): (0, parsed data, raw response) on
    success, (status, None, None) otherwise (curl error, HTTP error or
    non-zero dispatcher StatusCode).
    """
    try:
        tpre = datetime.datetime.utcnow()
    except:
        pass
    tolog("toServer: cmd = %s" % (cmd))
    tolog("toServer: len(data) = %d" % len(data))
    tolog("data = %s" % str(data))
    # make sure the job state is an allowed value
    if data.has_key('state'):
        data['state'] = verifyJobState(data['state'])
    # instantiate curl
    curl = _Curl()
    # use insecure for dev server
    #if 'voatlas220' in baseURL:
    #    curl.verifyHost(False)
    # execute
    if cmd == "getStatus":
        url = baseURL
    else:
        url = baseURL + '/' + cmd
    curlstat, response = curl.post(url, data, path)
    try:
        tpost = datetime.datetime.utcnow()
        tolog("Elapsed seconds: %d" % ((tpost-tpre).seconds))
    except:
        pass
    # reconstruct the per-job curl config name (must mirror _Curl.post())
    # so error branches can dump the right file
    curl_config = '%s/curl.config' % path
    try:
        jobId = ''
        if 'jobId' in data.keys():
            jobId = '_%s' % data['jobId']
        elif 'eventRanges' in data.keys():
            jobId = '_%s' % data['eventRanges'][0]['eventRangeID'].split('-')[1]
        curl_config = '%s/curl_%s%s.config' % (path, os.path.basename(url), jobId)
    except:
        pass
    try:
        if curlstat == 0:
            # parse response message
            outtxt = response.lower()
            if outtxt.find('<html>') > 0:
                if outtxt.find('read timeout') > 0:
                    tolog("!!WARNING!!2999!! Timeout on dispatcher exchange")
                else:
                    tolog("!!WARNING!!2999!! HTTP error on dispatcher exchange")
                tolog("HTTP output: %s" % (response))
                return EC_Failed, None, None
            # create the parameter list from the dispatcher response
            data, response = parseDispatcherResponse(response)
            # update the dispatcher data for Event Service merge jobs
            if experiment != "": # experiment is only set for GETJOB, skip this otherwise
                data = updateDispatcherData4ES(data, experiment, path)
            if 'StatusCode' not in data:
                status = EC_Failed
                tolog("!!WARNING!!2999!! Dispatcher response: %s" % data)
            else:
                status = int(data['StatusCode'])
                if status != 0:
                    # pilotErrorDiag = getDispatcherErrorDiag(status)
                    tolog("Dumping curl config file: %s" % (curl_config))
                    dumpFile(curl_config, topilotlog=True)
        else:
            tolog("!!WARNING!!2999!! Dispatcher message curl error: %d " % (curlstat))
            tolog("Response = %s" % (response))
            tolog("Dumping curl.config file: %s" % curl_config)
            dumpFile(curl_config, topilotlog=True)
            return curlstat, None, None
        if status == 0:
            return status, data, response
        else:
            return status, None, None
    except:
        _type, value, _traceback = sys.exc_info()
        tolog("ERROR %s : %s %s" % (cmd, _type, traceback.format_exc()))
        return EC_Failed, None, None
def getPilotToken(tofile=False):
    """ Read the pilot token from pilottoken.txt and remove the file.

    Returns the token string, or None when the file is absent/unreadable.
    """
    pilottoken = None
    filename = "pilottoken.txt"
    if os.path.exists(filename):
        try:
            f = open(filename, "r")
        except Exception as e:
            tolog("!!WARNING!!2999!! Could not open pilot token file: %s" % e, tofile=tofile)
        else:
            try:
                pilottoken = f.read()
            except Exception as e:
                tolog("!!WARNING!!2999!! Could not read pilot token: %s" % e, tofile=tofile)
            else:
                tolog("Successfully read pilot token", tofile=tofile)
            finally:
                # bug fix: the original only closed the file after a successful
                # read, leaking the handle when read() raised
                f.close()
            # remove the token file regardless of the read outcome
            try:
                os.remove(filename)
            except Exception as e:
                tolog("!!WARNING!!2999!! Could not remove pilot token file: %s" % e, tofile=tofile)
            else:
                tolog("Pilot token file has been removed", tofile=tofile)
    return pilottoken
def removeSubFromResponse(response):
    """ Remove any _subNNN strings from the dataset variables (realDatasets and destinationDblock) """
    # NOTE(review): despite "any" in the docstring, pattern.match() anchors at
    # the start of the string and only group(1) of the first match is replaced,
    # so at most one _subNNN occurrence is stripped per call -- confirm this is
    # acceptable (the only call site in parseDispatcherResponse is commented out)
    tolog("response='%s'"%(response))
    pattern = re.compile('\S+(\_sub[0-9]+)')
    match = pattern.match(response)
    if match:
        # strip away the _subNNN string
        try:
            response = response.replace(match.group(1), '')
        except Exception, e:
            tolog("!!WARNING!!1119!! Failed to remove _sub string (%s) from dispatcher response: %s" % (match.group(1), e))
        else:
            tolog("Updated dispatcher response (removed %s): %s" % (match.group(1), response))
    else:
        tolog("Found no _subNNN string in the dispatcher response")
    return response
def createESFileDictionary(writeToFile):
    """ Create the event range file dictionary from the writeToFile info.

    writeToFile = 'fileNameForTrf_1:LFN_1,LFN_2^fileNameForTrf_2:LFN_3,LFN_4'
    -> esFileDictionary = {'fileNameForTrf_1': 'LFN_1,LFN_2', 'fileNameForTrf_2': 'LFN_3,LFN_4'}
    The keys are also returned as an ordered list (plain dicts are unordered)
    since they are later needed, in order, to update the jobParameters
    ('@fileNameForTrf_1:..' is replaced by '@/path/filename:..').
    An entry without a ':' separator yields an empty dictionary.
    """
    esFileDictionary = {}
    orderedFnameList = []
    for entry in writeToFile.split("^"):
        if ":" not in entry:
            tolog("!!WARNING!!4444!! File info does not have the correct format, expected a separator \':\': %s" % (entry))
            esFileDictionary = {}
            break
        parts = entry.split(":")
        fname = parts[0]
        # fix the issue that some athena 20 releases have _000 at the end of the filename
        if fname.endswith("_000"):
            tolog("replace %s with %s" % (fname, fname[:-4]))
            fname = fname[:-4]
        esFileDictionary[fname] = parts[1]
        orderedFnameList.append(fname)
    return esFileDictionary, orderedFnameList
def writeToInputFile(path, esFileDictionary, orderedFnameList, eventservice=True):
    """ Write the input file lists to the proper input files.

    For each entry in esFileDictionary ({'fileNameForTrf_1': 'LFN_1,LFN_2', ..})
    a file named after the key (with .pool.root. -> .txt.) is written under
    'path', containing the trf option line followed by one PFN per line.
    Returns (ec, fnames) where fnames maps each identifier to the written
    file's path; identifiers appear in jobParameters as @identifier and are
    substituted later (see updateJobPars()).
    """
    ec = 0
    fnames = {}
    for fname in esFileDictionary.keys():
        _path = os.path.join(path, fname.replace('.pool.root.', '.txt.'))
        try:
            # with-statement closes the handle even when a write fails
            # (the original leaked it in that case)
            with open(_path, "w") as f:
                if eventservice:
                    f.write("--inputHitsFile\n")
                else: # For Reco
                    f.write("--inputAODFile\n")
                for inputFile in esFileDictionary[fname].split(","):
                    f.write(os.path.join(path, inputFile) + "\n")
        except IOError as e:
            tolog("!!WARNING!!4445!! Failed to open file %s: %s" % (_path, e))
            ec = -1
        else:
            tolog("Wrote input file list to file %s: %s" % (_path, str(esFileDictionary[fname])))
            fnames[fname] = _path
    return ec, fnames
def updateESGUIDs(guids):
    """ Replace NULL valued ES guids with unique DUMMYGUID<i> placeholders.

    Necessary since guids are used as dictionary keys in some places, e.g.
    'NULL,NULL,NULL,sasdasdasdasdd'
    -> 'DUMMYGUID0,DUMMYGUID1,DUMMYGUID2,sasdasdasdasdd'
    """
    index = 0
    while 'NULL' in guids:
        guids = guids.replace('NULL', 'DUMMYGUID%d' % (index), 1)
        index += 1
    return guids
def getESInputFiles(esFileDictionary):
    """ Concatenate all input file lists from the event range file dictionary.

    {'f1': 'LFN_1,LFN_2', 'f2': 'LFN_3,LFN_4'} -> 'LFN_1,LFN_2,LFN_3,LFN_4'
    """
    return ",".join([esFileDictionary[fname] for fname in esFileDictionary.keys()])
def updateJobPars(jobPars, fnames):
    """ Replace the @identifiers in jobPars with the full file paths, then
    strip the first '--inputHitsFile=' or '--inputAODFile=' option found. """
    for identifier in fnames.keys():
        jobPars = jobPars.replace("@%s" % (identifier), "@%s" % (fnames[identifier]))
        tolog("%s: %s" % (identifier, fnames[identifier]))
    # only the first matching option is removed (--inputHitsFile= wins)
    for option in ("--inputHitsFile=", "--inputAODFile="):
        if option in jobPars:
            jobPars = jobPars.replace(option, "")
            break
    return jobPars
def updateDispatcherData4ES(data, experiment, path):
    """ Update the input file list for Event Service merge jobs.

    For Event Service merge jobs, the input file list will not arrive in the
    inFiles list as usual, but in the writeToFile field, so inFiles need to
    be corrected.
    data       -- dispatcher job definition, e.g. {'jobsetID': '2235472772', .. }
    experiment -- experiment name, used to get the site information object
    path       -- pilot init dir (where the file list files are written)
    Returns the (possibly modified) data dictionary.
    """
    if data.has_key('writeToFile'):
        writeToFile = data['writeToFile']
        esFileDictionary, orderedFnameList = createESFileDictionary(writeToFile)
        tolog("esFileDictionary=%s" % (esFileDictionary))
        tolog("orderedFnameList=%s" % (orderedFnameList))
        if esFileDictionary != {}:
            """
            # Replace the @inputFor* directorive with the file list
            for name in orderedFnameList:
                tolog("Replacing @%s with %s" % (name, esFileDictionary[name]))
                data['jobPars'] = data['jobPars'].replace("@%s" % (name), esFileDictionary[name])
            """
            # fix the issue that some athena 20 releases have _000 at the end of the filename
            for name in orderedFnameList:
                name_000 = "@%s_000 " % (name)
                new_name = "@%s " % (name)
                if name_000 in data['jobPars']:
                    tolog("%s in jobPars, replace it with %s" % (name_000, new_name))
                    data['jobPars'] = data['jobPars'].replace(name_000, new_name)
            # Remove the autoconf
            if "--autoConfiguration=everything " in data['jobPars']:
                data['jobPars'] = data['jobPars'].replace("--autoConfiguration=everything ", " ")
            # Write event service file lists to the proper input file
            #ec, fnames = writeToInputFile(path, esFileDictionary, orderedFnameList)
            # NOTE: the write above is disabled, so ec is forced to 0 and the
            # success branch always runs
            ec = 0
            if ec == 0:
                #inputFiles = getESInputFiles(esFileDictionary)
                # Update the inFiles list (not necessary??)
                #data['inFiles'] = inputFiles
                # Correct the dsname?
                # filesize and checksum? not known (no file catalog)
                # Replace the NULL valued guids for the ES files
                data['GUID'] = updateESGUIDs(data['GUID'])
                # Replace the @identifiers in the jobParameters
                #data['jobPars'] = updateJobPars(data['jobPars'], fnames)
                # Update the copytoolin (should use the proper objectstore site mover)
                si = getSiteInformation(experiment)
                if not os.environ.has_key('Nordugrid_pilot') and data.has_key('eventServiceMerge'):
                    ec = si.replaceQueuedataField("copytoolin", "objectstore")
            else:
                tolog("Cannot continue with event service merge job")
        else:
            tolog("Empty event range dictionary")
    else:
        tolog("writeToFile not present in job def")
    return data
def parseDispatcherResponse(response):
    """ Create the parameter list from the dispatcher response.

    The response is tried as JSON first; on failure it is parsed as a URL
    query string. Credential fields (userProxy/privateKey/publicKey) are
    replaced by 'hidden' in the logged copy. Returns (data, response) where
    data is a dict of parameters (query-string path) or the parsed JSON copy.
    """
    # use this when listFilesInDataset usage is not needed any more (v 51b)
    # # remove any _subNNN strings if necessary (from dataset names)
    # if "_sub" in response:
    #     response = removeSubFromResponse(response)
    try:
        parList = json.loads(response)
    except:
        # not JSON: fall back to URL-encoded key=value pairs
        data = {}
        parList = cgi.parse_qsl(response, keep_blank_values=True)
        for p in parList:
            data[p[0]] = p[1]
        # hide credentials in the copy that is logged below (data keeps them)
        if 'userProxy' in str(parList) or 'privateKey' in str(parList):
            for i in range(len(parList)):
                if parList[i][0] == 'userProxy' or parList[i][0] == 'publicKey' or parList[i][0] == 'privateKey':
                    newList = list(parList[i])
                    newList[1] = 'hidden'
                    parList[i] = newList
    else:
        data = parList.copy()
        # hide credentials in the logged JSON job list
        if 'jobs' in parList:
            for p in parList['jobs']:
                if 'userProxy' in p:
                    p['userProxy'] = 'hidden'
                if 'privateKey' in p:
                    p['privateKey'] = 'hidden'
                if 'publicKey' in p:
                    p['publicKey'] = 'hidden'
    tolog("Dispatcher response: %s" % str(parList))
    return data, response
def grep(patterns, file_name):
    """ Return the lines of file_name that match any regexp in 'patterns'.

    Example:
    grep(["St9bad_alloc", "FATAL"], "athena_stdout.txt")
    -> [list containing the lines below]
    CaloTrkMuIdAlg2.sysExecute()  ERROR St9bad_alloc
    AthAlgSeq.sysExecute()  FATAL  Standard std::exception is caught
    Note: a line matching several patterns is returned once per match.
    """
    compiled = [re.compile(pattern) for pattern in patterns]
    matched_lines = []
    try:
        f = open(file_name, "r")
    except IOError as e:
        tolog("!!WARNING!!2999!! %s" % e)
    else:
        for line in f:
            for cp in compiled:
                if cp.search(line):
                    matched_lines.append(line)
        f.close()
    return matched_lines
def getJobReport(filename):
    """ Extract the job report from the payload stdout, or its last N lines.

    The report section starting at the second occurrence of
    "Job Report produced by" is returned (the first occurrence is skipped
    since the report is printed twice). If no report is found, the last 10
    lines of the file are returned instead (via tail()).
    """
    report = ""
    if os.path.exists(filename):
        pattern = re.compile("Job Report produced by")
        try:
            f = open(filename, "r")
        except IOError, e:
            tolog("!!WARNING!!1299!! %s" % e)
        else:
            matched_lines = []
            status = True
            first_report = True
            while status:
                # get the next line in the file
                line = f.readline()
                if not line:
                    break
                # find the start position of the job report and grab all remaining lines
                if re.search(pattern, line) and first_report:
                    # the job report is repeated, only grab it the second time it appears
                    first_report = False
                elif re.search(pattern, line) and not first_report:
                    # save the job report title line
                    line = line.replace("=====", "-")
                    matched_lines.append(line)
                    # grab everything to the end of the file
                    while True:
                        line = f.readline()
                        if not line:
                            status = False
                            break
                        matched_lines.append(line)
            # grab the last couple of lines in case the trf failed before the job report was printed
            if len(matched_lines) == 0:
                N = 10
                tolog("Job report could not be found in the payload stdout, will add the last %d lines instead for the log extracts" % (N))
                report = "- Last %d lines from %s -\n" % (N, filename)
                report = report + tail(filename, N)
            else:
                report = "".join(matched_lines)
            f.close()
    else:
        tolog("!!WARNING!!1299!! File %s does not exist" % (filename))
    return report
def tail(filename, number_of_lines):
    """ Grab the last N lines from a file.

    Reads backwards from the end of the file in growing 1024-byte steps
    until at least number_of_lines lines (or the whole file) are covered.
    Returns the joined lines, or a warning string if reading failed.
    """
    report = ""
    if os.path.exists(filename):
        try:
            # U is to open it with Universal newline support
            f = open(filename, "rU")
        except IOError, e:
            tolog("!!WARNING!!1299!! %s" % e)
        else:
            read_size = 1024
            offset = read_size
            # seek to the end to learn the file size
            f.seek(0, 2)
            file_size = f.tell()
            # abort if zero file size
            if file_size == 0:
                tolog("!!WARNING!!1299!! File %s has zero size" % (filename))
            else:
                # loop over file
                while True:
                    # never seek before the start of the file
                    if file_size < offset:
                        offset = file_size
                    f.seek(-offset, 2)
                    read_str = f.read(offset)
                    try:
                        # Remove newline at the end
                        if read_str[offset - 1] == '\n':
                            read_str = read_str[:-1]
                        lines = read_str.split('\n')
                        # Got number_of_lines lines
                        if len(lines) >= number_of_lines:
                            report = "\n".join(lines[-number_of_lines:])
                            break
                    except Exception, e:
                        # the following message will be visible in the log extracts
                        report = "!!WARNING!!1299!! tail command caught an exception when reading payload stdout: %s" % e
                        tolog(report)
                        break
                    # Reached the beginning
                    if offset == file_size:
                        report = read_str
                        break
                    # widen the window and try again
                    offset += read_size
            f.close()
    else:
        tolog("!!WARNING!!1299!! File %s does not exist" % (filename))
    return report
def filterJobReport(report):
    """ Extract the exit and error code lines from the job report.

    The first (header) line is always kept; every line matching ExitCode or
    ErrorCode is copied as well. Note that the pattern check is also applied
    to the header line, and a line matching both patterns appears twice.
    """
    if report == "":
        tolog("!!WARNING!!2999!! Found empty job report")
        return ""
    patterns = [re.compile(keyword) for keyword in ("ExitCode", "ErrorCode")]
    filtered = []
    header = True
    for line in report.split("\n"):
        # grab the header line
        if header:
            filtered.append(line + "\n")
            header = False
        # match the exit and error code lines (once per matching pattern)
        for pattern in patterns:
            if pattern.search(line):
                filtered.append(line + "\n")
    return "".join(filtered)
def removeDuplicates(seq):
    """ Remove duplicates from seq while preserving the original order.

    Uses list membership (not a set) so unhashable entries are supported.
    """
    seen = []
    for item in seq:
        if item not in seen:
            seen.append(item)
    return seen
def dumpOrderedItems(l):
    """ Log every item of list l with its 1-based position """
    # enumerate replaces the original hand-maintained counter
    for position, item in enumerate(l, 1):
        if item == "":
            tolog("%d. <empty>" % (position))
        else:
            tolog("%d. %s" % (position, item))
def getDatasetDict(outputFiles, destinationDblock, logFile, logFileDblock, archive=False):
    """ Create a file -> dataset dictionary, including the log file entry.

    Returns None when the lists are inconsistent (different or zero length)
    or contain invalid entries ('NULL', '', ' ', None).
    archive -- if True, invalid entries are tolerated (no warning, still built)
    """
    datasetDict = None
    # verify that the lists are of equal size
    if len(outputFiles) != len(destinationDblock):
        tolog("WARNING: Lists are not of same length: len(outputFiles)=%d, len(destinationDblock)=%d" % (len(outputFiles), len(destinationDblock)))
    elif len(outputFiles) == 0:
        tolog("No output files for this job (outputFiles has zero length)")
    elif len(destinationDblock) == 0:
        tolog("WARNING: destinationDblock has zero length")
    else:
        # verify that the lists contain valid entries
        ok = True
        for _list in [outputFiles, destinationDblock]:
            for _entry in _list:
                # identity check for None (the original compared with ==)
                if _entry == "NULL" or _entry == "" or _entry == " " or _entry is None:
                    # ignore if archive
                    if not archive:
                        tolog("!!WARNING!!2999!! Found non-valid entry in list: %s" % str(_list))
                        ok = False
                        break
        if ok:
            # build the dictionary
            try:
                datasetDict = dict(zip(outputFiles, destinationDblock))
            except Exception as e:
                tolog("!!WARNING!!2999!! Exception caught in getDatasetDict(): %s" % e)
                datasetDict = None
            else:
                # add the log file info
                datasetDict[logFile] = logFileDblock
    return datasetDict
def getFileGuid(metadata_filename, guid_filename):
    """ Read the guid belonging to guid_filename from the metadata file.

    Returns "" when the metadata file is missing, unparsable, or has no
    entry matching guid_filename.
    """
    logFileGuid = ""
    if not os.path.exists(metadata_filename):
        tolog("!!WARNING!!2999!! Could not locate %s, log file guid can not be verified" % (metadata_filename))
        return logFileGuid
    try:
        xmldoc = minidom.parse(metadata_filename)
        for fileElement in xmldoc.getElementsByTagName("File"):
            lfn = str(fileElement.getElementsByTagName("lfn")[0].getAttribute("name"))
            _guid = str(fileElement.getAttribute("ID"))
            if guid_filename == lfn:
                logFileGuid = _guid
                tolog("Guid %s belongs to file %s" % (_guid, lfn))
    except Exception as e:
        tolog("!!WARNING!!2999!! Could not parse the metadata - guids unknown: %s" % (e))
    return logFileGuid
def tailPilotErrorDiag(pilotErrorDiag, size=256):
    """ Return at most the last <size> characters of pilotErrorDiag.

    Non-sliceable input (e.g. None) is returned unchanged.
    """
    try:
        truncated = pilotErrorDiag[-size:]
    except:
        truncated = pilotErrorDiag
    return truncated
def headPilotErrorDiag(pilotErrorDiag, size=256):
    """ Return at most the first <size> characters of pilotErrorDiag.

    Non-sliceable input (e.g. None) is returned unchanged.
    """
    try:
        truncated = pilotErrorDiag[:size]
    except:
        truncated = pilotErrorDiag
    return truncated
def getMaxInputSize(MB=False):
    """ Return a proper maxinputsize value.

    Uses schedconfig.maxwdir (MB) when set and parsable, otherwise the
    pilot default of 14336 MB. Returned in bytes, or in MB when MB=True.
    """
    MAX_INPUT_FILESIZES = 14*1024*1024*1024 # 14 GB, 14336 MB (pilot default)
    MAX_INPUT_FILESIZES_MB = 14*1024 # 14336 MB (pilot default)
    # the duplicated unit selection of the original collapsed into one place
    default = MAX_INPUT_FILESIZES_MB if MB else MAX_INPUT_FILESIZES
    _maxinputsize = readpar('maxwdir') # normally 14336+2000 MB
    if _maxinputsize != "":
        try:
            _maxinputsize = int(_maxinputsize) # MB
            if not MB:
                _maxinputsize = _maxinputsize*1024*1024 # MB -> B
        except Exception as e:
            tolog("!!WARNING!!2999!! schedconfig.maxinputsize: %s" % e)
            _maxinputsize = default
    else:
        _maxinputsize = default
    if MB:
        tolog("Max input size = %d MB (pilot default)" % (_maxinputsize))
    else:
        tolog("Max input size = %d B (pilot default)" % (_maxinputsize))
    return _maxinputsize
def getTimeFloor(timefloor_default):
    """ Return a proper timefloor (in seconds).

    timefloor is the time limit within which the pilot is allowed to run
    multiple jobs: if at the end of a job there is enough time (i.e. at
    least [timefloor] s left), the pilot will ask for another job.
    timefloor_default -- value from pilot option -C (overrides queuedata),
    or None to read schedconfig.timefloor (given in minutes).
    A value of 0 or outside (0 s, 1 day] disables multi-jobs.
    """
    try:
        if timefloor_default != None:
            timefloor = timefloor_default
            tolog("(Overriding any schedconfig.timefloor with timefloor set by pilot option -C %d)" % (timefloor_default))
        else:
            timefloor = int(readpar('timefloor'))*60 # assumed to be in minutes, convert into seconds
    except:
        tolog("Timefloor not set in queuedata (multi-jobs disabled)")
        timefloor = 0
    else:
        # NB: this else-branch validates the range of the -C override as well,
        # not only the queuedata value
        _lower = 0
        _upper = 60*60*24 # one day
        if timefloor == 0:
            tolog("Timefloor set to zero in queuedata (multi-jobs disabled)")
        elif timefloor > _lower and timefloor <= _upper:
            tolog("Timefloor set to %d s" % (timefloor))
        else:
            tolog("Timefloor (%d s) out of limits (%d s, %d s) - multi-jobs disabled" % (timefloor, _lower, _upper))
            timefloor = 0
    return timefloor
def getCopysetup(mode="get"):
    """ Extract a verified copysetup[in] script path from queuedata.

    mode -- "get" uses copysetupin (falling back to copysetup when unset);
    anything else uses copysetup. Returns "" when no existing script path
    can be extracted.
    """
    copysetup_tmp = readpar('copysetup')
    if mode == "get":
        _copysetup = readpar('copysetupin')
        if _copysetup == "":
            # not set, use same copysetup for stage-in as for stage-out
            _copysetup = copysetup_tmp
    else:
        _copysetup = copysetup_tmp
    # copysetup can contain ^-signs for remote i/o sites
    if _copysetup.find('^') > -1:
        copysetup = _copysetup.split('^')[0]
    else:
        # NOTE(review): this falls back to the stage-OUT copysetup even when
        # mode == "get" and a plain (no '^') copysetupin was read above,
        # discarding the stage-in value -- looks suspicious; confirm whether
        # 'copysetup = _copysetup' was intended before changing it
        copysetup = copysetup_tmp
    # make sure that the script exists
    if copysetup == "":
        tolog("No copysetup found")
    elif not os.path.exists(copysetup) and '^' not in copysetup:
        tolog("!!WARNING!!2998!! copysetup does not exist: %s (reset to empty string)" % (copysetup))
        copysetup = ""
    elif '^' in copysetup:
        tolog("No path in copysetup (%s, reset to empty string)" % (copysetup))
        copysetup = ""
    else:
        tolog("Extracted copysetup: %s" % (copysetup))
    return copysetup
def verifyLFNLength(outputFiles):
    """ Make sure all LFNs are within the allowed length (255 characters).

    Returns (ec, pilotErrorDiag): ec is ERR_LFNTOOLONG (with a diagnostic
    for the last offending file) when a too long LFN is found, 0 otherwise.
    """
    MAXFILENAMELENGTH = 255
    ec = 0
    pilotErrorDiag = ""
    error = PilotErrors()
    # check every output file name
    for fileName in outputFiles:
        if len(fileName) > MAXFILENAMELENGTH:
            pilotErrorDiag = "LFN too long (length: %d, must be less than %d characters): %s" % (len(fileName), MAXFILENAMELENGTH, fileName)
            tolog("!!WARNING!!2994!! %s" % (pilotErrorDiag))
            ec = error.ERR_LFNTOOLONG
        else:
            tolog("LFN length verified for file %s" % (fileName))
    return ec, pilotErrorDiag
def isLogfileCopied(workdir, jobId=None):
    """ Return True if the log file has been copied.

    The transfer is marked by a LOGFILECOPIED (or LOGFILECOPIED_<jobId>)
    flag file in the work directory.
    """
    if jobId:
        flag = workdir + '/LOGFILECOPIED_%s' % jobId
    else:
        flag = workdir + '/LOGFILECOPIED'
    return os.path.exists(flag)
def isLogfileRegistered(workdir):
    """ Return True if the log file has been registered.

    Registration is marked by a LOGFILEREGISTERED flag file in the work dir.
    """
    return os.path.exists(workdir + '/LOGFILEREGISTERED')
def updateJobState(job, site, workNode, recoveryAttempt=0):
    """ Update the job state file; return True on success.

    Serializes the current job/site/node state via the JobState object so
    that job recovery can pick it up later.
    """
    from JobState import JobState
    js = JobState()
    if not js.put(job, site, workNode, recoveryAttempt):
        tolog("!!WARNING!!1000!! Failed to update job state file")
        return False
    if recoveryAttempt > 0:
        tolog("Successfully updated job state file (recovery attempt number: %d)" % (recoveryAttempt))
    else:
        tolog("Successfully updated job state file at: %s" % (js.getCurrentFilename()))
    return True
def chdir(dir):
    """ Change the current working directory, logging where we end up """
    os.chdir(dir)
    current = os.getcwd()
    tolog("current dir: %s" % (current))
def processDBRelease(inputFiles, inFilesGuids, realDatasetsIn, dispatchDblock, dispatchDBlockToken, prodDBlockToken, workdir, jobPars):
    """ remove any DBRelease files from the input file list and send back instruction to move the created DBRelease file to job dir

    Returns updated copies of the six stage-in lists:
    (inputFiles, inFilesGuids, realDatasetsIn, dispatchDblock,
    dispatchDBlockToken, prodDBlockToken). If a DBRelease input file is
    found and a local skeleton DBRelease tarball can be created from the
    locally available DBRelease directory, the DBRelease entries are
    removed from all six lists so the file is not staged in.
    """
    # default to the unmodified lists (returned on any early exit)
    _inputFiles = inputFiles
    _inFilesGuids = inFilesGuids
    _realDatasetsIn = realDatasetsIn
    _dispatchDblock = dispatchDblock
    _dispatchDBlockToken = dispatchDBlockToken
    _prodDBlockToken = prodDBlockToken
    # are there any DBRelease files in the input file list?
    has_DBRelease_files = False
    from DBReleaseHandler import DBReleaseHandler
    dbh = DBReleaseHandler(workdir=workdir)
    # abort if no local DBRelease dir
    if dbh.getDBReleaseDir() == "":
        return _inputFiles, _inFilesGuids, _realDatasetsIn, _dispatchDblock, _dispatchDBlockToken, _prodDBlockToken
    for f in inputFiles:
        # if the DBRelease version can be extracted from the file name, then the file is a DBRelease file..
        if dbh.extractVersion(f):
            tolog("Found a DBRelease file: %s" % (f))
            has_DBRelease_files = True
            break
    if not has_DBRelease_files:
        tolog("No DBRelease files found in input file list")
    else:
        tolog("Found a DBRelease file in the input file list (will check local availability)")
        # get the DBRelease version
        # for testing: version = dbh.getDBReleaseVersion(jobPars=jobPars+" DBRelease-9.0.1.tar.gz")
        version = dbh.getDBReleaseVersion(jobPars=jobPars)
        # create the skeleton DBRelease tarball
        if dbh.createDBRelease(version, workdir):
            # update the input file list; list() copies guard the originals
            # against in-place modification by removeDBRelease
            _inputFiles, _inFilesGuids, _realDatasetsIn, _dispatchDblock, _dispatchDBlockToken, _prodDBlockToken = \
                         dbh.removeDBRelease(list(inputFiles), list(inFilesGuids), list(realDatasetsIn), list(dispatchDblock), list(dispatchDBlockToken), list(prodDBlockToken))
    return _inputFiles, _inFilesGuids, _realDatasetsIn, _dispatchDblock, _dispatchDBlockToken, _prodDBlockToken
def updateXMLWithEndpoints(xml, filenames, endpoints):
    """ Replace any 'tobeset' strings in the XML with final ddm endpoints.

    xml       -- metadata XML (string)
    filenames -- list of LFNs (strings)
    endpoints -- list of ddm endpoints, 1-to-1 mapped to filenames; a None
                 entry means the corresponding XML line is dropped
    Returns the updated XML string.
    """
    updated = []
    for line in xml.split('\n'):
        handled = False
        for index, lfn in enumerate(filenames):
            marker = lfn + '-ddmendpoint_tobeset'
            if marker in line:
                handled = True
                if endpoints[index] is not None:
                    updated.append(line.replace(marker, endpoints[index]))
                # a None endpoint: drop this line entirely
                break
        if not handled:
            updated.append(line)
    # fall back to the input when nothing survived
    return '\n'.join(updated) if updated else xml
def updateXMLWithSURLs(experiment, node_xml, workDir, jobId, jobrec, format=''):
    """ update the XML with the SURLs

    Replaces '<guid>-surltobeset' placeholders in the metadata XML with the
    actual SURLs read back from the SURL dictionary file in workDir.
    experiment -- experiment name, used to look up the registration attribute
    node_xml   -- metadata XML (string) containing the placeholders
    jobrec     -- job recovery flag; when True, unresolved placeholder lines
                  are kept so a later recovery attempt can fill them in
    format     -- 'NG' selects the NorduGrid-style <surl> placeholder pattern
    Returns the updated XML string ("" if the result is empty).
    """
    xml = ""
    # read back the SURL dictionary
    from SiteMover import SiteMover
    sitemover = SiteMover()
    surlDictionary = sitemover.getSURLDictionary(workDir, jobId)
    # get the experiment object
    thisExperiment = getExperiment(experiment)
    tolog("node_xml = %s" % (node_xml))
    node_xml_list = node_xml.split("\n")
    # loop over the xml and update where it is needed
    if surlDictionary != {}:
        if format == 'NG':
            re_tobeset = re.compile('\<surl\>([a-zA-Z0-9-]+)\-surltobeset')
        else:
            # the attribute name used for registration differs per experiment
            if thisExperiment:
                metadata_attr_name = thisExperiment.getAttrForRegistration()
            else:
                metadata_attr_name = "surl"
            re_tobeset = re.compile('\<metadata att\_name\=\"%s\" att\_value\=\"([a-zA-Z0-9-]+)\-surltobeset\"\/\>' % (metadata_attr_name))
        for line in node_xml_list:
            tobeset = re_tobeset.search(line)
            if tobeset:
                # extract the guid and surl
                guid = tobeset.group(1)
                # note: in case of an earlier transfer error, the corresponding guid will not be in the surl dictionary
                # since it is only written to the surl dictionary for successful transfers
                try:
                    surl = surlDictionary[guid]
                except Exception, e:
                    tolog("!!WARNING!!2996!! Encountered a missing guid in the surl dictionary - did the corresponding transfer fail? guid = %s, %s" % (guid, e))
                    # add the 'surltobeset' line when job recovery is used
                    if jobrec:
                        xml += line + "\n"
                else:
                    # replace the guid and the "surltobeset"-string with the surl
                    if surl and surl != "":
                        xml += line.replace(guid + "-surltobeset", surl) + "\n"
                    else:
                        tolog("!!WARNING!!2996!! Could not extract guid %s from xml line: %s" % (guid, line))
            # fail safe in case something went wrong above, remove the guid+surltobeset line
            elif "surltobeset" in line:
                tolog("Failed to remove surltobeset from line: %s" % (line))
            else:
                # ordinary line, keep as is
                xml += line + "\n"
    else:
        tolog("!!WARNING!!2997!! Encountered an empty SURL dictionary")
        # remove the metadata for the SURL since it cannot be updated
        # (unless job recovery will take care of it later)
        for line in node_xml_list:
            if not jobrec:
                if not "surltobeset" in line:
                    xml += line + "\n"
            else:
                xml += line + "\n"
    # a lone newline means nothing useful remained
    if xml == "\n":
        xml = ""
        tolog("Reset XML")
    return xml
def putMetadata(workdir, jobId, strXML):
    """ Write the job metadata XML to metadata-<jobId>.xml in workdir.

    Returns True on success, False if the file could not be opened.
    """
    status = False
    filename = os.path.join(workdir, "metadata-%s.xml" % (jobId))
    try:
        # bug fix: open() signals failure with IOError on Python 2, which is
        # not an OSError subclass, so catching OSError alone let it escape
        f = open(filename, "w")
    except (IOError, OSError) as e:
        tolog("!!WARNING!!1200!! Failed to open metadata file for writing: %s" % (e))
    else:
        try:
            f.write(strXML)
        finally:
            # make sure the handle is released even if the write fails
            f.close()
        status = True
    return status
def getMetadata(workdir, jobId, athena=False, altpath=""):
    """ Read the metadata XML from file and return it as a string.

    jobId   -- set in recovery mode; selects the metadata-<jobId>.xml name
    athena  -- when True, read the .PAYLOAD backup variant
    altpath -- explicit file path overriding the constructed one
    Returns None if the file is missing or cannot be opened.
    """
    strXML = None
    if altpath != "":
        fname = altpath
    else:
        suffix = ".PAYLOAD" if athena else ""
        # are we in recovery mode? then jobId is set
        if jobId:
            fname = os.path.join(workdir, "metadata-%s.xml%s" % (jobId, suffix))
        else:
            fname = os.path.join(workdir, "metadata.xml%s" % (suffix))
    tolog("Trying to read metadata from file: %s" % (fname))
    if not os.path.exists(fname):
        tolog("getMetadata: metadata does not seem to have been created (file %s does not exist)" % (fname))
        return strXML
    try:
        f = open(fname)
    except Exception as e:
        tolog("!!WARNING!!1000!! Can not open the file %s, %s" % (fname, e))
    else:
        strXML = "".join(f)
        f.close()
        if len(strXML) > 0:
            tolog("Found metadata")
        else:
            tolog("!!WARNING!!1000!! Empty metadata")
    return strXML
def makeJobReport(job, logExtracts, foundCoreDump, version, jobIds):
    """ Make the job summary error report. Use info from jobReport.pickle if available

    Logs a human-readable summary of the job outcome (pilot/trf error codes,
    recoverability, stderr presence, timing) and dumps any pilot/runjob
    stderr files. Purely informational: nothing is returned and the job
    object is not modified.
    """
    error = PilotErrors()
    # result layout: [0] job status, [1] trf exit code, [2] pilot error code
    perr = job.result[2]
    terr = job.result[1]
    # was this a multi-trf job? (job parameters separated by newlines)
    nJobs = job.jobPars.count("\n")
    if nJobs > 0:
        multi_trf = True
    else:
        multi_trf = False
    tolog("..Job report..................................................................................................")
    tolog(". Pilot version : %s" % (version))
    tolog(". Job id : %s" % (job.jobId))
    tolog(". Current job status : %s" % (job.result[0]))
    if multi_trf:
        tolog(". Trf job type : Multi-trf (%d jobs)" % (nJobs + 1))
    else:
        tolog(". Trf job type : Single trf job")
    try: # protect against sites that run older pilots that don't have the finalstate defined
        fs = job.finalstate
        if fs != "":
            tolog(". Final job state : %s" % (fs))
        else:
            tolog(". Final job state : (not set, job should have failed)")
    except:
        tolog("(not set - site should update pilot distribution)")
        fs = None
    if verifyTransfer(job.workdir, verbose=False):
        tolog(". All out files transferred : Yes")
    else:
        tolog(". All out files transferred : No")
    if perr != 0:
        tolog(". Pilot error code : %d, %s" % (perr, error.getPilotErrorDiag(perr)))
        # a recoverable error code on a non-failed job can be retried later
        if error.isRecoverableErrorCode(perr) and job.result[0] != "failed":
            tolog(". Job is recoverable : Yes")
        else:
            tolog(". Job is recoverable : No")
    else: # perr == 0
        tolog(". Pilot error code : %d, (no pilot error)" % (perr))
        if fs == "failed" or job.result[0] == "failed":
            tolog(". Job is recoverable : No")
    if job.pilotErrorDiag != None:
        lenPilotErrorDiag = len(job.pilotErrorDiag)
        if lenPilotErrorDiag > 250:
            tolog(". Length pilot error diag : %d (will be truncated to 250)" % (lenPilotErrorDiag))
        else:
            tolog(". Length pilot error diag : %d" % (lenPilotErrorDiag))
        if job.pilotErrorDiag != "":
            # only show the first l characters of the diagnostics here
            l = 100
            tolog(". Pilot error diag [%d:] : %s" % (l, headPilotErrorDiag(job.pilotErrorDiag, size=l)))
        else:
            tolog(". Pilot error diag : Empty")
    else:
        tolog(". Pilot error diag : None")
    fname = getPilotstderrFilename()
    if os.path.exists(fname):
        _size = os.path.getsize(fname)
        if _size > 0:
            tolog(". Pilot produced stderr : Yes (size: %d) see dump below" % (_size))
        else:
            tolog(". Pilot produced stderr : No")
    fname = "%s/runjob.stderr" % (job.workdir)
    if os.path.exists(fname):
        _size = os.path.getsize(fname)
        if _size > 0:
            tolog(". RunJob produced stderr : Yes (size: %d) see dump below" % (_size))
        else:
            tolog(". RunJob produced stderr : No")
    tolog(". Trf error code : %d" % (terr))
    # trf error should have been read from the jobInfo.xml or jobReport* files
    if terr != job.exeErrorCode:
        tolog(". Trf error code (2) : %d" % job.exeErrorCode)
        tolog(". Trf error diagnosis : %s" % job.exeErrorDiag)
    if (job.exeErrorCode != 0) and (job.result[1] != job.exeErrorCode):
        mismatch = "exeErrorCode = %d, transExitCode = %d" %\
                   (job.exeErrorCode, job.result[1])
        tolog(". Trf error code mismatch : %s" % mismatch)
    lenLogExtracts = len(logExtracts)
    if lenLogExtracts <= 2048:
        tolog(". Length log extracts : %d (preliminary)" % (lenLogExtracts))
    else:
        tolog(". Length log extracts : %d (will be truncated to 2048)" % (lenLogExtracts))
    # did the payload produce any stderr?
    if multi_trf:
        # one stderr file per sub-job: <name>_1.txt, <name>_2.txt, ...
        for _i in range(nJobs + 1):
            _stderr = job.stderr
            _stderr = _stderr.replace(".txt", "_%d.txt" % (_i + 1))
            filename = "%s/%s" % (job.workdir, _stderr)
            if os.path.exists(filename):
                if os.path.getsize(filename) > 0:
                    tolog(". Payload %d produced stderr : Yes (check %s)" % (_i + 1, _stderr))
                else:
                    tolog(". Payload %d produced stderr : No (empty %s)" % (_i + 1, _stderr))
            else:
                tolog(". Payload %d produced stderr: No (%s does not exist)" % (_i + 1, _stderr))
    else:
        filename = "%s/%s" % (job.workdir, job.stderr)
        if os.path.exists(filename):
            if os.path.getsize(filename) > 0:
                tolog(". Payload produced stderr : Yes (check %s)" % (job.stderr))
            else:
                tolog(". Payload produced stderr : No (empty %s)" % (job.stderr))
        else:
            tolog(". Payload produced stderr : No (%s does not exist)" % (job.stderr))
    if foundCoreDump:
        tolog(". Found core dump in workdir: Yes")
    else:
        tolog(". Found core dump in workdir: No")
    if len(jobIds) > 1:
        tolog(". Executed multi-jobs : %s" % str(jobIds))
    tolog(". Job was executed in dir : %s" % job.workdir)
    tolog(". Error report produced at : %s" % timeStamp())
    tolog("..Time report.................................................................................................")
    tolog(". CPU consumption time : %s %s" % (str(job.cpuConsumptionTime), job.cpuConsumptionUnit))
    tolog(". Payload execution time : %s s" % (str(job.timeExe)))
    tolog(". GetJob consumption time : %s s" % (str(job.timeGetJob)))
    tolog(". Stage-in consumption time : %s s" % (str(job.timeStageIn)))
    tolog(". Stage-out consumption time: %s s" % (str(job.timeStageOut)))
    tolog("..............................................................................................................")
    # dump the pilot stderr if it exists
    fname = getPilotstderrFilename()
    if os.path.exists(fname):
        if os.path.getsize(fname) > 0:
            tolog("\n//begin %s ///////////////////////////////////////////////////////////////////////////" % os.path.basename(fname))
            dumpFile(fname, topilotlog=False)
            tolog("\n//end %s /////////////////////////////////////////////////////////////////////////////" % os.path.basename(fname))
    # dump the wrapper (RunJob) stderr if it exists
    fname = "%s/runjob.stderr" % (job.workdir)
    if os.path.exists(fname):
        if os.path.getsize(fname) > 0:
            tolog("\n//begin %s ///////////////////////////////////////////////////////////////////////////" % os.path.basename(fname))
            dumpFile(fname, topilotlog=True)
            tolog("\n//end %s /////////////////////////////////////////////////////////////////////////////" % os.path.basename(fname))
    if job.result[0] == 'finished' or job.result[0] == 'holding':
        if job.result[0] == 'holding':
            tolog("Note that the following line is a message to the Panda monitor only")
        tolog("!!FINISHED!!0!!Job successfully completed")
def safe_call(func, *args):
    """ Call func(*args) inside a try-statement, logging any exception with a stack trace.

    Returns True if the call completed, False if it raised.
    """
    status = False
    try:
        func(*args)
    except Exception as e:
        # bug fix: the function and the exception were swapped in the message
        # (it previously read "Exception in function <exception>: <func>")
        tolog("!!WARNING!!1111!! Exception in function %s: %s" % (func, e))
        tolog("Stack trace:")
        import traceback
        exc, msg, tb = sys.exc_info()
        traceback.print_tb(tb)
    else:
        status = True
    return status
def getDispatcherErrorDiag(ec):
    """ Return the error diagnosis string for a dispatcher return code """
    # known dispatcher return codes
    known = {
        0: 'Success',
        10: 'Connection timed out',
        20: 'Dispatcher has no jobs',
        30: 'Failed',
        40: 'Non secure',
        50: 'Invalid token',
        60: 'Invalid role',
        255: 'EC_Failed',
    }
    pilotErrorDiag = known.get(ec, 'GETJOB encountered error %d' % (ec))
    # always logged as a warning, even for code 0
    tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
    return pilotErrorDiag
def getCopyprefixFromTo(copyprefix):
    """ Split copyprefix into its 'from' and 'to' parts.

    copyprefix has the form "pfrom^pto"; a missing part is replaced by
    the placeholder "dummy". A trailing '/' on pfrom is removed.
    """
    pfrom, pto = "", ""
    if copyprefix != "":
        separators = copyprefix.count("^")
        if separators == 1:
            pfrom, pto = copyprefix.split("^")
        elif copyprefix.startswith("^") or separators > 1:
            tolog("!!WARNING!!2988!! copyprefix has wrong format (not pfrom^pto): %s" % (copyprefix))
        else:
            pfrom = copyprefix
    if pfrom == "":
        pfrom = "dummy"
    elif pfrom.endswith('/'):
        pfrom = pfrom[:-1]
        tolog("Cut away trailing / from %s (see copyprefix[in])" % (pfrom))
    if pto == "":
        pto = "dummy"
    return pfrom, pto
def getCopyprefixLists(copyprefix):
    """ Return the copyprefix 'from' and 'to' parts as lists.

    Each part may itself be a comma-separated list of prefixes.
    """
    pfrom, pto = getCopyprefixFromTo(copyprefix)
    pfroms = pfrom.split(",") if "," in pfrom else [pfrom]
    ptos = pto.split(",") if "," in pto else [pto]
    return pfroms, ptos
def getCmtconfig(jobCmtconfig):
    """ Get the cmtconfig, preferring the job definition over schedconfig """
    # the job def should always contain the cmtconfig
    if jobCmtconfig not in ("", "None", "NULL"):
        cmtconfig = jobCmtconfig
        tolog("Will try to use cmtconfig: %s (from job definition)" % (cmtconfig))
    else:
        # fall back to the schedconfig value
        cmtconfig = readpar('cmtconfig')
        tolog("Will try to use cmtconfig: %s (from schedconfig DB)" % (cmtconfig))
    return cmtconfig
def getCmtconfigAlternatives(cmtconfig, swbase):
    """ Get a list of locally available cmtconfig's that can be used as
    alternatives to the primary cmtconfig in case it doesn't work.

    The primary cmtconfig is always first in the returned list.
    """
    alternatives = [cmtconfig]
    from glob import glob
    # candidate cmtconfig names must look like a-b-c-d (at least three '-')
    cmtconfig_pattern = re.compile('([A-Za-z0-9]+\-[A-Za-z0-9]+\-[A-Za-z0-9]+\-[A-Za-z0-9]+)')
    # hardcoded list of accepted platform prefixes, so that e.g.
    # 'tags-BNL-Subcluster-4-BNL-ATLAS' does not slip through
    allowed_prefixes = ('i686-', 'x86_64-')
    # scan all entries in the swbase dir
    for path in glob(os.path.join(swbase, '*')):
        name = os.path.basename(path)
        if name == cmtconfig or not cmtconfig_pattern.search(name):
            continue
        if name.startswith(allowed_prefixes):
            alternatives.append(name)
    return alternatives
def extractFilePaths(s):
    """ Extract file paths from a setup string containing 'source' commands.

    Example: "source /path/setup.sh;export X509_USER_PROXY=/path/x509_up;source aa"
    -> ['/path/setup.sh', 'aa']
    Environment variables exported in the same string are substituted into
    the extracted paths, e.g.
    "export A=/cvmfs; source $A/setup.sh --quiet;" -> ['/cvmfs/setup.sh']
    Returns None if there is nothing to extract.
    """
    if s == "" or "source " not in s:
        return None
    setup_paths = []
    s = s.replace(";;", ";")
    # first collect "source <path>;" occurrences (with a trailing ;), then
    # strip each match from the string so it is not picked up again below.
    # bug fix: the quantifier must be non-greedy - a greedy \S+ swallowed
    # everything up to the last ';' in a run of non-whitespace (e.g. the
    # whole "/path/setup.sh;export X=Y" chunk), contradicting the intent
    # shown in the example above
    pattern = re.compile(r"source (\S+?);")
    for found in re.findall(pattern, s):
        setup_paths.append(found)
        s = s.replace("source %s" % (found), "")
    # then collect any remaining "source <path>" occurrences (no trailing ;)
    pattern = re.compile(r"source (\S+)")
    for found in re.findall(pattern, s):
        setup_paths.append(found)
    # substitute exported, unevaluated environment variables into the paths
    pattern = re.compile(r"export (\S+)\=(\S+)")
    exports = re.findall(pattern, s)
    if exports != []:
        for i in range(len(setup_paths)):
            for match_pair in exports:
                try:
                    e, v = match_pair
                    v = v.replace(";", "").strip()
                    setup_paths[i] = setup_paths[i].replace("$" + e, v).replace("${" + e + "}", v)
                except Exception as e:
                    tolog("WARNNING: Error happened when extracting setup path: %s" % (e))
    return setup_paths
def verifySetupCommand(error, _setup_str):
    """ Make sure that the file(s) sourced by the setup command exist.

    error      -- PilotErrors instance (supplies ERR_NOSUCHFILE)
    _setup_str -- the setup command string to verify
    Returns (ec, pilotErrorDiag); ec is error.ERR_NOSUCHFILE if any
    sourced file is missing.
    """
    ec = 0
    pilotErrorDiag = ""
    # remove any '-signs
    _setup_str = _setup_str.replace("'", "")
    tolog("Will verify: %s" % (_setup_str))
    if _setup_str == "" or "source " not in _setup_str:
        tolog("Nothing to verify in setup: %s (either empty string or no source command)" % (_setup_str))
        return ec, pilotErrorDiag
    # extract the file paths from the source command(s) and verify that
    # each one exists; stop at the first missing file
    for setup_path in (extractFilePaths(_setup_str) or []):
        if os.path.exists(setup_path):
            tolog("File %s has been verified" % (setup_path))
        else:
            pilotErrorDiag = "No such file or directory: %s" % (setup_path)
            tolog('!!WARNING!!2991!! %s' % (pilotErrorDiag))
            ec = error.ERR_NOSUCHFILE
            break
    return ec, pilotErrorDiag
def getProperTimeout(paths):
    """ Return a proper time-out depending on if CVMFS is used or not.

    paths can contain several paths/commands; only the presence of the
    /cvmfs/ substring matters.
    """
    _timeout = 1000 if "/cvmfs/" in paths else 500
    # other special cases: certain factory queues always get the long timeout
    factory_queue = os.environ.get('FACTORYQUEUE')
    if factory_queue and "BNL_ATLAS_RCF" in factory_queue:
        _timeout = 1000
    return _timeout
def getPilotVersion(initdir):
    """ Load the pilot version string from the PILOTVERSION file in initdir.

    Returns the fallback "PICARD" if the file cannot be read or its
    contents do not match the expected "<NAME> <version>" format.
    """
    version = "PICARD"
    try:
        f = open(os.path.join(initdir, "PILOTVERSION"), "r")
    except Exception as e:
        print("!!WARNING!!0000!! Could not read pilot version from file: %s" % (e))
    else:
        try:
            _version = f.read().strip()
        finally:
            # bug fix: the file handle was previously leaked (never closed)
            f.close()
        # remove trailing \n if present
        if "\n" in _version:
            _version = _version.replace("\n", "")
        # trivial format check, e.g. "PICARD 65.0"
        pattern = re.compile(r"[A-Z]+ [A-Za-z0-9.]+")
        v = re.findall(pattern, _version)
        if v == []:
            # bug fix: report the rejected string (_version), not the default
            print("!!WARNING!!0000!! Not a valid version format: %s" % (_version))
        else:
            version = _version
    return version
# Necessary to initiate pilot version at this point, after the function declaration
# Note: this cannot be done in environment.py due to cyclic dependence of pUtil module
# Module-level side effect: read the PILOTVERSION file from the pilot init
# directory and cache the version string in the shared environment dict
env['version'] = getPilotVersion(env['pilot_initdir'])
def getExperiment(experiment):
    """ Return a reference to an experiment object for the given experiment name.

    Returns None if the factory could not produce the experiment class.
    """
    from ExperimentFactory import ExperimentFactory
    factory = ExperimentFactory()
    instance = None
    try:
        experimentClass = factory.newExperiment(experiment)
    except Exception as e:
        tolog("!!WARNING!!1114!! Experiment factory threw an exception: %s" % (e))
    else:
        # the factory returns a class object; instantiate it
        instance = experimentClass()
    return instance
def getSiteInformation(experiment):
    """ Return a reference to a site information object (None on failure).

    The SiteInformationFactory ensures that the returned object is a Singleton.
    Usage:
      _exp = getSiteInformation(readpar('experiment')) # or from pilot option
      if _exp:
          _exp.somemethod("Hello")
      else:
          tolog("!!WARNING!!1111!! Failed to instantiate experiment class")
    """
    from SiteInformationFactory import SiteInformationFactory
    factory = SiteInformationFactory()
    instance = None
    try:
        siteInformationClass = factory.newSiteInformation(experiment)
    except Exception as e:
        tolog("!!WARNING!!1114!! SiteInformation factory threw an exception: %s" % (e))
    else:
        # the factory returns a class object; instantiate it
        instance = siteInformationClass()
        tolog("getSiteInformation: got experiment=%s" % (instance.getExperiment()))
    return instance
def dumpPilotInfo(version, pilot_version_tag, pilotId, jobSchedulerId, pilot_initdir, tofile=True):
    """ Pilot info

    Log the pilot version, identifiers, python version, OS bitness and the
    init directory at startup. tofile controls whether the log lines are
    written to the pilot log file as well.
    """
    tolog("PanDA Pilot, version %s" % (version), tofile=tofile, essential=True)
    tolog("Version tag = %s" % (pilot_version_tag))
    tolog("PilotId = %s, jobSchedulerId = %s" % (str(pilotId), str(jobSchedulerId)), tofile=tofile)
    tolog("Current time: %s" % (timeStamp()), tofile=tofile)
    tolog("Run by Python %s" % (sys.version), tofile=tofile)
    tolog("%s bit OS" % (OSBitsCheck()), tofile=tofile)
    tolog("Pilot init dir: %s" % (pilot_initdir), tofile=tofile)
    if tofile:
        tolog("All output written to file: %s" % (getPilotlogFilename()))
    # report the account the pilot runs under
    tolog("Pilot executed by: %s" % (commands.getoutput("whoami")), tofile=tofile)
def removePattern(_string, _pattern):
    """ Remove the regexp pattern from the given string.

    All occurrences of the first matched substring are removed.
    """
    matches = re.findall(re.compile(r"%s" % (_pattern)), _string)
    if len(matches) > 0:
        matched_substring = matches[0]
        tolog("Found regexp string: %s" % (matched_substring))
        _string = _string.replace(matched_substring, "")
    return _string
def isPilotTCPServerAlive(server, port):
    """ Return True if the pilot TCP server accepts a connection on (server, port) """
    import socket
    alive = False
    try:
        # open the socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    except Exception as e:
        tolog("!!WARNING!!2911!! Caught a socket/connect exception: %s" % (e))
        return alive
    # try to communicate with the TCP server (10 s connect timeout)
    sock.settimeout(10)
    try:
        sock.connect((server, port))
        alive = True
    except Exception as e:
        tolog("!!WARNING!!1912!! Caught a socket/connect exception: %s" % (e))
    sock.settimeout(None)
    sock.close()
    return alive
def encode_string(_string):
    """ Encode a string using urlencode.

    The string is truncated to 1024 characters and url-encoded; the leading
    'x=' produced by urlencode is replaced by the '^!^' marker since the
    pilot TCP message decoder splits on =-signs (decode_string() reverses it).
    """
    try:
        # import fallback mirrors decode_string(): urllib moved in Python 3
        from urllib import urlencode
    except ImportError:
        from urllib.parse import urlencode
    # truncate the string in case it is too long
    _string = _string[:1024]
    # put the string to be encoded in a dictionary (urlencode works on pairs)
    pre = urlencode({"x": _string})
    # handle the =-sign (not possible to have since the decoder of the pilot TCP message does a splitting using =-signs)
    encoded_string = pre.replace('x=', '^!^')
    return encoded_string
def decode_string(encoded_string):
    """ Decode a string previously encoded with encode_string() """
    tolog("Decoding: %s" % (encoded_string))
    try:
        # on modern python, get the parse function from urlparse
        from urlparse import parse_qs
    except:
        # on ancient python, get the parse function from cgi
        from cgi import parse_qs
    # handle the =-sign (put back the one masked by encode_string)
    if '^!^' in encoded_string:
        encoded_string = encoded_string.replace('^!^', 'x=')
    decoded_string = ""
    try:
        decoded_dict = parse_qs(encoded_string)
    except Exception as e:
        tolog("!!WARNING!!1234!! Failed to parse URL encoded string: %s" % (encoded_string))
    else:
        if 'x' in decoded_dict:
            try:
                decoded_string = decoded_dict['x'][0]
            except Exception as e:
                tolog("!!WARNING!!1234!! Failed to decode URL encoded string: %s" % (encoded_string))
        else:
            tolog("Empty URL encoded string (Nothing to decode)")
    # get rid of useless info: a bare marker is just a TCP ping signal
    if decoded_string == "^!^":
        tolog("Resetting decoded string (TCP ping signal)")
        decoded_string = ""
    return decoded_string
def stripDQ2FromLFN(lfn):
    """ Remove any legacy __DQ2-<nr> part from an LFN.

    E.g. LFN = AOD.505307._000001.pool.root.9__DQ2-1315236060
    ->         AOD.505307._000001.pool.root.9
    """
    match = re.search("(\s*)\_\_DQ2\-[0-9]+", lfn)
    if match:
        try:
            legacy_part = match.group(0)
        except Exception as e:
            tolog("!!WARNING!!1112!! Failed to identify legacy __DQ2 substring: %s" % (e))
        else:
            lfn = lfn.replace(legacy_part, "")
    return lfn
def fastCleanup(workdir, pilot_initdir, rmwkdir):
    """ Cleanup the site workdir

    Removes the site work directory (unless rmwkdir is False), retrying a
    couple of times with a delay; on persistent failure it looks for NFS
    lock files and tries to kill the process holding them. Runs from the
    pilot init dir and flushes stdout/stderr before returning.
    """
    print "fastCleanup() called"
    # return to the pilot init dir, otherwise wrapper will not find curl.config
    chdir(pilot_initdir)
    # rmwkdir == None means "not specified" and is treated as remove
    if rmwkdir or rmwkdir == None:
        if os.path.exists(workdir):
            try:
                rc, rs = commands.getstatusoutput("rm -rf %s" % (workdir))
            except Exception, e:
                print "!!WARNING!!1999!! Could not remove site workdir: %s, %s" % (workdir, e)
            else:
                if rc == 0:
                    print "Removed site workdir: %s" % (workdir)
                else:
                    print "!!WARNING!!1999!! Could not remove site workdir: %s, %d, %s" % (workdir, rc, rs)
                    # retry a limited number of times with a long sleep between attempts
                    s = 3*60
                    max_attempts = 2
                    attempt = 0
                    while attempt < max_attempts:
                        print "Sleeping %d seconds before trying again (re-attempt %d/%d)" % (s, attempt+1, max_attempts)
                        time.sleep(s)
                        try:
                            rc, rs = commands.getstatusoutput("rm -rf %s" % (workdir))
                        except Exception, e:
                            print "!!WARNING!!1999!! Could not remove site workdir: %s, %s" % (workdir, e)
                        else:
                            if rc == 0:
                                print "Removed site workdir: %s" % (workdir)
                            else:
                                print "!!WARNING!!1999!! Could not remove site workdir: %s, %d, %s" % (workdir, rc, rs)
                                # the removal may be blocked by stale NFS lock files;
                                # find them and try to kill the process holding them
                                dir_list = os.listdir(workdir)
                                print str(dir_list)
                                for f in dir_list:
                                    if ".nfs" in f:
                                        fname = os.path.join(workdir, f)
                                        print "Found NFS lock file: %s" % (fname)
                                        cmd = "lsof %s" % (fname)
                                        print "Executing command: %s" % (cmd)
                                        out = commands.getoutput(cmd)
                                        print out
                                        # extract the pid of the holding shell from the lsof output
                                        pid = None
                                        pattern = re.compile('sh\s+([0-9]+)')
                                        lines = out.split('\n')
                                        for line in lines:
                                            _pid = pattern.search(line)
                                            if _pid:
                                                pid = _pid.group(1)
                                                break
                                        if pid:
                                            print "Attempting to kill pid=%s" % (pid)
                                            cmd = "kill -9 %s" % (pid)
                                            out = commands.getoutput(cmd)
                                            print out
                                # show the surviving processes of this user for debugging
                                cmd = 'ps -fwu %s' % (commands.getoutput("whoami"))
                                print "%s: %s" % (cmd + '\n', commands.getoutput(cmd))
                        attempt += 1
        else:
            print "Nothing to cleanup (site workdir does not exist: %s)" % (workdir)
    else:
        print "rmwkdir flag set to False - will not delete %s" % (workdir)
    # flush buffers
    sys.stdout.flush()
    sys.stderr.flush()
def getStdoutFilename(workdir, preliminary_stdout_filename):
    """ Return the proper stdout filename.

    Preference order: latest trf *.log file (prod jobs, more interesting than
    plain stdout), then a redirected tmp.stdout.* file (runGen/runAthena
    redirect stdout at some point), then the preliminary name.
    """
    from FileHandling import findLatestTRFLogFile
    filename = findLatestTRFLogFile(workdir)
    if filename == "":
        # fall back to old method identifying the stdout file name
        from glob import glob
        candidate_path = os.path.join(os.path.join(workdir, "workDir"), "tmp.stdout.*")
        tolog("path=%s"%(candidate_path))
        candidates = glob(candidate_path)
        if candidates:
            # there should only be one path
            tolog("Found redirected stdout: %s" % str(candidates))
            filename = candidates[0]
        else:
            filename = preliminary_stdout_filename
    tolog("Using stdout filename: %s" % (filename))
    return filename
def getStdoutDictionary(jobDic):
    """ Create a dictionary with the tails of all running payloads

    Keys: jobId -> tail of the payload stdout plus a "[<line count>]"
    suffix (empty string when the job is not in debug mode), and
    'path-<jobId>' -> the path to the stdout file (for the text indexer).
    """
    stdout_dictionary = {}
    number_of_lines = 20 # tail -20 filename
    # loop over all parallel jobs
    # (after multitasking was removed from the pilot, there is actually only one job)
    for k in jobDic.keys():
        jobId = jobDic[k][1].jobId
        # abort if not debug mode, but save an empty entry in the dictionary
        if not jobDic[k][1].debug:
            stdout_dictionary[jobId] = ""
            continue
        # is this a multi-trf job?
        nJobs = jobDic[k][1].jobPars.count("\n") + 1
        for _i in range(nJobs):
            _stdout = jobDic[k][1].stdout
            if nJobs > 1:
                # each sub-job writes to its own numbered stdout file
                _stdout = _stdout.replace(".txt", "_%d.txt" % (_i + 1))
            #filename = "%s/%s" % (jobDic[k][1].workdir, _stdout)
            # _stdout is the preliminary filename, but can be different, e.g. runGen/runAthena redirects stdout
            filename = getStdoutFilename(jobDic[k][1].workdir, _stdout)
            if os.path.exists(filename):
                try:
                    # get the tail
                    cmd = "tail -%d %s" % (number_of_lines, filename)
                    tolog("Executing command: %s" % (cmd))
                    stdout = commands.getoutput(cmd)
                except Exception, e:
                    tolog("!!WARNING!!1999!! Tail command threw an exception: %s" % (e))
                    stdout_dictionary[jobId] = "(no stdout, caught exception: %s)" % (e)
                else:
                    if stdout == "":
                        tolog("!!WARNING!!1999!! Tail revealed empty stdout for file %s" % (filename))
                        stdout_dictionary[jobId] = "(no stdout)"
                    else:
                        # add to tail dictionary
                        stdout_dictionary[jobId] = stdout
                    # also keep track of the path to the stdout so we can send it to a text indexer if required
                    index = "path-%s" % (jobId)
                    stdout_dictionary[index] = filename
                    tolog("Stored path=%s at index %s" % (stdout_dictionary[index], index))
                    # add the number of lines (later this should always be sent)
                    pattern = re.compile(r"(\d+) [\S+]")
                    cmd = "wc -l %s" % (filename)
                    try:
                        _nlines = commands.getoutput(cmd)
                    except Exception, e:
                        pilotErrorDiag = "wc command threw an exception: %s" % (e)
                        tolog("!!WARNING!!1999!! %s" % (pilotErrorDiag))
                        nlines = pilotErrorDiag
                    else:
                        try:
                            # first token of the wc output is the line count
                            nlines = re.findall(pattern, _nlines)[0]
                        except Exception, e:
                            pilotErrorDiag = "re.findall threw an exception: %s" % (e)
                            tolog("!!WARNING!!1999!! %s" % (pilotErrorDiag))
                            nlines = pilotErrorDiag
                    stdout_dictionary[jobId] += "\n[%s]" % (nlines)
            else:
                tolog("(Skipping tail of payload stdout file (%s) since it has not been created yet)" % (os.path.basename(filename)))
                stdout_dictionary[jobId] = "(stdout not available yet)"
    tolog("Returning tail stdout dictionary with %d entries" % len(stdout_dictionary.keys()))
    return stdout_dictionary
def getStagingRetry(staging):
    """ Return the number of transfer attempts for stage-in or stage-out.

    staging -- "stage-in" or "stage-out". The queuedata value
    (stageinretry/stageoutretry) overrides the default from the environment;
    a non-numeric queuedata value is ignored with a warning.
    """
    if staging == "stage-in":
        override = readpar("stageinretry")
        stagingTries = env['stageinretry'] # default value (2)
    else:
        override = readpar("stageoutretry")
        stagingTries = env['stageoutretry'] # default value (2)
    if override != "":
        try:
            stagingTries = int(override)
        except Exception as e:
            # bug fix: the return variable was previously left unassigned on
            # this path, causing an UnboundLocalError at the return statement;
            # keep the default value instead
            tolog("!!WARNING!!1113!! Problematic %s retry number: %s, %s" % (staging, override, e))
    tolog("Updated %s retry number to %d" % (staging, stagingTries))
    return stagingTries
def handleQueuedata(_queuename, _pshttpurl, error, thisSite, _jobrec, _experiment, forceDownload = False, forceDevpilot = False):
    """ Handle the queuedata download and post-processing.

    Downloads (or re-uses) the queuedata for _queuename via the site
    information object, post-processes it, validates the copytool/file
    catalog configuration, refreshes the stage-in/out retry counts and the
    glexec flag in env, and verifies the software directory.

    Returns a tuple (ec, thisSite, _jobrec, hasQueuedata).
    """
    tolog("Processing queuedata")
    # get the site information object
    si = getSiteInformation(_experiment)
    # get the experiment object
    thisExperiment = getExperiment(_experiment)
    # (re-)download the queuedata
    ec, hasQueuedata = si.getQueuedata(_queuename, forceDownload=forceDownload, url=_pshttpurl)
    if ec != 0:
        return ec, thisSite, _jobrec, hasQueuedata
    # Get the new queuedata file from AGIS (unless it already exists)
    #try:
    #    s = si.getNewQueuedata(_queuename, overwrite=False)
    #except Exception, e:
    #    tolog("!!WARNING!!1212!! Exception caught: %s" % (e))
    if hasQueuedata:
        # update queuedata and thisSite if necessary
        ec, _thisSite, _jobrec = si.postProcessQueuedata(_queuename, _pshttpurl, thisSite, _jobrec, forceDevpilot)
        if ec != 0:
            return error.ERR_GENERALERROR, thisSite, _jobrec, hasQueuedata
        else:
            thisSite = _thisSite
        # should the server or the pilot do the LFC registration?
        if readpar("lfcregister") == "server":
            tolog("File registration will be done by server")
            # special check for storm sites
            _copytool = readpar('copytool')
            _copytoolin = readpar('copytoolin')
            if _copytool == "storm" and _copytoolin == "":
                _copytool = "lcgcp2"
                _copytoolin = "storm"
                tolog("!!WARNING!!1112!! Found schedconfig misconfiguration: Forcing queuedata update for storm site: copytool=%s, copytoolin=%s" % (_copytool, _copytoolin))
                ec = si.replaceQueuedataField("copytool", _copytool)
                ec = si.replaceQueuedataField("copytoolin", _copytoolin)
        else:
            # since lfcregister is not set, make sure that copytool is not set to lcgcp2
            # NOTE(review): "or" binds looser than "and", so the region != 'US'
            # check only restricts the "lcg-cp2" spelling - confirm intended
            if readpar("copytool") == "lcgcp2" or readpar("copytool") == "lcg-cp2" and readpar('region') != 'US':
                tolog("!!FAILED!!1111!! Found schedconfig misconfiguration: Site cannot use copytool=lcgcp2 without lfcregister=server")
                return error.ERR_GENERALERROR, thisSite, _jobrec, hasQueuedata
            # pilot-side file catalog registration is always an error
            tolog("File catalog registration no longer supported by pilot")
            return error.ERR_GENERALERROR, thisSite, _jobrec, hasQueuedata
    # should the number of stage-in/out retries be updated?
    env['stageinretry'] = getStagingRetry("stage-in")
    env['stageoutretry'] = getStagingRetry("stage-out")
    # does the application directory exist?
    ec = thisExperiment.verifySwbase(readpar('appdir'))
    if ec != 0:
        return ec, thisSite, _jobrec, hasQueuedata
    # update experiment for Nordugrid
    global experiment
    if os.environ.has_key('Nordugrid_pilot'):
        experiment = "Nordugrid-ATLAS"
    # reset site.appdir
    thisSite.appdir = readpar('appdir')
    # propagate the glexec mode from queuedata into the pilot environment
    if readpar('glexec') == "True":
        env['glexec'] = 'True'
    elif readpar('glexec') == "test":
        env['glexec'] = 'test'
    else:
        env['glexec'] = 'False'
    return ec, thisSite, _jobrec, hasQueuedata
def postJobTask(job, thisSite, thisNode, experiment, jr=False, ra=0, stdout_tail=None, stdout_path=None):
    """
    Update the Panda server with the output info (xml) and make/save the
    tarball of the job workdir, for finished or failed jobs only.
    jr = job recovery
    ra = recovery attempt
    """
    # all of the work is delegated to a freshly created JobLog instance
    from JobLog import JobLog
    JobLog().postJobTask(job, thisSite, experiment, thisNode, jr=jr, ra=ra, stdout_tail=stdout_tail, stdout_path=stdout_path)
def verifyRecoveryDir(recoveryDir):
    """
    Return recoveryDir if it actually exists on disk, otherwise an empty
    string. An empty input is passed through untouched.
    """
    if recoveryDir == "":
        return recoveryDir
    if os.path.exists(recoveryDir):
        tolog("Recovery directory exists: %s" % (recoveryDir))
        return recoveryDir
    tolog("!!WARNING!!1190!! Recovery directory does not exist: %s (will not be used)" % (recoveryDir))
    return ""
def removeTestFiles(job_state_files, mode="default"):
    """
    Temporary code for filtering test files or standard job state files.

    mode="default": return the normal jobState-&lt;jobId&gt;.pickle files
    mode="test": return only the jobState-&lt;jobId&gt;-test.pickle files
    """
    if mode == "default":
        return [f for f in job_state_files if "-test.pickle" not in f]
    return [f for f in job_state_files if "-test.pickle" in f]
def moveToExternal(workdir, recoveryDir):
    """
    Move job state file(s), and remaining log/work dir(s) to an external dir
    for later recovery; also updates each job state file with the new info.

    Returns True when every item was transferred and the state files were
    rewritten successfully, False otherwise (in which case the original work
    area should be left in place for a later recovery pilot).
    """
    from JobState import JobState
    status = True
    # make sure the recovery directory actually exists
    recoveryDir = verifyRecoveryDir(recoveryDir)
    if recoveryDir == "":
        tolog("!!WARNING!!1190!! verifyRecoveryDir failed")
        return False
    tolog("Using workdir: %s, recoveryDir: %s" % (workdir, recoveryDir))
    JS = JobState()
    # grab all job state files from the workdir
    from glob import glob
    job_state_files = glob("%s/jobState-*.pickle" % (workdir))
    # purge any test job state files (testing for new job rec algorithm)
    job_state_files = removeTestFiles(job_state_files, mode="default")
    _n = len(job_state_files)
    tolog("Number of found jobState files: %d" % (_n))
    if _n == 0:
        return False
    for job_state_file in job_state_files:
        # read back all job info in order to update it with the new recovery dir info
        if JS.get(job_state_file):
            # decode the job state info
            _job, _site, _node, _recoveryAttempt = JS.decode()
            _basenameSiteWorkdir = os.path.basename(_site.workdir)
            _basenameJobWorkdir = os.path.basename(_job.workdir)
            _basenameJobDatadir = os.path.basename(_job.datadir)
            siteworkdir = _site.workdir
            # create the site work dir on the external disk
            externalDir = "%s/%s" % (recoveryDir, _basenameSiteWorkdir)
            tolog("Using external dir: %s" % (externalDir))
            # does the external dir already exist? (e.g. when $HOME is used)
            if os.path.exists(externalDir):
                tolog("External dir already exists")
            else:
                # group rw permission added as requested by LYON
                # ec, rv = commands.getstatusoutput("mkdir -m g+rw %s" % (externalDir))
                # 770 at the request of QMUL/Alessandra Forti?
                ec, rv = commands.getstatusoutput("mkdir -m 770 %s" % (externalDir))
                if ec != 0:
                    if rv.find("changing permissions") >= 0 and rv.find("Operation not permitted") >= 0:
                        tolog("!!WARNING!!1190!! Was not allowed to created recovery dir with g+rw")
                        # mkdir may have created the dir even though chmod failed
                        if os.path.exists(externalDir):
                            tolog("!!WARNING!!1190!! Recovery dir was nevertheless created: %s (will continue)" % (externalDir))
                        else:
                            tolog("!!WARNING!!1190!! Could not create dir on external disk: %s" % (rv))
                            return False
                    else:
                        tolog("!!WARNING!!1190!! Could not create dir on external disk: %s" % (rv))
                        return False
                else:
                    tolog("Successfully created external dir with g+rw")
            # full paths of all items that may need to be moved for this job
            logfile = os.path.join(_site.workdir, _job.logFile)
            logfile_copied = os.path.join(_site.workdir, "LOGFILECOPIED")
            logfile_registered = os.path.join(_site.workdir, "LOGFILEREGISTERED")
            metadatafile1 = "metadata-%s.xml" % (_job.jobId)
            metadatafile2 = "metadata-%s.xml.PAYLOAD" % (_job.jobId)
            from FileHandling import getExtension
            surlDictionary = os.path.join(_site.workdir, "surlDictionary-%s.%s" % (_job.jobId, getExtension()))
            moveDic = {"workdir" : _job.workdir, "datadir" : _job.datadir, "logfile" : logfile, "logfile_copied" : logfile_copied,
                       "logfile_registered" : logfile_registered, "metadata1" : metadatafile1,
                       "metadata2" : metadatafile2, "surlDictionary" : surlDictionary }
            tolog("Using moveDic: %s" % str(moveDic))
            failures = 0
            successes = 0
            for item in moveDic.keys():
                # does the item actually exists?
                # (e.g. the workdir should be tarred into the log and should not exist at this point)
                if os.path.exists(moveDic[item]):
                    # move the work dir to the external dir
                    ec, rv = commands.getstatusoutput("mv %s %s" % (moveDic[item], externalDir))
                    if ec != 0:
                        tolog("!!WARNING!!1190!! Could not move item (%s) to external dir (%s): %s" % (moveDic[item], externalDir, rv))
                        failures += 1
                    else:
                        tolog("Moved holding job item (%s) to external dir (%s)" % (moveDic[item], externalDir))
                        successes += 1
                        # set a new path for the item
                        if item == "workdir":
                            # probably the work dir has already been tarred
                            _job.workdir = os.path.join(recoveryDir, _basenameJobWorkdir)
                            tolog("Updated job workdir: %s" % (_job.workdir))
                        elif item == "datadir":
                            _job.datadir = os.path.join(externalDir, _basenameJobDatadir)
                            tolog("Updated job datadir: %s" % (_job.datadir))
                        else:
                            tolog("(Nothing to update in job state file for %s)" % (item))
                else:
                    # if the log is present, there will not be a workdir
                    tolog("Item does not exist: %s" % (moveDic[item]))
                    # set a new path for the item
                    if item == "workdir":
                        # probably the work dir has already been tarred
                        _job.workdir = os.path.join(recoveryDir, _basenameJobWorkdir)
                        tolog("Updated job workdir: %s" % (_job.workdir))
            # update the job state file with the new state information if any move above was successful
            if successes > 0:
                _site.workdir = externalDir
                tolog("Updated site workdir: %s" % (_site.workdir))
                _retjs = updateJobState(_job, _site, _node)
                if not _retjs:
                    tolog("!!WARNING!!1190!! Could not create job state file in external dir: %s" % (externalDir))
                    tolog("!!WARNING!!1190!! updateJobState failed at critical stage")
                    failures += 1
                else:
                    tolog("Created a new job state file in external dir")
                    # remove the LOCKFILE since it can disturb any future recovery
                    if os.path.exists("%s/LOCKFILE" % (siteworkdir)):
                        ec, rv = commands.getstatusoutput("rm %s/LOCKFILE" % (siteworkdir))
                        if ec != 0:
                            tolog("!!WARNING!!1190!! Could not remove LOCKFILE - can disturb future recovery")
                        else:
                            tolog("Removed LOCKFILE from work dir: %s" % (siteworkdir))
            if failures > 0:
                tolog("!!WARNING!!1190!! Since at least one move to the external disk failed, the original work area")
                tolog("!!WARNING!!1190!! will not be removed and should be picked up by a later pilot doing the recovery")
                status = False
            else:
                tolog("All files were successfully transferred to the external recovery area")
        else:
            tolog("!!WARNING!!1190!! Could not open job state file: %s" % (job_state_file))
            status = False
    return status
def cleanup(wd, initdir, wrflag, rmwkdir):
    """ Overall cleanup function, called when the pilot is about to exit.

    wd      -- object providing collectZombieJob() (zombie process collection)
    initdir -- the pilot launch directory
    wrflag  -- True when running under a wrapper that expects a return code
    rmwkdir -- None (default policy: remove unless a LOCKFILE is present),
               True (always remove the workdir) or False (never remove it)
    """
    tolog("Overall cleanup function is called")
    # collect any zombie processes
    wd.collectZombieJob(tn=10)
    tolog("Collected zombie processes")
    # get the current work dir
    wkdir = readStringFromFile(os.path.join(initdir, "CURRENT_SITEWORKDIR"))
    # is there an exit code?
    ec = readCodeFromFile(os.path.join(wkdir, "EXITCODE"))
    # is there a process id
    pid = readCodeFromFile(os.path.join(wkdir, "PROCESSID"))
    if pid != 0:
        tolog("Found process id %d in PROCESSID file, will now attempt to kill all of its subprocesses" % (pid))
        killProcesses(pid, os.getpgrp())
    if rmwkdir == None or rmwkdir == False:
        # in case of multi-jobs, the workdir will already be deleted
        if os.path.exists(wkdir):
            # does the job work dir contain a lock file for this job?
            if os.path.exists("%s/LOCKFILE" % (wkdir)):
                tolog("Lock file found: will not delete %s!" % (wkdir))
                lockfile = True
                try:
                    os.system("chmod -R g+w %s" % (initdir))
                except Exception, e:
                    tolog("Failed to chmod pilot init dir: %s" % e)
                    pass
                else:
                    tolog("Successfully changed permission on pilot init dir (for later pilots that may be run by different users)")
            else:
                lockfile = False
            # remove the work dir only when there are no job state files
            if not lockfile and rmwkdir == None:
                tolog("Attempting to remove the pilot workdir %s now!" % (wkdir))
                try:
                    chdir(initdir)
                    os.system("rm -rf %s" % (wkdir))
                except Exception, e:
                    tolog("!!WARNING!!1000!! Failed to remove pilot workdir: %s" % e)
                else:
                    setPilotlogFilename("%s/pilotlog-last.txt" % (initdir))
            else:
                if lockfile:
                    # check if the workdir+job state file should be moved to an external directory
                    # check queuedata for external recovery directory
                    recoveryDir = ""
                    try:
                        recoveryDir = readpar('recoverdir')
                    except:
                        pass
                    else:
                        if recoveryDir != "":
                            if not moveToExternal(wkdir, recoveryDir):
                                tolog("Will not cleanup work area since move to external area at least partially failed")
                            else:
                                # cleanup work dir unless user do not want to
                                if rmwkdir == None:
                                    tolog("Removing the pilot workdir %s now! " % (wkdir))
                                    try:
                                        chdir("/")
                                        os.system("rm -rf %s" % (wkdir))
                                    except Exception, e:
                                        tolog("!!WARNING!!1000!! Failed to remove pilot workdir: %s" % e)
                            # open up the recovery area for pilots run by other users
                            try:
                                os.system("chmod -R g+w %s" % (recoveryDir))
                            except Exception, e:
                                tolog("Failed to chmod recovery dir: %s" % e)
                                pass
                            else:
                                tolog("Successfully changed permission on external recovery dir (for later pilots that may be run by different users)")
                if rmwkdir == False:
                    tolog("rmwkdir flag set to False - will not delete %s" % (wkdir))
        else:
            tolog("Work dir already deleted by multi-job loop: %s" % (wkdir))
    # always remove the workdir if the rmwkdir was set at the pilot launch
    elif rmwkdir:
        if os.path.exists(wkdir):
            tolog("Removing the pilot workdir %s now! " % (wkdir))
            try:
                chdir("/")
                os.system("rm -rf %s" % (wkdir))
            except Exception,e:
                tolog("!!WARNING!!1000!! Failed to remove pilot workdir: %s" % e)
        else:
            tolog("Work dir already deleted by multi-job loop: %s" % (wkdir))
    else:
        tolog("rmwkdir flag set to False - will not delete %s" % (wkdir))
    tolog("Pilot cleanup has finished")
    # wait for the stdout to catch up (otherwise the full log is cut off in the batch stdout dump)
    time.sleep(10)
    # return exit code to wrapper (or caller, runMain())
    if wrflag:
        tolog("Done, returning %d to wrapper" % (ec))
        # flush buffers
        sys.stdout.flush()
        sys.stderr.flush()
        return shellExitCode(ec)
    else:
        tolog("Done, using system exit to quit")
        # flush buffers
        sys.stdout.flush()
        sys.stderr.flush()
        os._exit(0) # need to call this to clean up the socket, thread etc resources
def shellExitCode(exitCode):
    """ Translate the pilot exit code to a proper exit code for the shell.

    Unknown non-zero pilot codes map to 1; zero maps to 0.
    """
    # get error handler
    error = PilotErrors()
    # Error code translation dictionary
    # FORMAT: { pilot_error_code : [ shell_error_code, meaning ], .. }
    # Restricting user (pilot) exit codes to the range 64 - 113, as suggested by http://tldp.org/LDP/abs/html/exitcodes.html
    # Using exit code 137 for kill signal error codes (this actually means a hard kill signal 9, (128+9), 128+2 would mean CTRL+C)
    error_code_translation_dictionary = {
        -1 : [64, "Site offline"],
        error.ERR_GENERALERROR : [65, "General pilot error, consult batch log"],
        error.ERR_MKDIRWORKDIR : [66, "Could not create directory"],
        error.ERR_NOSUCHFILE : [67, "No such file or directory"],
        error.ERR_NOVOMSPROXY : [68, "Voms proxy not valid"],
        error.ERR_NOLOCALSPACE : [69, "No space left on local disk"],
        error.ERR_PILOTEXC : [70, "Exception caught by pilot"],
        error.ERR_QUEUEDATA : [71, "Pilot could not download queuedata"],
        error.ERR_QUEUEDATANOTOK : [72, "Pilot found non-valid queuedata"],
        error.ERR_NOSOFTWAREDIR : [73, "Software directory does not exist"],
        error.ERR_KILLSIGNAL : [137, "General kill signal"], # Job terminated by unknown kill signal
        error.ERR_SIGTERM : [143, "Job killed by signal: SIGTERM"], # 128+15
        error.ERR_SIGQUIT : [131, "Job killed by signal: SIGQUIT"], # 128+3
        error.ERR_SIGSEGV : [139, "Job killed by signal: SIGSEGV"], # 128+11
        error.ERR_SIGXCPU : [158, "Job killed by signal: SIGXCPU"], # 128+30
        error.ERR_SIGUSR1 : [144, "Job killed by signal: SIGUSR1"], # 128+16
        error.ERR_SIGBUS : [138, "Job killed by signal: SIGBUS"] # 128+10
        }
    # use the membership operator instead of the deprecated dict.has_key()
    if exitCode in error_code_translation_dictionary:
        return error_code_translation_dictionary[exitCode][0] # Only return the shell exit code, not the error meaning
    elif exitCode != 0:
        tolog("!!WARNING!!1234!! No translation to shell exit code for error code %d" % (exitCode))
        return 1
    else:
        return 0
def updatePandaServer(job, xmlstr=None, spaceReport=False, log=None, ra=0, jr=False, stdout_tail="", stdout_path=""):
    """ Update the panda server with the latest job info """
    from PandaServerClient import PandaServerClient
    # build the client from the pilot-wide settings kept in the env dictionary
    client = PandaServerClient(pilot_version=env['version'],
                               pilot_version_tag=env['pilot_version_tag'],
                               pilot_initdir=env['pilot_initdir'],
                               jobSchedulerId=env['jobSchedulerId'],
                               pilotId=env['pilotId'],
                               updateServer=env['updateServerFlag'],
                               jobrec=env['jobrec'],
                               pshttpurl=env['pshttpurl'])
    # forward the job info to the server and return its result
    return client.updatePandaServer(job, env['thisSite'], env['workerNode'], env['psport'],
                                    xmlstr=xmlstr, spaceReport=spaceReport, log=log, ra=ra, jr=jr,
                                    useCoPilot=env['useCoPilot'], stdout_tail=stdout_tail, stdout_path=stdout_path)
def sig2exc(sig, frm):
    """ Signal handler: fail all unfinished jobs, notify the Panda server,
    create a KILLED lock file and abort the pilot via SystemError (which
    triggers the cleanup function). """
    error = PilotErrors()
    errorText = "!!FAILED!!1999!! [pilot] Signal %s is caught in pilot parent process!" % str(sig)
    tolog(errorText)
    ec = error.ERR_KILLSIGNAL
    # send to stderr
    print >> sys.stderr, errorText
    # here add the kill function to kill all the real jobs processes
    for k in env['jobDic'].keys():
        tmp = env['jobDic'][k][1].result[0]
        if tmp != "finished" and tmp != "failed" and tmp != "holding":
            # map the caught signal to the corresponding pilot error code
            if sig == signal.SIGTERM:
                ec = error.ERR_SIGTERM
            elif sig == signal.SIGQUIT:
                ec = error.ERR_SIGQUIT
            elif sig == signal.SIGSEGV:
                ec = error.ERR_SIGSEGV
            elif sig == signal.SIGXCPU:
                ec = error.ERR_SIGXCPU
            elif sig == signal.SIGBUS:
                ec = error.ERR_SIGBUS
            elif sig == signal.SIGUSR1:
                ec = error.ERR_SIGUSR1
            else:
                ec = error.ERR_KILLSIGNAL
            env['jobDic'][k][1].result[0] = "failed"
            env['jobDic'][k][1].subStatus = 'pilot_killed'
            env['jobDic'][k][1].currentState = env['jobDic'][k][1].result[0]
            # do not overwrite any previous error
            if env['jobDic'][k][1].result[2] == 0:
                env['jobDic'][k][1].result[2] = ec
            if not env['logTransferred']:
                env['jobDic'][k][1].pilotErrorDiag = "Job killed by signal %s: Signal handler has set job result to FAILED, ec = %d" %\
                                                     (str(sig), ec)
                logMsg = "!!FAILED!!1999!! %s\n%s" % (env['jobDic'][k][1].pilotErrorDiag, env['version'])
                tolog(logMsg)
                ret, retNode = updatePandaServer(env['jobDic'][k][1], log = logMsg)
                if ret == 0:
                    tolog("Successfully updated panda server at %s" % timeStamp())
            else:
                # log should have been transferred
                env['jobDic'][k][1].pilotErrorDiag = "Job killed by signal %s: Signal handler has set job result to FAILED, ec = %d" %\
                                                     (str(sig), ec)
                logMsg = "!!FAILED!!1999!! %s\n%s" % (env['jobDic'][k][1].pilotErrorDiag, env['version'])
                tolog(logMsg)
            killProcesses(env['jobDic'][k][0], env['jobDic'][k][1].pgrp)
    # most of the time there is not enough time to build the log
    # postJobTask(env['jobDic'][k][1], globalSite, globalWorkNode, env['experiment'], jr=False)
    # touch a KILLED file which will be seen by the multi-job loop, to prevent further jobs from being started
    try:
        createLockFile(False, env['thisSite'].workdir, "KILLED")
        writeToFile(os.path.join(env['thisSite'].workdir, "EXITCODE"), str(ec))
    except Exception, e:
        tolog("!!WARNING!!2211!! Caught exception: %s" % (e))
    raise SystemError(sig) # this one will trigger the cleanup function to be called
def extractPattern(source, pattern):
    """ Extract the first occurrence of 'pattern' from 'source' using a
    regular expression; return an empty string when there is no match.

    Examples:
      surl = "srm://dcache-se-atlas.desy.de/pnfs/desy.de/atlas/dq2/atlasdatadisk/rucio/mc12_8TeV/3e/51/NTUP_SMWZ.01355318._000001.root.1"
      extractPattern(surl, r'\/rucio\/.+(\/[a-zA-Z0-9]{2}\/[a-zA-Z0-9]{2}\/)') -> "/3e/51/"
      extractPattern(surl, r'\/rucio\/(.+)\/[a-zA-Z0-9]{2}\/[a-zA-Z0-9]{2}\/') -> "mc12_8TeV"
    """
    matches = re.findall(re.compile(pattern), source)
    if matches:
        return matches[0]
    return ""
def getEventService(experiment):
""" Return a reference to an EventService class """
# The EventServiceFactory ensures that the returned object is a Singleton
# Usage:
# _exp = getEventService(readpar('experiment')) # or from pilot option
# if _exp:
# _exp.somemethod("Hello")
# else:
# tolog("!!WARNING!!1111!! Failed to instantiate EventService class")
from EventServiceFactory import EventServiceFactory
factory = EventServiceFactory()
_exp = None
try:
eventServiceClass = factory.newEventService(experiment)
except Exception, e:
tolog("!!WARNING!!1114!! EventService factory threw an exception: %s" % (e))
else:
_exp = eventServiceClass()
return _exp
def isValidGUID(guid):
    """ Verify that guid follows the uuidgen hex-digit grouping format. """
    pattern = "[A-F0-9]{8}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{12}"
    # the search is done on the upper-cased string, so case does not matter
    if re.search(pattern, guid.upper()):
        return True
    tolog("!!WARNING!!2333!! GUID=\'%s\' does not follow pattern \'%s\'" % (guid, pattern))
    return False
def getGUID():
    """ Return a GUID generated with uuidgen, or "" on failure. """
    guid = commands.getoutput('uuidgen 2> /dev/null')
    # Make sure that there was no problem piping to dev null, ie that the GUID is proper
    if isValidGUID(guid):
        return guid
    tolog("Trying uuidgen without pipe to /dev/null")
    guid = commands.getoutput('uuidgen')
    if isValidGUID(guid):
        return guid
    tolog("!!WARNING!!2233!! Failed to generate GUID")
    return ""
def extractHPCInfo(infoStr):
    """ Extract the HPC name from the info string.

    Returns (isHPCSite, HPC_name):
      "blabla HPC_Titan" -> (True, "Titan")
      "blabla bla"       -> (False, None)
    The HPC name is capitalized (titan -> Titan).
    """
    m = re.search('HPC\_([A-Za-z0-9]+)', infoStr)
    if m:
        return True, m.group(1).capitalize()
    return False, None
def getInitialDirs(path, n):
    """ Get the initial n sub directories in a given absolute path.

    E.g. path = "/cvmfs/atlas-nightlies.cern.ch/repo/sw/nightlies", n = 3
    -> "/cvmfs/atlas-nightlies.cern.ch/repo"
    If the path has fewer than n components, it is returned unchanged.
    """
    subpath = ""
    if path[0] == "/":
        components = path.split("/")
        if n <= len(components):
            # bug fix: the previous index loop read components[n] and raised
            # IndexError at the boundary (e.g. n == 3 for "/a/b"); a slice is
            # tolerant and simply returns the whole path in that case
            subpath = os.path.join("/", *components[1:n + 1])
        else:
            subpath = path
    else:
        tolog("!!WARNING!!2211!! Not a path: %s" % (path))
    return subpath
def convert(data):
    """ Recursively convert unicode data to utf-8 str.

    Dictionary:
      convert({u'Max': {u'maxRSS': 3664}}) -> {'Max': {'maxRSS': 3664}}
    String:
      convert(u'hello') -> 'hello'
    List:
      convert([u'1', u'2', '3']) -> ['1', '2', '3']
    Non-iterable values are returned unchanged.
    """
    import collections
    if isinstance(data, basestring):
        return str(data)
    if isinstance(data, collections.Mapping):
        # convert both the keys and the values
        return dict((convert(key), convert(value)) for key, value in data.iteritems())
    if isinstance(data, collections.Iterable):
        # rebuild the container with the same type
        return type(data)(convert(item) for item in data)
    return data
def dumpFile(filename, topilotlog=False):
""" dump a given file to stdout or to pilotlog """
if os.path.exists(filename):
tolog("Dumping file: %s (size: %d)" % (filename, os.path.getsize(filename)))
try:
f = open(filename, "r")
except IOError, e:
tolog("!!WARNING!!4000!! Exception caught: %s" % (e))
else:
i = 0
for line in f.readlines():
i += 1
line = line.rstrip()
if topilotlog:
tolog("%s" % (line))
else:
print "%s" % (line)
f.close()
tolog("Dumped %d lines from file %s" % (i, filename))
else:
tolog("!!WARNING!!4000!! %s does not exist" % (filename))
def tryint(x):
    """ Best-effort conversion of x to int.

    Used by the numbered string comparison (to protect against unexpected
    letters in a version number): non-numeric parts are returned unchanged.
    """
    converted = x
    try:
        converted = int(x)
    except ValueError:
        pass
    return converted
def split_version(s):
    """ Split a version string into parts, converting numeric parts to
    integers wherever possible.

    Note: the capturing-group regex split keeps separators and boundary
    empty strings, e.g.
      split_version("1.2.3") == ('', 1, '.', 2, '.', 3, '')
      split_version("1.2.Nightly") == ('', 1, '.', 2, '.', 'Nightly', '')
    Can also be used as a sort key:
      sorted(['YT4.11', '4.3', 'YT4.2', '4.10'], key=split_version)
    """
    from re import split
    parts = []
    for token in split('([^.]+)', s):
        # numeric tokens compare numerically, everything else as text
        try:
            parts.append(int(token))
        except ValueError:
            parts.append(token)
    return tuple(parts)
def isAGreaterOrEqualToB(A, B):
    """ Is numbered string A >= B?

    "1.2.3" >= "1.2"   -- more digits
    "1.2.3" >= "1.2.2" -- rank based comparison
    "1.3.2" >= "1.2.3" -- rank based comparison
    "1.2.N" >= "1.2.2" -- nightlies checker, they are always greater
    """
    # rank-based tuple comparison of the split version parts
    version_a = split_version(A)
    version_b = split_version(B)
    return version_a >= version_b
def recursive_overwrite(src, dest, ignore=None):
    """ Recursively copy src over dest, overwriting existing files.

    ignore: optional callable(dir, names) returning the names to skip
    (same convention as shutil.copytree's ignore argument).
    """
    if not os.path.isdir(src):
        # plain file: overwrite the destination file
        shutil.copyfile(src, dest)
        return
    if not os.path.isdir(dest):
        os.makedirs(dest)
    entries = os.listdir(src)
    skip = ignore(src, entries) if ignore is not None else set()
    for entry in entries:
        if entry in skip:
            continue
        recursive_overwrite(os.path.join(src, entry),
                            os.path.join(dest, entry),
                            ignore)
def chunks(l, n):
    """
    Yield successive n-sized chunks from l.

    The last chunk is shorter when len(l) is not a multiple of n.
    Uses range() instead of the Python-2-only xrange() so the helper is
    portable; behavior is unchanged.
    """
    for i in range(0, len(l), n):
        yield l[i:i + n]
def merge_dictionaries(*dict_args):
    """
    Shallow-merge any number of dicts into a new dict.

    Precedence goes to key/value pairs in latter dicts; the inputs are
    left unmodified.
    """
    merged = {}
    for current in dict_args:
        merged.update(current)
    return merged
def getPooFilenameFromJobPars(jobPars):
    """ Extract the @poo filename from the job parameters.

    Returns the first whitespace-free token following an '@' in jobPars,
    or an empty string when there is none.
    """
    matches = re.findall(r"\@(\S+)", jobPars)
    return matches[0] if matches else ""
def updateInputFileWithTURLs(jobPars, LFN_to_TURL_dictionary):
""" Update the @poo input file list with TURLs """
status = False
# First try to get the @poo filename (which actually contains the full local path to the file)
filename = getPooFilenameFromJobPars(jobPars)
if filename != "":
if os.path.exists(filename):
try:
f = open(filename, "r")
except IOError, e:
tolog("!!WARNING!!2997!! Caught exception: %s" % (e))
else:
# Read file
lines = f.readlines()
f.close()
# Process file
turls = []
header = lines[0]
for lfn in lines:
# Note: the 'lfn' is actually a local file path, but will end with an \n
lfn = os.path.basename(lfn)
try:
# Try to get the corresponding dictionary entry (assume there is a corresponding entry in the actual TURL dictionary)
if lfn.endswith('\n'):
lfn = lfn[:-1]
turl = LFN_to_TURL_dictionary[lfn]
except:
pass
else:
turls.append(turl)
if turls != []:
# Overwrite the old file with updated TURL info
try:
f = open(filename, "w")
except IOError, e:
tolog("!!WARNING!!2997!! Caught exception: %s" % (e))
else:
# Write file
f.write(header)
for turl in turls:
f.write(turl + "\n")
# Process file
f.close()
status = True
else:
tolog("!!WARNING!!2998!! Failed to extract TURLs (empty TURL list)")
else:
tolog("!!WARNING!!2342!! File not found: %s" % (filename))
else:
tolog("!!WARNING!!2343!! Found no @input filename in jobPars: %s" % (jobPars))
| {
"content_hash": "2b27c156aa07dd8b8544447352e76daf",
"timestamp": "",
"source": "github",
"line_count": 4704,
"max_line_length": 225,
"avg_line_length": 37.37308673469388,
"alnum_prop": 0.5488188483700506,
"repo_name": "anisyonk/pilot",
"id": "676ad80c24f607a0765fb929f03e5c664a82a536",
"size": "175803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pUtil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4971061"
},
{
"name": "Shell",
"bytes": "23530"
}
],
"symlink_target": ""
} |
# Sphinx configuration for the uqbar "book" test documentation project.
master_doc = "index"
# autodoc plus uqbar's literate "book" extension
extensions = ["sphinx.ext.autodoc", "uqbar.sphinx.book"]
html_static_path = ["_static"]
# presumably executed before each console example block - confirm with uqbar docs
uqbar_book_console_setup = ["import supriya"]
uqbar_book_extensions = [
    "uqbar.book.extensions.GraphExtension",
    "supriya.ext.book.RenderExtension",
]
# NOTE(review): these look like strict-error and black-formatting toggles
# for the book builder - confirm against the uqbar documentation
uqbar_book_strict = True
uqbar_book_use_black = True
| {
"content_hash": "8ecd263ba6373cc5ceede741a4241b77",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 56,
"avg_line_length": 24.846153846153847,
"alnum_prop": 0.7120743034055728,
"repo_name": "Pulgama/supriya",
"id": "25b389c43b288b81bb100b053869061277dc9df7",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/roots/test-book/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2790612"
},
{
"name": "Shell",
"bytes": "569"
}
],
"symlink_target": ""
} |
from tools import lines
import unittest
import os.path
class TestLines(unittest.TestCase):
    """ Exercise the line readers in tools.lines with both file-backed and
    list-backed sources. """
    # sample corpus shipped with the modules package
    FILE = os.path.join(os.path.dirname(__file__), "../modules/resources/bag.txt")
    # number of lines to pull from each reader
    N = 100
    def test_LinesSeq(self):
        self.do_test(lines.LinesSeq)
    def test_LinesSeqRnd(self):
        self.do_test(lines.LinesSeqRnd)
    def do_test(self, reader_cls):
        # file-backed source: every produced line must be non-empty
        reader = reader_cls(self.FILE)
        for _ in range(self.N):
            self.assertTrue(len(next(reader)) > 0)
        # list-backed source, shorter than N lines
        reader = reader_cls(["line %d" % i for i in range(int(self.N / 10))])
        for _ in range(self.N):
            self.assertTrue(len(next(reader)) > 0)
| {
"content_hash": "cc25d17cdd5ec5efcd507ec804477d04",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 82,
"avg_line_length": 28.571428571428573,
"alnum_prop": 0.595,
"repo_name": "mamaddeveloper/telegrambot",
"id": "9c0b1d0bab015ecead4acc82e7611c2f826c5cf8",
"size": "600",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/testLines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91174"
},
{
"name": "Shell",
"bytes": "287"
}
],
"symlink_target": ""
} |
import logging
import urllib2
try:
from urllib2 import quote as urllib_quote
except ImportError:
from urllib import quote as urllib_quote
from reviewboard.diffviewer.parser import DiffParser, DiffParserError
from reviewboard.scmtools.git import GitDiffParser
from reviewboard.scmtools.core import \
FileNotFoundError, SCMTool, HEAD, PRE_CREATION, UNKNOWN
class HgTool(SCMTool):
    """ SCMTool implementation for Mercurial repositories. """
    name = "Mercurial"
    supports_authentication = True
    dependencies = {
        'modules': ['mercurial'],
    }
    def __init__(self, repository):
        SCMTool.__init__(self, repository)
        # http(s) repositories are served by hgweb; anything else is
        # accessed through a local mercurial client
        if repository.path.startswith('http'):
            self.client = HgWebClient(repository.path,
                                      repository.username,
                                      repository.password)
        else:
            self.client = HgClient(repository.path, repository.local_site)
        self.uses_atomic_revisions = True
        self.diff_uses_changeset_ids = True
    def get_file(self, path, revision=HEAD):
        return self.client.cat_file(path, str(revision))
    def parse_diff_revision(self, file_str, revision_str):
        # an empty revision takes precedence over the /dev/null check
        if not revision_str:
            revision = UNKNOWN
        elif file_str == "/dev/null":
            revision = PRE_CREATION
        else:
            revision = revision_str
        return file_str, revision
    def get_diffs_use_absolute_paths(self):
        return True
    def get_fields(self):
        return ['diff_path', 'parent_diff_path']
    def get_parser(self, data):
        # git-style diffs (e.g. from "hg export --git") use their own parser
        if data.lstrip().startswith('diff --git'):
            return GitDiffParser(data)
        return HgDiffParser(data)
class HgDiffParser(DiffParser):
"""
This class is able to extract Mercurial changeset ids, and
replaces /dev/null with a useful name
"""
newChangesetId = None
origChangesetId = None
isGitDiff = False
def parse_special_header(self, linenum, info):
diffLine = self.lines[linenum].split()
# git style diffs are supported as long as the node ID and parent ID
# are present in the patch header
if self.lines[linenum].startswith("# Node ID") and len(diffLine) == 4:
self.newChangesetId = diffLine[3]
elif self.lines[linenum].startswith("# Parent") and len(diffLine) == 3:
self.origChangesetId = diffLine[2]
elif self.lines[linenum].startswith("diff -r"):
# diff between two revisions are in the following form:
# "diff -r abcdef123456 -r 123456abcdef filename"
# diff between a revision and the working copy are like:
# "diff -r abcdef123456 filename"
self.isGitDiff = False
try:
# ordinary hg diffs don't record renames, so
# new file always == old file
isCommitted = len(diffLine) > 4 and diffLine[3] == '-r'
if isCommitted:
nameStartIndex = 5
info['newInfo'] = diffLine[4]
else:
nameStartIndex = 3
info['newInfo'] = "Uncommitted"
info['newFile'] = info['origFile'] = \
' '.join(diffLine[nameStartIndex:])
info['origInfo'] = diffLine[2]
info['origChangesetId'] = diffLine[2]
except ValueError:
raise DiffParserError("The diff file is missing revision "
"information", linenum)
linenum += 1;
elif self.lines[linenum].startswith("diff --git") and \
self.origChangesetId and diffLine[2].startswith("a/") and \
diffLine[3].startswith("b/"):
# diff is in the following form:
# "diff --git a/origfilename b/newfilename"
# possibly followed by:
# "{copy|rename} from origfilename"
# "{copy|rename} from newfilename"
self.isGitDiff = True
info['origInfo'] = info['origChangesetId' ] = self.origChangesetId
if not self.newChangesetId:
info['newInfo'] = "Uncommitted"
else:
info['newInfo'] = self.newChangesetId
info['origFile'] = diffLine[2][2:]
info['newFile'] = diffLine[3][2:]
linenum += 1
return linenum
def parse_diff_header(self, linenum, info):
if not self.isGitDiff:
if linenum <= len(self.lines) and \
self.lines[linenum].startswith("Binary file "):
info['binary'] = True
linenum += 1
if self._check_file_diff_start(linenum, info):
linenum += 2
else:
while linenum < len(self.lines):
if self._check_file_diff_start(linenum, info ):
self.isGitDiff = False
linenum += 2
return linenum
line = self.lines[linenum]
if line.startswith("Binary file") or \
line.startswith("GIT binary"):
info['binary'] = True
linenum += 1
elif line.startswith("copy") or \
line.startswith("rename") or \
line.startswith("new") or \
line.startswith("old") or \
line.startswith("deleted") or \
line.startswith("index"):
# Not interested, just pass over this one
linenum += 1
else:
break
return linenum
def _check_file_diff_start(self, linenum, info):
    """Return True if the lines at ``linenum`` open a per-file diff.

    A per-file diff starts with a "--- " line immediately followed by a
    "+++ " line. When the source side is /dev/null, the file is newly
    created and ``info['origInfo']`` is marked as PRE_CREATION.
    """
    has_file_pair = (
        linenum + 1 < len(self.lines) and
        self.lines[linenum].startswith('--- ') and
        self.lines[linenum + 1].startswith('+++ ')
    )

    if not has_file_pair:
        return False

    # A "--- /dev/null" source side means this diff creates the file.
    if self.lines[linenum].split()[1] == "/dev/null":
        info['origInfo'] = PRE_CREATION

    return True
class HgWebClient(object):
FULL_FILE_URL = '%(url)s/%(rawpath)s/%(revision)s/%(quoted_path)s'
def __init__(self, repoPath, username, password):
self.url = repoPath
self.username = username
self.password = password
logging.debug('Initialized HgWebClient with url=%r, username=%r',
self.url, self.username)
def cat_file(self, path, rev="tip"):
if rev == HEAD or rev == UNKNOWN:
rev = "tip"
elif rev == PRE_CREATION:
rev = ""
found = False
for rawpath in ["raw-file", "raw"]:
full_url = ''
try:
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, self.url, self.username,
self.password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
full_url = self.FULL_FILE_URL % {
'url': self.url.rstrip('/'),
'rawpath': rawpath,
'revision': rev,
'quoted_path': urllib_quote(path.lstrip('/')),
}
f = opener.open(full_url)
return f.read()
except urllib2.HTTPError, e:
if e.code != 404:
logging.error("%s: HTTP error code %d when fetching "
"file from %s: %s", self.__class__.__name__,
e.code, full_url, e)
except Exception:
logging.exception('%s: Non-HTTP error when fetching %r: ',
self.__class__.__name__, full_url)
if not found:
raise FileNotFoundError(path, rev, str(e))
def get_filenames(self, rev):
raise NotImplemented
class HgClient(object):
def __init__(self, repoPath, local_site):
    """Open the local Mercurial repository at ``repoPath``.

    ``local_site`` is embedded into the rbssh command line so SSH
    operations resolve against the right local site.
    """
    # Imported here rather than at module level, so merely importing this
    # module does not require Mercurial to be installed.
    from mercurial import hg, ui
    from mercurial.__version__ import version

    # NOTE(review): int() on each dotted component breaks on suffixed
    # versions such as "1.9rc0" -- confirm against the hg releases this
    # needs to support.
    version_parts = [int(x) for x in version.split(".")]

    # hg <= 1.2 took `interactive` as a ui() constructor argument; newer
    # versions expect it via setconfig().
    if version_parts[0] == 1 and version_parts[1] <= 2:
        hg_ui = ui.ui(interactive=False)
    else:
        hg_ui = ui.ui()
        hg_ui.setconfig('ui', 'interactive', 'off')

    # Check whether ssh is configured for mercurial. Assume that any
    # configured ssh is set up correctly for this repository.
    hg_ssh = hg_ui.config('ui', 'ssh')

    if not hg_ssh:
        logging.debug('Using rbssh for mercurial')
        hg_ui.setconfig('ui', 'ssh', 'rbssh --rb-local-site=%s'
                        % local_site)
    else:
        logging.debug('Found configured ssh for mercurial: %s' % hg_ssh)

    self.repo = hg.repository(hg_ui, path=repoPath)
def cat_file(self, path, rev="tip"):
if rev == HEAD:
rev = "tip"
elif rev == PRE_CREATION:
rev = ""
try:
return self.repo.changectx(rev).filectx(path).data()
except Exception, e:
# LookupError moves from repo to revlog in hg v0.9.4, so we
# catch the more general Exception to avoid the dependency.
raise FileNotFoundError(path, rev, str(e))
def get_filenames(self, rev):
    """Return the filenames associated with changeset ``rev``.

    Currently broken -- see the note below.
    """
    # NOTE(review): ".TODO" is not a valid changectx attribute -- this line
    # looks truncated or corrupted (upstream presumably returns the
    # changeset's file list, e.g. .files()). Restore before relying on it.
    return self.repo.changectx(rev).TODO
| {
"content_hash": "50fdd7642939ce060d19b72098952046",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 79,
"avg_line_length": 36.14942528735632,
"alnum_prop": 0.5319554848966613,
"repo_name": "chazy/reviewboard",
"id": "36da32197c3f9f3931e7966eba92b857960661ac",
"size": "9435",
"binary": false,
"copies": "1",
"ref": "refs/heads/gradeboard",
"path": "reviewboard/scmtools/hg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "685"
},
{
"name": "C#",
"bytes": "340"
},
{
"name": "CSS",
"bytes": "58852"
},
{
"name": "Java",
"bytes": "340"
},
{
"name": "JavaScript",
"bytes": "284566"
},
{
"name": "Objective-C",
"bytes": "288"
},
{
"name": "PHP",
"bytes": "225"
},
{
"name": "Perl",
"bytes": "103"
},
{
"name": "Python",
"bytes": "1553183"
},
{
"name": "Ruby",
"bytes": "172"
},
{
"name": "Shell",
"bytes": "829"
}
],
"symlink_target": ""
} |
"""
boards module models
"""
import logging
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
from django.utils.functional import cached_property
from boards.issues import BoardIssue
from boards.managers import BoardsQuerySet
from zenboard.utils import github_api, zenhub_api
# Module-level logger named after the module's import path.
logger = logging.getLogger(__name__)
class Board(models.Model):
    """
    Model representation of a Zenhub, read only board. It specifies how
    the ZenHub data should be filtered and who should be able to access it.
    """
    name = models.CharField(
        verbose_name='name',
        max_length=255,
    )
    slug = models.SlugField(
        verbose_name='slug',
        unique=True,
    )
    github_repository = models.CharField(
        verbose_name='GitHub repository',
        max_length=255,
        help_text="In format: ':owner/:repo'."
    )
    github_repository_id = models.CharField(
        verbose_name='GitHub repository ID',
        max_length=255,
        editable=False,  # This gets populated from 'github_repository' field
    )
    github_labels = models.CharField(
        verbose_name='GitHub labels name',
        max_length=255,
        blank=True,
        help_text="Comma separated list of GitHub labels that will be used "
                  "to filter issues. If none provided, all issues will be "
                  "visible.",
    )
    filter_sign = models.CharField(
        verbose_name='filter sign',
        max_length=16,
        blank=True,
        default='🐙',
        help_text="Issue description and comments will only be visible if "
                  "they contain this sign / string. If none provided, "
                  "everything will be shown.",
    )
    include_epics = models.BooleanField(
        verbose_name='include Epic issues',
        default=False,
    )
    show_closed_pipeline = models.BooleanField(
        verbose_name="show 'Closed' pipeline",
        default=True,
    )
    is_active = models.BooleanField(
        verbose_name='is active',
        default=True,
    )
    whitelisted_users = models.ManyToManyField(
        settings.AUTH_USER_MODEL, related_name='boards',
        verbose_name='whitelisted users',
        blank=True,
    )
    created = models.DateTimeField(
        verbose_name='created at',
        editable=False,
        auto_now_add=True,
    )
    modified = models.DateTimeField(
        verbose_name='modified at',
        editable=False,
        auto_now=True,
    )

    objects = BoardsQuerySet.as_manager()

    class Meta:
        verbose_name = "Board"
        verbose_name_plural = "Boards"
        get_latest_by = 'modified'
        ordering = ('-modified', '-created')

    @cached_property
    def gh_repo(self):
        """
        Helper method for getting GitHub repository client.

        :returns: GitHub repository client
        :rtype: github3.repos.Repository
        """
        owner, repo = self.github_repository.split('/')
        gh_repo = github_api.repository(owner, repo)

        return gh_repo

    def _get_filtered_issues(self):
        """
        Get uncached filtered list of board GitHub issues. We filter issues
        based on GitHub labels, so we have to first get the list of allowed
        issues numbers and then use that to filter data from ZenHub API.

        :returns: filtered GitHub issues, keyed by issue number
        :rtype: dict
        """
        gh_issues = self.gh_repo.iter_issues(
            labels=self.github_labels,
            state='all',
        )

        filtered_issues = dict()
        for gh_issue in gh_issues:
            filtered_issues[gh_issue.number] = {
                'number': gh_issue.number,
                'title': gh_issue.title,
                'state': gh_issue.state,
            }

        return filtered_issues

    def _get_pipelines(self):
        """
        Get uncached board pipelines data from ZenHub API.

        :returns: board pipeline list
        :rtype: list
        """
        pipelines = list()

        zenhub_board = zenhub_api.get_board(self.github_repository_id)
        filtered_issues = self.filtered_issues()

        # Zenhub doesn't track closed issues so we have to add them manually
        if self.show_closed_pipeline:
            closed_filtered_issues_numbers = [
                issue_number
                for issue_number, issue in filtered_issues.items()
                if issue['state'] == 'closed'
            ]

            zenhub_board.append({
                'name': 'Closed',
                # This is to mimic ZenHub API response format
                'issues': [
                    {'issue_number': issue_number}
                    for issue_number in closed_filtered_issues_numbers
                ],
            })

        # Iterate through pipelines and their issues, filter them and get
        # their title
        for pipeline in zenhub_board:
            pipeline_issues = list()

            for issue in pipeline['issues']:
                issue_number = issue['issue_number']

                # Drop issues that didn't match the board's label filter.
                if issue_number not in filtered_issues:
                    continue

                if not self.include_epics and issue.get('is_epic', False):
                    continue

                issue_data = filtered_issues[issue_number]
                issue_data['is_epic'] = issue.get('is_epic', False)
                issue_data['details_url'] = BoardIssue(
                    board=self, issue_number=issue_number,
                ).get_api_endpoint()

                pipeline_issues.append(issue_data)

            pipelines.append({
                'name': pipeline['name'],
                'issues': pipeline_issues
            })

        return pipelines

    def filtered_issues(self):
        """
        Get cached (if possible) filtered list of board GitHub issues.

        :returns: filtered GitHub issues
        :rtype: dict
        """
        return cache.get_or_set(
            key=self.get_cache_key('filtered_issues'),
            default=self._get_filtered_issues,
            timeout=settings.BOARDS_CACHE_TIMEOUT,
        )

    def pipelines(self):
        """
        Get cached (if possible) board pipelines data from ZenHub API.

        :returns: board pipelines
        :rtype: list
        """
        return cache.get_or_set(
            key=self.get_cache_key('pipelines'),
            default=self._get_pipelines,
            timeout=settings.BOARDS_CACHE_TIMEOUT,
        )

    def get_cache_key(self, resource):
        """
        Helper method for generating a resource cache key.

        :param resource: resource type
        :type resource: str
        :returns: current board data unique cache key
        :rtype: str
        """
        return '{app_label}.{object_name}:{pk}:{resource}'.format(
            app_label=self._meta.app_label,
            object_name=self._meta.object_name,
            pk=self.pk,
            resource=resource,
        )

    def invalidate_cache(self, resource='*', glob=True):
        """
        Helper method for invalidating all related cache.

        :param resource: path to resource that we want to invalidate
        :type resource: str
        :param glob: whether you can use glob syntax to match multiple keys
        :type glob: bool
        """
        if glob:
            # `delete_pattern` is a django-redis extension, not part of
            # Django's cache API.
            cache.delete_pattern(self.get_cache_key(resource))
        else:
            cache.delete(self.get_cache_key(resource))

    def __str__(self):
        return '{0.name} board (PK: {0.pk})'.format(self)

    def get_absolute_url(self):
        return reverse('boards:details', kwargs={'slug': self.slug})

    def clean(self):
        """
        Extend Django's `clean` method with additional validation.
        """
        # Make sure that provided GitHub repo is valid and accessible
        try:
            gh_repo = self.gh_repo
            if not gh_repo:
                raise ValueError
        except AttributeError:
            raise ValidationError(
                "GitHub client isn't configured properly."
            )
        except ValueError:
            raise ValidationError({
                'github_repository': "Inaccessible GitHub repository."
            })
        else:
            self.github_repository_id = gh_repo.id

        # Strip any leading and trailing whitespace just to be safe
        self.github_labels = self.github_labels.strip()

        return super().clean()

    def save(self, *args, **kwargs):
        """
        Extend Django's `save` method and create a GitHub webhook on creation.
        """
        if not self.pk:
            receiver_url = 'https://{domain}{webhook_endpoint}'.format(
                domain=Site.objects.get_current().domain,
                webhook_endpoint=reverse('webhooks:github'),
            )

            webhook_kwargs = {
                'name': 'web',
                'config': {
                    'url': receiver_url,
                    'content_type': 'json',
                },
                'events': ['issues', 'issue_comment'],
            }

            # Only include secret if it's set.
            # Bug fix: this previously wrote into `kwargs` (the arguments
            # destined for `Model.save()`), so the secret never reached
            # the GitHub webhook config.
            if settings.GITHUB_WEBHOOK_SECRET:
                webhook_kwargs['config']['secret'] = (
                    settings.GITHUB_WEBHOOK_SECRET
                )

            hook = self.gh_repo.create_hook(**webhook_kwargs)
            if hook:
                logger.info(
                    "GitHub webhook for {!r} with kwargs '{}' created".format(
                        self, webhook_kwargs,
                    )
                )
            else:
                logger.warning(
                    "GitHub webhook for {!r} wasn't created".format(self)
                )

        return super().save(*args, **kwargs)
| {
"content_hash": "e05226d96e0c973d32df216eda24a8ae",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 78,
"avg_line_length": 30.134556574923547,
"alnum_prop": 0.558859346458291,
"repo_name": "pawelad/zenboard",
"id": "69bb6b6d2f33b74dd2d2c922e3265947a38ad3b8",
"size": "9857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/boards/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1550"
},
{
"name": "HTML",
"bytes": "11694"
},
{
"name": "Python",
"bytes": "45362"
}
],
"symlink_target": ""
} |
from __future__ import print_function, absolute_import
import numpy as np
from keras.optimizers import Adam, SGD
from keras.models import Model, Sequential #, model_from_json
from keras.regularizers import l2
from keras.layers import Input, Dense, Dropout, Flatten, Activation, ELU
from keras.layers.normalization import BatchNormalization
from keras.layers import Convolution2D, MaxPooling2D, GlobalMaxPooling2D
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
import keras.callbacks
#import tensorflow as tf
import os
import matplotlib.pyplot as plt
import prepare_dataset as pd
#from keras.utils.generic_utils import get_from_module
#def spatialsoftmax(x):
# # Assume features is of size [N, H, W, C] (batch_size, height, width, channels).
# # Transpose it to [N, C, H, W], then reshape to [N * C, H * W] to compute softmax
# # jointly over the image dimensions.
# s = np.shape(x)
# x = K.reshape(K.transpose(x, [0, 3, 1, 2]), [s[0] * s[3], s[1] * s[2]])
# softmax = K.softmax(x)
# # Reshape and transpose back to original format.
# softmax = K.transpose(K.reshape(softmax, [s[0], s[3], s[1], s[2]]), [0, 2, 3, 1])
# return softmax
#
#def get(identifier):
# return get_from_module(identifier, globals(), 'activation function')
# Experiment bookkeeping: bump this for every run so logs, weights and plots
# from earlier runs aren't overwritten.
model_num = 5### remember to give new model number every iteration
use_mean = False        # mean-center images/targets before training (undone before plotting)
load_weights = False    # only referenced by the commented-out model-reload path below
#os.path.join(fig_dir, 'model_name' + fig_ext)
#files_dir =
logs_path = "/home/exx/Avinash/iMARLO/newdata/run%d/" % model_num
model_filename = 'newdata/model%d.json' % model_num
weights_filename = 'newdata/model%d.h5' % model_num
csvlog_filename = 'newdata/model%d.csv' % model_num
loss_image_filename = 'newdata/model%d_loss.png' % model_num
pred_image_filename = 'newdata/model%d_predict.png' % model_num
predsmall_image_filename = 'newdata/model%d_predictsmall.png' % model_num

## tensorboard --logdir /home/exx/Avinash/iMARLO/fullytrained/
tbCallBack = keras.callbacks.TensorBoard(log_dir=logs_path, histogram_freq=0, write_graph=True, write_images=True)
csvlog = keras.callbacks.CSVLogger(csvlog_filename, separator=',', append=False )
# Keep only the best validation-loss weights, checked every 2 epochs.
mdlchkpt = keras.callbacks.ModelCheckpoint(weights_filename, monitor='val_loss',
                                           save_best_only=True, save_weights_only=True, period=2)
# NOTE(review): `erlystp` is defined but never passed to model.fit() later in
# this script -- confirm whether early stopping was meant to be enabled.
erlystp = keras.callbacks.EarlyStopping(monitor='val_mean_absolute_error',min_delta=1e-4,patience=10)
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.66, patience=10, min_lr=1e-5)

batch_size = 16
nb_epoch = 100

np.random.seed(1337)  # for reproducibility
############################# Prepare Data #########################################
# input image dimensions
# Im: image array, cd: regression targets (shapes come from prepare_dataset).
Im, cd = pd.get_dataset(regression=True, pretrain = False, image_dir = '/data/new_sl_data')

# Normalize pixel values to [0, 1], crop index range 37:131 on axis 2 (per
# the inline comment, to make the image square), and arrange the axes to
# match the backend's image dimension ordering.
# NOTE(review): the 37:131 slice hits axis 2 in both branches, which is the
# row axis after the 'th' transpose but the column axis in the 'tf' layout --
# confirm both branches crop the intended spatial dimension.
if K.image_dim_ordering() == 'th':
    Im = Im/255
    Im = Im.transpose(0,3,1,2)
    Im = Im[:,:,37:131,:] #convert to a square image
    img_pgs, img_rows, img_cols = Im.shape[1], Im.shape[2], Im.shape[3]
    image_shape = (img_pgs, img_rows, img_cols)
else:
    Im = Im/255
    Im = Im[:,:,37:131,:] #convert to a square image
    img_rows, img_cols, img_pgs = Im.shape[1], Im.shape[2], Im.shape[3]
    image_shape = (img_rows, img_cols, img_pgs)

if use_mean:
    ### Subtract Means
    # Mean-center images and targets; the target mean is added back before
    # plotting at the end of the script.
    mean_Im = np.mean(Im,axis=0,keepdims=True)
    mean_cd = np.mean(cd,axis=0,keepdims=True)
    Im -= mean_Im
    cd -= mean_cd

# 75/25 train/test split, taken in dataset order (no shuffling here).
nb_samples = Im.shape[0]
nb_train, nb_test = int(0.75*nb_samples), int(0.25*nb_samples)

### Split Data into test and train
Im_train, Im_test = Im[0:nb_train], Im[nb_train:nb_train + nb_test]
cd_train, cd_test = cd[0:nb_train], cd[nb_train:nb_train + nb_test]

# "Small" subsets (first 75% of each split) used for faster train/eval runs.
nb_train_small, nb_test_small = int(0.75*nb_train), int(0.75*nb_test)
Im_train_small, Im_test_small = Im_train[0:nb_train_small], Im_test[0:nb_test_small]
cd_train_small, cd_test_small = cd_train[0:nb_train_small], cd_test[0:nb_test_small]

# Drop the full arrays; only the split views are needed from here on.
del Im, cd

print('Im_train shape:', Im_train.shape)
print(Im_train.shape[0], 'train samples')
print(Im_test.shape[0], 'test samples')
# Input tensor for a single camera image of shape `image_shape`.
image_ip = Input(shape=image_shape, name='image_input')

# All trainable layers share the same weight initializer.
randinit = 'glorot_normal'

######################## Load Novel CNN Layers #############################################
print('Loading New CNN model......')

##### DVP Architecture: a small all-convolutional trunk (valid padding, no
##### pooling) followed by two fully-connected layers and one linear unit,
##### since this is a regression model (MSE loss below).
x = Convolution2D(16, 5, 5, activation='relu', border_mode='valid', init=randinit, name='block1_conv1')(image_ip)
x = Convolution2D(32, 3, 3, activation='relu', border_mode='valid', init=randinit, name='block2_conv1')(x)
x = Convolution2D(32, 3, 3, activation='relu', border_mode='valid', init=randinit, name='block3_conv1')(x)
x = Convolution2D(32, 3, 3, activation='relu', border_mode='valid', init=randinit, name='block4_conv1')(x)
x = Flatten()(x)
x = Dense(128, activation='relu', init=randinit, name='fc1')(x)
x = Dense(128, activation='relu', init=randinit, name='fc2')(x)
Out = Dense(1, activation='linear', init=randinit, name='pred')(x)

model = Model(input=image_ip, output=Out)
model.summary()
raw_input("Press Enter to continue...")

adam = Adam(lr=5e-4)
# Bug fix: this previously passed optimizer='adam' (the string), which
# builds a default Adam and silently ignored the lr=5e-4 configured above.
model.compile(loss='mse', optimizer=adam, metrics=['mae'])

# Train on the reduced training set; ModelCheckpoint (mdlchkpt) persists the
# best validation-loss weights during the run.
history = model.fit(Im_train_small, cd_train_small, batch_size=batch_size,
                    nb_epoch=nb_epoch, validation_split=0.2, verbose=2,
                    callbacks=[tbCallBack, csvlog, reduce_lr, mdlchkpt])

# Evaluate on both the reduced and the full held-out test sets.
score_small = model.evaluate(Im_test_small, cd_test_small, verbose=0)
score = model.evaluate(Im_test, cd_test, verbose=0)
print('SmallTestData MSE:', score_small[0])
print('SmallTestData MAE:', score_small[1])
print('FullTestData MSE:', score[0])
print('FullTestData MAE', score[1])
###### Plots ###
# Figure 1: training vs. validation loss per epoch.
eps = history.epoch #np.arange(1,nb_epoch+1,1)
train_loss = np.array(history.history['loss'])
val_loss = np.array(history.history['val_loss'])
plt.figure(1)
plt.plot(eps,train_loss,'b-',eps,val_loss,'r-')
plt.ylabel('loss')
plt.xlabel('#epochs')
# NOTE(review): the axis window is hard-coded (55 epochs, loss <= 0.1) while
# nb_epoch is 100 -- confirm the intended plotting range.
plt.axis([0, 55, 0, 0.1])
plt.legend(['train', 'val'], loc='upper left')
plt.savefig(loss_image_filename)
print('Plotted Loss for training and validation data')

predict_small = model.predict(Im_test_small)

# Time a forward pass over the full test set.
import time
start_time = time.time()
predict = model.predict(Im_test)
print("Time taken is %s seconds " % (time.time() - start_time))
raw_input("Press Enter to continue...")

if use_mean:
    # adding mean
    # Undo the mean-centering applied during data preparation so the
    # scatter plots are in the original target units.
    predict+=mean_cd
    cd_test+=mean_cd
    predict_small+=mean_cd
    cd_test_small+=mean_cd

# Figure 2: predictions vs. ground truth on the full test set.
plt.figure(2)
plt.plot(predict, cd_test,'bo',linewidth=1)
plt.xlabel('Predictions')
plt.ylabel('ActualValue')
plt.title('Learning Evaluation')
plt.grid(True)
plt.savefig(pred_image_filename)
print('Plotted Predictions for test data')
plt.show()

# Figure 3: same scatter on the reduced test set.
plt.figure(3)
plt.plot(predict_small, cd_test_small,'bo',linewidth=1)
plt.xlabel('Predictions')
plt.ylabel('ActualValue')
plt.title('Learning Evaluation')
plt.grid(True)
plt.show()
# NOTE(review): savefig is called after plt.show() here (unlike figure 2);
# with some backends this can save an empty figure -- confirm the ordering.
plt.savefig(predsmall_image_filename)
print('Plotted Predictions for small test data')

# Persist the architecture as JSON; weights are written separately by the
# ModelCheckpoint callback during training.
model_json = model.to_json()
with open(model_filename, "w") as json_file:
    json_file.write(model_json)
print("Saved model to disk")
# serialize weights to HDF5
#model.save_weights(weights_filename)
#print("Saved weights to disk")
################ modified VGG16 Model ####################
#x = Convolution2D(32, 3, 3, activation='relu', name='block1_conv1')(image_ip)
#x = Convolution2D(32, 3, 3, activation='relu', name='block1_conv2')(x)
#x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
#x = Convolution2D(64, 3, 3, activation='relu', name='block2_conv1')(x)
#x = Convolution2D(64, 3, 3, activation='relu', name='block2_conv2')(x)
#x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
#x = Convolution2D(128, 3, 3, activation='relu', name='block3_conv1')(x)
#x = Convolution2D(128, 3, 3, activation='relu', name='block3_conv2')(x)
#x = Convolution2D(128, 3, 3, activation='relu', name='block3_conv3')(x)
#x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
#x = Convolution2D(256, 3, 3, activation='relu', name='block4_conv1')(x)
#x = Convolution2D(256, 3, 3, activation='relu', name='block4_conv2')(x)
#x = Convolution2D(256, 3, 3, activation='relu', name='block4_conv3')(x)
### This reduces to a 1-d array
#x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
## Block 5
#x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv1')(x)
#x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv2')(x)
#x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv3')(x)
#x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
#model = Model(image_ip,x)
########### modified DRVGG16 Model ########################
#x = Convolution2D(64, 3, 3, activation='relu', name='block1_conv1')(image_ip)
#x = Convolution2D(64, 3, 3, activation='relu', name='block1_conv2')(x)
#x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
#x = Convolution2D(128, 3, 3, activation='relu', name='block2_conv1')(x)
#x = Convolution2D(128, 3, 3, activation='relu', name='block2_conv2')(x)
#x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
#x = Convolution2D(256, 3, 3, activation='relu', name='block3_conv1')(x)
#x = Convolution2D(256, 3, 3, activation='relu', name='block3_conv2')(x)
#x = Convolution2D(256, 3, 3, activation='relu', name='block3_conv3')(x)
#x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
#x = Convolution2D(512, 3, 3, activation='relu', name='block4_conv1')(x)
#x = Convolution2D(512, 3, 3, activation='relu', name='block4_conv2')(x)
#x = Convolution2D(512, 3, 3, activation='relu', name='block4_conv3')(x)
#
### This reduces to a 1-d array
#
##x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
#
#x = Flatten()(x)
#x = Dense(256, activation='relu')(x)
#x = Dense(64, activation='relu')(x)
#Out = Dense(1,activation='relu')(x)
#
#model = Model(input=image_ip, output=Out)
#model.summary()
#model = Model(image_ip,x)
| {
"content_hash": "e6c47d0c6ecb38330bf37a5e5f8e10eb",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 175,
"avg_line_length": 38.764309764309765,
"alnum_prop": 0.6631633805263615,
"repo_name": "babraham123/deepdriving",
"id": "f06ba745c602c92e191cedf71958a0a26fde0f4f",
"size": "11513",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "convnets-keras-master/convnetskeras/train_model_reg2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HCL",
"bytes": "121"
},
{
"name": "Python",
"bytes": "278717"
},
{
"name": "Shell",
"bytes": "206"
}
],
"symlink_target": ""
} |
from typing import Dict
import pytest
# Sample script arguments as the platform supplies them: flag values arrive
# as strings and may differ in casing ('True' vs. 'true').
DEMISTO_ARGS: Dict = {"stripSubject": 'True',
                      "escapeColons": 'False',
                      "searchThisWeek": 'true',
                      "from": "my_test_mail@test.com",
                      "subject": "test",
                      "body": "this is a test"}

# Expected EWS query for the arguments above: From/Subject/Body clauses
# joined with AND, plus the Received clause contributed by searchThisWeek.
EXPECTED_RESULTS = 'From:"my_test_mail@test.com" AND Subject:"test" AND Body:"this is a test" AND Received:"this week"'
@pytest.mark.parametrize('args, expected_results', [
    (DEMISTO_ARGS, EXPECTED_RESULTS),
])
def test_build_ews_query(args, expected_results):
    """
    Given:
        - a demisto-style args dictionary.
    When:
        - invoking the build_ews_query helper of the BuildEWSQuery script.
    Then:
        - the rendered EWS query in the readable output matches the
          expected string.
    """
    from BuildEWSQuery import build_ews_query

    command_results = build_ews_query(args)

    assert command_results.readable_output == expected_results
| {
"content_hash": "6159bc8061ae816bf3a3936b4c851fff",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 119,
"avg_line_length": 29.9,
"alnum_prop": 0.5875139353400223,
"repo_name": "demisto/content",
"id": "aa8d702e7fb1b5e7943d05a77c368f68afab0f39",
"size": "897",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/EWS/Scripts/BuildEWSQuery/BuildEWSQuery_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47881712"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils import Choices
from model_utils.models import TimeStampedModel
class Vote(TimeStampedModel):
    """A single user's interest rating for a submission.

    Each (user, submission) pair may hold at most one vote, enforced by
    the ``unique_together`` constraint in ``Meta``.
    """

    # Rating scale, ordered from least to most interested.
    VALUES = Choices(
        (1, "not_interested", _("Not Interested")),
        (2, "maybe", _("Maybe")),
        (3, "want_to_see", _("Want to See")),
        (4, "must_see", _("Must See")),
    )

    value = models.IntegerField(_("vote"), choices=VALUES)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name="votes",
        on_delete=models.PROTECT,
        verbose_name=_("user"),
    )
    submission = models.ForeignKey(
        "submissions.Submission",
        related_name="votes",
        on_delete=models.CASCADE,
        verbose_name=_("submission"),
    )

    class Meta:
        verbose_name = _("Vote")
        verbose_name_plural = _("Votes")
        unique_together = ("user", "submission")

    def __str__(self):
        return "{0} voted {1} for Submission {2}".format(
            self.user, self.value, self.submission,
        )
| {
"content_hash": "a52ad584be41c646de2baad031594256",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 81,
"avg_line_length": 27.76923076923077,
"alnum_prop": 0.6048014773776547,
"repo_name": "patrick91/pycon",
"id": "70e613082d7a8e20260c04f14c19bdb174137918",
"size": "1083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/voting/models/vote.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1456"
},
{
"name": "Python",
"bytes": "13911"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.