text stringlengths 4 1.02M | meta dict |
|---|---|
from pykechain.exceptions import _DeprecationMixin
from pykechain.models import DatetimeProperty
class DatetimeProperty2(DatetimeProperty, _DeprecationMixin):
    """A virtual object representing a KE-chain datetime property.

    Deprecated alias kept for backwards compatibility: it behaves exactly
    like ``DatetimeProperty`` and mixes in ``_DeprecationMixin`` — which
    presumably warns callers to migrate to the new name (confirm in the
    mixin's implementation).
    """

    pass
| {
"content_hash": "8d3121c4d221b4791b66090714cc9faf",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 69,
"avg_line_length": 30.125,
"alnum_prop": 0.8049792531120332,
"repo_name": "KE-works/pykechain",
"id": "e7d01cf698ba8e16cedae5b292c73c0b246a3fed",
"size": "241",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pykechain/models/property2_datetime.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1370094"
},
{
"name": "Shell",
"bytes": "222"
}
],
"symlink_target": ""
} |
"""
Passenger WSGI file for IGG.
Ensures that a virtualenv exists and that the current interpreter is the one
inside this virtualenv. Also appends ``apps`` and ``src`` to the current python
path so that the igg and third-party modules can be imported.
Imports the Django WSGI application handler from ``igg.wsgi``.
"""
import sys
import os
PYTHON = os.path.join(os.getcwd(), 'env/bin/python')
# First, make sure the virtualenv python exists.
if not os.path.exists(PYTHON):
error = 'ERROR: VirtualEnv does not exist, see README!'
print >> sys.stderr , error
# Attempt to write to a file in case we can't see stderr
try:
from datetime import datetime
f = open('error.log', 'a')
f.write(str(datetime.now()) + ' - ' + error)
f.close()
except: # Fail silently
pass
sys.exit(1)
# Second, make sure we are being run by the virtualenv's python. If not, make
# it so. PYTHON is present twice so that the new python interpreter knows the
# actual executable path
if sys.executable != PYTHON:
os.execl(PYTHON, PYTHON, *sys.argv)
# Inject some paths
sys.path.insert(0, os.path.join(os.getcwd(), 'apps'))
sys.path.insert(0, os.path.join(os.getcwd(), 'src'))
from igg.wsgi import application
| {
"content_hash": "e5be6280474d379b2b6a7f8e84a5f1a4",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 30.5,
"alnum_prop": 0.7081967213114754,
"repo_name": "IndieGamesForGood/IGG-v2",
"id": "3269b23bce2fc7336a78ba62c8c5c5037730b31f",
"size": "1220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "passenger_wsgi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "53453"
},
{
"name": "Python",
"bytes": "40706"
},
{
"name": "Shell",
"bytes": "242"
}
],
"symlink_target": ""
} |
# Recipe modules this recipe depends on; resolved by the recipe engine.
DEPS = [
    'checkout',
    'env',
    'infra',
    'recipe_engine/file',
    'recipe_engine/path',
    'recipe_engine/properties',
    'recipe_engine/python',
    'recipe_engine/step',
    'run',
    'vars',
]

# Pinned Chrome-in-Docker image used to run the karma tests.
DOCKER_IMAGE = 'gcr.io/skia-public/gold-karma-chrome-tests:68.0.3440.106_v6'
# Path of the test script *inside* the container; /SRC is the bind-mounted
# checkout root (see the docker invocation in RunSteps).
INNER_KARMA_SCRIPT = '/SRC/skia/infra/canvaskit/test_canvaskit.sh'
def RunSteps(api):
  """Build-copy CanvasKit artifacts into place and run the karma tests in Docker."""
  api.vars.setup()
  checkout_root = api.checkout.default_checkout_root
  out_dir = api.vars.swarming_out_dir
  api.checkout.bot_update(checkout_root=checkout_root)

  # Make sure this exists, otherwise Docker will make it with root permissions.
  # Fix: 0o777 instead of the Python-2-only literal 0777, which is a syntax
  # error under Python 3; the inline script below already uses 0o-style
  # literals, so this also restores consistency.
  api.file.ensure_directory('mkdirs out_dir', out_dir, mode=0o777)

  # The karma script is configured to look in ./canvaskit/bin/ for
  # the test files to load, so we must copy them there (see Set up for docker).
  copy_dest = checkout_root.join('skia', 'modules', 'canvaskit',
                                 'canvaskit', 'bin')
  base_dir = api.vars.build_dir
  bundle_name = 'canvaskit.wasm'

  api.python.inline(
      name='Set up for docker',
      program='''import errno
import os
import shutil
import sys

copy_dest = sys.argv[1]
base_dir = sys.argv[2]
bundle_name = sys.argv[3]
out_dir = sys.argv[4]

# Clean out old binaries (if any)
try:
  shutil.rmtree(copy_dest)
except OSError as e:
  if e.errno != errno.ENOENT:
    raise

# Make folder
try:
  os.makedirs(copy_dest)
except OSError as e:
  if e.errno != errno.EEXIST:
    raise

# Copy binaries (canvaskit.js and canvaskit.wasm) to where the karma tests
# expect them ($SKIA_ROOT/modules/canvaskit/canvaskit/bin/)
dest = os.path.join(copy_dest, 'canvaskit.js')
shutil.copyfile(os.path.join(base_dir, 'canvaskit.js'), dest)
os.chmod(dest, 0o644) # important, otherwise non-privileged docker can't read.

if bundle_name:
  dest = os.path.join(copy_dest, bundle_name)
  shutil.copyfile(os.path.join(base_dir, bundle_name), dest)
  os.chmod(dest, 0o644) # important, otherwise non-privileged docker can't read.

# Prepare output folder, api.file.ensure_directory doesn't touch
# the permissions of the out directory if it already exists.
os.chmod(out_dir, 0o777) # important, otherwise non-privileged docker can't write.
''',
      args=[copy_dest, base_dir, bundle_name, out_dir],
      infra_step=True)

  # Mount the checkout at /SRC and the swarming output dir at /OUT.
  cmd = ['docker', 'run', '--shm-size=2gb', '--rm',
         '--volume', '%s:/SRC' % checkout_root,
         '--volume', '%s:/OUT' % out_dir]
  cmd.extend([
      DOCKER_IMAGE, INNER_KARMA_SCRIPT,
      '--builder', api.vars.builder_name,
      '--git_hash', api.properties['revision'],
      '--buildbucket_build_id', api.properties.get('buildbucket_build_id',
                                                   ''),
      '--bot_id', api.vars.swarming_bot_id,
      '--task_id', api.vars.swarming_task_id,
      '--browser', 'Chrome',
      '--config', api.vars.configuration,
      '--source_type', 'canvaskit',
  ])
  if api.vars.is_trybot:
    cmd.extend([
        '--issue', api.vars.issue,
        '--patchset', api.vars.patchset,
    ])

  # Override DOCKER_CONFIG set by Kitchen.
  env = {'DOCKER_CONFIG': '/home/chrome-bot/.docker'}
  with api.env(env):
    api.run(
        api.step,
        'Test CanvasKit with Docker',
        cmd=cmd)
def GenTests(api):
  """Yield recipe-engine simulation test cases for this recipe."""
  # Plain CI run on a GPU WEBGL1 bot.
  yield (
      api.test('Test-Debian9-EMCC-GCE-GPU-WEBGL1-wasm-Debug-All-CanvasKit') +
      api.properties(buildername=('Test-Debian9-EMCC-GCE-GPU-WEBGL1'
                                  '-wasm-Debug-All-CanvasKit'),
                     repository='https://skia.googlesource.com/skia.git',
                     revision='abc123',
                     path_config='kitchen',
                     swarm_out_dir='[SWARM_OUT_DIR]')
  )
  # Trybot run: the patch_* / gerrit_* properties exercise the
  # api.vars.is_trybot branch of RunSteps.
  yield (
      api.test('canvaskit_trybot') +
      api.properties(buildername=('Test-Debian9-EMCC-GCE-CPU-AVX2'
                                  '-wasm-Debug-All-CanvasKit'),
                     repository='https://skia.googlesource.com/skia.git',
                     revision='abc123',
                     path_config='kitchen',
                     swarm_out_dir='[SWARM_OUT_DIR]',
                     patch_ref='89/456789/12',
                     patch_repo='https://skia.googlesource.com/skia.git',
                     patch_storage='gerrit',
                     patch_set=7,
                     patch_issue=1234,
                     gerrit_project='skia',
                     gerrit_url='https://skia-review.googlesource.com/')
  )
| {
"content_hash": "92a6d9768a41048b8f23f883b304811d",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 82,
"avg_line_length": 32.73381294964029,
"alnum_prop": 0.5876923076923077,
"repo_name": "Hikari-no-Tenshi/android_external_skia",
"id": "849d9c4b738425a6de670071c22ad5adf6b4b904",
"size": "4767",
"binary": false,
"copies": "3",
"ref": "refs/heads/10.0",
"path": "infra/bots/recipes/test_canvaskit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "12375"
},
{
"name": "Batchfile",
"bytes": "1155"
},
{
"name": "C",
"bytes": "860408"
},
{
"name": "C++",
"bytes": "24242899"
},
{
"name": "CSS",
"bytes": "11147"
},
{
"name": "Go",
"bytes": "29067"
},
{
"name": "HTML",
"bytes": "932599"
},
{
"name": "Java",
"bytes": "24340"
},
{
"name": "JavaScript",
"bytes": "377437"
},
{
"name": "Makefile",
"bytes": "67776"
},
{
"name": "Objective-C",
"bytes": "23795"
},
{
"name": "Objective-C++",
"bytes": "111148"
},
{
"name": "Python",
"bytes": "499622"
},
{
"name": "Shell",
"bytes": "63350"
}
],
"symlink_target": ""
} |
__author__ = "Guillaume Luchet <guillaume@geelweb.org>"
from rest_framework import serializers
from geelweb.django.quickpoll.models import Poll, Question, Rule
class PollSerializer(serializers.ModelSerializer):
    """DRF serializer for ``Poll`` instances (no explicit field list, so
    the ModelSerializer defaults apply)."""

    class Meta:
        model = Poll
class RuleSerializer(serializers.ModelSerializer):
    """DRF serializer for ``Rule`` instances (no explicit field list, so
    the ModelSerializer defaults apply)."""

    class Meta:
        model = Rule
| {
"content_hash": "02b2ff297d7556d49a1baf9e2fb957a3",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 64,
"avg_line_length": 28.25,
"alnum_prop": 0.7492625368731564,
"repo_name": "geelweb/geelweb-django-quickpoll",
"id": "e5e4ec9c8c8862bc370fc2c1958876e97e33a341",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/geelweb/django/quickpoll/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "681"
},
{
"name": "JavaScript",
"bytes": "9258"
},
{
"name": "Python",
"bytes": "8497"
}
],
"symlink_target": ""
} |
"""
Handles all requests relating to consistency groups.
"""
import functools
from oslo.config import cfg
from cinder.db import base
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
import cinder.policy
from cinder import quota
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder.volume import api as volume_api
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_types
CONF = cfg.CONF
# Pull in the storage_availability_zone option defined by the volume manager.
CONF.import_opt('storage_availability_zone', 'cinder.volume.manager')

LOG = logging.getLogger(__name__)
# Quota engine dedicated to consistency-group resources.
CGQUOTAS = quota.CGQUOTAS
def wrap_check_policy(func):
    """Check policy corresponding to the wrapped methods prior to execution.

    This decorator requires the first 3 args of the wrapped function
    to be (self, context, consistencygroup)
    """
    @functools.wraps(func)
    def wrapped(self, context, target_obj, *args, **kwargs):
        # The policy action is named after the wrapped method itself
        # (e.g. ``delete`` -> ``consistencygroup:delete``).
        check_policy(context, func.__name__, target_obj)
        return func(self, context, target_obj, *args, **kwargs)

    return wrapped
def check_policy(context, action, target_obj=None):
    """Enforce the ``consistencygroup:<action>`` policy rule for *context*.

    :param context: request context carrying project_id/user_id
    :param action: short action name, e.g. ``create`` or ``get_all``
    :param target_obj: optional dict merged over the default target;
        typically the consistency group being acted upon
    """
    target = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }
    target.update(target_obj or {})
    _action = 'consistencygroup:%s' % action
    # Raises on denial — presumably cinder.exception.PolicyNotAuthorized;
    # confirm against cinder.policy.enforce.
    cinder.policy.enforce(context, _action, target)
class API(base.Base):
    """API for interacting with the volume manager for consistency groups."""

    def __init__(self, db_driver=None):
        # RPC clients used to fan work out to the scheduler / volume services.
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.volume_rpcapi = volume_rpcapi.VolumeAPI()
        # Cached AZ names; lazily refreshed in _valid_availability_zone.
        self.availability_zone_names = ()
        self.volume_api = volume_api.API()
        super(API, self).__init__(db_driver)

    def _valid_availability_zone(self, availability_zone):
        """Return True if *availability_zone* is a known AZ.

        Checks the cached list first, then the configured default, and only
        then refreshes the cache via the volume API (an RPC round-trip).
        """
        if availability_zone in self.availability_zone_names:
            return True
        if CONF.storage_availability_zone == availability_zone:
            return True

        azs = self.volume_api.list_availability_zones()
        self.availability_zone_names = [az['name'] for az in azs]
        return availability_zone in self.availability_zone_names

    def _extract_availability_zone(self, availability_zone):
        """Resolve and validate the AZ to use, raising InvalidInput if bad."""
        if availability_zone is None:
            if CONF.default_availability_zone:
                availability_zone = CONF.default_availability_zone
            else:
                # For backwards compatibility use the storage_availability_zone
                availability_zone = CONF.storage_availability_zone

        valid = self._valid_availability_zone(availability_zone)
        if not valid:
            msg = _("Availability zone '%s' is invalid") % (availability_zone)
            LOG.warn(msg)
            raise exception.InvalidInput(reason=msg)

        return availability_zone

    def create(self, context, name, description,
               cg_volume_types=None, availability_zone=None):
        """Create a consistency group and cast the request to the scheduler.

        :param cg_volume_types: optional comma-separated volume type
            names/ids; falls back to the default volume type when empty.
        :returns: the newly created consistency group DB record
        """
        check_policy(context, 'create')

        volume_type_list = None
        if cg_volume_types:
            volume_type_list = cg_volume_types.split(',')

        req_volume_types = []
        if volume_type_list:
            req_volume_types = (self.db.volume_types_get_by_name_or_id(
                context, volume_type_list))

        if not req_volume_types:
            volume_type = volume_types.get_default_volume_type()
            req_volume_types.append(volume_type)

        # Store the requested type ids as a single comma-terminated string
        # (or None when no usable types were found).
        req_volume_type_ids = ""
        for voltype in req_volume_types:
            if voltype:
                req_volume_type_ids = (
                    req_volume_type_ids + voltype.get('id') + ",")
        if len(req_volume_type_ids) == 0:
            req_volume_type_ids = None

        availability_zone = self._extract_availability_zone(availability_zone)
        options = {'user_id': context.user_id,
                   'project_id': context.project_id,
                   'availability_zone': availability_zone,
                   'status': "creating",
                   'name': name,
                   'description': description,
                   'volume_type_id': req_volume_type_ids}

        group = None
        try:
            group = self.db.consistencygroup_create(context, options)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error occurred when creating consistency group"
                            " %s."), name)

        # One (request_spec, filter_properties) pair per requested type.
        request_spec_list = []
        filter_properties_list = []
        for req_volume_type in req_volume_types:
            request_spec = {'volume_type': req_volume_type.copy(),
                            'consistencygroup_id': group['id']}
            filter_properties = {}
            request_spec_list.append(request_spec)
            filter_properties_list.append(filter_properties)

        # Update quota for consistencygroups
        self.update_quota(context, group['id'])

        self._cast_create_consistencygroup(context, group['id'],
                                           request_spec_list,
                                           filter_properties_list)

        return group

    def _cast_create_consistencygroup(self, context, group_id,
                                      request_spec_list,
                                      filter_properties_list):
        """Fill in volume properties/qos specs and cast to the scheduler.

        On any failure while building the specs, the just-created group row
        is destroyed before the exception is re-raised.
        """
        try:
            for request_spec in request_spec_list:
                volume_type = request_spec.get('volume_type', None)
                volume_type_id = None
                if volume_type:
                    volume_type_id = volume_type.get('id', None)

                specs = {}
                if volume_type_id:
                    qos_specs = volume_types.get_volume_type_qos_specs(
                        volume_type_id)
                    specs = qos_specs['qos_specs']
                if not specs:
                    # to make sure we don't pass empty dict
                    specs = None

                volume_properties = {
                    'size': 0,  # Need to populate size for the scheduler
                    'user_id': context.user_id,
                    'project_id': context.project_id,
                    'status': 'creating',
                    'attach_status': 'detached',
                    'encryption_key_id': request_spec.get('encryption_key_id',
                                                          None),
                    'display_description': request_spec.get('description',
                                                            None),
                    'display_name': request_spec.get('name', None),
                    'volume_type_id': volume_type_id,
                }

                request_spec['volume_properties'] = volume_properties
                request_spec['qos_specs'] = specs

        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self.db.consistencygroup_destroy(context, group_id)
                finally:
                    LOG.error(_("Error occurred when building "
                                "request spec list for consistency group "
                                "%s."), group_id)

        # Cast to the scheduler and let it handle whatever is needed
        # to select the target host for this group.
        self.scheduler_rpcapi.create_consistencygroup(
            context,
            CONF.volume_topic,
            group_id,
            request_spec_list=request_spec_list,
            filter_properties_list=filter_properties_list)

    def update_quota(self, context, group_id):
        """Reserve and commit quota for one consistency group.

        On quota failure the group row is destroyed before re-raising.
        """
        reserve_opts = {'consistencygroups': 1}
        try:
            reservations = CGQUOTAS.reserve(context, **reserve_opts)
            CGQUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self.db.consistencygroup_destroy(context, group_id)
                finally:
                    # NOTE(review): message is missing a space between
                    # "creating" and "consistency".
                    LOG.error(_("Failed to update quota for creating"
                                "consistency group %s."), group_id)

    @wrap_check_policy
    def delete(self, context, group, force=False):
        """Mark a consistency group for deletion and cast to the volume RPC.

        Rejects deletion while dependent cgsnapshots exist, while volumes
        remain (unless *force*), or while any member volume is attached or
        has snapshots.
        """
        if not force and group['status'] not in ["available", "error"]:
            msg = _("Consistency group status must be available or error, "
                    "but current status is: %s") % group['status']
            raise exception.InvalidConsistencyGroup(reason=msg)

        cgsnaps = self.db.cgsnapshot_get_all_by_group(
            context.elevated(),
            group['id'])
        if cgsnaps:
            msg = _("Consistency group %s still has dependent "
                    "cgsnapshots.") % group['id']
            LOG.error(msg)
            raise exception.InvalidConsistencyGroup(reason=msg)

        volumes = self.db.volume_get_all_by_group(context.elevated(),
                                                  group['id'])

        if volumes and not force:
            msg = _("Consistency group %s still contains volumes. "
                    "The force flag is required to delete it.") % group['id']
            LOG.error(msg)
            raise exception.InvalidConsistencyGroup(reason=msg)

        for volume in volumes:
            if volume['attach_status'] == "attached":
                msg = _("Volume in consistency group %s is attached. "
                        "Need to detach first.") % group['id']
                LOG.error(msg)
                raise exception.InvalidConsistencyGroup(reason=msg)

            snapshots = self.db.snapshot_get_all_for_volume(context,
                                                            volume['id'])
            if snapshots:
                msg = _("Volume in consistency group still has "
                        "dependent snapshots.")
                LOG.error(msg)
                raise exception.InvalidConsistencyGroup(reason=msg)

        now = timeutils.utcnow()
        self.db.consistencygroup_update(context, group['id'],
                                        {'status': 'deleting',
                                         'terminated_at': now})

        self.volume_rpcapi.delete_consistencygroup(context, group)

    @wrap_check_policy
    def update(self, context, group, fields):
        """Update arbitrary *fields* on the group's DB record."""
        self.db.consistencygroup_update(context, group['id'], fields)

    def get(self, context, group_id):
        """Fetch one group as a plain dict, enforcing the 'get' policy."""
        rv = self.db.consistencygroup_get(context, group_id)
        group = dict(rv.iteritems())
        check_policy(context, 'get', group)
        return group

    def get_all(self, context, marker=None, limit=None, sort_key='created_at',
                sort_dir='desc', filters=None):
        """List consistency groups visible to *context*.

        NOTE: marker/limit/sort_key/sort_dir are validated but not passed to
        the DB layer here; admins with 'all_tenants' see every project.
        """
        check_policy(context, 'get_all')
        if filters is None:
            filters = {}

        try:
            if limit is not None:
                limit = int(limit)
                if limit < 0:
                    msg = _('limit param must be positive')
                    raise exception.InvalidInput(reason=msg)
        except ValueError:
            msg = _('limit param must be an integer')
            raise exception.InvalidInput(reason=msg)

        if filters:
            LOG.debug("Searching by: %s" % str(filters))

        if (context.is_admin and 'all_tenants' in filters):
            # Need to remove all_tenants to pass the filtering below.
            del filters['all_tenants']
            groups = self.db.consistencygroup_get_all(context)
        else:
            groups = self.db.consistencygroup_get_all_by_project(
                context,
                context.project_id)

        return groups

    def get_group(self, context, group_id):
        """Fetch one group as a plain dict (policy checked before the read)."""
        check_policy(context, 'get_group')
        rv = self.db.consistencygroup_get(context, group_id)
        return dict(rv.iteritems())

    def create_cgsnapshot(self, context,
                          group, name,
                          description):
        """Public entry point; delegates to _create_cgsnapshot."""
        return self._create_cgsnapshot(context, group, name, description)

    def _create_cgsnapshot(self, context,
                           group, name, description):
        """Create a cgsnapshot row plus per-volume snapshot rows, then cast.

        On any failure the cgsnapshot row is destroyed before re-raising.
        """
        options = {'consistencygroup_id': group['id'],
                   'user_id': context.user_id,
                   'project_id': context.project_id,
                   'status': "creating",
                   'name': name,
                   'description': description}

        try:
            cgsnapshot = self.db.cgsnapshot_create(context, options)
            cgsnapshot_id = cgsnapshot['id']

            volumes = self.db.volume_get_all_by_group(
                context.elevated(),
                cgsnapshot['consistencygroup_id'])

            if not volumes:
                # An empty group cannot be snapshotted.
                msg = _("Consistency group is empty. No cgsnapshot "
                        "will be created.")
                raise exception.InvalidConsistencyGroup(reason=msg)

            snap_name = cgsnapshot['name']
            snap_desc = cgsnapshot['description']
            self.volume_api.create_snapshots_in_db(
                context, volumes, snap_name, snap_desc, True, cgsnapshot_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self.db.cgsnapshot_destroy(context, cgsnapshot_id)
                finally:
                    LOG.error(_("Error occurred when creating cgsnapshot"
                                " %s."), cgsnapshot_id)

        self.volume_rpcapi.create_cgsnapshot(context, group, cgsnapshot)

        return cgsnapshot

    def delete_cgsnapshot(self, context, cgsnapshot, force=False):
        """Mark a cgsnapshot as deleting and cast to the group's host."""
        if cgsnapshot['status'] not in ["available", "error"]:
            msg = _("Cgsnapshot status must be available or error")
            raise exception.InvalidCgSnapshot(reason=msg)
        self.db.cgsnapshot_update(context, cgsnapshot['id'],
                                  {'status': 'deleting'})
        group = self.db.consistencygroup_get(
            context,
            cgsnapshot['consistencygroup_id'])
        self.volume_rpcapi.delete_cgsnapshot(context.elevated(), cgsnapshot,
                                             group['host'])

    def update_cgsnapshot(self, context, cgsnapshot, fields):
        """Update arbitrary *fields* on the cgsnapshot's DB record."""
        self.db.cgsnapshot_update(context, cgsnapshot['id'], fields)

    def get_cgsnapshot(self, context, cgsnapshot_id):
        """Fetch one cgsnapshot as a plain dict."""
        check_policy(context, 'get_cgsnapshot')
        rv = self.db.cgsnapshot_get(context, cgsnapshot_id)
        return dict(rv.iteritems())

    def get_all_cgsnapshots(self, context, search_opts=None):
        """List cgsnapshots visible to *context*, filtered by *search_opts*.

        Each search option must match exactly for a snapshot to be included.
        """
        check_policy(context, 'get_all_cgsnapshots')

        search_opts = search_opts or {}

        if (context.is_admin and 'all_tenants' in search_opts):
            # Need to remove all_tenants to pass the filtering below.
            del search_opts['all_tenants']
            cgsnapshots = self.db.cgsnapshot_get_all(context)
        else:
            cgsnapshots = self.db.cgsnapshot_get_all_by_project(
                context.elevated(), context.project_id)

        if search_opts:
            LOG.debug("Searching by: %s" % search_opts)

            results = []
            not_found = object()
            for cgsnapshot in cgsnapshots:
                # Keep only snapshots matching every search option; the
                # for/else fires when no option mismatched.
                for opt, value in search_opts.iteritems():
                    if cgsnapshot.get(opt, not_found) != value:
                        break
                else:
                    results.append(cgsnapshot)
            cgsnapshots = results
        return cgsnapshots
| {
"content_hash": "9bf649a4e957696cd46f54ebc01b9e7f",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 79,
"avg_line_length": 38.80548628428928,
"alnum_prop": 0.5561339245549772,
"repo_name": "e0ne/cinder",
"id": "b1ae8e47f03d5db67c643877e548e8d5287e2e57",
"size": "16204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/consistencygroup/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from collections import defaultdict
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import orjson
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ValidationError
from django.db import transaction
from django.http import HttpRequest, HttpResponse
from django.utils.translation import override as override_language
from django.utils.translation import ugettext as _
from zerver.context_processors import get_valid_realm_from_request
from zerver.decorator import (
authenticated_json_view,
require_non_guest_user,
require_post,
require_realm_admin,
)
from zerver.lib.actions import (
bulk_add_subscriptions,
bulk_remove_subscriptions,
do_add_default_stream,
do_add_streams_to_default_stream_group,
do_change_default_stream_group_description,
do_change_default_stream_group_name,
do_change_stream_description,
do_change_stream_invite_only,
do_change_stream_message_retention_days,
do_change_stream_post_policy,
do_change_subscription_property,
do_create_default_stream_group,
do_deactivate_stream,
do_delete_messages,
do_get_streams,
do_remove_default_stream,
do_remove_default_stream_group,
do_remove_streams_from_default_stream_group,
do_rename_stream,
do_send_messages,
gather_subscriptions,
get_default_streams_for_realm,
get_subscriber_emails,
internal_prep_private_message,
internal_prep_stream_message,
)
from zerver.lib.exceptions import ErrorCode, JsonableError, OrganizationOwnerRequired
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.lib.retention import parse_message_retention_days
from zerver.lib.streams import (
StreamDict,
access_default_stream_group_by_id,
access_stream_by_id,
access_stream_by_name,
access_stream_for_delete_or_update,
access_web_public_stream,
check_stream_name,
check_stream_name_available,
filter_stream_authorization,
list_to_streams,
)
from zerver.lib.topic import (
get_topic_history_for_public_stream,
get_topic_history_for_stream,
messages_for_topic,
)
from zerver.lib.types import Validator
from zerver.lib.validator import (
check_bool,
check_capped_string,
check_color,
check_dict,
check_dict_only,
check_int,
check_int_in,
check_list,
check_string,
check_string_or_int,
check_union,
to_non_negative_int,
)
from zerver.models import (
Realm,
Stream,
UserMessage,
UserProfile,
get_active_user,
get_active_user_profile_by_id_in_realm,
get_system_bot,
)
class PrincipalError(JsonableError):
    """Raised when the acting user may not operate on behalf of *principal*."""

    code = ErrorCode.UNAUTHORIZED_PRINCIPAL
    data_fields = ['principal']
    http_status_code = 403

    def __init__(self, principal: Union[int, str]) -> None:
        # The offending principal (user ID or email), echoed in the error body.
        self.principal: Union[int, str] = principal

    @staticmethod
    def msg_format() -> str:
        return _("User not authorized to execute queries on behalf of '{principal}'")
def principal_to_user_profile(agent: UserProfile, principal: Union[str, int]) -> UserProfile:
    """Resolve *principal* (email string or user ID) within *agent*'s realm.

    Raises PrincipalError rather than DoesNotExist so no information leaks
    about accounts in other realms.
    """
    try:
        if isinstance(principal, str):
            return get_active_user(principal, agent.realm)
        else:
            return get_active_user_profile_by_id_in_realm(principal, agent.realm)
    except UserProfile.DoesNotExist:
        # We have to make sure we don't leak information about which users
        # are registered for Zulip in a different realm. We could do
        # something a little more clever and check the domain part of the
        # principal to maybe give a better error message
        raise PrincipalError(principal)
def check_if_removing_someone_else(user_profile: UserProfile,
                                   principals: Optional[Union[List[str], List[int]]]) -> bool:
    """Return True if *principals* names anyone besides the acting user.

    An absent or empty list means the user is acting only on themselves.
    """
    if not principals:
        return False
    if len(principals) > 1:
        # More than one principal necessarily includes someone else.
        return True
    (sole_principal,) = principals
    if isinstance(sole_principal, int):
        return sole_principal != user_profile.id
    return sole_principal != user_profile.email
def deactivate_stream_backend(request: HttpRequest,
                              user_profile: UserProfile,
                              stream_id: int) -> HttpResponse:
    """Deactivate (delete) a stream, after an access check for the actor."""
    (stream, sub) = access_stream_for_delete_or_update(user_profile, stream_id)
    do_deactivate_stream(stream, acting_user=user_profile)
    return json_success()
@require_realm_admin
@has_request_variables
def add_default_stream(request: HttpRequest,
                       user_profile: UserProfile,
                       stream_id: int=REQ(validator=check_int)) -> HttpResponse:
    """Add a stream to the realm's default-streams set (admin only)."""
    (stream, sub) = access_stream_by_id(user_profile, stream_id)
    if stream.invite_only:
        # Default streams are auto-joined by new users, so they must be public.
        return json_error(_("Private streams cannot be made default."))
    do_add_default_stream(stream)
    return json_success()
@require_realm_admin
@has_request_variables
def create_default_stream_group(request: HttpRequest, user_profile: UserProfile,
                                group_name: str=REQ(), description: str=REQ(),
                                stream_names: List[str]=REQ(validator=check_list(check_string))) -> HttpResponse:
    """Create a named default-stream group from a list of stream names.

    Return annotation corrected from ``None``: the function returns
    ``json_success()``, matching the sibling views.
    """
    streams = []
    for stream_name in stream_names:
        (stream, sub) = access_stream_by_name(user_profile, stream_name)
        streams.append(stream)
    do_create_default_stream_group(user_profile.realm, group_name, description, streams)
    return json_success()
@require_realm_admin
@has_request_variables
def update_default_stream_group_info(request: HttpRequest, user_profile: UserProfile, group_id: int,
                                     new_group_name: Optional[str]=REQ(validator=check_string, default=None),
                                     new_description: Optional[str]=REQ(validator=check_string,
                                                                        default=None)) -> HttpResponse:
    """Rename and/or re-describe a default-stream group (admin only).

    Return annotation corrected from ``None``: the function returns
    ``json_success()``/``json_error()``.
    """
    if not new_group_name and not new_description:
        return json_error(_('You must pass "new_description" or "new_group_name".'))

    group = access_default_stream_group_by_id(user_profile.realm, group_id)
    if new_group_name is not None:
        do_change_default_stream_group_name(user_profile.realm, group, new_group_name)
    if new_description is not None:
        do_change_default_stream_group_description(user_profile.realm, group, new_description)
    return json_success()
@require_realm_admin
@has_request_variables
def update_default_stream_group_streams(request: HttpRequest, user_profile: UserProfile,
                                        group_id: int, op: str=REQ(),
                                        stream_names: List[str]=REQ(
                                            validator=check_list(check_string))) -> HttpResponse:
    """Add streams to or remove streams from a default-stream group.

    ``op`` must be ``"add"`` or ``"remove"``. Return annotation corrected
    from ``None``: the function returns ``json_success()``/``json_error()``.
    """
    group = access_default_stream_group_by_id(user_profile.realm, group_id)
    streams = []
    for stream_name in stream_names:
        (stream, sub) = access_stream_by_name(user_profile, stream_name)
        streams.append(stream)

    if op == 'add':
        do_add_streams_to_default_stream_group(user_profile.realm, group, streams)
    elif op == 'remove':
        do_remove_streams_from_default_stream_group(user_profile.realm, group, streams)
    else:
        return json_error(_('Invalid value for "op". Specify one of "add" or "remove".'))
    return json_success()
@require_realm_admin
@has_request_variables
def remove_default_stream_group(request: HttpRequest, user_profile: UserProfile,
                                group_id: int) -> HttpResponse:
    """Delete a default-stream group entirely (admin only).

    Return annotation corrected from ``None``: returns ``json_success()``.
    """
    group = access_default_stream_group_by_id(user_profile.realm, group_id)
    do_remove_default_stream_group(user_profile.realm, group)
    return json_success()
@require_realm_admin
@has_request_variables
def remove_default_stream(request: HttpRequest,
                          user_profile: UserProfile,
                          stream_id: int=REQ(validator=check_int)) -> HttpResponse:
    """Remove a stream from the realm's default-streams set (admin only)."""
    (stream, sub) = access_stream_by_id(
        user_profile,
        stream_id,
        # Admins may remove a default stream even without a subscription.
        allow_realm_admin=True,
    )
    do_remove_default_stream(stream)
    return json_success()
@has_request_variables
def update_stream_backend(
        request: HttpRequest, user_profile: UserProfile,
        stream_id: int,
        description: Optional[str]=REQ(validator=check_capped_string(
            Stream.MAX_DESCRIPTION_LENGTH), default=None),
        is_private: Optional[bool]=REQ(validator=check_bool, default=None),
        is_announcement_only: Optional[bool]=REQ(validator=check_bool, default=None),
        stream_post_policy: Optional[int]=REQ(validator=check_int_in(
            Stream.STREAM_POST_POLICY_TYPES), default=None),
        history_public_to_subscribers: Optional[bool]=REQ(validator=check_bool, default=None),
        new_name: Optional[str]=REQ(validator=check_string, default=None),
        message_retention_days: Optional[Union[int, str]]=REQ(validator=check_string_or_int, default=None),
) -> HttpResponse:
    """Update a stream's settings; each optional parameter that is supplied
    triggers the corresponding change. Retention changes require an
    organization owner; privacy changes require a fresh access check.
    """
    # We allow realm administrators to update the stream name and
    # description even for private streams.
    (stream, sub) = access_stream_for_delete_or_update(user_profile, stream_id)
    if message_retention_days is not None:
        if not user_profile.is_realm_owner:
            raise OrganizationOwnerRequired()
        user_profile.realm.ensure_not_on_limited_plan()
        message_retention_days_value = parse_message_retention_days(
            message_retention_days, Stream.MESSAGE_RETENTION_SPECIAL_VALUES_MAP)
        do_change_stream_message_retention_days(stream, message_retention_days_value)

    if description is not None:
        if '\n' in description:
            # We don't allow newline characters in stream descriptions.
            description = description.replace("\n", " ")
        do_change_stream_description(stream, description)
    if new_name is not None:
        new_name = new_name.strip()
        if stream.name == new_name:
            return json_error(_("Stream already has that name!"))
        if stream.name.lower() != new_name.lower():
            # Check that the stream name is available (unless we are
            # are only changing the casing of the stream name).
            check_stream_name_available(user_profile.realm, new_name)
        do_rename_stream(stream, new_name, user_profile)
    if is_announcement_only is not None:
        # is_announcement_only is a legacy way to specify
        # stream_post_policy. We can probably just delete this code,
        # since we're not aware of clients that used it, but we're
        # keeping it for backwards-compatibility for now.
        stream_post_policy = Stream.STREAM_POST_POLICY_EVERYONE
        if is_announcement_only:
            stream_post_policy = Stream.STREAM_POST_POLICY_ADMINS
    if stream_post_policy is not None:
        do_change_stream_post_policy(stream, stream_post_policy)

    # But we require even realm administrators to be actually
    # subscribed to make a private stream public.
    if is_private is not None:
        default_stream_ids = {s.id for s in get_default_streams_for_realm(stream.realm_id)}
        (stream, sub) = access_stream_by_id(user_profile, stream_id)
        if is_private and stream.id in default_stream_ids:
            return json_error(_("Default streams cannot be made private."))
        do_change_stream_invite_only(stream, is_private, history_public_to_subscribers)
    return json_success()
@has_request_variables
def list_subscriptions_backend(
        request: HttpRequest,
        user_profile: UserProfile,
        include_subscribers: bool=REQ(validator=check_bool, default=False),
) -> HttpResponse:
    """Return the streams the user is subscribed to (unsubscribed ignored)."""
    subscribed, _ = gather_subscriptions(
        user_profile, include_subscribers=include_subscribers,
    )
    return json_success({"subscriptions": subscribed})
# (view function, kwargs) pair consumed by compose_views.
FuncKwargPair = Tuple[Callable[..., HttpResponse], Dict[str, Union[int, Iterable[Any]]]]

# Validator for the "add" payload: a list of dicts, each with a required
# stream name plus optional per-subscription settings.
add_subscriptions_schema = check_list(
    check_dict_only(
        required_keys=[
            ('name', check_string)
        ],
        optional_keys=[
            ('color', check_color),
            ('description', check_capped_string(Stream.MAX_DESCRIPTION_LENGTH)),
        ],
    ),
)

# Validator for the "delete" payload: a bare list of stream names.
remove_subscriptions_schema = check_list(check_string)
@has_request_variables
def update_subscriptions_backend(
        request: HttpRequest, user_profile: UserProfile,
        delete: Iterable[str]=REQ(validator=remove_subscriptions_schema, default=[]),
        add: Iterable[Mapping[str, Any]]=REQ(validator=add_subscriptions_schema, default=[]),
) -> HttpResponse:
    """Add and/or remove subscriptions in one request.

    Both halves are run atomically via compose_views, so a failure in
    either rolls back the other.
    """
    if not add and not delete:
        return json_error(_('Nothing to do. Specify at least one of "add" or "delete".'))

    method_kwarg_pairs: List[FuncKwargPair] = [
        (add_subscriptions_backend, dict(streams_raw=add)),
        (remove_subscriptions_backend, dict(streams_raw=delete)),
    ]
    return compose_views(request, user_profile, method_kwarg_pairs)
def compose_views(
        request: HttpRequest,
        user_profile: UserProfile,
        method_kwarg_pairs: "List[FuncKwargPair]",
) -> HttpResponse:
    '''
    This takes a series of view methods from method_kwarg_pairs and calls
    them in sequence, and it smushes all the json results into a single
    response when everything goes right. (This helps clients avoid extra
    latency hops.) It rolls back the transaction when things go wrong in
    any one of the composed methods.

    TODO: Move this a utils-like module if we end up using it more widely.
    '''
    json_dict: Dict[str, Any] = {}
    with transaction.atomic():
        for method, kwargs in method_kwarg_pairs:
            response = method(request, user_profile, **kwargs)
            if response.status_code != 200:
                # Raising inside the atomic block rolls back the writes made
                # by any earlier method in the sequence.
                raise JsonableError(response.content)
            json_dict.update(orjson.loads(response.content))
    return json_success(json_dict)
# Validator for "principals" parameters: either a list of emails or a list
# of user IDs (homogeneous — mixing is rejected by check_union).
check_principals: Validator[Union[List[str], List[int]]] = check_union(
    [check_list(check_string), check_list(check_int)],
)
@has_request_variables
def remove_subscriptions_backend(
        request: HttpRequest, user_profile: UserProfile,
        streams_raw: Iterable[str]=REQ("subscriptions", validator=remove_subscriptions_schema),
        principals: Optional[Union[List[str], List[int]]]=REQ(validator=check_principals, default=None),
) -> HttpResponse:
    """Unsubscribe the given principals (default: the requester) from streams.

    Returns a JSON body with "removed" (streams actually unsubscribed) and
    "not_removed" (streams the principals were not subscribed to).
    """
    # Unsubscribing someone else requires admin access to the streams.
    removing_someone_else = check_if_removing_someone_else(user_profile, principals)
    streams_as_dict: List[StreamDict] = []
    for stream_name in streams_raw:
        streams_as_dict.append({"name": stream_name.strip()})
    streams, __ = list_to_streams(streams_as_dict, user_profile,
                                  admin_access_required=removing_someone_else)
    if principals:
        people_to_unsub = {principal_to_user_profile(
            user_profile, principal) for principal in principals}
    else:
        people_to_unsub = {user_profile}
    result: Dict[str, List[str]] = dict(removed=[], not_removed=[])
    (removed, not_subscribed) = bulk_remove_subscriptions(people_to_unsub, streams,
                                                          request.client,
                                                          acting_user=user_profile)
    # bulk_remove_subscriptions yields (user, stream) pairs; only the stream
    # names are reported back to the client.
    for (subscriber, removed_stream) in removed:
        result["removed"].append(removed_stream.name)
    for (subscriber, not_subscribed_stream) in not_subscribed:
        result["not_removed"].append(not_subscribed_stream.name)
    return json_success(result)
def you_were_just_subscribed_message(acting_user: UserProfile,
                                     recipient_user: UserProfile,
                                     stream_names: Set[str]) -> str:
    """Build the notification text telling recipient_user that acting_user
    subscribed them to the given streams, localized to the recipient's
    default language.
    """
    sorted_names = sorted(stream_names)
    mention = f"@**{acting_user.full_name}**"
    with override_language(recipient_user.default_language):
        if len(sorted_names) == 1:
            return _("{user_full_name} subscribed you to the stream {stream_name}.").format(
                user_full_name=mention,
                stream_name=f"#**{sorted_names[0]}**",
            )
        header = _("{user_full_name} subscribed you to the following streams:").format(
            user_full_name=mention,
        )
    # One markdown bullet per stream, each line newline-terminated.
    bullets = "".join(f"* #**{name}**\n" for name in sorted_names)
    return header + "\n\n" + bullets
# Sentinel meaning "inherit the realm's message retention policy".
RETENTION_DEFAULT: Union[str, int] = "realm_default"
# Shared empty default for the principals REQ parameter (read-only, never mutated).
EMPTY_PRINCIPALS: Union[Sequence[str], Sequence[int]] = []
@require_non_guest_user
@has_request_variables
def add_subscriptions_backend(
        request: HttpRequest,
        user_profile: UserProfile,
        streams_raw: Iterable[Dict[str, str]]=REQ("subscriptions", validator=add_subscriptions_schema),
        invite_only: bool=REQ(validator=check_bool, default=False),
        stream_post_policy: int=REQ(validator=check_int_in(
            Stream.STREAM_POST_POLICY_TYPES), default=Stream.STREAM_POST_POLICY_EVERYONE),
        history_public_to_subscribers: Optional[bool]=REQ(validator=check_bool, default=None),
        message_retention_days: Union[str, int]=REQ(validator=check_string_or_int,
                                                    default=RETENTION_DEFAULT),
        announce: bool=REQ(validator=check_bool, default=False),
        principals: Union[Sequence[str], Sequence[int]]=REQ(
            validator=check_principals, default=EMPTY_PRINCIPALS,
        ),
        authorization_errors_fatal: bool=REQ(validator=check_bool, default=True),
) -> HttpResponse:
    """Subscribe the given principals (default: the requester) to streams,
    creating streams that do not yet exist.

    Returns a JSON body mapping subscriber emails to the streams they were
    newly subscribed to ("subscribed") or already on ("already_subscribed"),
    plus "unauthorized" when authorization_errors_fatal is False.

    Fix vs. previous revision: the "subscribed"/"already_subscribed" entries
    were converted from defaultdicts to plain dicts twice; the redundant
    second conversion has been removed.
    """
    realm = user_profile.realm
    stream_dicts = []
    color_map = {}
    for stream_dict in streams_raw:
        # 'color' field is optional; remember it per stream name so it can be
        # applied when the subscription rows are created.
        if 'color' in stream_dict:
            color_map[stream_dict['name']] = stream_dict['color']
        stream_dict_copy: StreamDict = {}
        stream_dict_copy["name"] = stream_dict["name"].strip()
        # We don't allow newline characters in stream descriptions.
        if "description" in stream_dict:
            stream_dict_copy["description"] = stream_dict["description"].replace("\n", " ")
        stream_dict_copy["invite_only"] = invite_only
        stream_dict_copy["stream_post_policy"] = stream_post_policy
        stream_dict_copy["history_public_to_subscribers"] = history_public_to_subscribers
        stream_dict_copy["message_retention_days"] = parse_message_retention_days(
            message_retention_days, Stream.MESSAGE_RETENTION_SPECIAL_VALUES_MAP)
        stream_dicts.append(stream_dict_copy)
    # Validation of the streams arguments, including enforcement of
    # can_create_streams policy and check_stream_name policy is inside
    # list_to_streams.
    existing_streams, created_streams = \
        list_to_streams(stream_dicts, user_profile, autocreate=True)
    authorized_streams, unauthorized_streams = \
        filter_stream_authorization(user_profile, existing_streams)
    if len(unauthorized_streams) > 0 and authorization_errors_fatal:
        return json_error(_("Unable to access stream ({stream_name}).").format(
            stream_name=unauthorized_streams[0].name,
        ))
    # Newly created streams are also authorized for the creator
    streams = authorized_streams + created_streams
    if len(principals) > 0:
        if realm.is_zephyr_mirror_realm and not all(stream.invite_only for stream in streams):
            return json_error(_("You can only invite other Zephyr mirroring users to private streams."))
        if not user_profile.can_subscribe_other_users():
            if user_profile.realm.invite_to_stream_policy == Realm.POLICY_ADMINS_ONLY:
                return json_error(_("Only administrators can modify other users' subscriptions."))
            # Realm.POLICY_MEMBERS_ONLY only fails if the
            # user is a guest, which happens in the decorator above.
            assert user_profile.realm.invite_to_stream_policy == \
                Realm.POLICY_FULL_MEMBERS_ONLY
            return json_error(_("Your account is too new to modify other users' subscriptions."))
        subscribers = {principal_to_user_profile(user_profile, principal) for principal in principals}
    else:
        subscribers = {user_profile}
    (subscribed, already_subscribed) = bulk_add_subscriptions(realm, streams, subscribers,
                                                              acting_user=user_profile, color_map=color_map)
    # We can assume unique emails here for now, but we should eventually
    # convert this function to be more id-centric.
    email_to_user_profile: Dict[str, UserProfile] = {}
    result: Dict[str, Any] = dict(subscribed=defaultdict(list), already_subscribed=defaultdict(list))
    for sub_info in subscribed:
        subscriber = sub_info.user
        stream = sub_info.stream
        result["subscribed"][subscriber.email].append(stream.name)
        email_to_user_profile[subscriber.email] = subscriber
    for sub_info in already_subscribed:
        subscriber = sub_info.user
        stream = sub_info.stream
        result["already_subscribed"][subscriber.email].append(stream.name)
    # Convert the defaultdicts to plain dicts once, before they are handed to
    # send_messages_for_new_subscribers and serialized in the response.
    result["subscribed"] = dict(result["subscribed"])
    result["already_subscribed"] = dict(result["already_subscribed"])
    send_messages_for_new_subscribers(
        user_profile=user_profile,
        subscribers=subscribers,
        new_subscriptions=result["subscribed"],
        email_to_user_profile=email_to_user_profile,
        created_streams=created_streams,
        announce=announce,
    )
    if not authorization_errors_fatal:
        result["unauthorized"] = [s.name for s in unauthorized_streams]
    return json_success(result)
def send_messages_for_new_subscribers(
    user_profile: UserProfile,
    subscribers: Set[UserProfile],
    new_subscriptions: Dict[str, List[str]],
    email_to_user_profile: Dict[str, UserProfile],
    created_streams: List[Stream],
    announce: bool,
) -> None:
    """
    If you are subscribing lots of new users to new streams,
    this function can be pretty expensive in terms of generating
    lots of queries and sending lots of messages.  We isolate
    the code partly to make it easier to test things like
    excessive query counts by mocking this function so that it
    doesn't drown out query counts from other code.
    """
    # Precompute bot-ness per email so the loop below avoids repeated lookups.
    bots = {subscriber.email: subscriber.is_bot for subscriber in subscribers}

    newly_created_stream_names = {s.name for s in created_streams}

    # Inform the user if someone else subscribed them to stuff,
    # or if a new stream was created with the "announce" option.
    notifications = []
    if new_subscriptions:
        for email, subscribed_stream_names in new_subscriptions.items():
            if email == user_profile.email:
                # Don't send a Zulip if you invited yourself.
                continue
            if bots[email]:
                # Don't send invitation Zulips to bots
                continue

            # For each user, we notify them about newly subscribed streams, except for
            # streams that were newly created.
            notify_stream_names = set(subscribed_stream_names) - newly_created_stream_names

            if not notify_stream_names:
                continue

            sender = get_system_bot(settings.NOTIFICATION_BOT)
            recipient_user = email_to_user_profile[email]

            msg = you_were_just_subscribed_message(
                acting_user=user_profile,
                recipient_user=recipient_user,
                stream_names=notify_stream_names,
            )

            notifications.append(
                internal_prep_private_message(
                    realm=user_profile.realm,
                    sender=sender,
                    recipient_user=recipient_user,
                    content=msg))

    if announce and len(created_streams) > 0:
        notifications_stream = user_profile.realm.get_notifications_stream()
        if notifications_stream is not None:
            # Translate the template in the stream's language, then fill it in.
            with override_language(notifications_stream.realm.default_language):
                if len(created_streams) > 1:
                    content = _("{user_name} created the following streams: {stream_str}.")
                else:
                    content = _("{user_name} created a new stream {stream_str}.")
                topic = _('new streams')

            content = content.format(
                user_name=f"@_**{user_profile.full_name}|{user_profile.id}**",
                stream_str=", ".join(f'#**{s.name}**' for s in created_streams)
            )

            sender = get_system_bot(settings.NOTIFICATION_BOT)

            notifications.append(
                internal_prep_stream_message(
                    realm=user_profile.realm,
                    sender=sender,
                    stream=notifications_stream,
                    topic=topic,
                    content=content,
                ),
            )

    # Zephyr mirror realms don't get per-stream "Stream created" messages.
    if not user_profile.realm.is_zephyr_mirror_realm and len(created_streams) > 0:
        sender = get_system_bot(settings.NOTIFICATION_BOT)
        for stream in created_streams:
            with override_language(stream.realm.default_language):
                notifications.append(
                    internal_prep_stream_message(
                        realm=user_profile.realm,
                        sender=sender,
                        stream=stream,
                        topic=Realm.STREAM_EVENTS_NOTIFICATION_TOPIC,
                        content=_('Stream created by {user_name}.').format(
                            user_name=f"@_**{user_profile.full_name}|{user_profile.id}**",
                        ),
                    ),
                )

    if len(notifications) > 0:
        # Mark the notifications as read for the acting user so they don't
        # generate unread counts for their own action.
        do_send_messages(notifications, mark_as_read=[user_profile.id])
@has_request_variables
def get_subscribers_backend(request: HttpRequest, user_profile: UserProfile,
                            stream_id: int=REQ('stream', converter=to_non_negative_int)) -> HttpResponse:
    """Return the email addresses of everyone subscribed to the stream.

    Realm admins may inspect subscribers of streams they cannot otherwise
    access (allow_realm_admin=True).
    """
    stream, sub = access_stream_by_id(
        user_profile,
        stream_id,
        allow_realm_admin=True,
    )
    emails = get_subscriber_emails(stream, user_profile)
    return json_success({'subscribers': emails})
# By default, lists all streams that the user has access to --
# i.e. public streams plus invite-only streams that the user is on
@has_request_variables
def get_streams_backend(
        request: HttpRequest, user_profile: UserProfile,
        include_public: bool=REQ(validator=check_bool, default=True),
        include_web_public: bool=REQ(validator=check_bool, default=False),
        include_subscribed: bool=REQ(validator=check_bool, default=True),
        include_all_active: bool=REQ(validator=check_bool, default=False),
        include_default: bool=REQ(validator=check_bool, default=False),
        include_owner_subscribed: bool=REQ(validator=check_bool, default=False),
) -> HttpResponse:
    """List every stream the user can see, filtered by the include_* flags.

    By default this is the public streams plus the invite-only streams the
    user is subscribed to; all filtering happens in do_get_streams.
    """
    stream_filters = dict(
        include_public=include_public,
        include_web_public=include_web_public,
        include_subscribed=include_subscribed,
        include_all_active=include_all_active,
        include_default=include_default,
        include_owner_subscribed=include_owner_subscribed,
    )
    streams = do_get_streams(user_profile, **stream_filters)
    return json_success({"streams": streams})
@has_request_variables
def get_topics_backend(
        request: HttpRequest, maybe_user_profile: Union[UserProfile, AnonymousUser],
        stream_id: int=REQ(converter=to_non_negative_int,
                           path_only=True)) -> HttpResponse:
    """Return the topic history of a stream.

    Supports unauthenticated ("web public") access: anonymous requests may
    only read topics of web-public streams in the requested realm.
    """
    if not maybe_user_profile.is_authenticated:
        is_web_public_query = True
        user_profile: Optional[UserProfile] = None
    else:
        is_web_public_query = False
        assert isinstance(maybe_user_profile, UserProfile)
        user_profile = maybe_user_profile
        assert user_profile is not None

    if is_web_public_query:
        realm = get_valid_realm_from_request(request)
        stream = access_web_public_stream(stream_id, realm)
        result = get_topic_history_for_public_stream(recipient_id=stream.recipient_id)
    else:
        assert user_profile is not None
        # access_stream_by_id raises for streams the user cannot see.
        (stream, sub) = access_stream_by_id(user_profile, stream_id)
        result = get_topic_history_for_stream(
            user_profile=user_profile,
            recipient_id=stream.recipient_id,
            public_history=stream.is_history_public_to_subscribers(),
        )

    return json_success(dict(topics=result))
@require_realm_admin
@has_request_variables
def delete_in_topic(request: HttpRequest, user_profile: UserProfile,
                    stream_id: int=REQ(converter=to_non_negative_int),
                    topic_name: str=REQ("topic_name")) -> HttpResponse:
    """Delete all messages in a topic that the acting realm admin can access."""
    (stream, sub) = access_stream_by_id(user_profile, stream_id)

    messages = messages_for_topic(stream.recipient_id, topic_name)
    if not stream.is_history_public_to_subscribers():
        # Don't allow the user to delete messages that they don't have access to.
        deletable_message_ids = UserMessage.objects.filter(
            user_profile=user_profile, message_id__in=messages).values_list("message_id", flat=True)
        messages = [message for message in messages if message.id in
                    deletable_message_ids]

    do_delete_messages(user_profile.realm, messages)

    return json_success()
@require_post
@authenticated_json_view
@has_request_variables
def json_stream_exists(request: HttpRequest, user_profile: UserProfile, stream_name: str=REQ("stream"),
                       autosubscribe: bool=REQ(validator=check_bool, default=False)) -> HttpResponse:
    """Report whether a stream exists and is accessible; 404 otherwise.

    When autosubscribe is true and the user is not yet subscribed, also
    subscribes them to the stream.
    """
    check_stream_name(stream_name)

    try:
        (stream, sub) = access_stream_by_name(user_profile, stream_name)
    except JsonableError as e:
        return json_error(e.msg, status=404)

    # access_stream functions return a subscription if and only if we
    # are already subscribed.
    result = {"subscribed": sub is not None}

    # If we got here, we're either subscribed or the stream is public.
    # So if we're not yet subscribed and autosubscribe is enabled, we
    # should join.
    if sub is None and autosubscribe:
        bulk_add_subscriptions(user_profile.realm, [stream], [user_profile], acting_user=user_profile)
        result["subscribed"] = True

    return json_success(result)  # results are ignored for HEAD requests
@has_request_variables
def json_get_stream_id(request: HttpRequest,
                       user_profile: UserProfile,
                       stream_name: str=REQ('stream')) -> HttpResponse:
    """Resolve a stream name to its numeric id (erroring if inaccessible)."""
    stream, ignored_sub = access_stream_by_name(user_profile, stream_name)
    return json_success({'stream_id': stream.id})
@has_request_variables
def update_subscriptions_property(request: HttpRequest,
                                  user_profile: UserProfile,
                                  stream_id: int=REQ(validator=check_int),
                                  property: str=REQ(),
                                  value: str=REQ()) -> HttpResponse:
    """Single-property convenience wrapper around the bulk
    update_subscription_properties_backend endpoint."""
    single_change = {
        "property": property,
        "stream_id": stream_id,
        "value": value,
    }
    return update_subscription_properties_backend(
        request, user_profile, subscription_data=[single_change])
@has_request_variables
def update_subscription_properties_backend(
        request: HttpRequest, user_profile: UserProfile,
        subscription_data: List[Dict[str, Any]]=REQ(
            validator=check_list(
                check_dict([("stream_id", check_int),
                            ("property", check_string),
                            ("value", check_union([check_string, check_bool]))]),
            ),
        ),
) -> HttpResponse:
    """
    This is the entry point to changing subscription properties. This
    is a bulk endpoint: requestors always provide a subscription_data
    list containing dictionaries for each stream of interest.

    Requests are of the form:

    [{"stream_id": "1", "property": "is_muted", "value": False},
     {"stream_id": "1", "property": "color", "value": "#c2c2c2"}]
    """
    # Maps each mutable property name to the validator used for its value.
    property_converters = {"color": check_color, "in_home_view": check_bool,
                           "is_muted": check_bool,
                           "desktop_notifications": check_bool,
                           "audible_notifications": check_bool,
                           "push_notifications": check_bool,
                           "email_notifications": check_bool,
                           "pin_to_top": check_bool,
                           "wildcard_mentions_notify": check_bool}
    response_data = []

    # Changes are applied one at a time; an invalid entry aborts the request
    # after earlier entries in the list have already been applied.
    for change in subscription_data:
        stream_id = change["stream_id"]
        property = change["property"]
        value = change["value"]

        if property not in property_converters:
            return json_error(_("Unknown subscription property: {}").format(property))

        (stream, sub) = access_stream_by_id(user_profile, stream_id)
        if sub is None:
            return json_error(_("Not subscribed to stream id {}").format(stream_id))

        try:
            value = property_converters[property](property, value)
        except ValidationError as error:
            return json_error(error.message)

        do_change_subscription_property(user_profile, sub, stream,
                                        property, value, acting_user=user_profile)

        response_data.append({'stream_id': stream_id,
                              'property': property,
                              'value': value})

    return json_success({"subscription_data": response_data})
| {
"content_hash": "b739efef7416ecaaa8d0ec0b20e2ff0e",
"timestamp": "",
"source": "github",
"line_count": 811,
"max_line_length": 109,
"avg_line_length": 42.057953144266335,
"alnum_prop": 0.6385997830484623,
"repo_name": "showell/zulip",
"id": "8e53fa7d016d927e028867472d12634f0df749f0",
"size": "34109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/views/streams.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "433235"
},
{
"name": "Dockerfile",
"bytes": "2941"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "634357"
},
{
"name": "Handlebars",
"bytes": "235334"
},
{
"name": "JavaScript",
"bytes": "3341135"
},
{
"name": "Perl",
"bytes": "8594"
},
{
"name": "Puppet",
"bytes": "79720"
},
{
"name": "Python",
"bytes": "8120030"
},
{
"name": "Ruby",
"bytes": "8480"
},
{
"name": "Shell",
"bytes": "133132"
},
{
"name": "TypeScript",
"bytes": "20603"
}
],
"symlink_target": ""
} |
'''
Created on 10 dec. 2013
@author: TuRz4m
'''
import ConfigParser
import logging
import sys
from api.APIUtils import BetaSerieAPI, BadLoginException, SickBeardAPI
import os.path
# Module logger: INFO level, mirrored to stderr and to logs/BetaBeard.log.
# NOTE(review): FileHandler assumes the 'logs' directory already exists — confirm.
logger = logging.getLogger(__name__)
logging.getLogger(__name__).setLevel(logging.INFO)
logging.getLogger(__name__).addHandler(logging.StreamHandler())
logging.getLogger(__name__).addHandler(logging.FileHandler("logs/BetaBeard.log"))

# User-editable settings file and the technical state ("db") file.
configFile = "BetaBeard.ini"
configDbFile = "BetaBeard.db"

# Filled in at startup by checkConfig() and loadDb() respectively.
param = {}
paramDb = {}
"""
Load the config file & fill all the var.
"""
def checkConfig(config):
try:
global param
param['login'] = config.get("BetaSeries", "login")
param['password'] = config.get("BetaSeries", "password")
param['sburl'] = config.get("SickBeard", "url")
if (config.getboolean("SickBeard", "https")):
param['scheme'] = "https"
param['apikey'] = config.get("SickBeard", "apikey")
param['location'] = config.get("SickBeard", "location")
if (param['location'] == ""):
param['location'] = None
param['lang'] = config.get("SickBeard", "lang")
if (param['lang'] == ""):
param['lang'] = None
param['flatten_folder'] = config.get("SickBeard", "flatten_folder")
if (param['flatten_folder'] == ""):
param['flatten_folder'] = None
param['status'] = config.get("SickBeard", "status")
if (param['status'] == ""):
param['status'] = None
param['initial'] = config.get("SickBeard", "initial")
if (param['initial'] == ""):
param['initial'] = None
param['archive'] = config.get("SickBeard", "archive")
if (param['archive'] == ""):
param['archive'] = None
param['fullUpdate'] = config.getboolean("BetaBeard", "fullUpdate")
param['checkTimeLine'] = config.getboolean("BetaBeard", "checkTimeLine")
param['demoMode'] = config.getboolean("BetaBeard", "demoMode")
except ConfigParser.NoOptionError as ex:
logger.error("[BetaBeard] Error in config file : %s", ex)
return False
except ConfigParser.NoSectionError as ex:
logger.error("[BetaBeard] Error in config file : %s", ex)
return False
return True
def loadDb(configToLoad):
    """Read the technical state file into the global ``paramDb`` dict,
    defaulting last_event_id to None when absent or empty."""
    global paramDb
    if os.path.exists(configDbFile):
        configToLoad.read(configDbFile)
    try:
        stored_id = configToLoad.get("BetaBeard", "last_event_id")
        paramDb['last_event_id'] = stored_id if stored_id != "" else None
    except ConfigParser.NoOptionError:
        logger.debug("[BetaBeard] Config file Tech not found. Use default.")
        paramDb['last_event_id'] = None
    except ConfigParser.NoSectionError:
        # First run: create the section so updateDb() can write into it later.
        logger.debug("[BetaBeard] Config file Tech not found. Use default.")
        configToLoad.add_section("BetaBeard")
        paramDb['last_event_id'] = None
"""
Update the BetaBeard-tech.ini
"""
def updateDb(configToSave):
logger.debug("[BetaBeard] Update file %s", configDbFile)
cfgfile = open(configDbFile,'w')
configToSave.write(cfgfile)
cfgfile.close()
logger.debug("[BetaBeard] File %s updated.", configDbFile)
if __name__ == '__main__':
    # First of all, we need to read the BetaBeard.ini config file.
    config = ConfigParser.SafeConfigParser()
    configDb = ConfigParser.SafeConfigParser()
    if (os.path.exists(configFile) == False):
        logger.error("[BetaBeard] Config file %s not found.", configFile)
        sys.exit(0)
    config.read(configFile)
    loadDb(configDb)
    if checkConfig(config) == False:
        sys.exit(0)
    # ----------- Init BetaSeries ----------- #
    try:
        beta = BetaSerieAPI(param['login'], param['password'])
    except BadLoginException as ex:
        logger.error("[BetaBeard] can't log into BetaSeries.com : %s", ex.value)
        sys.exit(0)
    logger.info("[BetaBeard] Login successfull.")
    # ----------- Init SickBeard ----------- #
    sickBeard = SickBeardAPI(param['sburl'], param['scheme'], param['apikey'])
    # ----------- Test SickBeard ----------- #
    if (sickBeard.ping() == False):
        logger.error("[BetaBeard] Can't ping SickBeard on url : %s://%s with apikey = %s",param['scheme'], param['sburl'], param['apikey'])
        sys.exit(0)
    logger.info("[BetaBeard] Ping SickBeard successfull.")
    # ----------- If fullUpdate, we retrieve all the current show and add them to sickbear.----------- #
    # No event id saved yet: this is the first run.
    if paramDb['last_event_id'] == None:
        logger.debug("[BetaBeard] last_index_id is None")
        if param['fullUpdate'] == True:
            shows = beta.show_list();
            logger.debug("[BetaBeard] shows : %s", shows)
            logger.info("[BetaBeard] Start processing shows.")
            for show in shows:
                # show is a (tvdbid, title) pair.
                logger.info("[BetaBeard] Add show in SickBeard : %s (%s)", show[1], show[0])
                if (param['demoMode'] == False):
                    success,message = sickBeard.add_show(show[0], param['location'], param['lang'], param['flatten_folder'], param['status'], param['initial'], param['archive'])
                    if (success == False):
                        logger.error("[BetaBeard] Can't add show %s (%s) to sickbeard : %s", show[1], show[0], message)
        # ----------- retrieve last event processed in betaseries----------- #
        param['last_event_id'], emptyList = beta.timeline_since(None)
    elif param['checkTimeLine']:
        # Incremental run: replay BetaSeries timeline events since the saved id.
        logger.info("[BetaBeard] Start processing timeline.")
        param['last_event_id'], events = beta.timeline_since(paramDb['last_event_id'])
        logger.debug("[BetaBeard] Processing timeline : %s", events)
        if (events != None):
            for event in events:
                logger.debug("[BetaBeard] Event : %s", event)
                # - ADD SERIE - #
                if (event['type'] == 'add_serie'):
                    betaid = str(event['ref_id']);
                    tvdbid, title = beta.shows_tvdbid(betaid)
                    logger.info("[BetaBeard] Add Show to sickbeard : %s (%s)", title, tvdbid)
                    if (param['demoMode'] == False):
                        success,message = sickBeard.add_show(tvdbid, param['location'], param['lang'], param['flatten_folder'], param['status'], param['initial'], param['archive'])
                        if (success == False):
                            logger.error("[BetaBeard] Can't add show %s (%s) to sickbeard : %s.", title, tvdbid, message)
                # - DELETE SERIE - #
                elif (event['type'] == 'del_serie'):
                    betaid = str(event['ref_id']);
                    tvdbid, title = beta.shows_tvdbid(betaid)
                    logger.info("[BetaBeard] Delete Show from sickbeard : %s (%s)", title, tvdbid)
                    if (param['demoMode'] == False):
                        success, message = sickBeard.del_show(tvdbid)
                        if (success == False):
                            logger.error("[BetaBeard] Can't delete show %s (%s) from sickbeard : %s.", title, tvdbid, message)
                # - PAUSE SERIE - #
                elif (event['type'] == 'archive'):
                    betaid = str(event['ref_id']);
                    tvdbid, title = beta.shows_tvdbid(betaid)
                    logger.info("[BetaBeard] Archive Show on sickbeard : %s (%s)", title, tvdbid)
                    if (param['demoMode'] == False):
                        success, message = sickBeard.pause_show(tvdbid, 1)
                        if (success == False):
                            logger.error("[BetaBeard] Can't pause show %s (%s) on sickbeard : %s.", title, tvdbid, message)
                # - UNPAUSE SERIE - #
                elif (event['type'] == 'unarchive'):
                    betaid = str(event['ref_id']);
                    tvdbid, title = beta.shows_tvdbid(betaid)
                    logger.info("[BetaBeard] UnArchive Show on sickbeard : %s (%s)", title, tvdbid)
                    if (param['demoMode'] == False):
                        success, message = sickBeard.pause_show(tvdbid, 0)
                        if (success == False):
                            logger.error("[BetaBeard] Can't unpause show %s (%s) on sickbeard : %s.", title, tvdbid, message)
        logger.info("[BetaBeard] Timeline processing done.")
    # ----------- Update Last_event_id in config file.----------- #
    # NOTE(review): if last_event_id was already set and checkTimeLine is off,
    # param['last_event_id'] is never assigned and this lookup would raise a
    # KeyError — confirm intended configuration combinations.
    if (param['last_event_id'] != None):
        logger.debug("[BetaBeard] update config with last_event_id=%s", param['last_event_id'])
        configDb.set("BetaBeard", "last_event_id", str(param['last_event_id']));
        updateDb(configDb);
    else:
        logger.debug("[BetaBeard] Can't update config file because last_event_id is null")
| {
"content_hash": "611aab5308d7df40eac2ad23517ac552",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 185,
"avg_line_length": 38.736170212765956,
"alnum_prop": 0.5448753158299462,
"repo_name": "TuRz4m/BetaBeard",
"id": "3e421af24838ddaca7fa83c53e020f951139a8b2",
"size": "9103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "betabeard/BetaBeard.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30976"
}
],
"symlink_target": ""
} |
from datetime import timedelta, datetime
from mock import Mock, MagicMock
from azurelinuxagent.common.osutil.default import DefaultOSUtil
from azurelinuxagent.common.protocol.goal_state import RemoteAccess
from azurelinuxagent.common.protocol.util import ProtocolUtil
from azurelinuxagent.common.protocol.wire import WireProtocol
from azurelinuxagent.ga.remoteaccess import RemoteAccessHandler
from tests.tools import AgentTestCase, load_data, patch, clear_singleton_instances
from tests.protocol.mocks import mock_wire_protocol
from tests.protocol.mockwiredata import DATA_FILE, DATA_FILE_REMOTE_ACCESS
class MockOSUtil(DefaultOSUtil):
    """In-memory stand-in for DefaultOSUtil: user accounts live in a dict of
    8-tuples (name, password, -, -, comment, -, -, expiration) instead of the
    real passwd database."""

    def __init__(self):  # pylint: disable=super-init-not-called
        self.all_users = {}
        self.sudo_users = set()
        self.jit_enabled = True

    def useradd(self, username, expiration=None, comment=None):
        if username == "":
            raise Exception("test exception for bad username")
        if username in self.all_users:
            raise Exception("test exception, user already exists")
        record = (username, None, None, None, comment, None, None, expiration)
        self.all_users[username] = record

    def conf_sudoer(self, username, nopasswd=False, remove=False):
        if remove:
            self.sudo_users.remove(username)
        else:
            self.sudo_users.add(username)

    def chpasswd(self, username, password, crypt_id=6, salt_len=10):
        if password == "":
            raise Exception("test exception for bad password")
        old = self.all_users[username]
        # Replace only the password slot (index 1); keep the rest of the record.
        self.all_users[username] = (old[0], password) + old[2:]

    def del_account(self, username):
        if username == "":
            raise Exception("test exception, bad data")
        if username not in self.all_users:
            raise Exception("test exception, user does not exist to delete")
        self.all_users.pop(username)

    def get_users(self):
        return self.all_users.values()
def get_user_dictionary(users):
    """Index user records by their first field (the username).

    Later duplicates overwrite earlier ones, matching the original loop.
    """
    return {user[0]: user for user in users}
def mock_add_event(name, op, is_success, version, message):
    """Telemetry stub: capture the add_event arguments on the test class so
    individual tests can assert on what was reported."""
    TestRemoteAccessHandler.eventing_data = (name, op, is_success, version, message)
class TestRemoteAccessHandler(AgentTestCase):
eventing_data = [()]
def setUp(self):
super(TestRemoteAccessHandler, self).setUp()
# Since ProtocolUtil is a singleton per thread, we need to clear it to ensure that the test cases do not
# reuse a previous state
clear_singleton_instances(ProtocolUtil)
for data in TestRemoteAccessHandler.eventing_data:
del data
# add_user tests
    @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?")
    def test_add_user(self, *_):
        """_add_user creates a JIT account whose expiration gets a one-day pad."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            tstpassword = "]aPPEv}uNg1FPnl?"
            tstuser = "foobar"
            expiration_date = datetime.utcnow() + timedelta(days=1)
            pwd = tstpassword
            rah._add_user(tstuser, pwd, expiration_date)
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser))
            actual_user = users[tstuser]
            # The handler pads the requested expiration by one extra day.
            expected_expiration = (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d")
            self.assertEqual(actual_user[7], expected_expiration)
            # Index 4 is the comment field; JIT accounts carry this marker.
            self.assertEqual(actual_user[4], "JIT_Account")
    @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?")
    def test_add_user_bad_creation_data(self, *_):
        """An empty username must raise and leave no account behind."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            tstpassword = "]aPPEv}uNg1FPnl?"
            tstuser = ""  # invalid: MockOSUtil.useradd rejects empty names
            expiration = datetime.utcnow() + timedelta(days=1)
            pwd = tstpassword
            error = "test exception for bad username"
            self.assertRaisesRegex(Exception, error, rah._add_user, tstuser, pwd, expiration)
            self.assertEqual(0, len(rah._os_util.get_users()))
    @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="")
    def test_add_user_bad_password_data(self, *_):
        """An empty (decrypted) password must raise and roll back the account."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            tstpassword = ""  # invalid: MockOSUtil.chpasswd rejects empty passwords
            tstuser = "foobar"
            expiration = datetime.utcnow() + timedelta(days=1)
            pwd = tstpassword
            error = "test exception for bad password"
            self.assertRaisesRegex(Exception, error, rah._add_user, tstuser, pwd, expiration)
            # The partially-created user must have been cleaned up.
            self.assertEqual(0, len(rah._os_util.get_users()))
    @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?")
    def test_add_user_already_existing(self, _):
        """Adding a duplicate user raises and must not clobber the original."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            tstpassword = "]aPPEv}uNg1FPnl?"
            tstuser = "foobar"
            expiration_date = datetime.utcnow() + timedelta(days=1)
            pwd = tstpassword
            rah._add_user(tstuser, pwd, expiration_date)
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser))
            self.assertEqual(1, len(users.keys()))
            actual_user = users[tstuser]
            self.assertEqual(actual_user[7], (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d"))
            # add the new duplicate user, ensure it's not created and does not overwrite the existing user.
            # this does not test the user add function as that's mocked, it tests processing skips the remaining
            # calls after the initial failure
            new_user_expiration = datetime.utcnow() + timedelta(days=5)
            self.assertRaises(Exception, rah._add_user, tstuser, pwd, new_user_expiration)
            # refresh users
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertTrue(tstuser in users, "{0} missing from users after dup user attempted".format(tstuser))
            self.assertEqual(1, len(users.keys()))
            actual_user = users[tstuser]
            # Expiration is unchanged: the duplicate's later date was not applied.
            self.assertEqual(actual_user[7], (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d"))
# delete_user tests
    @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?")
    def test_delete_user(self, *_):
        """_remove_user deletes a previously created JIT account."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            tstpassword = "]aPPEv}uNg1FPnl?"
            tstuser = "foobar"
            expiration_date = datetime.utcnow() + timedelta(days=1)
            expected_expiration = (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d")  # pylint: disable=unused-variable
            pwd = tstpassword
            rah._add_user(tstuser, pwd, expiration_date)
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser))
            rah._remove_user(tstuser)
            # refresh users
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertFalse(tstuser in users)
    @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?")
    def test_handle_new_user(self, _):
        """_handle_remote_access creates the account described by the goal
        state XML fixture (wire/remote_access_single_account.xml)."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            data_str = load_data('wire/remote_access_single_account.xml')
            remote_access = RemoteAccess(data_str)
            tstuser = remote_access.user_list.users[0].name
            expiration_date = datetime.utcnow() + timedelta(days=1)
            # Expiration is transmitted in RFC-1123-like form with a UTC suffix.
            expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC"
            remote_access.user_list.users[0].expiration = expiration
            rah._remote_access = remote_access
            rah._handle_remote_access()
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser))
            actual_user = users[tstuser]
            expected_expiration = (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d")
            self.assertEqual(actual_user[7], expected_expiration)
            self.assertEqual(actual_user[4], "JIT_Account")
    def test_do_not_add_expired_user(self):
        """A goal-state user whose expiration is already in the past must not be created."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            data_str = load_data('wire/remote_access_single_account.xml')
            remote_access = RemoteAccess(data_str)
            # expiration two days in the past
            expiration = (datetime.utcnow() - timedelta(days=2)).strftime("%a, %d %b %Y %H:%M:%S ") + "UTC"
            remote_access.user_list.users[0].expiration = expiration
            rah._remote_access = remote_access
            rah._handle_remote_access()
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertFalse("testAccount" in users)
    def test_error_add_user(self):
        """An undecryptable password makes _add_user raise CryptError and no user is left behind."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            tstuser = "foobar"
            expiration = datetime.utcnow() + timedelta(days=1)
            # not valid base64, so secret decryption fails
            pwd = "bad password"
            error = r"\[CryptError\] Error decoding secret\nInner error: Incorrect padding"
            self.assertRaisesRegex(Exception, error, rah._add_user, tstuser, pwd, expiration)
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertEqual(0, len(users))
    def test_handle_remote_access_no_users(self):
        """A goal state with an empty account list creates no users."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            data_str = load_data('wire/remote_access_no_accounts.xml')
            remote_access = RemoteAccess(data_str)
            rah._remote_access = remote_access
            rah._handle_remote_access()
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertEqual(0, len(users.keys()))
def test_handle_remote_access_validate_jit_user_valid(self):
rah = RemoteAccessHandler(Mock())
comment = "JIT_Account"
result = rah._is_jit_user(comment)
self.assertTrue(result, "Did not identify '{0}' as a JIT_Account".format(comment))
def test_handle_remote_access_validate_jit_user_invalid(self):
rah = RemoteAccessHandler(Mock())
test_users = ["John Doe", None, "", " "]
failed_results = ""
for user in test_users:
if rah._is_jit_user(user):
failed_results += "incorrectly identified '{0} as a JIT_Account'. ".format(user)
if len(failed_results) > 0:
self.fail(failed_results)
@patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?")
def test_handle_remote_access_multiple_users(self, _):
with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
rah = RemoteAccessHandler(Mock())
data_str = load_data('wire/remote_access_two_accounts.xml')
remote_access = RemoteAccess(data_str)
testusers = []
count = 0
while count < 2:
user = remote_access.user_list.users[count].name
expiration_date = datetime.utcnow() + timedelta(days=count + 1)
expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC"
remote_access.user_list.users[count].expiration = expiration
testusers.append(user)
count += 1
rah._remote_access = remote_access
rah._handle_remote_access()
users = get_user_dictionary(rah._os_util.get_users())
self.assertTrue(testusers[0] in users, "{0} missing from users".format(testusers[0]))
self.assertTrue(testusers[1] in users, "{0} missing from users".format(testusers[1]))
    @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?")
    # ten accounts is the max fabric supports in the Goal State
    def test_handle_remote_access_ten_users(self, _):
        """All ten goal-state accounts are provisioned."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            data_str = load_data('wire/remote_access_10_accounts.xml')
            remote_access = RemoteAccess(data_str)
            count = 0
            for user in remote_access.user_list.users:
                count += 1
                user.name = "tstuser{0}".format(count)
                expiration_date = datetime.utcnow() + timedelta(days=count)
                user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC"
            rah._remote_access = remote_access
            rah._handle_remote_access()
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertEqual(10, len(users.keys()))
    @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?")
    def test_handle_remote_access_user_removed(self, _):
        """Provision ten users, then clear the goal-state user list."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            data_str = load_data('wire/remote_access_10_accounts.xml')
            remote_access = RemoteAccess(data_str)
            count = 0
            for user in remote_access.user_list.users:
                count += 1
                user.name = "tstuser{0}".format(count)
                expiration_date = datetime.utcnow() + timedelta(days=count)
                user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC"
            rah._remote_access = remote_access
            rah._handle_remote_access()
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertEqual(10, len(users.keys()))
            del rah._remote_access.user_list.users[:]
            # NOTE(review): _handle_remote_access() is not re-run and `users` is not
            # refreshed after clearing the list, so this re-asserts the stale snapshot
            # above — confirm whether a re-run + refresh (and a different expected
            # count) was intended here.
            self.assertEqual(10, len(users.keys()))
    @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?")
    def test_handle_remote_access_bad_data_and_good_data(self, _):
        """An account with an empty name is skipped; the other nine are created."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            data_str = load_data('wire/remote_access_10_accounts.xml')
            remote_access = RemoteAccess(data_str)
            count = 0
            for user in remote_access.user_list.users:
                count += 1
                user.name = "tstuser{0}".format(count)
                if count == 2:
                    # deliberately corrupt one entry
                    user.name = ""
                expiration_date = datetime.utcnow() + timedelta(days=count)
                user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC"
            rah._remote_access = remote_access
            rah._handle_remote_access()
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertEqual(9, len(users.keys()))
    @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?")
    def test_handle_remote_access_deleted_user_readded(self, _):
        """A user deleted behind the handler's back is re-added on the next pass."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            data_str = load_data('wire/remote_access_single_account.xml')
            remote_access = RemoteAccess(data_str)
            tstuser = remote_access.user_list.users[0].name
            expiration_date = datetime.utcnow() + timedelta(days=1)
            expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC"
            remote_access.user_list.users[0].expiration = expiration
            rah._remote_access = remote_access
            rah._handle_remote_access()
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser))
            # simulate an out-of-band deletion by wiping the mock OS user table
            os_util = rah._os_util
            os_util.__class__ = MockOSUtil
            os_util.all_users.clear()  # pylint: disable=no-member
            # refresh users
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertTrue(tstuser not in users)
            rah._handle_remote_access()
            # refresh users
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser))
    @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?")
    @patch('azurelinuxagent.common.osutil.get_osutil', return_value=MockOSUtil())
    @patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol', return_value=WireProtocol("12.34.56.78"))
    @patch('azurelinuxagent.common.protocol.wire.WireClient.get_remote_access', return_value="asdf")
    def test_remote_access_handler_run_bad_data(self, _1, _2, _3, _4):
        """run() must not delete an existing user when the remote access payload is garbage."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            tstpassword = "]aPPEv}uNg1FPnl?"
            tstuser = "foobar"
            expiration_date = datetime.utcnow() + timedelta(days=1)
            pwd = tstpassword
            rah._add_user(tstuser, pwd, expiration_date)
            users = get_user_dictionary(rah._os_util.get_users())
            self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser))
            rah.run()
            self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser))
    @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?")
    def test_handle_remote_access_multiple_users_one_removed(self, _):
        """Dropping one account from the goal state removes exactly that user."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            data_str = load_data('wire/remote_access_10_accounts.xml')
            remote_access = RemoteAccess(data_str)
            count = 0
            for user in remote_access.user_list.users:
                count += 1
                user.name = "tstuser{0}".format(count)
                expiration_date = datetime.utcnow() + timedelta(days=count)
                user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC"
            rah._remote_access = remote_access
            rah._handle_remote_access()
            users = rah._os_util.get_users()
            self.assertEqual(10, len(users))
            # now remove the user from RemoteAccess
            deleted_user = rah._remote_access.user_list.users[3]
            del rah._remote_access.user_list.users[3]
            rah._handle_remote_access()
            users = rah._os_util.get_users()
            self.assertTrue(deleted_user not in users, "{0} still in users".format(deleted_user))
            self.assertEqual(9, len(users))
    @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?")
    def test_handle_remote_access_multiple_users_null_remote_access(self, _):
        """Setting _remote_access to None removes all previously provisioned JIT users."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            data_str = load_data('wire/remote_access_10_accounts.xml')
            remote_access = RemoteAccess(data_str)
            count = 0
            for user in remote_access.user_list.users:
                count += 1
                user.name = "tstuser{0}".format(count)
                expiration_date = datetime.utcnow() + timedelta(days=count)
                user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC"
            rah._remote_access = remote_access
            rah._handle_remote_access()
            users = rah._os_util.get_users()
            self.assertEqual(10, len(users))
            # now remove the user from RemoteAccess
            rah._remote_access = None
            rah._handle_remote_access()
            users = rah._os_util.get_users()
            self.assertEqual(0, len(users))
    @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?")
    def test_handle_remote_access_multiple_users_error_with_null_remote_access(self, _):
        """Same flow as the previous test with _remote_access set to None.

        NOTE(review): this is a byte-for-byte duplicate of
        test_handle_remote_access_multiple_users_null_remote_access and,
        despite the name, exercises no error path — confirm what the
        intended error scenario was or remove the duplicate.
        """
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            data_str = load_data('wire/remote_access_10_accounts.xml')
            remote_access = RemoteAccess(data_str)
            count = 0
            for user in remote_access.user_list.users:
                count += 1
                user.name = "tstuser{0}".format(count)
                expiration_date = datetime.utcnow() + timedelta(days=count)
                user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC"
            rah._remote_access = remote_access
            rah._handle_remote_access()
            users = rah._os_util.get_users()
            self.assertEqual(10, len(users))
            # now remove the user from RemoteAccess
            rah._remote_access = None
            rah._handle_remote_access()
            users = rah._os_util.get_users()
            self.assertEqual(0, len(users))
def test_remove_user_error(self):
with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
rah = RemoteAccessHandler(Mock())
error = "test exception, bad data"
self.assertRaisesRegex(Exception, error, rah._remove_user, "")
def test_remove_user_not_exists(self):
with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
rah = RemoteAccessHandler(Mock())
user = "bob"
error = "test exception, user does not exist to delete"
self.assertRaisesRegex(Exception, error, rah._remove_user, user)
    @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?")
    def test_handle_remote_access_remove_and_add(self, _):
        """Renaming a goal-state account removes the old user and creates the new one."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            rah = RemoteAccessHandler(Mock())
            data_str = load_data('wire/remote_access_10_accounts.xml')
            remote_access = RemoteAccess(data_str)
            count = 0
            for user in remote_access.user_list.users:
                count += 1
                user.name = "tstuser{0}".format(count)
                expiration_date = datetime.utcnow() + timedelta(days=count)
                user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC"
            rah._remote_access = remote_access
            rah._handle_remote_access()
            users = rah._os_util.get_users()
            self.assertEqual(10, len(users))
            # now remove the user from RemoteAccess
            new_user = "tstuser11"
            deleted_user = rah._remote_access.user_list.users[3]
            rah._remote_access.user_list.users[3].name = new_user
            rah._handle_remote_access()
            users = rah._os_util.get_users()
            self.assertTrue(deleted_user not in users, "{0} still in users".format(deleted_user))
            self.assertTrue(new_user in [u[0] for u in users], "user {0} not in users".format(new_user))
            self.assertEqual(10, len(users))
    @patch('azurelinuxagent.ga.remoteaccess.add_event', side_effect=mock_add_event)
    def test_remote_access_handler_run_error(self, _):
        """A protocol failure inside run() is reported as an unsuccessful telemetry event."""
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=MockOSUtil()):
            mock_protocol = WireProtocol("foo.bar")
            mock_protocol.client.get_remote_access = MagicMock(side_effect=Exception("foobar!"))
            rah = RemoteAccessHandler(mock_protocol)
            rah.run()
            print(TestRemoteAccessHandler.eventing_data)
            # eventing_data[4] is the event message, eventing_data[2] the is_success flag
            check_message = "foobar!"
            self.assertTrue(check_message in TestRemoteAccessHandler.eventing_data[4],
                            "expected message {0} not found in {1}"
                            .format(check_message, TestRemoteAccessHandler.eventing_data[4]))
            self.assertEqual(False, TestRemoteAccessHandler.eventing_data[2], "is_success is true")
    def test_remote_access_handler_should_retrieve_users_when_it_is_invoked_the_first_time(self):
        """The very first run() retrieves the current OS users exactly once."""
        mock_os_util = MagicMock()
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=mock_os_util):
            with mock_wire_protocol(DATA_FILE) as mock_protocol:
                rah = RemoteAccessHandler(mock_protocol)
                rah.run()
                self.assertTrue(len(mock_os_util.get_users.call_args_list) == 1, "The first invocation of remote access should have retrieved the current users")
    def test_remote_access_handler_should_retrieve_users_when_goal_state_contains_jit_users(self):
        """A goal state that includes JIT users triggers retrieval of the current OS users."""
        mock_os_util = MagicMock()
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=mock_os_util):
            with mock_wire_protocol(DATA_FILE_REMOTE_ACCESS) as mock_protocol:
                rah = RemoteAccessHandler(mock_protocol)
                rah.run()
                self.assertTrue(len(mock_os_util.get_users.call_args_list) > 0, "A goal state with jit users did not retrieve the current users")
    def test_remote_access_handler_should_not_retrieve_users_when_goal_state_does_not_contain_jit_users(self):
        """A subsequent goal state without JIT users does not re-retrieve the OS users."""
        mock_os_util = MagicMock()
        with patch("azurelinuxagent.ga.remoteaccess.get_osutil", return_value=mock_os_util):
            with mock_wire_protocol(DATA_FILE) as mock_protocol:
                rah = RemoteAccessHandler(mock_protocol)
                rah.run()  # this will trigger one call to retrieve the users
                mock_protocol.mock_wire_data.set_incarnation(123)  # mock a new goal state; the data file does not include any jit users
                rah.run()
                self.assertTrue(len(mock_os_util.get_users.call_args_list) == 1, "A goal state without jit users retrieved the current users")
| {
"content_hash": "62bc08316699a55af1f17d39622ce97f",
"timestamp": "",
"source": "github",
"line_count": 494,
"max_line_length": 161,
"avg_line_length": 54.520242914979754,
"alnum_prop": 0.6244012920951992,
"repo_name": "Azure/WALinuxAgent",
"id": "37187702e3812f537132ef04d4288be354c9a81e",
"size": "27557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ga/test_remoteaccess_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3073264"
},
{
"name": "Shell",
"bytes": "19249"
}
],
"symlink_target": ""
} |
"""
Script to handle interwiki links based on Wikibase.
This script connects pages to Wikibase items using language links on the page.
If multiple language links are present, and they are connected to different
items, the bot skips. After connecting the page to an item, language links
can be removed from the page.
These command line parameters can be used to specify which pages to work on:
&params;
Furthermore, the following command line parameters are supported:
-clean Clean pages.
-create Create items.
-merge Merge items.
-summary: Use your own edit summary for cleaning the page.
"""
# (C) Pywikibot team, 2015-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals, absolute_import
__version__ = '$Id$'
#
import pywikibot
from pywikibot import pagegenerators, output, warning
from pywikibot.bot import ExistingPageBot, SingleSiteBot, suggest_help
# This is required for the text that is shown when you run this script
# with the parameter -help.
# This is required for the text that is shown when you run this script
# with the parameter -help.
# NOTE: the key was garbled by HTML-entity decoding ('&para;' + 'ms;');
# pywikibot's help formatter expects the literal '&params;' placeholder.
docuReplacements = {
    '&params;': pagegenerators.parameterHelp,
}
# Allowed namespaces: main (0), project (4), template (10), category (14)
namespaces = [0, 4, 10, 14]
class IWBot(ExistingPageBot, SingleSiteBot):
    """The bot for interwiki."""
    def __init__(self, **kwargs):
        """Construct the bot.

        @raises ValueError: the treated site has no data repository;
            plain interwiki.py should be used in that case.
        """
        self.availableOptions.update({
            'clean': False,
            'create': False,
            'merge': False,
            'summary': None,
            'ignore_ns': False,  # used by interwikidata_tests only
        })
        super(IWBot, self).__init__(**kwargs)
        if not self.site.has_data_repository:
            raise ValueError('{site} does not have a data repository, '
                             'use interwiki.py instead.'.format(
                                 site=self.site))
        self.repo = self.site.data_repository()
        # fall back to the translated default summary when none was given
        if not self.getOption('summary'):
            self.options['summary'] = pywikibot.i18n.twtranslate(
                self.site, 'interwikidata-clean-summary')
    def treat_page(self):
        """Check page.

        Find the Wikibase item for the current page and, depending on the
        options, add a sitelink, create a new item, merge items and/or
        remove the local language links.
        """
        if (self.current_page.namespace() not in namespaces and
                not self.getOption('ignore_ns')):
            output('{page} is not in allowed namespaces, skipping'
                   .format(page=self.current_page.title(
                       asLink=True)))
            return False
        self.iwlangs = pywikibot.textlib.getLanguageLinks(
            self.current_page.text, insite=self.current_page.site)
        if not self.iwlangs:
            output('No interlanguagelinks on {page}'.format(
                page=self.current_page.title(asLink=True)))
            return False
        try:
            item = pywikibot.ItemPage.fromPage(self.current_page)
        except pywikibot.NoPage:
            item = None
        if item is None:
            # try_to_add returns None (no linked item found) or False (conflict)
            item = self.try_to_add()
            if self.getOption('create') and item is None:
                item = self.create_item()
        else:
            if self.getOption('merge'):
                item = self.try_to_merge(item)
        # `item` may be False after a conflict, which skips the cleanup
        if item and self.getOption('clean'):
            self.current_item = item
            self.clean_page()
    def create_item(self):
        """Create item in repo for current_page, with sitelinks and labels
        for the page itself and every existing interwiki target."""
        data = {'sitelinks':
                {self.site.dbName():
                 {'site': self.site.dbName(),
                  'title': self.current_page.title()}
                 },
                'labels':
                {self.site.lang:
                 {'language': self.site.lang,
                  'value': self.current_page.title()}
                 }
                }
        for site, page in self.iwlangs.items():
            if not page.exists():
                continue
            dbname = site.dbName()
            title = page.title()
            data['sitelinks'][dbname] = {'site': dbname, 'title': title}
            data['labels'][site.lang] = {'language': site.lang, 'value': title}
        summary = ('Bot: New item with sitelink(s) from %s'
                   % self.current_page.title(asLink=True, insite=self.repo))
        item = pywikibot.ItemPage(self.repo)
        item.editEntity(data, new='item', summary=summary)
        output('Created item {item}'.format(item=item.getID()))
        return item
    def handle_complicated(self):
        """
        Handle pages when they have interwiki conflict.

        When this method returns True it means conflict has resolved
        and it's okay to clean old interwiki links.
        This method should change self.current_item and fix conflicts.
        Change it in subclasses.
        """
        return False
    def clean_page(self):
        """Clean interwiki links from the page.

        Skips (returns False) when the page carries interwiki links to
        sites the current item has no sitelink for and the conflict is
        not resolved by handle_complicated().
        """
        if not self.iwlangs:
            return
        dbnames = [iw_site.dbName() for iw_site in self.iwlangs]
        if set(dbnames) - set(self.current_item.sitelinks.keys()):
            if not self.handle_complicated():
                warning('Interwiki conflict in %s, skipping...' %
                        self.current_page.title(asLink=True))
                return False
        output('Cleaning up the page')
        new_text = pywikibot.textlib.removeLanguageLinks(
            self.current_page.text, site=self.current_page.site)
        self.put_current(new_text, summary=self.getOption('summary'))
    def get_items(self):
        """Return all items of pages linked through the interwiki.

        Non-existing pages and pages without an item are reported and
        skipped; the result is a set, so duplicates collapse.
        """
        wd_data = set()
        for iw_page in self.iwlangs.values():
            if not iw_page.exists():
                warning('Interwiki %s does not exist, skipping...' %
                        iw_page.title(asLink=True))
                continue
            try:
                wd_data.add(pywikibot.ItemPage.fromPage(iw_page))
            except pywikibot.NoPage:
                output('Interwiki %s does not have an item' %
                       iw_page.title(asLink=True))
        return wd_data
    def try_to_add(self):
        """Add current page in repo.

        Returns None when no linked item exists (caller may create one),
        False on an interwiki conflict, or the item after adding the
        sitelink.
        """
        wd_data = self.get_items()
        if not wd_data:
            # will create a new item with interwiki
            return None
        if len(wd_data) > 1:
            # the language links point to more than one item
            warning('Interwiki conflict in %s, skipping...' %
                    self.current_page.title(asLink=True))
            return False
        item = list(wd_data).pop()
        if self.current_page.site.dbName() in item.sitelinks:
            # the item already links to a different page on this wiki
            warning('Interwiki conflict in %s, skipping...' %
                    item.title(asLink=True))
            return False
        output('Adding link to %s' % item.title())
        item.setSitelink(self.current_page)
        return item
    def try_to_merge(self, item):
        """Merge two items.

        Returns the merge target on success, None when the language links
        yield no item, or False on a conflict or an API-level merge error.
        """
        wd_data = self.get_items()
        if not wd_data:
            # todo: add links to item
            return None
        if len(wd_data) > 1:
            warning('Interwiki conflict in %s, skipping...' %
                    self.current_page.title(asLink=True))
            return False
        target_item = list(wd_data).pop()
        try:
            item.mergeInto(target_item)
        except pywikibot.data.api.APIError:
            # warning already printed by the API
            return False
        else:
            # re-fetch so the merged sitelinks are visible to clean_page
            target_item.get(force=True)
            return target_item
def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    local_args = pywikibot.handle_args(args)
    genFactory = pagegenerators.GeneratorFactory()
    options = {}
    for arg in local_args:
        if genFactory.handleArg(arg):
            continue
        option, sep, value = arg.partition(':')
        option = option[1:] if option.startswith('-') else None
        if option is None:
            # A positional argument would previously be stored under the key
            # None and later crash IWBot(**options) ("keywords must be
            # strings"); warn and skip it instead.
            warning('Unknown argument: %s' % arg)
            continue
        if option == 'summary':
            options[option] = value
        else:
            options[option] = True
    site = pywikibot.Site()
    generator = genFactory.getCombinedGenerator(preload=True)
    if generator:
        bot = IWBot(generator=generator, site=site, **options)
        bot.run()
    else:
        suggest_help(missing_generator=True)
        return False


if __name__ == '__main__':
    main()
| {
"content_hash": "72a1c9f23096b43e58b42829bec2b6a6",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 79,
"avg_line_length": 34.149193548387096,
"alnum_prop": 0.5742118313850514,
"repo_name": "hasteur/g13bot_tools_new",
"id": "a480dfda39954d94baded973dc10cbd4ce3a73d5",
"size": "8511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/interwikidata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "HTML",
"bytes": "1365"
},
{
"name": "Python",
"bytes": "4512430"
},
{
"name": "Shell",
"bytes": "4824"
}
],
"symlink_target": ""
} |
"""
MoinMoin - run standalone server, optionally daemonizing it
@copyright: 2008 MoinMoin:ForrestVoight
@license: GNU GPL, see COPYING for details.
"""
import os
import sys
import signal
from MoinMoin.script import MoinScript
from MoinMoin.util.daemon import Daemon
from MoinMoin.web.serving import run_server
class PluginScript(MoinScript):
    """\
Purpose:
========
This tool allows you to start a standalone server
Detailed Instructions:
======================
General syntax: moin [options] server standalone [standalone-options]
[options] usually should be:
    --config-dir=/path/to/my/cfg/ --wiki-url=http://wiki.example.org/
[standalone-options] see below:
Please note:
* You must run this script as the owner of the wiki files.
* You should not run this script as root. You should use --user and
  --group to run the server without superuser privileges.
"""
    def __init__(self, argv, def_values):
        # register the standalone-specific command line options on top of
        # the common MoinScript options
        MoinScript.__init__(self, argv, def_values)
        self.parser.add_option(
            "--docs", dest="docs",
            help="Set the documents directory. Default: use builtin MoinMoin/web/static/htdocs"
        )
        self.parser.add_option(
            "--user", dest="user",
            help="Set the user to change to. UNIX only. Default: Don't change"
        )
        self.parser.add_option(
            "--group", dest="group",
            help="Set the group to change to. UNIX only. Default: Don't change"
        )
        self.parser.add_option(
            "--port", dest="port", type="int",
            help="Set the port to listen on. Default: 8080"
        )
        self.parser.add_option(
            "--hostname", "--interface", dest="hostname",
            help="Set the ip/hostname to listen on. Use \"\" for all interfaces. Default: localhost"
        )
        self.parser.add_option(
            "--start", dest="start", action="store_true",
            help="Start server in background."
        )
        self.parser.add_option(
            "--stop", dest="stop", action="store_true",
            help="Stop server in background."
        )
        self.parser.add_option(
            "--pidfile", dest="pidfile",
            help="Set file to store pid of moin daemon in. Default: moin.pid"
        )
        self.parser.add_option(
            "--debug", dest="debug",
            help="Debug mode of server. off: no debugging (default), web: for browser based debugging, external: for using an external debugger."
        )
    def mainloop(self):
        # Either stop a daemonized server (--stop) or assemble the server
        # kwargs from wikiserverconfig/DefaultConfig plus command line
        # overrides and run it (optionally daemonized with --start).
        # we don't expect non-option arguments
        if self.args:
            self.parser.error("incorrect number of arguments")
        pidfile = "moin.pid"
        if self.options.pidfile:
            pidfile = self.options.pidfile
        if self.options.stop:
            try:
                # NOTE(review): file handle is never closed explicitly;
                # harmless for a short-lived CLI, but tidy-up is cheap.
                pids = open(pidfile, "r").read()
            except IOError:
                print "pid file not found (server not running?)"
            else:
                try:
                    os.kill(int(pids), signal.SIGTERM)
                except OSError:
                    print "kill failed (server not running?)"
                os.remove(pidfile)
        else:
            try:
                if self.options.config_dir:
                    sys.path.insert(0, self.options.config_dir)
                from wikiserverconfig import Config
            except ImportError, err:
                if 'wikiserverconfig' in str(err):
                    # we are unable to import from wikiserverconfig module
                    Config = DefaultConfig
                else:
                    # some other import went wrong
                    raise
            # initialize some defaults if missing
            kwargs = {}
            for option in ('user', 'group',
                           'hostname', 'port',
                           'threaded', 'processes',
                           'debug', 'use_evalex',
                           'use_reloader', 'extra_files', 'reloader_interval',
                           'docs', 'static_files', ):
                if hasattr(Config, option):
                    kwargs[option] = getattr(Config, option)
                else:
                    # usually inheriting from DefaultConfig should make this superfluous,
                    # but who knows what users write into their config...
                    kwargs[option] = getattr(DefaultConfig, option)
            # override config settings with cmdline options:
            if self.options.docs:
                kwargs['docs'] = self.options.docs
            if self.options.user:
                kwargs['user'] = self.options.user
            if self.options.group:
                kwargs['group'] = self.options.group
            if self.options.debug:
                kwargs['debug'] = self.options.debug
            if self.options.hostname is not None:  # needs to work for "" value also
                kwargs['hostname'] = self.options.hostname
            if self.options.port:
                kwargs['port'] = self.options.port
            if self.options.start:
                # daemonize; the Daemon wrapper re-invokes run_server
                daemon = Daemon('moin', pidfile, run_server, **kwargs)
                daemon.do_start()
            else:
                run_server(**kwargs)
class DefaultConfig(object):
    """Built-in server defaults, used when wikiserverconfig cannot be imported.

    mainloop() also falls back to this class, attribute by attribute, for
    anything a user-provided Config omits.
    """
    # where the static data is served from - you can either use:
    # docs = True  # serve the builtin static data from MoinMoin/web/static/htdocs/
    # docs = '/where/ever/you/like/to/keep/htdocs'  # serve it from the given path
    # docs = False  # do not serve static files at all (will not work except
    #               # you serve them in some other working way)
    docs = True
    # user and group to run moin as:
    user = None
    group = None
    # debugging options: 'off', 'web', 'external'
    debug = 'off'
    # should the exception evaluation feature be enabled?
    use_evalex = True
    # Werkzeug run_simple arguments below here:
    # hostname/ip and port the server listens on:
    hostname = 'localhost'
    port = 8080
    # either multi-thread or multi-process (not both):
    # threaded = True, processes = 1 is usually what you want
    # threaded = False, processes = 10 (for example) can be rather slow
    # thus, if you need a forking server, maybe rather use apache/mod-wsgi!
    threaded = True
    processes = 1
    # automatic code reloader - needs testing!
    use_reloader = False
    extra_files = None
    reloader_interval = 1
    # we can't use static_files to replace our own middleware setup for moin's
    # static files, because we also need the setup with other servers (like
    # apache), not just when using werkzeug's run_simple server.
    # But you can use it if you need to serve other static files you just need
    # with the standalone wikiserver.
    static_files = None
| {
"content_hash": "df6f282e4b6a35c248e01286b3287de6",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 145,
"avg_line_length": 36.6096256684492,
"alnum_prop": 0.5718667835232253,
"repo_name": "RealTimeWeb/wikisite",
"id": "80a72e27301771a6371af76d565b090895aff973",
"size": "6875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MoinMoin/script/server/standalone.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "49395"
},
{
"name": "CSS",
"bytes": "204104"
},
{
"name": "ColdFusion",
"bytes": "142312"
},
{
"name": "Java",
"bytes": "491798"
},
{
"name": "JavaScript",
"bytes": "2107106"
},
{
"name": "Lasso",
"bytes": "23464"
},
{
"name": "Makefile",
"bytes": "4950"
},
{
"name": "PHP",
"bytes": "144585"
},
{
"name": "Perl",
"bytes": "44627"
},
{
"name": "Python",
"bytes": "7647140"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
} |
import io
import sys
import unittest
def is_weird(value):
    """Return True when HackerRank's rules classify *value* as 'Weird'.

    Weird: every odd number, and every even number from 6 through 20.
    Not weird: even numbers in 2..5 and even numbers above 20.
    """
    return value % 2 == 1 or 6 <= value <= 20
def main():
    """Read one integer from stdin and print the Weird/Not Weird verdict."""
    value = int(input().strip())
    print('Weird' if is_weird(value) else 'Not Weird')


if __name__ == '__main__':  # pragma: no cover
    main()
class TestCode(unittest.TestCase):
    """Fixture-driven checks of main() plus a direct sweep of is_weird()."""
    def generalized_test(self, which):
        """Run main() with stdin/stdout redirected to the `.{which}.in/.out` fixture files."""
        sys.stdin = open(__file__.replace('.py', f'.{which}.in'), 'r')
        sys.stdout = io.StringIO()
        expected = open(__file__.replace('.py', f'.{which}.out'), 'r')
        main()
        self.assertEqual(sys.stdout.getvalue(), expected.read())
        # NOTE(review): sys.stdin/sys.stdout are closed but never restored to
        # the real streams — confirm no later test relies on them.
        for handle in [sys.stdin, sys.stdout, expected]:
            handle.close()
    def test_0(self):
        """main() output matches the recorded fixture for input set 0."""
        self.generalized_test('0')
    def test_1_to_24(self):
        """is_weird matches the expected truth table for 1..24 (index 0 unused)."""
        expected = [
            None, True, False, True, False, True, True, True, True, True,
            True, True, True, True, True, True, True, True, True, True,
            True, True, False, True, False]
        result = [None]
        for value in range(1, 25):
            result.append(is_weird(value))
        self.assertEqual(result, expected)
| {
"content_hash": "486eddb6374b74bc4ac1404724ed342c",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 73,
"avg_line_length": 27.022222222222222,
"alnum_prop": 0.5542763157894737,
"repo_name": "altermarkive/Coding-Interviews",
"id": "020fb0fa17764450cc746c65510ef1871546f2b2",
"size": "1291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithm-design/hackerrank/py_if_else/test_py_if_else.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
__author__ = "Markus Gumbel"
__copyright__ = "The authors"
__license__ = "Apache 2"
__email__ = "m.gumbel@hs-mannheim.de"
__status__ = "Production"
from PySteppables import SteppableBasePy
class ModuroSteppable(SteppableBasePy):
    """Base steppable that runs moduroStep unless the MCS is interrupted."""

    def __init__(self, simulator, model, _frequency=1):
        SteppableBasePy.__init__(self, simulator, _frequency)
        self.model = model
        self.execConfig = model.execConfig

    def step(self, mcs):
        # Skip the model-specific work while execution is interrupted.
        if self.execConfig.interuptMCS(mcs):
            return
        self.moduroStep(mcs)  # better: not MCS but time!

    # Abstract method: subclasses implement the actual per-step behavior.
    def moduroStep(self, mcs):
        return None
| {
"content_hash": "23fa74fbdf892bbb0ef556eafbce7d09",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 61,
"avg_line_length": 30,
"alnum_prop": 0.6492063492063492,
"repo_name": "informatik-mannheim/Moduro-CC3D",
"id": "25261e3ab5efe95f5ce616b8e34e6c61b25ef771",
"size": "1244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Simulation/Steppable/ModuroSteppable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "215449"
}
],
"symlink_target": ""
} |
"""
Authors: Thomas J. Sargent, John Stachurski
Filename: lss.py
Computes quantities related to the Gaussian linear state space model
x_{t+1} = A x_t + C w_{t+1}
y_t = G x_t
The shocks {w_t} are iid and N(0, I)
"""
from textwrap import dedent
import numpy as np
from numpy.random import multivariate_normal
from scipy.linalg import solve
class LSS(object):
    """
    A class that describes a Gaussian linear state space model of the
    form:

    .. math::

        x_{t+1} = A x_t + C w_{t+1}

    .. math::

        y_t = G x_t

    where {w_t} are iid and N(0, I). If the initial conditions mu_0
    and Sigma_0 for x_0 ~ N(mu_0, Sigma_0) are not supplied, both
    are set to zero. When Sigma_0=0, the draw of x_0 is exactly
    mu_0.

    Parameters
    ----------
    A : array_like or scalar(float)
        This is part of the state transition equation. It should be
        `n x n`
    C : array_like or scalar(float)
        This is part of the state transition equation. It should be
        `n x m`
    G : array_like or scalar(float)
        This describes the relation between y_t and x_t and should
        be `k x n`
    mu_0 : array_like or scalar(float), optional(default=None)
        This is the mean of initial draw and is `n x 1`
    Sigma_0 : array_like or scalar(float), optional(default=None)
        This is the variance of the initial draw and is `n x n` and
        also should be positive definite and symmetric

    Attributes
    ----------
    A, C, G, mu_0, Sigma_0 : see Parameters
    k, n, m : scalar(int)
        The matrix dimensions
    """

    def __init__(self, A, C, G, mu_0=None, Sigma_0=None):
        # Coerce all system matrices to well-formed 2-D arrays.
        self.A, self.G, self.C = list(map(self.convert, (A, G, C)))
        self.k, self.n = self.G.shape
        self.m = self.C.shape[1]
        # == Default initial conditions == #
        if mu_0 is None:
            self.mu_0 = np.zeros((self.n, 1))
        else:
            self.mu_0 = np.asarray(mu_0)
        if Sigma_0 is None:
            self.Sigma_0 = np.zeros((self.n, self.n))
        else:
            # NOTE(review): unlike mu_0, Sigma_0 is stored as given (no
            # asarray) -- callers should supply a proper n x n array.
            self.Sigma_0 = Sigma_0

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        # Human-readable summary of the model dimensions.
        m = """\
        Linear Gaussian state space model:
          - dimension of state space : {n}
          - number of innovations : {m}
          - dimension of observation equation : {k}
        """
        return dedent(m.format(n=self.n, k=self.k, m=self.m))

    def convert(self, x):
        """
        Convert array_like objects (lists of lists, floats, etc.) into
        well formed 2D NumPy arrays.

        Note: coerces to float32.
        """
        return np.atleast_2d(np.asarray(x, dtype='float32'))

    def simulate(self, ts_length=100):
        """
        Simulate a time series of length ts_length, first drawing

            x_0 ~ N(mu_0, Sigma_0)

        Parameters
        ----------
        ts_length : scalar(int), optional(default=100)
            The length of the simulation

        Returns
        -------
        x : array_like(float)
            An n x ts_length array, where the t-th column is x_t
        y : array_like(float)
            A k x ts_length array, where the t-th column is y_t
        """
        x = np.empty((self.n, ts_length))
        # Draw the initial state, then iterate the transition equation
        # forward with fresh N(0, I) shocks at each step.
        x[:, 0] = multivariate_normal(self.mu_0.flatten(), self.Sigma_0)
        w = np.random.randn(self.m, ts_length-1)
        for t in range(ts_length-1):
            x[:, t+1] = self.A.dot(x[:, t]) + self.C.dot(w[:, t])
        y = self.G.dot(x)
        return x, y

    def replicate(self, T=10, num_reps=100):
        """
        Simulate num_reps observations of x_T and y_T given
        x_0 ~ N(mu_0, Sigma_0).

        Parameters
        ----------
        T : scalar(int), optional(default=10)
            The period that we want to replicate values for
        num_reps : scalar(int), optional(default=100)
            The number of replications that we want

        Returns
        -------
        x : array_like(float)
            An n x num_reps array, where the j-th column is the j_th
            observation of x_T
        y : array_like(float)
            A k x num_reps array, where the j-th column is the j_th
            observation of y_T
        """
        x = np.empty((self.n, num_reps))
        # Each replication runs an independent simulation and keeps only
        # the terminal state x_T.
        for j in range(num_reps):
            x_T, _ = self.simulate(ts_length=T+1)
            x[:, j] = x_T[:, -1]
        y = self.G.dot(x)
        return x, y

    def moment_sequence(self):
        """
        Create a generator to calculate the population mean and
        variance-covariance matrix for both x_t and y_t, starting at
        the initial condition (self.mu_0, self.Sigma_0). Each iteration
        produces a 4-tuple of items (mu_x, mu_y, Sigma_x, Sigma_y) for
        the next period.

        Yields
        ------
        mu_x : array_like(float)
            An n x 1 array representing the population mean of x_t
        mu_y : array_like(float)
            A k x 1 array representing the population mean of y_t
        Sigma_x : array_like(float)
            An n x n array representing the variance-covariance matrix
            of x_t
        Sigma_y : array_like(float)
            A k x k array representing the variance-covariance matrix
            of y_t
        """
        # == Simplify names == #
        A, C, G = self.A, self.C, self.G
        # == Initial moments == #
        mu_x, Sigma_x = self.mu_0, self.Sigma_0
        # Infinite generator: caller decides how many moments to consume.
        while 1:
            mu_y, Sigma_y = G.dot(mu_x), G.dot(Sigma_x).dot(G.T)
            yield mu_x, mu_y, Sigma_x, Sigma_y
            # == Update moments of x == #
            mu_x = A.dot(mu_x)
            Sigma_x = A.dot(Sigma_x).dot(A.T) + C.dot(C.T)

    def stationary_distributions(self, max_iter=200, tol=1e-5):
        """
        Compute the moments of the stationary distributions of x_t and
        y_t if possible. Computation is by iteration, starting from the
        initial conditions self.mu_0 and self.Sigma_0

        Parameters
        ----------
        max_iter : scalar(int), optional(default=200)
            The maximum number of iterations allowed
        tol : scalar(float), optional(default=1e-5)
            The tolerance level that one wishes to achieve

        Returns
        -------
        mu_x_star : array_like(float)
            An n x 1 array representing the stationary mean of x_t
        mu_y_star : array_like(float)
            An k x 1 array representing the stationary mean of y_t
        Sigma_x_star : array_like(float)
            An n x n array representing the stationary var-cov matrix
            of x_t
        Sigma_y_star : array_like(float)
            An k x k array representing the stationary var-cov matrix
            of y_t

        Raises
        ------
        ValueError
            If convergence is not achieved within max_iter iterations.
        """
        # == Initialize iteration == #
        m = self.moment_sequence()
        mu_x, mu_y, Sigma_x, Sigma_y = next(m)
        i = 0
        # error > tol guarantees the loop body runs at least once.
        error = tol + 1
        # == Loop until convergence or failure == #
        while error > tol:
            if i > max_iter:
                fail_message = 'Convergence failed after {} iterations'
                raise ValueError(fail_message.format(max_iter))
            else:
                i += 1
                mu_x1, mu_y1, Sigma_x1, Sigma_y1 = next(m)
                # Convergence is measured on the x moments only.
                error_mu = np.max(np.abs(mu_x1 - mu_x))
                error_Sigma = np.max(np.abs(Sigma_x1 - Sigma_x))
                error = max(error_mu, error_Sigma)
                mu_x, Sigma_x = mu_x1, Sigma_x1
        # == Prepare return values == #
        mu_x_star, Sigma_x_star = mu_x, Sigma_x
        mu_y_star, Sigma_y_star = mu_y1, Sigma_y1
        return mu_x_star, mu_y_star, Sigma_x_star, Sigma_y_star

    def geometric_sums(self, beta, x_t):
        r"""
        Forecast the geometric sums

        .. math::

            S_x := E [sum_{j=0}^{\infty} beta^j x_{t+j} | x_t ]

            S_y := E [sum_{j=0}^{\infty} beta^j y_{t+j} | x_t ]

        Parameters
        ----------
        beta : scalar(float)
            Discount factor, in [0, 1)
        x_t : array_like(float)
            The term x_t for conditioning

        Returns
        -------
        S_x : array_like(float)
            Geometric sum as defined above
        S_y : array_like(float)
            Geometric sum as defined above
        """
        # S_x solves (I - beta A) S_x = x_t.
        I = np.identity(self.n)
        S_x = solve(I - beta * self.A, x_t)
        S_y = self.G.dot(S_x)
        return S_x, S_y
| {
"content_hash": "48e17f8027504d1a8c971874ab2b783d",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 72,
"avg_line_length": 30.66181818181818,
"alnum_prop": 0.532139468690702,
"repo_name": "chenxulong/quanteco",
"id": "c25a6eb9ce9505542c1e6f4ce311c383b572d823",
"size": "8432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantecon/lss.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "7059"
},
{
"name": "Python",
"bytes": "517910"
},
{
"name": "Shell",
"bytes": "7171"
}
],
"symlink_target": ""
} |
import sys
import os.path
#sys.path.insert(0, '/home/andy/theano/tool_examples/theano-lstm-0.0.15')
from theano_lstm import Embedding, LSTM, RNN, StackedCells, Layer, create_optimization_updates, masked_loss
from utilities import *
import dill
import argparse
#import cPickle
import pickle
import numpy
from collections import OrderedDict
import theano, theano.tensor as T
import turing_model
from theano_toolkit.parameters import Parameters
from theano.compile.nanguardmode import NanGuardMode
DESCRIPTION = """
Recurrent neural network based statistical language modelling toolkit
(based on LSTM algorithm)
Implemented by Daniel Soutner,
Department of Cybernetics, University of West Bohemia, Plzen, Czech rep.
dsoutner@kky.zcu.cz, 2013
"""
def parse_args(parser):
    """Register every command-line option on *parser* and parse sys.argv.

    Returns the populated argparse.Namespace.
    """
    # One (flags, keyword-arguments) spec per option.  Registration order
    # matches the original so --help output is unchanged.
    option_specs = [
        (['--train'], dict(nargs=1, action="store", metavar="FILE",
                           help='training file !')),
        (['--valid'], dict(nargs=1, action="store", metavar="FILE",
                           help='valid file !')),
        (['--test'], dict(nargs=1, action="store", metavar="FILE",
                          help='testing file for ppl!')),
        (['--neuron-type'], dict(action="store", dest='celltype',
                                 help='type of hidden neurons, RNN/LSTM, default: RNN', type=str, default='RNN')),
        (['--train-method'], dict(action="store", dest='train_method',
                                  help='training method LSTM/TURING/ALL, default: ALL', type=str, default='ALL')),
        (['--projection-size'], dict(action="store", dest='n_projection',
                                     help='Number of neurons in projection layer, default: 100', type=int, default=100)),
        (['--hidden-size'], dict(action="store", dest='n_hidden',
                                 help='Number of neurons in hidden layer, default: 100', type=int, default=100)),
        (['--stack'], dict(action="store", dest='n_stack',
                           help='Number of hidden neurons, default: 1 ', type=int, default=1)),
        (['--learning-rate'], dict(action="store", dest='lr',
                                   help='learing rate at begining, default: 0.01 ', type=float, default=0.01)),
        (['--improvement-rate'], dict(action="store", dest='improvement_rate',
                                      help='relative improvement for early stopping on ppl , default: 0.005 ', type=float, default=0.005)),
        (['--minibatch-size'], dict(action="store", dest='minibatch_size',
                                    help='minibatch size for training, default: 100', type=int, default=100)),
        (['--max-epoch'], dict(action="store", dest='max_epoch',
                               help='maximum number of epoch if not early stopping, default: 1000', type=int, default=1000)),
        (['--early-stop'], dict(action="store", dest='early_stop',
                                help='1 for early-stopping, 0 for not', type=int, default=1)),
        (['--save-net'], dict(action="store", dest="save_net", default=None, metavar="FILE",
                              help="Save RNN to file")),
        (['--load-net'], dict(action="store", dest="load_net", default=None, metavar="FILE",
                              help="Load RNN from file")),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
def build_vocab(data_file_str):
    """Build a Vocab from every space-separated token in *data_file_str*.

    Each trailing newline is replaced by '.' before splitting, so the
    sentence terminator itself becomes a vocabulary entry.
    """
    with open(data_file_str) as data_file:
        sentences = [raw.replace('\n', '.') for raw in data_file]
    vocab = Vocab()
    for sentence in sentences:
        vocab.add_words(sentence.split(" "))
    return vocab
def load_data(data_file_str, vocab, data_type):
    """Read *data_file_str*, convert words to indices via *vocab*, and pad.

    Training sentences longer than 50 tokens are dropped: overly long
    sentences cost too much time and decrease performance.

    Returns (padded_index_matrix, sentence_lengths).
    """
    sentences = []
    with open(data_file_str) as data_file:
        for raw in data_file:
            # Drop overly long sentences, but only for the training set.
            if data_type == 'train' and len(raw.replace('\n', '').split(' ')) > 50:
                continue
            sentences.append(raw.replace('\n', '.'))
    # Transform into one big numerical matrix of sentences.
    numerical_lines = [vocab(sentence) for sentence in sentences]
    numerical_lines, numerical_lengths = pad_into_matrix(numerical_lines)
    return numerical_lines, numerical_lengths
def softmax(x):
    """Pickle-friendly softmax wrapper.

    Transposes *x* before handing it to Theano's softmax, which removes
    the extra dimension Theano adds during exponential normalization.
    """
    transposed = x.T
    return T.nnet.softmax(transposed)
def has_hidden(layer):
    """Return True when *layer* carries a trainable initial hidden state."""
    return hasattr(layer, 'initial_hidden_state')
def matrixify(vector, n):
    """Tile *vector* n times along a new leading axis (symbolic Theano op)."""
    padded = T.shape_padleft(vector)
    return T.repeat(padded, n, axis=0)
def initial_state(layer, dimensions=None):
    """Return *layer*'s initial hidden state for the scan recurrence.

    Returns None when the layer has no trainable initial state; this tells
    Theano the step function **will** return a value there, but it does not
    need to be fed into the next step of the recurrence.  When *dimensions*
    is given, the state is tiled to that many rows (one per example).
    """
    if not has_hidden(layer):
        return None
    state = layer.initial_hidden_state
    if dimensions is None:
        return state
    return matrixify(state, dimensions)
def initial_state_with_taps(layer, dimensions=None):
    """Wrap a layer's initial state in scan's taps=[-1] dict, or None."""
    state = initial_state(layer, dimensions)
    return None if state is None else dict(initial=state, taps=[-1])
class Model:
    """
    Simple predictive model for forecasting words from
    sequence using LSTMs. Choose how many LSTMs to stack
    what size their memory should be, and how many
    words can be predicted.

    Builds two prediction paths over the same stacked recurrent core:
    the plain LSTM/RNN softmax output ("lstm") and a neural-Turing-machine
    head ("final") driven by the recurrent hidden states.
    """
    def __init__(self, hidden_size, input_size, vocab_size, stack_size=1, celltype=LSTM):
        # core layer in RNN/LSTM
        self.model = StackedCells(input_size, celltype=celltype, layers =[hidden_size] * stack_size)
        # add an embedding
        self.model.layers.insert(0, Embedding(vocab_size, input_size))
        # add a classifier:
        self.model.layers.append(Layer(hidden_size, vocab_size, activation = softmax))
        self.turing_params = Parameters()
        #init turing machine model
        self.turing_updates , self.turing_predict = turing_model.build(self.turing_params , hidden_size , vocab_size)
        self.hidden_size = hidden_size
        # inputs are matrices of indices,
        # each row is a sentence, each column a timestep
        self._stop_word = theano.shared(np.int32(999999999), name="stop word")
        self.for_how_long = T.ivector()
        self.input_mat = T.imatrix()
        self.priming_word = T.iscalar()
        self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
        # create symbolic variables for prediction:
        #change by darong #issue : what is greedy
        self.lstm_predictions = self.create_lstm_prediction()
        self.final_predictions = self.create_final_prediction()
        # create symbolic variable for greedy search:
        self.greedy_predictions = self.create_lstm_prediction(greedy=True)
        # create gradient training functions:
        self.create_cost_fun()#create 2 cost func(lstm final)
        self.lstm_lr = 0.01
        self.turing_lr = 0.01
        self.all_lr = 0.01
        self.create_training_function()#create 3 functions(lstm turing all)
        self.create_predict_function()#create 2 predictions(lstm final)
        # create ppl
        self.lstm_ppl = self.create_lstm_ppl()
        self.final_ppl = self.create_final_ppl()
        self.create_ppl_function()

    def save(self, save_file, vocab):
        """Pickle the stacked-cells model and its vocab to disk."""
        pickle.dump(self.model, open(save_file, "wb")) # pickle is for lambda function, cPickle cannot
        pickle.dump(vocab, open(save_file+'.vocab', "wb")) # pickle is for lambda function, cPickle cannot

    def save_turing(self, save_file):
        """Persist only the Turing-head parameters (to '<save_file>.turing')."""
        self.turing_params.save(save_file + '.turing')

    def load(self, load_file, lr):
        """Load a pickled model (and Turing params if present), then rebuild
        the whole symbolic graph and compiled functions with learning rate *lr*.

        When no '.turing' file exists, the Turing head is warm-started from
        the LSTM classifier weights and the initial hidden state.
        """
        self.model = pickle.load(open(load_file, "rb"))
        if os.path.isfile(load_file + '.turing') :
            self.turing_params.load(load_file + '.turing')
        else :
            print "no turing model!!!! pretrain with lstm param"
            self.turing_params['W_input_hidden'] = self.model.layers[-1].params[0].get_value().T #not sure
            self.turing_params['W_read_hidden'] = self.model.layers[-1].params[0].get_value().T
            self.turing_params['b_hidden_0'] = self.model.layers[-1].params[1].get_value()
            temp = self.model.layers[1].initial_hidden_state.get_value()[self.hidden_size:]
            self.turing_params['memory_init'] = temp.reshape((1,)+temp.shape)
        # need to compile again for calculating predictions after loading lstm
        self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
        self.lstm_predictions = self.create_lstm_prediction()
        self.final_predictions = self.create_final_prediction()
        self.greedy_predictions = self.create_lstm_prediction(greedy=True)#can change to final
        self.create_cost_fun()#create 2 cost func(lstm final)
        self.lstm_lr = lr
        self.turing_lr = lr#change this
        self.all_lr = lr
        self.create_training_function()#create 3 functions(lstm turing all)
        self.create_predict_function()#create 2 predictions(lstm final)
        self.lstm_ppl = self.create_lstm_ppl()
        self.final_ppl = self.create_final_ppl()
        self.create_ppl_function()
        print "done loading model"
        # print "done compile"

    def stop_on(self, idx):
        """Set the vocabulary index that terminates greedy generation."""
        self._stop_word.set_value(idx)

    @property
    def params(self):
        # Trainable parameters of the recurrent core (not the Turing head).
        return self.model.params

    def create_lstm_prediction(self, greedy=False):
        """Build the symbolic softmax predictions of the LSTM/RNN path.

        With greedy=True, builds a generation graph that feeds the argmax
        back in as the next input and stops at the stop word.
        """
        def step(idx, *states):
            # new hiddens are the states we need to pass to LSTMs
            # from past. Because the StackedCells also include
            # the embeddings, and those have no state, we pass
            # a "None" instead:
            new_hiddens = [None] + list(states)
            new_states = self.model.forward(idx, prev_hiddens = new_hiddens)
            if greedy:
                new_idxes = new_states[-1]
                new_idx = new_idxes.argmax()
                # provide a stopping condition for greedy search:
                return ([new_idx.astype(self.priming_word.dtype)] + new_states[1:-1]), theano.scan_module.until(T.eq(new_idx,self._stop_word))
            else:
                return new_states[1:]
        # in sequence forecasting scenario we take everything
        # up to the before last step, and predict subsequent
        # steps ergo, 0 ... n - 1, hence:
        inputs = self.input_mat[:, 0:-1]
        num_examples = inputs.shape[0]
        # pass this to Theano's recurrence relation function:
        # choose what gets outputted at each timestep:
        if greedy:
            outputs_info = [dict(initial=self.priming_word, taps=[-1])] + [initial_state_with_taps(layer) for layer in self.model.layers[1:-1]]
            result, _ = theano.scan(fn=step,
                                n_steps=200,
                                outputs_info=outputs_info)
        else:
            outputs_info = [initial_state_with_taps(layer, num_examples) for layer in self.model.layers[1:]]
            result, _ = theano.scan(fn=step,
                                sequences=[inputs.T],
                                outputs_info=outputs_info)
        if greedy:
            return result[0]
        # softmaxes are the last layer of our network,
        # and are at the end of our results list:
        return result[-1].transpose((2,0,1))
        # we reorder the predictions to be:
        # 1. what row / example
        # 2. what timestep
        # 3. softmax dimension

    def create_final_prediction(self, greedy=False):
        """Build the symbolic predictions of the Turing head.

        Same scan as the LSTM path, but the second half of the recurrent
        hidden state is routed through the Turing predictor instead of the
        softmax classifier.
        """
        def step(idx, *states):
            # new hiddens are the states we need to pass to LSTMs
            # from past. Because the StackedCells also include
            # the embeddings, and those have no state, we pass
            # a "None" instead:
            new_hiddens = [None] + list(states)
            new_states = self.model.forward(idx, prev_hiddens = new_hiddens)
            if greedy:
                new_idxes = new_states[-1]
                new_idx = new_idxes.argmax()
                # provide a stopping condition for greedy search:
                return ([new_idx.astype(self.priming_word.dtype)] + new_states[1:-1]), theano.scan_module.until(T.eq(new_idx,self._stop_word))
            else:
                return new_states[1:]
        # in sequence forecasting scenario we take everything
        # up to the before last step, and predict subsequent
        # steps ergo, 0 ... n - 1, hence:
        inputs = self.input_mat[:, 0:-1]
        num_examples = inputs.shape[0]
        # pass this to Theano's recurrence relation function:
        # choose what gets outputted at each timestep:
        if greedy:
            outputs_info = [dict(initial=self.priming_word, taps=[-1])] + [initial_state_with_taps(layer) for layer in self.model.layers[1:-1]]
            result, _ = theano.scan(fn=step,
                                n_steps=200,
                                outputs_info=outputs_info)
        else:
            outputs_info = [initial_state_with_taps(layer, num_examples) for layer in self.model.layers[1:]]
            result, _ = theano.scan(fn=step,
                                sequences=[inputs.T],
                                outputs_info=outputs_info)
        if greedy:
            return result[0]
        # softmaxes are the last layer of our network,
        # and are at the end of our results list:
        # result[-2] holds the recurrent hidden states; its second half
        # (the output half of an LSTM cell state) feeds the Turing head.
        hidden_size = result[-2].shape[2]/2
        turing_result = self.turing_predict(result[-2][:,:,hidden_size:])
        #the last layer do transpose before compute
        return turing_result.transpose((1,0,2))
        # we reorder the predictions to be:
        # 1. what row / example
        # 2. what timestep
        # 3. softmax dimension

    def create_cost_fun (self):
        """Build masked cross-entropy costs for both prediction paths."""
        # create a cost function that
        # takes each prediction at every timestep
        # and guesses next timestep's value:
        what_to_predict = self.input_mat[:, 1:]
        # because some sentences are shorter, we
        # place masks where the sentences end:
        # (for how long is zero indexed, e.g. an example going from `[2,3)`)
        # has this value set 0 (here we substract by 1):
        for_how_long = self.for_how_long - 1
        # all sentences start at T=0:
        starting_when = T.zeros_like(self.for_how_long)
        self.lstm_cost = masked_loss(self.lstm_predictions,
                                what_to_predict,
                                for_how_long,
                                starting_when).sum()
        self.final_cost = masked_loss(self.final_predictions,
                                what_to_predict,
                                for_how_long,
                                starting_when).sum()

    def create_predict_function(self):
        """Compile the prediction graphs into callable Theano functions."""
        self.lstm_pred_fun = theano.function(
            inputs=[self.input_mat],
            outputs=self.lstm_predictions,
            allow_input_downcast=True
        )
        self.final_pred_fun = theano.function(
            inputs=[self.input_mat],
            outputs=self.final_predictions,
            allow_input_downcast=True
        )
        self.greedy_fun = theano.function(
            inputs=[self.priming_word],
            outputs=T.concatenate([T.shape_padleft(self.priming_word), self.greedy_predictions]),
            allow_input_downcast=True
        )

    def create_training_function(self):
        """Compile three SGD update functions: LSTM-only, Turing-only, and
        joint (both parameter sets updated from the final cost)."""
        updates, _, _, _, _ = create_optimization_updates(self.lstm_cost, self.params, method="SGD", lr=self.lstm_lr)
        # updates, _, _, _, _ = create_optimization_updates(self.cost, self.params, method="adadelta", lr=self.lr)
        self.lstm_update_fun = theano.function(
            inputs=[self.input_mat, self.for_how_long],
            outputs=self.lstm_cost,
            updates=updates,
            allow_input_downcast=True)
        updates_turing = self.turing_updates(self.final_cost , lr=self.turing_lr)
        # updates, _, _, _, _ = create_optimization_updates(self.cost, self.params, method="adadelta", lr=self.lr)
        self.turing_update_fun = theano.function(
            inputs=[self.input_mat, self.for_how_long],
            outputs=self.final_cost,
            updates=updates_turing,
            mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True),
            allow_input_downcast=True)
        all_updates_lstm, _, _, _, _ = create_optimization_updates(self.final_cost, self.params, method="SGD", lr=self.all_lr,part=True)
        all_updates_turing_temp = self.turing_updates(self.final_cost , lr=self.all_lr)
        # Merge the two update lists into one ordered mapping.
        updates_all = all_updates_lstm
        for pair in all_updates_turing_temp :
            updates_all[pair[0]] = pair[1]
        self.all_update_fun = theano.function(
            inputs=[self.input_mat, self.for_how_long],
            outputs=self.final_cost,
            updates=updates_all,
            allow_input_downcast=True)

    def create_lstm_ppl(self):
        """Symbolic per-word perplexity of the LSTM path, skipping OOV
        tokens (label index 0) in both the sum and the normalizer."""
        def timestep(predictions, label, len_example, total_len_example):
            # label 0 marks OOV; mask those positions out.
            label_binary = T.gt(label[0:len_example-1], 0)
            oov_count = T.shape(label_binary)[0] - T.sum(label_binary)
            a = total_len_example
            return T.sum(T.log( 1./ predictions[T.arange(len_example-1), label[0:len_example-1]]) * label_binary ), oov_count
        result, _ = theano.scan(fn=timestep,
                      sequences=[ self.lstm_predictions, self.input_mat[:, 1:], self.for_how_long ],
                      non_sequences=T.sum(self.for_how_long))
        oov_count_total = T.sum(result[1])
        return T.exp(T.sum(result[0]).astype(theano.config.floatX)/(T.sum(self.for_how_long) - oov_count_total).astype(theano.config.floatX)).astype(theano.config.floatX)

    def create_final_ppl(self):
        """Symbolic per-word perplexity of the Turing path (same masking
        scheme as create_lstm_ppl)."""
        def timestep(predictions, label, len_example, total_len_example):
            # label 0 marks OOV; mask those positions out.
            label_binary = T.gt(label[0:len_example-1], 0)
            oov_count = T.shape(label_binary)[0] - T.sum(label_binary)
            a = total_len_example
            return T.sum(T.log( 1./ predictions[T.arange(len_example-1), label[0:len_example-1]]) * label_binary ), oov_count
        result, _ = theano.scan(fn=timestep,
                      sequences=[ self.final_predictions, self.input_mat[:, 1:], self.for_how_long ],
                      non_sequences=T.sum(self.for_how_long))
        oov_count_total = T.sum(result[1])
        return T.exp(T.sum(result[0]).astype(theano.config.floatX)/(T.sum(self.for_how_long) - oov_count_total).astype(theano.config.floatX)).astype(theano.config.floatX)

    def create_ppl_function(self):
        """Compile the two perplexity graphs into callable functions."""
        self.lstm_ppl_fun = theano.function(
            inputs=[self.input_mat, self.for_how_long],
            outputs=self.lstm_ppl,
            allow_input_downcast=True)
        self.final_ppl_fun = theano.function(
            inputs=[self.input_mat, self.for_how_long],
            outputs=self.final_ppl,
            allow_input_downcast=True)

    def __call__(self, x):
        # NOTE(review): self.pred_fun is never defined anywhere in this
        # class (only lstm_pred_fun / final_pred_fun are) -- calling a
        # Model instance will raise AttributeError.  Verify before use.
        return self.pred_fun(x)#any problem??
def get_minibatch(full_data, full_lengths, minibatch_size, minibatch_idx):
    """Slice minibatch *minibatch_idx* out of the padded data matrix.

    Parameters
    ----------
    full_data : 2-D array
        Padded, index-encoded sentences, one per row.
    full_lengths : sequence of int
        True length of each sentence, aligned with the rows of full_data.
    minibatch_size : int
        Number of examples in one minibatch.
    minibatch_idx : int
        Zero-based index of the minibatch to extract.

    Returns
    -------
    (minibatch_data, lengths)
        The contiguous row slice of *full_data* for this minibatch and the
        matching list of sentence lengths.

    Raises
    ------
    IndexError
        If the requested range runs past the end of *full_lengths*
        (same behavior as the original element-by-element loop).
    """
    start = minibatch_size * minibatch_idx
    stop = start + minibatch_size
    # Original code pre-allocated a throwaway np.empty buffer and computed
    # an unused `width = max(full_lengths)`; both were dead and are removed.
    lengths = [full_lengths[j] for j in range(start, stop)]
    minibatch_data = full_data[start:stop, :]
    return minibatch_data, lengths
def training(args, vocab, train_data, train_lengths, valid_data, valid_lengths):
    """Train the language model with minibatch SGD and early stopping.

    Selects the update/perplexity functions according to
    args.train_method (LSTM / TURING / ALL), tracks the best validation
    perplexity, saves the best model, halves the learning rate on small
    regressions and stops after repeated large regressions.
    """
    # training information
    print 'training information'
    print '-------------------------------------------------------'
    print 'method: %s' % args.train_method
    print 'vocab size: %d' % len(vocab)
    print 'sentences in training file: %d' % len(train_lengths)
    print 'max length in training file: %d' % max(train_lengths)
    print 'train file: %s' % args.train[0]
    print 'valid file: %s' % args.valid[0]
    print 'type: %s' % args.celltype
    print 'project: %d' % args.n_projection
    print 'hidden: %d' % args.n_hidden
    print 'stack: %d' % args.n_stack
    print 'learning rate: %f' % args.lr
    print 'minibatch size: %d' % args.minibatch_size
    print 'max epoch: %d' % args.max_epoch
    print 'improvement rate: %f' % args.improvement_rate
    print 'save file: %s' % args.save_net
    print 'load_model: %s' % args.load_net
    print 'early-stop: %r' % args.early_stop
    print '-------------------------------------------------------'
    # NOTE(review): celltype is unbound if args.celltype is neither
    # 'LSTM' nor 'RNN' -- the Model() call below would then raise.
    if args.celltype == 'LSTM':
        celltype = LSTM
    elif args.celltype == 'RNN':
        celltype = RNN
    print 'start initializing model'
    # construct model & theano functions:
    model = Model(
        input_size=args.n_projection,
        hidden_size=args.n_hidden,
        vocab_size=len(vocab),
        stack_size=args.n_stack, # make this bigger, but makes compilation slow
        celltype=celltype # use RNN or LSTM
    )
    if args.lr :
        model.lstm_lr = args.lr
        model.turing_lr = args.lr
        model.all_lr = args.lr
    # greedy generation stops on the sentence terminator '.'
    model.stop_on(vocab.word2index["."])
    if args.load_net :
        if args.lr :
            model.load(args.load_net, args.lr)# 0 is useless
        else :
            model.load(args.load_net, 0)
    # train:
    #select correct train and prediction method according to train_method(LSTM/TURING/ALL)
    if args.train_method == 'LSTM' :
        update_fun = model.lstm_update_fun
        ppl_fun = model.lstm_ppl_fun
        lr = model.lstm_lr
        print 'update lstm learning rate : %f' % model.lstm_lr
    elif args.train_method == 'TURING' :
        update_fun = model.turing_update_fun
        ppl_fun = model.final_ppl_fun
        lr = model.turing_lr
        print 'update turing learning rate : %f' % model.turing_lr
    else :
        update_fun = model.all_update_fun
        ppl_fun = model.final_ppl_fun
        lr = model.all_lr
        print 'update all learning rate : %f' % model.all_lr
    stop_count = 0 # for stop training
    change_count = 0 # for change learning rate
    print 'start training'
    min_valid_ppl = float('inf')
    for epoch in range(args.max_epoch):
        print "\nepoch %d" % epoch
        # minibatch part
        minibatch_size = args.minibatch_size # how many examples in a minibatch
        n_train_batches = len(train_lengths)/minibatch_size
        # Accumulate length-weighted ppl so the epoch average is per word.
        train_ppl = 0
        for minibatch_idx in range(n_train_batches):
            minibatch_train_data, lengths = get_minibatch(train_data, train_lengths, minibatch_size, minibatch_idx)
            error = update_fun(minibatch_train_data , list(lengths) )
            minibatch_train_ppl = ppl_fun(minibatch_train_data, list(lengths))
            train_ppl = train_ppl + minibatch_train_ppl * sum(lengths)
            sys.stdout.write( '\n%d minibatch idx / %d total minibatch, ppl: %f '% (minibatch_idx+1, n_train_batches, minibatch_train_ppl) )
            sys.stdout.flush() # important
        # rest minibatch if exits
        if (minibatch_idx + 1) * minibatch_size != len(train_lengths):
            minibatch_idx = minibatch_idx + 1
            n_rest_example = len(train_lengths) - minibatch_size * minibatch_idx
            minibatch_train_data, lengths = get_minibatch(train_data, train_lengths, n_rest_example, minibatch_idx)
            error = update_fun(minibatch_train_data , list(lengths) )
            minibatch_train_ppl = ppl_fun(minibatch_train_data, list(lengths))
            train_ppl = train_ppl + minibatch_train_ppl * sum(lengths)
        train_ppl = train_ppl / sum(train_lengths)
        # print 'done training'
        # valid ppl
        minibatch_size = min(20, len(valid_lengths))
        valid_ppl = 0
        n_valid_batches = len(valid_lengths)/minibatch_size
        for minibatch_idx in range(n_valid_batches):
            minibatch_valid_data, lengths = get_minibatch(valid_data, valid_lengths, minibatch_size, minibatch_idx)
            minibatch_valid_ppl = ppl_fun(minibatch_valid_data, list(lengths))
            valid_ppl = valid_ppl + minibatch_valid_ppl * sum(lengths)
        # last minibatch
        if (minibatch_idx + 1) * minibatch_size != len(valid_lengths):
            minibatch_idx = minibatch_idx + 1
            n_rest_example = len(valid_lengths) - minibatch_size * minibatch_idx
            minibatch_valid_data, lengths = get_minibatch(valid_data, valid_lengths, n_rest_example, minibatch_idx)
            minibatch_valid_ppl = ppl_fun(minibatch_valid_data, list(lengths))
            valid_ppl = valid_ppl + minibatch_valid_ppl * sum(lengths)
        valid_ppl = valid_ppl / sum(valid_lengths)
        print "\ntrain ppl: %f, valid ppl: %f" % (train_ppl, valid_ppl)
        # New best model: save and reset the early-stop counters.
        if valid_ppl < min_valid_ppl:
            min_valid_ppl = valid_ppl
            model.save(args.save_net, vocab)
            if args.train_method != 'LSTM' :
                model.save_turing(args.save_net)
            stop_count = 0
            change_count = 0
            print "save best model"
            continue
        if args.early_stop:
            # Large regression: stop after 3 strikes or a tiny lr.
            if (valid_ppl - min_valid_ppl) / min_valid_ppl > args.improvement_rate:
                if stop_count > 2 or lr < 1e-6:
                    print 'stop training'
                    break
                stop_count = stop_count + 1
            # Moderate regression: halve every learning rate.
            elif (valid_ppl - min_valid_ppl) / min_valid_ppl > args.improvement_rate * 0.5:
                # if change_count > 2:
                print 'change learning rate from %f to %f' % (lr, lr/2)
                model.lstm_lr = model.lstm_lr / 2.
                model.turing_lr = model.turing_lr / 2.
                model.all_lr = model.all_lr / 2.
                if args.train_method == 'LSTM' :
                    lr = model.lstm_lr
                elif args.train_method == 'TURING' :
                    lr = model.turing_lr
                else :
                    lr = model.all_lr
                # change_count = change_count + 1
def testing(args, test_data, test_lengths):
    """Load a saved model and report its perplexity on the test set.

    The Model is first constructed with dummy sizes (1/1/1) purely so a
    graph exists, then overwritten by load(); the perplexity function is
    chosen from args.train_method (LSTM vs. TURING/ALL).
    """
    print 'start loading'
    model_load = Model(
        input_size=1,
        hidden_size=1,
        vocab_size=1,
        stack_size=1, # make this bigger, but makes compilation slow
        celltype=RNN # use RNN or LSTM
    )
    # NOTE(review): `vocab` here is the module-level global set in
    # __main__ (loaded from the .vocab pickle) -- confirm before reuse.
    model_load.stop_on(vocab.word2index["."])
    if args.train_method != 'LSTM' :
        if not os.path.isfile(args.load_net + '.turing') :
            print "there is no trained turing file so we can't test by turing model!!"
            sys.exit()
    model_load.load(args.load_net, 0)
    # test ppl
    #select correct train and prediction method according to train_method(LSTM/TURING/ALL)
    if args.train_method == 'LSTM' :
        ppl_fun = model_load.lstm_ppl_fun
    else :
        ppl_fun = model_load.final_ppl_fun
    minibatch_size = min(20, len(test_lengths))
    # Length-weighted accumulation, same scheme as training/validation.
    test_ppl = 0
    n_test_batches = len(test_lengths)/minibatch_size
    for minibatch_idx in range(n_test_batches):
        minibatch_test_data, lengths = get_minibatch(test_data, test_lengths, minibatch_size, minibatch_idx)
        minibatch_test_ppl = ppl_fun(minibatch_test_data, list(lengths))
        test_ppl = test_ppl + minibatch_test_ppl * sum(lengths)
    # last minibatch
    if (minibatch_idx + 1) * minibatch_size != len(test_lengths):
        minibatch_idx = minibatch_idx + 1
        n_rest_example = len(test_lengths) - minibatch_size * minibatch_idx
        minibatch_test_data, lengths = get_minibatch(test_data, test_lengths, n_rest_example, minibatch_idx)
        minibatch_test_ppl = ppl_fun(minibatch_test_data, list(lengths))
        test_ppl = test_ppl + minibatch_test_ppl * sum(lengths)
    test_ppl = test_ppl / sum(test_lengths)
    print "test ppl: %f" %test_ppl
if __name__ == "__main__":
    # Entry point: --train drives training (building a fresh vocab from
    # the training file); --test reloads a pickled vocab and evaluates.
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    args = parse_args(parser)
    # if no args are passed
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit()
    if args.train:
        vocab = build_vocab(args.train[0])
        train_data, train_lengths = load_data(args.train[0], vocab, 'train')
        valid_data, valid_lengths = load_data(args.valid[0], vocab, 'valid')
        training(args, vocab, train_data, train_lengths, valid_data, valid_lengths)
    elif args.test:
        # NOTE(review): requires --load-net; vocab is read from the
        # companion '<load_net>.vocab' pickle saved by Model.save().
        vocab = pickle.load(open(args.load_net+'.vocab', "rb"))
        test_data, test_lengths = load_data(args.test[0], vocab, 'test')
        testing(args, test_data, test_lengths)
| {
"content_hash": "102d845e3831971e7db0fc3fcf2ef599",
"timestamp": "",
"source": "github",
"line_count": 709,
"max_line_length": 164,
"avg_line_length": 36.34555712270804,
"alnum_prop": 0.6830299972835577,
"repo_name": "darongliu/Lstm_Turing_LM",
"id": "441640342e688624f6c3f22f477e5eea51bf73e0",
"size": "25769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lstm-neural-turing-machines-lm/exp3/v1-two-weight/lm_v4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "95099551"
},
{
"name": "Python",
"bytes": "4269632"
},
{
"name": "Shell",
"bytes": "156383"
}
],
"symlink_target": ""
} |
'''
Test for deleting and expunge iso cloned vm ops.
The key step:
-add iso
-create vm1 from iso
-clone vm2 from vm1
-del iso
-attach iso/volume to vm2
-expunge and detach iso
-migrate vm2
@author: PxChen
'''
import os
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.image_operations as img_ops
# Shared project test helpers for this module.
test_stub = test_lib.lib_get_test_stub()
# Tracks every created resource so cleanup can tear them all down.
test_obj_dict = test_state.TestStateDict()
# The ISO image under test; assigned inside test(), used by error_cleanup().
iso = None
def test():
    """End-to-end test: create a VM from an ISO, clone it, then delete,
    attach, expunge and detach the ISO, and finally migrate the clone.
    """
    global iso
    global test_obj_dict
    # run condition: flow is only valid on image-store / ceph backup storage
    allow_bs_list = [inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE, inventory.CEPH_BACKUP_STORAGE_TYPE]
    test_lib.skip_test_when_bs_type_not_in_list(allow_bs_list)
    hosts = res_ops.query_resource(res_ops.HOST)
    # the final migration step needs at least two hosts
    if len(hosts) <= 1:
        test_util.test_skip("skip for host_num is not satisfy condition host_num>1")
    # add iso and create vm from iso (10 GiB root disk, 2 vCPU / 1 GiB RAM)
    iso = test_stub.add_test_minimal_iso('minimal_iso')
    test_obj_dict.add_image(iso)
    root_volume_offering = test_stub.add_test_root_volume_offering('root-disk-iso', 10737418240)
    test_obj_dict.add_disk_offering(root_volume_offering)
    vm_offering = test_stub.add_test_vm_offering(2, 1024*1024*1024, 'iso-vm-offering')
    test_obj_dict.add_instance_offering(vm_offering)
    vm = test_stub.create_vm_with_iso_for_test(vm_offering.uuid, iso.image.uuid, root_volume_offering.uuid, 'iso-vm')
    test_obj_dict.add_vm(vm)
    # check vm is reachable over SSH before continuing (30 min timeout)
    vm_inv = vm.get_vm()
    test_lib.lib_set_vm_host_l2_ip(vm_inv)
    test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, 22, 1800)
    # clone vm; clone() takes a list of names and returns a list of VMs
    cloned_vm_name = ['cloned_vm']
    cloned_vm_obj = vm.clone(cloned_vm_name)[0]
    test_obj_dict.add_vm(cloned_vm_obj)
    # delete iso (soft delete; expunged later)
    iso.delete()
    # vm ops test: attach iso/volume while source ISO is deleted
    test_stub.vm_ops_test(cloned_vm_obj, "VM_TEST_ATTACH")
    # expunge iso permanently
    iso.expunge()
    # detach iso from the original VM
    img_ops.detach_iso(vm.vm.uuid)
    # vm ops test: migration must still work after the ISO is gone
    test_stub.vm_ops_test(cloned_vm_obj, "VM_TEST_MIGRATE")
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Cloned VM ops for BS Success')
# Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort cleanup invoked only when test() raises.

    Releases all tracked test resources, then tries to delete the ISO.
    Any failure during ISO deletion is swallowed so that cleanup never
    masks the original test failure.
    """
    global iso
    global test_obj_dict
    test_lib.lib_error_cleanup(test_obj_dict)
    try:
        # iso may be None if the failure happened before the ISO was added.
        if iso is not None:
            iso.delete()
    except Exception:
        # Catch Exception (not a bare except) so SystemExit and
        # KeyboardInterrupt still propagate.
        pass
| {
"content_hash": "b265e53cf778cfa7ce5828771578aaa3",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 117,
"avg_line_length": 28.586206896551722,
"alnum_prop": 0.6948130277442702,
"repo_name": "zstackorg/zstack-woodpecker",
"id": "4d0850631793210b6c0da454c0ddc6965cc4f674",
"size": "2487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/multihosts/bs/test_del_exp_iso_clnd_vm_attach_mig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "46522"
},
{
"name": "Makefile",
"bytes": "692"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "2891030"
},
{
"name": "Shell",
"bytes": "54266"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from ampadb.support import is_admin
from usermanager.models import Profile
@login_required
def index(request):
    """Redirect the logged-in user to the appropriate contact-board page."""
    user = request.user
    # Administrators go straight to the full admin listing.
    if is_admin(user):
        return redirect('contactboard:adminlist')
    # Regular users land on the list for their own class.
    classe = Profile.objects.get(user=user).alumne.classe
    return redirect('contactboard:list', classe.id_interna)
def markdown_help(request):
    """Render the static Markdown syntax help page."""
    template_name = 'support/markdown_help.html'
    return render(request, template_name)
def search_syntax(request):
    """Render the static search-syntax help page."""
    template_name = 'support/search_syntax.html'
    return render(request, template_name)
| {
"content_hash": "ea2836c1f24037375e9a5d97a0a1b984",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 74,
"avg_line_length": 30,
"alnum_prop": 0.7683333333333333,
"repo_name": "ampafdv/ampadb",
"id": "da27a125bde6718ca9cf55a6095f9e5f5df407e7",
"size": "600",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ampadb_index/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "877"
},
{
"name": "CSS",
"bytes": "4028"
},
{
"name": "CoffeeScript",
"bytes": "14975"
},
{
"name": "HTML",
"bytes": "70797"
},
{
"name": "PowerShell",
"bytes": "1001"
},
{
"name": "Python",
"bytes": "242729"
},
{
"name": "Shell",
"bytes": "879"
}
],
"symlink_target": ""
} |
import struct, sys
def seekread(f, offset=None, length=0, relative=True):
    """Optionally seek within *f*, then optionally read from it.

    Args:
        f: open file-like object supporting seek() and read().
        offset: if not None, seek to this offset before reading.
        length: number of bytes to read; 0 means "seek only".
        relative: whence selector for seek(); truthy (1) seeks relative
            to the current position, falsy (0) from the file start.

    Returns:
        The bytes read, or None when length == 0.
    """
    if offset is not None:
        # offset provided, let's seek; bool indexes as 0/1, matching
        # os.SEEK_SET / os.SEEK_CUR.
        f.seek(offset, [0, 1, 2][relative])
    if length != 0:
        return f.read(length)
def parse_pbzx(pbzx_path):
    """Split an Apple pbzx stream into its .xz / raw cpio part files.

    Walks the pbzx container at *pbzx_path*, writing each xz-compressed
    chunk into '<path>.partNN.cpio.xz' and each raw (already decompressed)
    cpio chunk into '<path>.partNN.cpio'.

    Raises:
        ValueError: if the file is not a pbzx container, or a chunk
            footer is not a valid xz stream footer.
    """
    section = 0
    xar_out_path = '%s.part%02d.cpio.xz' % (pbzx_path, section)
    f = open(pbzx_path, 'rb')
    magic = seekread(f, length=4)
    if magic != 'pbzx':
        # Raising a plain string was a TypeError since Python 2.6;
        # raise a real exception type instead.
        raise ValueError("Error: Not a pbzx file")
    # Read 8 bytes for initial flags
    flags = seekread(f, length=8)
    # Interpret the flags as a 64-bit big-endian unsigned int
    flags = struct.unpack('>Q', flags)[0]
    xar_f = open(xar_out_path, 'wb')
    # Bit 24 of the flags signals that more chunks follow.
    while (flags & (1 << 24)):
        # Read in more flags
        flags = seekread(f, length=8)
        flags = struct.unpack('>Q', flags)[0]
        # Read in length
        f_length = seekread(f, length=8)
        f_length = struct.unpack('>Q', f_length)[0]
        xzmagic = seekread(f, length=6)
        if xzmagic != '\xfd7zXZ\x00':
            # This isn't xz content, this is actually a _raw decompressed
            # cpio_ chunk of 16MB in size... Let's back up over the 6 bytes
            # we just read ...
            seekread(f, offset=-6, length=0)
            # ... and split it out ...
            f_content = seekread(f, length=f_length)
            section += 1
            decomp_out = '%s.part%02d.cpio' % (pbzx_path, section)
            g = open(decomp_out, 'wb')
            g.write(f_content)
            g.close()
            # Now to start the next section, which should hopefully be .xz
            # (we'll just assume it is ...)
            xar_f.close()
            section += 1
            new_out = '%s.part%02d.cpio.xz' % (pbzx_path, section)
            xar_f = open(new_out, 'wb')
        else:
            f_length -= 6
            # This part needs buffering
            f_content = seekread(f, length=f_length)
            tail = seekread(f, offset=-2, length=2)
            xar_f.write(xzmagic)
            xar_f.write(f_content)
            if tail != 'YZ':
                xar_f.close()
                raise ValueError("Error: Footer is not xar file footer")
    try:
        f.close()
        xar_f.close()
    except:
        pass
def main():
    """CLI entry point: split the pbzx file named by the first argument."""
    # NOTE(review): parse_pbzx returns None, so 'result' is always unused.
    result = parse_pbzx(sys.argv[1])
    print "Now xz decompress the .xz chunks, then 'cat' them all together in order into a single new.cpio file"
if __name__ == '__main__':
    main()
"content_hash": "6f628978cfa5df92144473cf5189ce30",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 111,
"avg_line_length": 34.971014492753625,
"alnum_prop": 0.5337753833402403,
"repo_name": "phatblat/dotfiles",
"id": "1b92e55ffa89fd9b2e31990151b15556be17d369",
"size": "2992",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "bin/pbzx.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "4713"
},
{
"name": "Kotlin",
"bytes": "1036"
},
{
"name": "Nix",
"bytes": "679"
},
{
"name": "Python",
"bytes": "13865"
},
{
"name": "Ruby",
"bytes": "21878"
},
{
"name": "Shell",
"bytes": "416686"
},
{
"name": "Swift",
"bytes": "485"
},
{
"name": "Vim Script",
"bytes": "6118"
}
],
"symlink_target": ""
} |
import os
import sqlite3
from time import time, strftime, gmtime
from waskr.config import options
import log
# Fixes Database Absolute Location
FILE_CWD = os.path.abspath(__file__)
FILE_DIR = os.path.dirname(FILE_CWD)
DB_FILE = FILE_DIR+'/waskr.db'
# Engines Supported
engines_supported = ['sqlite', 'mongodb']
class conf_db(object):
    """Tiny sqlite-backed store holding the path of the waskr MASTER config."""

    def __init__(self, db=DB_FILE):
        self.db = db
        # Check for the file *before* connecting: sqlite3.connect creates it.
        already_present = os.path.isfile(self.db)
        self.conn = sqlite3.connect(self.db)
        self.c = self.conn.cursor()
        if not already_present:
            # Fresh database: create the single-column config table.
            self.c.execute("""CREATE TABLE config(path TEXT)""")
            self.conn.commit()

    def closedb(self):
        """Make sure the db is closed"""
        self.conn.close()

    def add_config(self, path):
        """Adds a MASTER config for waskr"""
        # Keep at most one row: wipe the table, then insert unless present.
        self.c.execute('DELETE FROM config')
        self.c.execute(
            'INSERT INTO config(path) select ? WHERE NOT EXISTS(SELECT 1 FROM config WHERE path=?)',
            (path, path))
        self.conn.commit()

    def get_config_path(self):
        """Returns the first entry for the config path"""
        return self.c.execute("SELECT * FROM config limit 1")
class Stats(object):
    """Facade over a pluggable storage engine for waskr request stats.

    Dynamically imports the engine named by config['db_engine'] from
    waskr.engines and delegates every call to that engine's Stats class.
    """
    def __init__(self,config=None, test=False):
        self.config = options(config)
        self.engine = self._load_engine()
        self.stats = self.engine.Stats(config, test)
    def _load_engine(self):
        """Import the configured engine module; unknown names use sqlite."""
        if self._check_module(self.config['db_engine']):
            engine = __import__('waskr.engines.%s' % self.config['db_engine'],
                    fromlist=['None'])
        else:
            engine = __import__('waskr.engines.sqlite',
                    fromlist=['None']) # fall backs to sqlite3
        return engine
    def _check_module(self, module):
        # True only for engines in the module-level engines_supported list.
        if module not in engines_supported:
            return False
        return True
    def insert(self, stats):
        self.stats.insert(stats)
    def last_insert(self):
        return self.stats.last_insert()
    def apps_nodes(self):
        return self.stats.apps_nodes()
    def response_time(self, minutes):
        return self.stats.response_time(minutes)
    def response_bundle(self, minutes):
        # NOTE(review): delegates to request_bundle, not response_bundle -
        # looks like a copy-paste slip; confirm against the engine API.
        return self.stats.request_bundle(minutes)
    def request_bundle(self, minutes):
        return self.stats.request_bundle(minutes)
    def request_time(self, minutes):
        return self.stats.request_time(minutes)
| {
"content_hash": "eaf658db7dbc7fdf1271740964d0fa60",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 106,
"avg_line_length": 25.933333333333334,
"alnum_prop": 0.5780389276533235,
"repo_name": "AloneRoad/waskr",
"id": "ce5a6d147fd5270a884a91784e35f820bf587d8e",
"size": "2723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "waskr/database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "407032"
},
{
"name": "Python",
"bytes": "117040"
}
],
"symlink_target": ""
} |
import numpy as np
import pdb
""" Factor Graph classes forming structure for PGMs
Basic structure is port of MATLAB code by J. Pacheco
Central difference: nbrs stored as references, not ids
(makes message propagation easier)
Note to self: use %pdb and %load_ext autoreload followed by %autoreload 2
"""
class Node(object):
    """Superclass for factor-graph nodes.

    Tracks neighbor references plus the incoming/outgoing message lists
    that loopy belief propagation updates in lock-step (index i of each
    list corresponds to neighbors[i]).
    """
    # Convergence threshold: messages that move less than this are
    # considered unchanged.
    epsilon = 10**(-4)

    def __init__(self, nid):
        self.enabled = True
        self.nid = nid
        self.neighbors = []
        self.incoming = []
        self.outgoing = []
        self.old_outgoing = []

    def reset(self):
        self.enabled = True

    def disable(self):
        self.enabled = False

    def enable(self):
        self.enabled = True
        for n in self.neighbors:
            # don't call enable() as it will recursively enable entire graph
            n.enabled = True

    def next_step(self):
        """Snapshot outgoing messages so convergence can be checked later."""
        self.old_outgoing = self.outgoing[:]

    def normalize_messages(self):
        """Normalize each outgoing message to sum to 1."""
        self.outgoing = [x / np.sum(x) for x in self.outgoing]

    def receive_message(self, node, message):
        """Place a new message from `node` into its incoming slot."""
        if self.enabled:
            i = self.neighbors.index(node)
            self.incoming[i] = message

    def send_messages(self):
        """Send all outgoing messages to the corresponding neighbors."""
        # Iterate neighbors and messages together instead of the old
        # xrange(len(...)) index loop (xrange is Python-2-only).
        for neighbor, message in zip(self.neighbors, self.outgoing):
            neighbor.receive_message(self, message)

    def check_convergence(self):
        """Return True when no outgoing message changed by more than epsilon."""
        if not self.enabled:
            # Always return True if disabled to avoid interrupting check
            return True
        for old, new in zip(self.old_outgoing, self.outgoing):
            # check messages have same shape
            old.shape = new.shape
            delta = np.absolute(new - old)
            if (delta > Node.epsilon).any():  # if there has been change
                return False
        return True
class VarNode(Node):
    """Variable node in factor graph."""

    def __init__(self, name, dim, nid):
        super(VarNode, self).__init__(nid)
        self.name = name
        self.dim = dim
        self.observed = -1  # only >= 0 if variable is observed

    def reset(self):
        """Re-enable the node and blank out all message state."""
        super(VarNode, self).reset()
        n_edges = len(self.incoming)
        self.incoming = [np.ones((self.dim, 1)) for _ in range(n_edges)]
        self.outgoing = [np.ones((self.dim, 1)) for _ in range(n_edges)]
        self.old_outgoing = [np.ones((self.dim, 1)) for _ in range(n_edges)]
        self.observed = -1

    def condition(self, observation):
        """Condition on observing value `observation` (an index < dim)."""
        self.enable()
        self.observed = observation
        # Outgoing messages become a fixed indicator on the observed value
        # (they won't change while the observation holds).
        for i in range(len(self.outgoing)):
            indicator = np.zeros((self.dim, 1))
            indicator[self.observed] = 1.
            self.outgoing[i] = indicator
        self.next_step()  # copy into old_outgoing

    def prep_messages(self):
        """Multiply together incoming messages to make new outgoing ones."""
        # compute new messages only if no observation has been made
        if self.enabled and self.observed < 0 and len(self.neighbors) > 1:
            # switch reference for old messages
            self.next_step()
            for i in range(len(self.incoming)):
                # product of all incoming messages except the one at index i;
                # slicing replaces the old copy-then-delete pattern.
                others = self.incoming[:i] + self.incoming[i + 1:]
                self.outgoing[i] = reduce(np.multiply, others)
            # normalize once finished with all messages
            self.normalize_messages()
class FacNode(Node):
    """Factor node in factor graph.

    Holds the factor table P (one axis per connected variable, in the
    order the variables are passed to __init__) and computes sum-product
    messages to each variable.
    """
    def __init__(self, P, nid, *args):
        super(FacNode, self).__init__(nid)
        self.P = P
        self.neighbors = list(args) # list storing refs to variable nodes
        # num of edges
        n_neighbors = len(self.neighbors)
        n_dependencies = self.P.squeeze().ndim
        # init messages (uniform ones) for both ends of each edge
        for i in xrange(0, n_neighbors):
            v = self.neighbors[i]
            vdim = v.dim
            # init for factor
            self.incoming.append(np.ones((vdim, 1)))
            self.outgoing.append(np.ones((vdim, 1)))
            self.old_outgoing.append(np.ones((vdim, 1)))
            # TODO: do this in an add_neighbor function in the VarNode class!
            # init for variable
            v.neighbors.append(self)
            v.incoming.append(np.ones((vdim, 1)))
            v.outgoing.append(np.ones((vdim, 1)))
            v.old_outgoing.append(np.ones((vdim, 1)))
        # error check: P must have exactly one (non-singleton) axis per edge
        assert (n_neighbors == n_dependencies), "Factor dimensions does not match size of domain."
    def reset(self):
        """Re-enable the node and reset all messages to uniform ones."""
        super(FacNode, self).reset()
        for i in xrange(0, len(self.incoming)):
            self.incoming[i] = np.ones((self.neighbors[i].dim, 1))
            self.outgoing[i] = np.ones((self.neighbors[i].dim, 1))
            self.old_outgoing[i] = np.ones((self.neighbors[i].dim, 1))
    def prep_messages(self):
        """Multiplies incoming messages w/ P to make new outgoing.

        NOTE(review): this mutates self.incoming in place (reshape, tile,
        rollaxis), so incoming messages are consumed by each call and are
        expected to be refreshed by the variable nodes before the next one.
        """
        if self.enabled:
            # switch references for old messages
            self.next_step()
            n_messages = len(self.incoming)
            # do tiling in advance
            # roll axes to match shape of newMessage after
            for i in xrange(0, n_messages):
                # find tiling size: P's shape with axis i removed, plus a
                # leading singleton that the incoming message fills
                next_shape = list(self.P.shape)
                del next_shape[i]
                next_shape.insert(0, 1)
                # need to expand incoming message to correct num of dims to tile properly
                prep_shape = [1 for x in next_shape]
                prep_shape[0] = self.incoming[i].shape[0]
                self.incoming[i].shape = prep_shape
                # tile and roll so message i's axis lines up with P's axis i
                self.incoming[i] = np.tile(self.incoming[i], next_shape)
                self.incoming[i] = np.rollaxis(self.incoming[i], 0, i+1)
            # loop over subsets
            for i in xrange(0, n_messages):
                curr = self.incoming[:]
                del curr[i]
                new_message = reduce(np.multiply, curr, self.P)
                # sum over all vars except i!
                # roll axis i to front then sum over all other axes
                new_message = np.rollaxis(new_message, i, 0)
                new_message = np.sum(new_message, tuple(range(1, n_messages)))
                new_message.shape = (new_message.shape[0], 1)
                #store new message as a column vector
                self.outgoing[i] = new_message
            # normalize once finished with all messages
            self.normalize_messages()
| {
"content_hash": "1501fd46c6e51633e7b1a46ea133c8ad",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 98,
"avg_line_length": 35.65841584158416,
"alnum_prop": 0.5415798972650284,
"repo_name": "dksahuji/pyfac",
"id": "1c62d671512e720a3a7d35d9e80bfd5957fa4736",
"size": "7203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "node.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23064"
}
],
"symlink_target": ""
} |
"""
Role tests
"""
import os
import pytest
from testinfra.utils.ansible_runner import AnsibleRunner
testinfra_hosts = AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize('name,codenames', [
    ('openjdk-8-jdk', ['stretch']),
    ('openjdk-11-jdk', ['bionic', 'buster', 'focal']),
    ('java-11-openjdk-devel', ['7', '8']),
])
def test_repository_file(host, name, codenames):
    """
    Test packages installed
    """
    info = host.system_info
    if info.distribution not in ['centos', 'debian', 'ubuntu']:
        pytest.skip('{} ({}) distribution not managed'.format(
            info.distribution, info.release))
    # Debian-family images are matched on codename, RedHat-family on release.
    if info.distribution in ['debian', 'ubuntu']:
        version = info.codename
    else:
        version = info.release
    if codenames and version.lower() not in codenames:
        pytest.skip('{} package not used with {} ({})'.format(
            name, info.distribution, version))
    assert host.package(name).is_installed
| {
"content_hash": "2d7c9d63351b296209c926f9773e59bf",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 79,
"avg_line_length": 33.18421052631579,
"alnum_prop": 0.6352101506740682,
"repo_name": "infOpen/ansible-role-openjdk-jdk",
"id": "480c98169036d8fb5f54386fd2169ad6081aff29",
"size": "1261",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "molecule/default/tests/test_installation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "1255"
},
{
"name": "Python",
"bytes": "1334"
}
],
"symlink_target": ""
} |
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.exceptions import PermissionDenied
import django_filters
from lims.permissions.permissions import IsInStaffGroupOrRO
from lims.shared.mixins import AuditTrailViewMixin
from lims.shared.mixins import StatsViewMixin
from .models import Equipment, EquipmentReservation
from .serializers import EquipmentSerializer, EquipmentReservationSerializer
class EquipmentViewSet(AuditTrailViewMixin, viewsets.ModelViewSet, StatsViewMixin):
    """CRUD endpoints for Equipment.

    Writes are restricted to the staff group; everyone else gets
    read-only access (IsInStaffGroupOrRO).
    """
    queryset = Equipment.objects.all()
    serializer_class = EquipmentSerializer
    # Exact-match query-string filters.
    filter_fields = ('can_reserve', 'status',)
    # Free-text ?search= against the equipment name.
    search_fields = ('name',)
    permission_classes = (IsInStaffGroupOrRO,)
class EquipmentReservationFilter(django_filters.FilterSet):
    """Filter set for equipment reservations.

    The explicit IsoDateTimeFilter declarations let the start/end query
    parameters accept ISO-8601 datetimes.

    NOTE(review): the `name=` kwarg was renamed to `field_name` in
    django-filter 2.x - confirm the pinned django-filter version.
    """
    start = django_filters.IsoDateTimeFilter()
    start__gte = django_filters.IsoDateTimeFilter(name='start', lookup_expr='gte')
    start__lte = django_filters.IsoDateTimeFilter(name='start', lookup_expr='lte')
    end = django_filters.IsoDateTimeFilter()
    end__lte = django_filters.IsoDateTimeFilter(name='end', lookup_expr='lte')
    class Meta:
        model = EquipmentReservation
        fields = {
            'id': ['exact'],
            'start': ['exact', 'gte', 'lte'],
            'end': ['exact', 'lte'],
            'equipment_reserved': ['exact'],
            'checked_in': ['exact'],
            'is_confirmed': ['exact'],
            'reserved_by__username': ['exact'],
        }
class EquipmentReservationViewSet(AuditTrailViewMixin, viewsets.ModelViewSet):
    """CRUD endpoints for equipment reservations.

    Staff-created reservations are auto-confirmed; updates and deletes
    are limited to the reserving user or staff.
    """
    queryset = EquipmentReservation.objects.all()
    serializer_class = EquipmentReservationSerializer
    filter_class = EquipmentReservationFilter

    def perform_create(self, serializer):
        user = self.request.user
        # Reservations made by staff are confirmed immediately.
        if user.groups.filter(name='staff').exists():
            serializer.validated_data['is_confirmed'] = True
            serializer.validated_data['confirmed_by'] = user
        serializer.save(reserved_by=user)

    def perform_update(self, serializer):
        user = self.request.user
        allowed = (serializer.instance.reserved_by == user
                   or user.groups.filter(name='staff').exists())
        if not allowed:
            raise PermissionDenied()
        serializer.save()

    def destroy(self, request, pk=None):
        reservation = self.get_object()
        allowed = (request.user == reservation.reserved_by
                   or request.user.groups.filter(name='staff').exists())
        if not allowed:
            return Response({'message': 'You must have permission to delete'}, status=403)
        return super(EquipmentReservationViewSet, self).destroy(request, reservation.id)
| {
"content_hash": "5d80ac9a4abbed37bfa03b99dade0e65",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 98,
"avg_line_length": 40.5,
"alnum_prop": 0.687616909839132,
"repo_name": "GETLIMS/LIMS-Backend",
"id": "5225b1664c9441d677f9570d6d70d232c49d3274",
"size": "2674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lims/equipment/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "474"
},
{
"name": "Python",
"bytes": "231759"
}
],
"symlink_target": ""
} |
from msrest.exceptions import (
ClientException,
SerializationError,
DeserializationError,
TokenExpiredError,
ClientRequestError,
AuthenticationError,
HttpOperationError,
)
from .api_client import AutoRestComplexTestService, AutoRestComplexTestServiceConfiguration
# Public API of the package: re-export the msrest exception types
# alongside the generated client and its configuration class.
__all__ = [
    'ClientException',
    'SerializationError',
    'DeserializationError',
    'TokenExpiredError',
    'ClientRequestError',
    'AuthenticationError',
    'HttpOperationError',
    'AutoRestComplexTestService',
    'AutoRestComplexTestServiceConfiguration'
]
| {
"content_hash": "908484c75fd963b2adb0a9695b7c07ca",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 91,
"avg_line_length": 24.782608695652176,
"alnum_prop": 0.7508771929824561,
"repo_name": "vulcansteel/autorest",
"id": "dd72afda441ee3a0826690ab9e3c167d06961f02",
"size": "1044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/BodyComplex/auto_rest_complex_test_service/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "819"
},
{
"name": "C#",
"bytes": "8857811"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "Java",
"bytes": "3171512"
},
{
"name": "JavaScript",
"bytes": "4063363"
},
{
"name": "PowerShell",
"bytes": "8003"
},
{
"name": "Puppet",
"bytes": "145"
},
{
"name": "Python",
"bytes": "1831874"
},
{
"name": "Ruby",
"bytes": "218212"
},
{
"name": "TypeScript",
"bytes": "158339"
}
],
"symlink_target": ""
} |
import pdb
import json
import couchdb
import re
from collections import defaultdict
import unicodedata
def initial_data():
    """Connect to CouchDB and build a project -> {flowcell: doc_id} map.

    Returns the open 'x_flowcells' database handle together with the map.

    SECURITY NOTE(review): credentials are hardcoded in the server URL
    below - they should be moved to a config file or environment variable
    and rotated.
    """
    couch = couchdb.Server('http://isak:Purpleplant89@tools-dev.scilifelab.se:5984')
    db = couch['x_flowcells']
    view = db.view('names/project_ids_list')
    #Creates a proj -> FCs structure
    fc_track = defaultdict(set)
    for rec in view.rows:
        # Normalize every id to plain ASCII bytes.
        fc = ''.join(rec.key)
        fc = unicodedata.normalize('NFKD', fc).encode('ascii','ignore')
        id = ''.join(rec.id)
        id = unicodedata.normalize('NFKD', id).encode('ascii','ignore')
        for projs in rec.value:
            projs = ''.join(projs)
            projs = unicodedata.normalize('NFKD', projs).encode('ascii','ignore')
            # First hit for a project: swap the defaultdict's set for a dict
            # mapping flowcell -> document id.
            if fc_track[projs] == set([]):
                fc_track[projs] = dict()
            fc_track[projs][fc] = id
    return db, fc_track
def actual_app(db, fc_track, project):
    """Aggregate demultiplexing cluster counts per sample/flowcell/lane.

    Reads Demultiplex_Stats from each flowcell document, verifies that all
    samples of *project* appear in pools of identical size, and tallies how
    many clusters are still missing to reach target_clusters per sample.
    """
    #Collects data in structure [sample][fc][lane]
    sample_output = dict()
    #Reads expressed per sample
    sample_total = dict()
    #Samples for a given lane
    samples_in_lane = dict()
    #Create another hash sample -> FC that gives reads expressed
    for p in fc_track:
        for fc in fc_track[p]:
            samples_in_lane[fc] = dict()
            if 'illumina' in db[fc_track[p][fc]]:
                xflow_dem = db[fc_track[p][fc]]['illumina']['Demultiplex_Stats']['Barcode_lane_statistics']
                for index in xrange(0, len(xflow_dem)):
                    sample_name = xflow_dem[index]['Sample']
                    lane = xflow_dem[index]['Lane']
                    # Field name differs between bcl2fastq versions.
                    if 'Clusters' in xflow_dem[index]:
                        clusters = xflow_dem[index]['Clusters']
                    else:
                        clusters = xflow_dem[index]['PF Clusters']
                    #Difference between clusters and raw clusters?
                    # NOTE(review): this condition is always True ('or' of
                    # two inequalities against different constants); it was
                    # probably meant to be 'and' to skip both placeholder
                    # names - confirm intent.
                    if not sample_name == "Undetermined" or not sample_name == "unknown":
                        if not lane in samples_in_lane[fc]:
                            samples_in_lane[fc][lane] = 0
                        samples_in_lane[fc][lane] += 1
                        #Removes commas (counts are formatted like "1,234,567")
                        clusters = int(re.sub(r",", "", clusters))
                        if not sample_name in sample_output.keys():
                            sample_output[sample_name] = dict()
                        if not fc in sample_output[sample_name].keys():
                            sample_output[sample_name][fc] = dict()
                        sample_output[sample_name][fc][lane] = clusters
                        #Also sets sample total
                        if not sample_name in sample_total.keys():
                            sample_total[sample_name] = 0
                        sample_total[sample_name] += clusters
    #Check that all samples of THE project are in pools of identical fractions. Might have to check actual sample names later; or omit this
    for sample in sample_output:
        if project in sample:
            a_fc = sample_output[sample].keys()[0]
            a_lane = sample_output[sample][a_fc].keys()[0]
            that_lane = samples_in_lane[a_fc][a_lane]
            for fc in sample_output[sample]:
                for lane in sample_output[sample][fc]:
                    this_lane = samples_in_lane[fc][lane]
                    if this_lane != that_lane:
                        print "Error! Sample appeared in differing pools"
                        pdb.set_trace()
    #Gives optimal sample ratio for each lane
    unexpr_clust = 0
    for sample in sample_total:
        if project in sample:
            if target_clusters > sample_total[sample]:
                unexpr_clust += (target_clusters - sample_total[sample])
    # NOTE(review): unconditional debugger breakpoint - development
    # leftover that will hang any non-interactive run; remove before use.
    pdb.set_trace()
    #Converts n into c or v difference
    #Calculates the least amount of necessary lane duplications
    #Outputs the mapping (print format)
    #Outputs the mapping (csv format)
#REMEMBER, ASSUMES LANES ARE 100% DUPLICATE
# Targets expressed in clusters (reads): per-sample goal and lane capacity.
target_clusters = 320*1000000
clusters_per_lane = 380*1000000
# NOTE(review): clusters_per_lane is never read above - presumably intended
# for the unfinished lane-duplication logic; confirm before removing.
[db, fc_track] = initial_data()
actual_app(db, fc_track, 'P2652')
| {
"content_hash": "5f43d64e9ebba562393f61578b592b82",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 139,
"avg_line_length": 41.53,
"alnum_prop": 0.5550204671321936,
"repo_name": "sylvinite/cluster_pooler",
"id": "38e7b44ce1cdc60d1c24f638f1a91b034fcfeea4",
"size": "4153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LEGconc_recalculator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51192"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
# URL routes for the accounts app (string view references via the
# pre-Django-1.10 patterns() API).
# NOTE(review): 'pasword' is misspelled in the two reset route names;
# left unchanged because templates/reverse() may reference these names.
urlpatterns = patterns('',
    url(r'^register/$', 'accounts.views.register', name='accounts_register'),
    url(r'^login/$', 'accounts.views.login', name='accounts_login'),
    url(r'^logout/$', 'accounts.views.logout', name='accounts_logout'),
    url(r'^password/reset/$', 'accounts.views.password_reset', name='accounts_pasword_reset'),
    # Token-based confirmation link emailed to the user.
    url(r'^password/reset/confirm/(?P<token>[0-9A-Za-z\-]+)/$',
        'accounts.views.password_reset_confirm',
        name='accounts_pasword_reset_confirm'),
)
| {
"content_hash": "e504073ae685ede9780fc9cb5d3d942c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 94,
"avg_line_length": 50.63636363636363,
"alnum_prop": 0.6678635547576302,
"repo_name": "theju/f1oracle",
"id": "d5ea187d40c2e1a4e8e906130255b317c8779d98",
"size": "557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "204"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Python",
"bytes": "52538"
}
],
"symlink_target": ""
} |
from django.db import models
class TimeStampedModel(models.Model):
    """Abstract base class providing created/updated timestamp fields."""
    # Set once, when the row is first inserted.
    created = models.DateTimeField('Создан', auto_now_add=True)
    # Refreshed automatically on every save().
    updated = models.DateTimeField('Обновлён', auto_now=True)
    class Meta:
        abstract = True
| {
"content_hash": "07ccd020b96ec4c89f62df19f6480fb1",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 60,
"avg_line_length": 22.5,
"alnum_prop": 0.7587301587301587,
"repo_name": "h4/fuit-webdev",
"id": "d869a73f1bacbae718575cedbb0a9126c261a9fc",
"size": "417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/lesson5/pandora/core/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4020"
},
{
"name": "JavaScript",
"bytes": "28175"
},
{
"name": "Python",
"bytes": "56524"
}
],
"symlink_target": ""
} |
'''
liveMic.py
Returns opens mic and returns frameData
'''
import pyaudio
import numpy
class mic():
    """Wraps a PyAudio input stream and returns raw frame chunks."""

    # Frames per buffer read from the stream.
    CHUNK = 1024

    def __init__(self, **kwargs):
        # Create the PyAudio instance per mic. Previously it was a class
        # attribute evaluated at import time, which opened the audio
        # subsystem on module import and was shared across all instances
        # (so killMic() on one mic terminated audio for every mic).
        # kwargs is accepted but currently unused - presumably reserved
        # for future configuration; confirm before removing.
        self.p = pyaudio.PyAudio()
        FORMAT = pyaudio.paInt16
        CHANNELS = 2
        self.RATE = 44100
        self.form = FORMAT
        self.stream = self.p.open(format=FORMAT,
                        channels=CHANNELS,
                        rate=self.RATE,
                        input=True,
                        frames_per_buffer=self.CHUNK)

    def getMicChunkData(self):
        """Read one CHUNK; returns ((data, CHUNK, sample_width), rate)."""
        data = self.stream.read(self.CHUNK)
        swidth = self.p.get_sample_size(self.form)
        return ((data, self.CHUNK, swidth ), self.RATE) #returns chunk

    def killMic(self):
        """Stop and close the stream, then release the PyAudio instance."""
        self.stream.stop_stream()
        self.stream.close()
        self.p.terminate()
| {
"content_hash": "6478cf14c04ee2559a6357a478313aff",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 70,
"avg_line_length": 24.205882352941178,
"alnum_prop": 0.5504252733900364,
"repo_name": "ThisIsRobokitty/sound-particle-generator",
"id": "0c8cf83e19dfba7de647430439a301c1d80d4588",
"size": "823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/input/liveMic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25333"
}
],
"symlink_target": ""
} |
"""SEED agent using Keras."""
import collections
from seed_rl.common import utils
import tensorflow as tf
AgentOutput = collections.namedtuple('AgentOutput', 'action q_values')
AgentState = collections.namedtuple(
# core_state: the opaque state of the recurrent core of the agent.
# frame_stacking_state: a list of the last (stack_size - 1) observations
# with shapes typically [batch_size, height, width, 1].
'AgentState', 'core_state frame_stacking_state')
STACKING_STATE_DTYPE = tf.int32
def initial_frame_stacking_state(stack_size, batch_size, observation_shape):
  """Returns the initial frame stacking state.

  It should match what stack_frames accepts and produces.

  Args:
    stack_size: int, the number of frames that should be stacked to form the
      observation provided to the neural network. stack_size=1 corresponds to no
      stacking.
    batch_size: int tensor.
    observation_shape: list, shape of a single observation, e.g.
      [height, width, 1].

  Returns:
    <STACKING_STATE_DTYPE>[batch_size, prod(observation_shape)] or an empty
    tuple if stack_size=1.
  """
  if stack_size == 1:
    # No stacking: there is nothing to carry between steps.
    return ()
  # One bit-packed integer per observation element (flattened to one axis).
  flat_obs_len = tf.math.reduce_prod(observation_shape)
  state_shape = tf.concat([[batch_size], [flat_obs_len]], axis=0)
  return tf.zeros(state_shape, dtype=STACKING_STATE_DTYPE)
def stack_frames(frames, frame_stacking_state, done, stack_size):
  """Stacks frames.

  The [height, width] center dimensions of the tensors below are typical, but
  could be anything else consistent between tensors.

  Args:
    frames: <float32>[time, batch_size, height, width, channels]. These should
      be un-normalized frames in range [0, 255]. channels must be equal to 1
      when we actually stack frames (stack_size > 1).
    frame_stacking_state: If stack_size > 1, <int32>[batch_size, height*width].
      () if stack_size=1.
      Frames are bit-packed. The LSBs correspond to the oldest frames, MSBs to
      newest. Frame stacking state contains un-normalized frames in range
      [0, 256). We use [height*width] for the observation shape instead of
      [height, width] because it speeds up transfers to/from TPU by a factor ~2.
    done: <bool>[time, batch_size]
    stack_size: int, the number of frames to stack.

  Returns:
    A pair:
      - stacked frames, <float32>[time, batch_size, height, width, stack_size]
        tensor (range [0, 255]). Along the stack dimensions, frames go from
        newest to oldest.
      - New stacking state with the last (stack_size-1) frames.

  Raises:
    ValueError: on shape/dtype mismatches or stack_size > 4 (each frame
      occupies 8 bits of the packed int32 state).
  """
  if frames.shape[0:2] != done.shape[0:2]:
    raise ValueError(
        'Expected same first 2 dims for frames and dones. Got {} vs {}.'.format(
            frames.shape[0:2], done.shape[0:2]))
  batch_size = frames.shape[1]
  obs_shape = frames.shape[2:-1]
  if stack_size > 4:
    raise ValueError('Only up to stack size 4 is supported due to bit-packing.')
  if stack_size > 1 and frames.shape[-1] != 1:
    raise ValueError('Due to frame stacking, we require last observation '
                     'dimension to be 1. Got {}'.format(frames.shape[-1]))
  if stack_size == 1:
    # No stacking: frames pass through unchanged with an empty state.
    return frames, ()
  if frame_stacking_state[0].dtype != STACKING_STATE_DTYPE:
    raise ValueError('Expected dtype {} got {}'.format(
        STACKING_STATE_DTYPE, frame_stacking_state[0].dtype))
  frame_stacking_state = tf.reshape(
      frame_stacking_state, [batch_size] + obs_shape)
  # Unpacked 'frame_stacking_state'. Ordered from oldest to most recent.
  unstacked_state = []
  for i in range(stack_size - 1):
    # Extract byte i of the packed state (frame i, oldest first).
    # [batch_size, height, width]
    unstacked_state.append(tf.cast(tf.bitwise.bitwise_and(
        tf.bitwise.right_shift(frame_stacking_state, i * 8), 0xFF),
        tf.float32))
  # Same as 'frames', but with the previous (stack_size - 1) frames from
  # frame_stacking_state prepended.
  # [time+stack_size-1, batch_size, height, width, 1]
  extended_frames = tf.concat(
      [tf.reshape(frame, [1] + frame.shape + [1])
       for frame in unstacked_state] +
      [frames],
      axis=0)
  # [time, batch_size, height, width, stack_size].
  # Stacked frames, but does not take 'done' into account. We need to zero-out
  # the frames that cross episode boundaries.
  # Along the stack dimensions, frames go from newest to oldest.
  stacked_frames = tf.concat(
      [extended_frames[stack_size - 1 - i:extended_frames.shape[0] - i]
       for i in range(stack_size)],
      axis=-1)
  # We create a mask that contains true when the frame should be zeroed out.
  # Setting the final shape of the mask early actually makes computing
  # stacked_done_masks a few times faster.
  done_mask_row_shape = done.shape[0:2] + [1] * (frames.shape.rank - 2)
  done_masks = [
      tf.zeros(done_mask_row_shape, dtype=tf.bool),
      tf.reshape(done, done_mask_row_shape)
  ]
  # Each further row marks frames k steps older than a 'done', for growing k.
  while len(done_masks) < stack_size:
    previous_row = done_masks[-1]
    # Add 1 zero in front (time dimension).
    done_masks.append(
        tf.math.logical_or(
            previous_row,
            tf.pad(previous_row[:-1],
                   [[1, 0]] + [[0, 0]] * (previous_row.shape.rank - 1))))
  # This contains true when the frame crosses an episode boundary and should
  # therefore be zeroed out.
  # Example: ignoring batch_size, if done is [0, 1, 0, 0, 1, 0], stack_size=4,
  # this will be:
  # [[0 0, 0, 0, 0, 0],
  #  [0 1, 0, 0, 1, 0],
  #  [0 1, 1, 0, 1, 1],
  #  [0 1, 1, 1, 1, 1]].T
  # <bool>[time, batch_size, 1, 1, stack_size].
  stacked_done_masks = tf.concat(done_masks, axis=-1)
  stacked_frames = tf.where(
      stacked_done_masks,
      tf.zeros_like(stacked_frames), stacked_frames)
  # Build the new bit-packed state.
  # We construct the new state from 'stacked_frames', to make sure frames
  # before done is true are zeroed out.
  # This shifts the stack_size-1 items of the last dimension of
  # 'stacked_frames[-1, ..., :-1]'.
  shifted = tf.bitwise.left_shift(
      tf.cast(stacked_frames[-1, ..., :-1], tf.int32),
      # We want to shift so that MSBs are newest frames.
      [8 * i for i in range(stack_size - 2, -1, -1)])
  # This is really a reduce_or, because bits don't overlap.
  new_state = tf.reduce_sum(shifted, axis=-1)
  new_state = tf.reshape(new_state, [batch_size, obs_shape.num_elements()])
  return stacked_frames, new_state
def _unroll_cell(inputs, done, start_state, zero_state, recurrent_cell):
  """Applies a recurrent cell on inputs, taking care of managing state.

  Args:
    inputs: A tensor of shape [time, batch_size, <remaining dims>]. These are
      the inputs passed to the recurrent cell.
    done: <bool>[time, batch_size].
    start_state: Recurrent cell state at the beginning of the input sequence.
      Opaque tf.nest structure of tensors with batch front dimension.
    zero_state: Blank recurrent cell state. The current recurrent state will be
      replaced by this blank state whenever 'done' is true. Same shape as
      'start_state'.
    recurrent_cell: Function that will be applied at each time-step. Takes
      (input_t: [batch_size, <remaining dims>], current_state) as input, and
      returns (output_t: [<cell output dims>], new_state).

  Returns:
    A pair:
      - The time-stacked outputs of the recurrent cell. Shape [time,
        <cell output dims>].
      - The last state output by the recurrent cell.
  """
  per_step_inputs = tf.unstack(inputs)
  per_step_done = tf.unstack(done)
  assert len(per_step_inputs) == len(per_step_done), (
      "Inputs and done tensors don't have same time dim {} vs {}".format(
          len(per_step_inputs), len(per_step_done)))

  def _reset_if_done(done_t, zero_part, state_part):
    # Broadcast the per-batch 'done' flag over the trailing dimensions of the
    # state tensor, then pick the blank state wherever the episode ended.
    mask = tf.reshape(done_t,
                      [done_t.shape[0]] + [1] * (zero_part.shape.rank - 1))
    return tf.where(mask, zero_part, state_part)

  outputs = []
  state = start_state
  # Walk the time dimension one step at a time.
  # step_input: [batch_size, <remaining dims>]; step_done: [batch_size].
  for step_input, step_done in zip(per_step_inputs, per_step_done):
    # If the episode ended, the state must be reset before the next step.
    # step_done is bound as a default argument so each call uses this step's
    # value rather than the loop variable's final value.
    state = tf.nest.map_structure(
        lambda zero_part, state_part, done_t=step_done: _reset_if_done(
            done_t, zero_part, state_part),
        zero_state,
        state)
    step_output, state = recurrent_cell(step_input, state)
    outputs.append(step_output)
  return tf.stack(outputs), state
class DuelingLSTMDQNNet(tf.Module):
  """The recurrent network used to compute the agent's Q values.

  This is the dueling LSTM net similar to the one described in
  https://openreview.net/pdf?id=rkHVZWZAZ (only the Q(s, a) part), with the
  layer sizes mentioned in the R2D2 paper
  (https://openreview.net/pdf?id=r1lyTjAqYX), section Hyper parameters.
  """
  def __init__(self, num_actions, observation_shape, stack_size=1):
    super(DuelingLSTMDQNNet, self).__init__(name='dueling_lstm_dqn_net')
    self._num_actions = num_actions
    # Convolutional torso (3 conv layers + dense) mapping a stacked-frame
    # observation to a 512-d feature vector.
    self._body = tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, [8, 8], 4,
                               padding='valid', activation='relu'),
        tf.keras.layers.Conv2D(64, [4, 4], 2,
                               padding='valid', activation='relu'),
        tf.keras.layers.Conv2D(64, [3, 3], 1,
                               padding='valid', activation='relu'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation='relu'),
    ])
    # Dueling heads: state-value V(s) ...
    self._value = tf.keras.Sequential([
        tf.keras.layers.Dense(512, activation='relu', name='hidden_value'),
        tf.keras.layers.Dense(1, name='value_head'),
    ])
    # ... and per-action advantage A(s, a). No bias on the final layer since
    # the advantage is mean-centered in _head anyway.
    self._advantage = tf.keras.Sequential([
        tf.keras.layers.Dense(512, activation='relu', name='hidden_advantage'),
        tf.keras.layers.Dense(self._num_actions, use_bias=False,
                              name='advantage_head'),
    ])
    # Recurrent core between the torso and the dueling heads.
    self._core = tf.keras.layers.LSTMCell(512)
    self._observation_shape = observation_shape
    self._stack_size = stack_size
  def initial_state(self, batch_size):
    """Returns a blank AgentState (LSTM state + frame-stacking state)."""
    return AgentState(
        core_state=self._core.get_initial_state(
            batch_size=batch_size, dtype=tf.float32),
        frame_stacking_state=initial_frame_stacking_state(
            self._stack_size, batch_size, self._observation_shape))
  def _torso(self, prev_action, env_output):
    """Computes per-timestep features from observation, reward and action."""
    # [batch_size, output_units]
    conv_out = self._body(env_output.observation)
    # [batch_size, num_actions]
    one_hot_prev_action = tf.one_hot(prev_action, self._num_actions)
    # Concatenate conv features, the scalar reward, and the one-hot previous
    # action into a single feature vector.
    # [batch_size, torso_output_size]
    return tf.concat(
        [conv_out, tf.expand_dims(env_output.reward, -1), one_hot_prev_action],
        axis=1)
  def _head(self, core_output):
    """Combines the dueling value/advantage heads into Q-values and an action."""
    # [batch_size, 1]
    value = self._value(core_output)
    # [batch_size, num_actions]
    advantage = self._advantage(core_output)
    # Mean-center the advantages so that V(s) is identifiable (standard
    # dueling-network aggregation).
    advantage -= tf.reduce_mean(advantage, axis=-1, keepdims=True)
    # [batch_size, num_actions]
    q_values = value + advantage
    # Greedy action w.r.t. the Q-values.
    action = tf.cast(tf.argmax(q_values, axis=1), tf.int32)
    return AgentOutput(action, q_values)
  def __call__(self, input_, agent_state, unroll=False):
    """Applies a network mapping observations to actions.

    Args:
      input_: A pair of:
        - previous actions, <int32>[batch_size] tensor if unroll is False,
          otherwise <int32>[time, batch_size].
        - EnvOutput, where each field is a tensor with added front
          dimensions [batch_size] if unroll is False and [time, batch_size]
          otherwise.
      agent_state: AgentState with batched tensors, corresponding to the
        beginning of each unroll.
      unroll: should unrolling be applied.

    Returns:
      A pair of:
        - outputs: AgentOutput, where action is a tensor <int32>[time,
            batch_size], q_values is a tensor <float32>[time, batch_size,
            num_actions]. The time dimension is not present if unroll=False.
        - agent_state: Output AgentState with batched tensors.
    """
    if not unroll:
      # Add time dimension.
      input_ = tf.nest.map_structure(lambda t: tf.expand_dims(t, 0),
                                     input_)
    prev_actions, env_outputs = input_
    outputs, agent_state = self._unroll(prev_actions, env_outputs, agent_state)
    if not unroll:
      # Remove time dimension.
      outputs = tf.nest.map_structure(lambda t: tf.squeeze(t, 0), outputs)
    return outputs, agent_state
  def _unroll(self, prev_actions, env_outputs, agent_state):
    """Runs the full network over a [time, batch] trajectory slice."""
    # [time, batch_size, <field shape>]
    unused_reward, done, observation, _, _ = env_outputs
    observation = tf.cast(observation, tf.float32)
    initial_agent_state = self.initial_state(batch_size=tf.shape(done)[1])
    # Apply frame stacking, zeroing frames that cross episode boundaries.
    stacked_frames, frame_state = stack_frames(
        observation, agent_state.frame_stacking_state, done, self._stack_size)
    # Scale pixels to [0, 1] — assumes uint8-range observations; TODO confirm.
    env_outputs = env_outputs._replace(observation=stacked_frames / 255)
    # [time, batch_size, torso_output_size]
    torso_outputs = utils.batch_apply(self._torso, (prev_actions, env_outputs))
    # Unroll the LSTM core, resetting its state at episode boundaries.
    core_outputs, core_state = _unroll_cell(
        torso_outputs, done, agent_state.core_state,
        initial_agent_state.core_state,
        self._core)
    agent_output = utils.batch_apply(self._head, (core_outputs,))
    return agent_output, AgentState(core_state, frame_state)
| {
"content_hash": "4623073f5399a67392b5fe036e42a10b",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 80,
"avg_line_length": 40.39692307692307,
"alnum_prop": 0.6482595780333613,
"repo_name": "google-research/seed_rl",
"id": "b2a692632377f5b0b8a62b48b0f4df3babd00a2e",
"size": "13733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atari/networks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "41131"
},
{
"name": "Jupyter Notebook",
"bytes": "72883"
},
{
"name": "Python",
"bytes": "614110"
},
{
"name": "Shell",
"bytes": "31284"
},
{
"name": "Starlark",
"bytes": "932"
}
],
"symlink_target": ""
} |
"""ApacheParser is a member object of the ApacheConfigurator class."""
import fnmatch
import itertools
import logging
import os
import re
import subprocess
from letsencrypt import errors
from letsencrypt_apache import constants
logger = logging.getLogger(__name__)
class ApacheParser(object):
    """Class handles the fine details of parsing the Apache Configuration.

    .. todo:: Make parsing general... remove sites-available etc...

    :ivar str root: Normalized absolute path to the server root
        directory. Without trailing slash.
    :ivar set modules: All module names that are currently enabled.
    :ivar dict loc: Location to place directives, root - configuration origin,
        default - user config file, name - NameVirtualHost,

    """
    # Matches Apache runtime variable references such as ${VAR_NAME}.
    arg_var_interpreter = re.compile(r"\$\{[^ \}]*}")
    # Characters that mark an Include argument as an fnmatch pattern.
    fnmatch_chars = {"*", "?", "\\", "[", "]"}

    def __init__(self, aug, root, vhostroot, version=(2, 4)):
        # Note: Order is important here.

        # This uses the binary, so it can be done first.
        # https://httpd.apache.org/docs/2.4/mod/core.html#define
        # https://httpd.apache.org/docs/2.4/mod/core.html#ifdefine
        # This only handles invocation parameters and Define directives!
        self.parser_paths = {}
        self.variables = {}
        if version >= (2, 4):
            self.update_runtime_variables()

        self.aug = aug
        # Find configuration root and make sure augeas can parse it.
        self.root = os.path.abspath(root)
        self.loc = {"root": self._find_config_root()}
        self._parse_file(self.loc["root"])

        self.vhostroot = os.path.abspath(vhostroot)

        # This problem has been fixed in Augeas 1.0
        self.standardize_excl()

        # Temporarily set modules to be empty, so that find_dirs can work
        # https://httpd.apache.org/docs/2.4/mod/core.html#ifmodule
        # This needs to come before locations are set.
        self.modules = set()
        self.init_modules()

        # Set up rest of locations
        self.loc.update(self._set_locations())

        # Must also attempt to parse virtual host root
        self._parse_file(self.vhostroot + "/" +
                         constants.os_constant("vhost_files"))

        # check to see if there were unparsed define statements
        if version < (2, 4):
            if self.find_dir("Define", exclude=False):
                raise errors.PluginError("Error parsing runtime variables")

    def init_modules(self):
        """Iterates on the configuration until no new modules are loaded.

        ..todo:: This should be attempted to be done with a binary to avoid
            the iteration issue.  Else... parse and enable mods at same time.

        """
        # Since modules are being initiated... clear existing set.
        self.modules = set()
        matches = self.find_dir("LoadModule")

        iterator = iter(matches)
        # Make sure prev_size != cur_size for do: while: iteration
        prev_size = -1

        while len(self.modules) != prev_size:
            prev_size = len(self.modules)

            # zip(iterator, iterator) pairs consecutive (name, filename)
            # matches. Unlike itertools.izip this works on both Python 2
            # and Python 3.
            for match_name, match_filename in zip(iterator, iterator):
                self.modules.add(self.get_arg(match_name))
                self.modules.add(
                    os.path.basename(self.get_arg(match_filename))[:-2] + "c")

    def update_runtime_variables(self):
        """Read Define variables from the running Apache binary.

        .. note:: Compile time variables (apache2ctl -V) are not used within
            the dynamic configuration files.  These should not be parsed or
            interpreted.

        .. todo:: Create separate compile time variables...
            simply for arg_get()

        """
        stdout = self._get_runtime_cfg()

        variables = dict()
        matches = re.compile(r"Define: ([^ \n]*)").findall(stdout)
        try:
            matches.remove("DUMP_RUN_CFG")
        except ValueError:
            # The binary did not actually dump its runtime config;
            # leave self.variables untouched.
            return

        for match in matches:
            # Each entry is expected to be NAME or NAME=VALUE.
            if match.count("=") > 1:
                logger.error("Unexpected number of equal signs in "
                             "runtime config dump.")
                raise errors.PluginError(
                    "Error parsing Apache runtime variables")
            parts = match.partition("=")
            variables[parts[0]] = parts[2]

        self.variables = variables

    def _get_runtime_cfg(self):  # pylint: disable=no-self-use
        """Get runtime configuration info.

        :returns: stdout from DUMP_RUN_CFG

        """
        try:
            proc = subprocess.Popen(
                constants.os_constant("define_cmd"),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            stdout, stderr = proc.communicate()

        except (OSError, ValueError):
            logger.error(
                "Error running command %s for runtime parameters!%s",
                constants.os_constant("define_cmd"), os.linesep)
            # Format the message eagerly; exception constructors do not
            # %-interpolate extra arguments.
            raise errors.MisconfigurationError(
                "Error accessing loaded Apache parameters: %s" %
                constants.os_constant("define_cmd"))
        # Small errors that do not impede
        if proc.returncode != 0:
            logger.warning("Error in checking parameter list: %s", stderr)
            raise errors.MisconfigurationError(
                "Apache is unable to check whether or not the module is "
                "loaded because Apache is misconfigured.")

        return stdout

    def filter_args_num(self, matches, args):  # pylint: disable=no-self-use
        """Filter out directives with specific number of arguments.

        This function makes the assumption that all related arguments are given
        in order.  Thus /files/apache/directive[5]/arg[2] must come immediately
        after /files/apache/directive[5]/arg[1].  Runs in 1 linear pass.

        :param string matches: Matches of all directives with arg nodes
        :param int args: Number of args you would like to filter

        :returns: List of directives that contain # of arguments.
            (arg is stripped off)

        """
        filtered = []
        if args == 1:
            # A single-argument directive uses a bare "/arg" node.
            for match in matches:
                if match.endswith("/arg"):
                    filtered.append(match[:-4])
        else:
            last_arg = "/arg[%d]" % args
            next_arg = "/arg[%d]" % (args + 1)
            for i, match in enumerate(matches):
                if match.endswith(last_arg):
                    # Make sure we don't cause an IndexError (end of list)
                    # Check to make sure arg + 1 doesn't exist
                    if (i == len(matches) - 1 or
                            not matches[i + 1].endswith(next_arg)):
                        filtered.append(match[:-len(last_arg)])

        return filtered

    def add_dir_to_ifmodssl(self, aug_conf_path, directive, args):
        """Adds directive and value to IfMod ssl block.

        Adds given directive and value along configuration path within
        an IfMod mod_ssl.c block.  If the IfMod block does not exist in
        the file, it is created.

        :param str aug_conf_path: Desired Augeas config path to add directive
        :param str directive: Directive you would like to add, e.g. Listen
        :param args: Values of the directive; str "443" or list of str
        :type args: list

        """
        # TODO: Add error checking code... does the path given even exist?
        #       Does it throw exceptions?
        if_mod_path = self._get_ifmod(aug_conf_path, "mod_ssl.c")
        # IfModule can have only one valid argument, so append after
        self.aug.insert(if_mod_path + "arg", "directive", False)
        nvh_path = if_mod_path + "directive[1]"
        self.aug.set(nvh_path, directive)
        if len(args) == 1:
            self.aug.set(nvh_path + "/arg", args[0])
        else:
            for i, arg in enumerate(args):
                self.aug.set("%s/arg[%d]" % (nvh_path, i + 1), arg)

    def _get_ifmod(self, aug_conf_path, mod):
        """Returns the path to <IfMod mod> and creates one if it doesn't exist.

        :param str aug_conf_path: Augeas configuration path
        :param str mod: module ie. mod_ssl.c

        """
        if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
                                  (aug_conf_path, mod)))
        if len(if_mods) == 0:
            self.aug.set("%s/IfModule[last() + 1]" % aug_conf_path, "")
            self.aug.set("%s/IfModule[last()]/arg" % aug_conf_path, mod)
            if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
                                      (aug_conf_path, mod)))
        # Strip off "arg" at end of first ifmod path
        return if_mods[0][:len(if_mods[0]) - 3]

    def add_dir(self, aug_conf_path, directive, args):
        """Appends directive to the end of the file given by aug_conf_path.

        .. note:: Not added to AugeasConfigurator because it may depend
            on the lens

        :param str aug_conf_path: Augeas configuration path to add directive
        :param str directive: Directive to add
        :param args: Value of the directive. ie. Listen 443, 443 is arg
        :type args: list or str

        """
        self.aug.set(aug_conf_path + "/directive[last() + 1]", directive)
        if isinstance(args, list):
            for i, value in enumerate(args, 1):
                self.aug.set(
                    "%s/directive[last()]/arg[%d]" % (aug_conf_path, i), value)
        else:
            self.aug.set(aug_conf_path + "/directive[last()]/arg", args)

    def find_dir(self, directive, arg=None, start=None, exclude=True):
        """Finds directive in the configuration.

        Recursively searches through config files to find directives
        Directives should be in the form of a case insensitive regex currently

        .. todo:: arg should probably be a list
        .. todo:: arg search currently only supports direct matching. It does
            not handle the case of variables or quoted arguments. This should
            be adapted to use a generic search for the directive and then do a
            case-insensitive self.get_arg filter

        Note: Augeas is inherently case sensitive while Apache is case
        insensitive.  Augeas 1.0 allows case insensitive regexes like
        regexp(/Listen/, "i"), however the version currently supported
        by Ubuntu 0.10 does not.  Thus I have included my own case insensitive
        transformation by calling case_i() on everything to maintain
        compatibility.

        :param str directive: Directive to look for
        :param arg: Specific value directive must have, None if all should
            be considered
        :type arg: str or None
        :param str start: Beginning Augeas path to begin looking
        :param bool exclude: Whether or not to exclude directives based on
            variables and enabled modules

        """
        # Cannot place member variable in the definition of the function so...
        if not start:
            start = get_aug_path(self.loc["root"])

        # Search for the directive itself and for Include/IncludeOptional so
        # that included files can be followed recursively.
        regex = "(%s)|(%s)|(%s)" % (case_i(directive),
                                    case_i("Include"),
                                    case_i("IncludeOptional"))
        matches = self.aug.match(
            "%s//*[self::directive=~regexp('%s')]" % (start, regex))

        if exclude:
            matches = self._exclude_dirs(matches)

        if arg is None:
            arg_suffix = "/arg"
        else:
            arg_suffix = "/*[self::arg=~regexp('%s')]" % case_i(arg)

        ordered_matches = []

        # TODO: Wildcards should be included in alphabetical order
        # https://httpd.apache.org/docs/2.4/mod/core.html#include
        for match in matches:
            dir_ = self.aug.get(match).lower()
            if dir_ == "include" or dir_ == "includeoptional":
                # Recurse into the included file(s).
                ordered_matches.extend(self.find_dir(
                    directive, arg,
                    self._get_include_path(self.get_arg(match + "/arg")),
                    exclude))
            # This additionally allows Include
            if dir_ == directive.lower():
                ordered_matches.extend(self.aug.match(match + arg_suffix))

        return ordered_matches

    def get_arg(self, match):
        """Uses augeas.get to get argument value and interprets result.

        This also converts all variables and parameters appropriately.

        """
        value = self.aug.get(match)

        # No need to strip quotes for variables, as apache2ctl already does
        # this, but we do need to strip quotes for all normal arguments.

        # Note: normal argument may be a quoted variable
        # e.g. strip now, not later
        value = value.strip("'\"")

        variables = ApacheParser.arg_var_interpreter.findall(value)

        for var in variables:
            # Strip off ${ and }
            try:
                value = value.replace(var, self.variables[var[2:-1]])
            except KeyError:
                raise errors.PluginError("Error Parsing variable: %s" % var)

        return value

    def _exclude_dirs(self, matches):
        """Exclude directives that are not loaded into the configuration."""
        filters = [("ifmodule", self.modules), ("ifdefine", self.variables)]

        valid_matches = []

        for match in matches:
            for filter_ in filters:
                if not self._pass_filter(match, filter_):
                    break
            else:
                valid_matches.append(match)
        return valid_matches

    def _pass_filter(self, match, filter_):
        """Determine if directive passes a filter.

        :param str match: Augeas path
        :param list filter: list of tuples of form
            [("lowercase if directive", set of relevant parameters)]

        """
        match_l = match.lower()
        last_match_idx = match_l.find(filter_[0])

        while last_match_idx != -1:
            # Check args
            end_of_if = match_l.find("/", last_match_idx)
            # This should be aug.get (vars are not used e.g. parser.aug_get)
            # NOTE(review): aug.get may return None for a missing node, which
            # would raise AttributeError below — confirm callers guarantee an
            # arg node exists.
            expression = self.aug.get(match[:end_of_if] + "/arg")

            if expression.startswith("!"):
                # Strip off "!"
                if expression[1:] in filter_[1]:
                    return False
            else:
                if expression not in filter_[1]:
                    return False

            last_match_idx = match_l.find(filter_[0], end_of_if)

        return True

    def _get_include_path(self, arg):
        """Converts an Apache Include directive into Augeas path.

        Converts an Apache Include directive argument into an Augeas
        searchable path

        .. todo:: convert to use os.path.join()

        :param str arg: Argument of Include directive

        :returns: Augeas path string
        :rtype: str

        """
        # Remove beginning and ending quotes
        arg = arg.strip("'\"")

        # Standardize the include argument based on server root
        if not arg.startswith("/"):
            # Normpath will condense ../
            arg = os.path.normpath(os.path.join(self.root, arg))
        else:
            arg = os.path.normpath(arg)

        # Attempts to add a transform to the file if one does not already exist
        if os.path.isdir(arg):
            self._parse_file(os.path.join(arg, "*"))
        else:
            self._parse_file(arg)

        # Argument represents an fnmatch regular expression, convert it
        # Split up the path and convert each into an Augeas accepted regex
        # then reassemble
        split_arg = arg.split("/")
        for idx, split in enumerate(split_arg):
            if any(char in ApacheParser.fnmatch_chars for char in split):
                # Turn it into an augeas regex
                # TODO: Can this instead be an augeas glob instead of regex
                split_arg[idx] = ("* [label()=~regexp('%s')]" %
                                  self.fnmatch_to_re(split))
        # Reassemble the argument
        # Note: This also normalizes the argument /serverroot/ -> /serverroot
        arg = "/".join(split_arg)

        return get_aug_path(arg)

    def fnmatch_to_re(self, clean_fn_match):  # pylint: disable=no-self-use
        """Method converts Apache's basic fnmatch to regular expression.

        Assumption - Configs are assumed to be well-formed and only writable by
        privileged users.

        https://apr.apache.org/docs/apr/2.0/apr__fnmatch_8h_source.html
        http://apache2.sourcearchive.com/documentation/2.2.16-6/apr__fnmatch_8h_source.html

        :param str clean_fn_match: Apache style filename match, like globs

        :returns: regex suitable for augeas
        :rtype: str

        """
        # This strips off final /Z(?ms)
        # NOTE(review): the 7-character suffix matches Python 2's
        # fnmatch.translate() output; Python 3 emits a different suffix,
        # so this slice would need revisiting on a Python 3 port.
        return fnmatch.translate(clean_fn_match)[:-7]

    def _parse_file(self, filepath):
        """Parse file with Augeas

        Checks to see if file_path is parsed by Augeas
        If filepath isn't parsed, the file is added and Augeas is reloaded

        :param str filepath: Apache config file path

        """
        use_new, remove_old = self._check_path_actions(filepath)
        # Test if augeas included file for Httpd.lens
        # Note: This works for augeas globs, ie. *.conf
        if use_new:
            inc_test = self.aug.match(
                "/augeas/load/Httpd/incl [. ='%s']" % filepath)
            if not inc_test:
                # Load up files
                # This doesn't seem to work on TravisCI
                # self.aug.add_transform("Httpd.lns", [filepath])
                if remove_old:
                    self._remove_httpd_transform(filepath)
                self._add_httpd_transform(filepath)
                self.aug.load()

    def _check_path_actions(self, filepath):
        """Determine actions to take with a new augeas path

        This helper function will return a tuple that defines
        if we should try to append the new filepath to augeas
        parser paths, and / or remove the old one with more
        narrow matching.

        :param str filepath: filepath to check the actions for

        """
        directory = os.path.dirname(filepath)
        try:
            existing_matches = self.parser_paths[directory]
        except KeyError:
            # Nothing is tracked for this directory yet; just add the path.
            return True, False

        # A "*" entry already covers every file in the directory.
        use_new = "*" not in existing_matches
        # A new wildcard supersedes previously-added narrow matches.
        remove_old = os.path.basename(filepath) == "*"
        return use_new, remove_old

    def _remove_httpd_transform(self, filepath):
        """Remove path from Augeas transform

        :param str filepath: filepath to remove

        """
        remove_dirname = os.path.dirname(filepath)
        remove_basenames = self.parser_paths[remove_dirname]
        for name in remove_basenames:
            remove_path = remove_dirname + "/" + name
            remove_inc = self.aug.match(
                "/augeas/load/Httpd/incl [. ='%s']" % remove_path)
            self.aug.remove(remove_inc[0])
        self.parser_paths.pop(remove_dirname)

    def _add_httpd_transform(self, incl):
        """Add a transform to Augeas.

        This function will correctly add a transform to augeas
        The existing augeas.add_transform in python doesn't seem to work for
        Travis CI as it loads in libaugeas.so.0.10.0

        :param str incl: filepath to include for transform

        """
        last_include = self.aug.match("/augeas/load/Httpd/incl [last()]")
        if last_include:
            # Insert a new node immediately after the last incl
            self.aug.insert(last_include[0], "incl", False)
            self.aug.set("/augeas/load/Httpd/incl[last()]", incl)
        # On first use... must load lens and add file to incl
        else:
            # Augeas uses base 1 indexing... insert at beginning...
            self.aug.set("/augeas/load/Httpd/lens", "Httpd.lns")
            self.aug.set("/augeas/load/Httpd/incl", incl)
        # Add included path to paths dictionary
        self.parser_paths.setdefault(os.path.dirname(incl), []).append(
            os.path.basename(incl))

    def standardize_excl(self):
        """Standardize the excl arguments for the Httpd lens in Augeas.

        Note: Hack!
        Standardize the excl arguments for the Httpd lens in Augeas
        Servers sometimes give incorrect defaults
        Note: This problem should be fixed in Augeas 1.0.  Unfortunately,
        Augeas 0.10 appears to be the most popular version currently.

        """
        # attempt to protect against augeas error in 0.10.0 - ubuntu
        # *.augsave -> /*.augsave upon augeas.load()
        # Try to avoid bad httpd files
        # There has to be a better way... but after a day and a half of testing
        # I had no luck
        # This is a hack... work around... submit to augeas if still not fixed
        excl = ["*.augnew", "*.augsave", "*.dpkg-dist", "*.dpkg-bak",
                "*.dpkg-new", "*.dpkg-old", "*.rpmsave", "*.rpmnew",
                "*~",
                self.root + "/*.augsave",
                self.root + "/*~",
                self.root + "/*/*augsave",
                self.root + "/*/*~",
                self.root + "/*/*/*.augsave",
                self.root + "/*/*/*~"]

        for i, excluded in enumerate(excl, 1):
            self.aug.set("/augeas/load/Httpd/excl[%d]" % i, excluded)

        self.aug.load()

    def _set_locations(self):
        """Set default location for directives.

        Locations are given as file_paths
        .. todo:: Make sure that files are included

        """
        default = self.loc["root"]

        # Debian-style installs keep Listen/NameVirtualHost in ports.conf.
        temp = os.path.join(self.root, "ports.conf")
        if os.path.isfile(temp):
            listen = name = temp
        else:
            listen = name = default

        return {"default": default, "listen": listen, "name": name}

    def _find_config_root(self):
        """Find the Apache Configuration Root file."""
        location = ["apache2.conf", "httpd.conf", "conf/httpd.conf"]
        for name in location:
            if os.path.isfile(os.path.join(self.root, name)):
                return os.path.join(self.root, name)
        raise errors.NoInstallationError("Could not find configuration root")
def case_i(string):
    """Returns case insensitive regex.

    Returns a sloppy, but necessary version of a case insensitive regex.
    Any string should be able to be submitted and the string is
    escaped and then made case insensitive.
    May be replaced by a more proper /i once augeas 1.0 is widely
    supported.

    :param str string: string to make case i regex

    """
    chunks = []
    for char in re.escape(string):
        if char.isalpha():
            # Replace each letter with a [Xx] character class.
            chunks.append("[" + char.upper() + char.lower() + "]")
        else:
            chunks.append(char)
    return "".join(chunks)
def get_aug_path(file_path):
    """Return augeas path for full filepath.

    :param str file_path: Full filepath

    """
    return "/files" + file_path
| {
"content_hash": "80fafcd4ad488ed21950d956b765b383",
"timestamp": "",
"source": "github",
"line_count": 644,
"max_line_length": 91,
"avg_line_length": 37.45031055900621,
"alnum_prop": 0.5716062691765487,
"repo_name": "TheBoegl/letsencrypt",
"id": "3c13aae5ff3ceeb0225c9e467813596735ffbb07",
"size": "24118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "letsencrypt-apache/letsencrypt_apache/parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "50413"
},
{
"name": "Augeas",
"bytes": "4997"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1388093"
},
{
"name": "Shell",
"bytes": "104220"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Schema migration for the demo app; applies on top of the initial
    # migration below.

    dependencies = [
        ('django_docusign_demo', '0001_initial'),
    ]

    operations = [
        # Signer.email: indexed EmailField, max_length 254 (Django's default
        # for EmailField).
        migrations.AlterField(
            model_name='signer',
            name='email',
            field=models.EmailField(db_index=True, max_length=254, verbose_name='email'),
        ),
        # Signer.signing_order: non-negative position in the signer list,
        # defaulting to 0.
        migrations.AlterField(
            model_name='signer',
            name='signing_order',
            field=models.PositiveSmallIntegerField(default=0, help_text='Position in the list of signers.', verbose_name='signing order'),
        ),
    ]
| {
"content_hash": "1e4c9948e3202a49284094b511309584",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 138,
"avg_line_length": 28.956521739130434,
"alnum_prop": 0.6081081081081081,
"repo_name": "novafloss/django-docusign",
"id": "51fdb3f654bf70f1374b0085c6e4d8475a47acaa",
"size": "738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/django_docusign_demo/migrations/0002_auto_20160905_0255.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1639"
},
{
"name": "Python",
"bytes": "19373"
}
],
"symlink_target": ""
} |
'''
Copyright 2014-2015 Teppo Perä
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from pytraits.core import ndict, Singleton
from pytraits.combiner import combine_class
from pytraits.extendable import extendable
from pytraits.trait_composer import add_traits | {
"content_hash": "8a23af964e1c27b0650f597183451cb8",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 75,
"avg_line_length": 38.55,
"alnum_prop": 0.7704280155642024,
"repo_name": "Debith/py2traits",
"id": "62db016b46433a1de0a00b08562c17667f9cc81d",
"size": "818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pytraits/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "55112"
}
],
"symlink_target": ""
} |
import testtools
from unittest import mock
from troveclient.v1 import databases
"""
Unit tests for databases.py
"""
class DatabasesTest(testtools.TestCase):
    """Unit tests for troveclient.v1.databases.Databases."""

    def setUp(self):
        super(DatabasesTest, self).setUp()
        # Stub out the constructor so a Databases manager can be built
        # without a real API client, then hang mocks off of it.
        self.orig__init = databases.Databases.__init__
        databases.Databases.__init__ = mock.Mock(return_value=None)
        self.databases = databases.Databases()
        self.databases.api = mock.Mock()
        self.databases.api.client = mock.Mock()
        # Fake instance object exposing only an id attribute.
        self.instance_with_id = mock.Mock()
        self.instance_with_id.id = 215
        self.fakedb1 = ['db1']
        self.fakedb2 = ['db1', 'db2']

    def tearDown(self):
        super(DatabasesTest, self).tearDown()
        # Restore the real constructor for other test classes.
        databases.Databases.__init__ = self.orig__init

    def _get_mock_method(self):
        """Return a mock HTTP method that records its url and body."""
        self._resp = mock.Mock()
        self._body = None
        self._url = None

        def _record_call(url, body=None):
            # Capture the request so assertions can inspect it afterwards.
            self._url = url
            self._body = body
            return (self._resp, body)

        return mock.Mock(side_effect=_record_call)

    def test_create(self):
        self.databases.api.client.post = self._get_mock_method()
        self._resp.status_code = 200

        # Creating with a plain instance id, single and multiple databases.
        for payload in (self.fakedb1, self.fakedb2):
            self.databases.create(23, payload)
            self.assertEqual('/instances/23/databases', self._url)
            self.assertEqual({"databases": payload}, self._body)

        # test creation with the instance as an object
        self.databases.create(self.instance_with_id, self.fakedb1)
        self.assertEqual({"databases": self.fakedb1}, self._body)

    def test_delete(self):
        self.databases.api.client.delete = self._get_mock_method()
        self._resp.status_code = 200

        # Delete by plain instance id.
        self.databases.delete(27, self.fakedb1[0])
        self.assertEqual('/instances/27/databases/%s' % self.fakedb1[0],
                         self._url)
        # Delete with the instance as an object.
        self.databases.delete(self.instance_with_id, self.fakedb1[0])
        self.assertEqual('/instances/%s/databases/%s' %
                         (self.instance_with_id.id, self.fakedb1[0]),
                         self._url)
        # A non-200 response should surface as an exception.
        self._resp.status_code = 400
        self.assertRaises(Exception, self.databases.delete, 34, self.fakedb1)

    def test_list(self):
        page_mock = mock.Mock()
        self.databases._paginated = page_mock

        # Without pagination arguments.
        self.databases.list('instance1')
        page_mock.assert_called_with('/instances/instance1/databases',
                                     'databases', None, None)

        # With explicit limit and marker.
        limit = 'test-limit'
        marker = 'test-marker'
        self.databases.list('instance1', limit, marker)
        page_mock.assert_called_with('/instances/instance1/databases',
                                     'databases', limit, marker)
| {
"content_hash": "09016f3be8a74f5f0e6bee971f7a4f4c",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 77,
"avg_line_length": 36.098765432098766,
"alnum_prop": 0.6025991792065664,
"repo_name": "openstack/python-troveclient",
"id": "d8b30eaec20fdc1cdbdd7376f02c2904d05432c5",
"size": "3551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "troveclient/tests/test_databases.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "904048"
},
{
"name": "Shell",
"bytes": "1432"
}
],
"symlink_target": ""
} |
"""pySTS - Server Application"""
import select
from hashlib import sha256
import pysts.util as sutil
import pysts.cryptography as crypto
from pysts.protocol import STSClient
from pysts.protocol import STServer
from pysts.protocol import STSWorker
def comunicate(worker, connection):
    """Comunicate with client.

    Derives a 3DES session key from the worker's negotiated STS secret and
    echoes every decrypted client message back, re-encrypted.

    Args:
        worker: STSWorker holding the negotiated ``secret_key``.
        connection: Connected socket to the client.
    """
    secret_key = worker._sts["secret_key"]
    # 3DES key material: the first 16 bytes of SHA-256(shared secret).
    des3_key = sha256(secret_key).digest()[:16]
    des3_cipher = crypto.DES3Cipher(des3_key)
    # The watched socket set never changes; build it once outside the loop.
    socket_list = [connection]
    while True:
        # Block until the client socket becomes readable.
        read_sockets, _, _ = select.select(socket_list, [], [])
        for sock in read_sockets:
            # Only handle the incoming message from the remote client.
            if sock is not connection:
                continue
            try:
                message = sutil.read_data(connection)
            except ValueError as msg:
                print("[x] {}".format(msg))
                # BUG FIX: the original fell through after a failed read and
                # decrypted an undefined/stale `message`; skip this event.
                continue
            message = des3_cipher.decrypt(message)
            print("<<< {}".format(message))
            message = des3_cipher.encrypt(message)
            print(">>> {}".format(message))
            connection.sendall(message)
# Monkey-patch: attach the chat loop above onto STSWorker so every worker
# instance created by the server uses this implementation.
STSWorker.comunicate = comunicate
def start_server():
    """Create the Station-to-Station server and hand control to its loop."""
    STServer().run()
if __name__ == "__main__":
    # Script entry point: run the STS server until interrupted.
    start_server()
| {
"content_hash": "8ab9b797ac2c06a4929f91090373c022",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 63,
"avg_line_length": 26.254901960784313,
"alnum_prop": 0.5959671396564601,
"repo_name": "c-square/homework",
"id": "8236375221c95daffc3e7c63cc696f5be9ba90fa",
"size": "1339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Licență/Anul III/SI/Station-to-Station Protocol & 3DES/scripts/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Apex",
"bytes": "12340"
},
{
"name": "C",
"bytes": "1547"
},
{
"name": "C++",
"bytes": "269593"
},
{
"name": "Haskell",
"bytes": "14450"
},
{
"name": "Python",
"bytes": "151009"
},
{
"name": "R",
"bytes": "16961"
},
{
"name": "TeX",
"bytes": "84806"
}
],
"symlink_target": ""
} |
"""Provides an easy-to-use Python interface to NLPIR/ICTCLAS.
The functions below are not as extensive as the full set of functions exported
by NLPIR (for that, see :mod:`pynlpir.nlpir`). A few design choices have been
made with these functions as well, e.g. they have been renamed and their output
is formatted differently.
The functions in this module all assume input is either unicode or encoded
using the encoding specified when :func:`open` is called.
These functions return unicode strings.
After importing this module, you must call :func:`open` in order to initialize
the NLPIR API. When you're done using the NLPIR API, call :func:`close` to exit
the API.
"""
from __future__ import unicode_literals
import datetime as dt
import logging
import os
import sys
from . import nlpir, pos_map
__version__ = '0.6.0'
logger = logging.getLogger('pynlpir')
# Keep a handle on the builtin open(); this module defines its own open() below.
fopen = open
is_python3 = sys.version_info[0] > 2
if is_python3:
    unicode = str  # alias so 2/3-agnostic isinstance checks below still work
#: The encoding configured by :func:`open`.
ENCODING = 'utf_8'
#: The encoding error handling scheme configured by :func:`open`.
ENCODING_ERRORS = 'strict'
class LicenseError(Exception):
    """Raised when the NLPIR license appears to be missing or invalid."""
def open(data_dir=nlpir.PACKAGE_DIR, encoding=ENCODING,  # noqa: A001
         encoding_errors=ENCODING_ERRORS, license_code=None):
    """Initializes the NLPIR API.

    This calls the function :func:`~pynlpir.nlpir.Init`.

    :param str data_dir: The absolute path to the directory that has NLPIR's
        `Data` directory (defaults to :data:`pynlpir.nlpir.PACKAGE_DIR`).
    :param str encoding: The encoding that the Chinese source text will be in
        (defaults to ``'utf_8'``). Possible values include ``'gbk'``,
        ``'utf_8'``, or ``'big5'``.
    :param str encoding_errors: The desired encoding error handling scheme.
        Possible values include ``'strict'``, ``'ignore'``, and ``'replace'``.
        The default error handler is 'strict' meaning that encoding errors
        raise :class:`ValueError` (or a more codec specific subclass, such
        as :class:`UnicodeEncodeError`).
    :param str license_code: The license code that should be used when
        initializing NLPIR. This is generally only used by commercial users.
    :raises RuntimeError: The NLPIR API failed to initialize. Sometimes, NLPIR
        leaves an error log in the current working directory or NLPIR's
        ``Data`` directory that provides more detailed messages (but this isn't
        always the case).
    :raises LicenseError: The NLPIR license appears to be missing or expired.
    """
    if license_code is None:
        license_code = ''
    global ENCODING
    # Normalize the many aliases of each codec down to one canonical name and
    # the matching NLPIR constant.
    if encoding.lower() in ('utf_8', 'utf-8', 'u8', 'utf', 'utf8'):
        ENCODING = 'utf_8'
        encoding_constant = nlpir.UTF8_CODE
    elif encoding.lower() in ('gbk', '936', 'cp936', 'ms936'):
        ENCODING = 'gbk'
        encoding_constant = nlpir.GBK_CODE
    elif encoding.lower() in ('big5', 'big5-tw', 'csbig5'):
        ENCODING = 'big5'
        encoding_constant = nlpir.BIG5_CODE
    else:
        raise ValueError("encoding must be one of 'utf_8', 'big5', or 'gbk'.")
    logger.debug("Initializing the NLPIR API: 'data_dir': '{}', 'encoding': "
                 "'{}', 'license_code': '{}'".format(
                     data_dir, encoding, license_code))
    global ENCODING_ERRORS
    if encoding_errors not in ('strict', 'ignore', 'replace'):
        raise ValueError("encoding_errors must be one of 'strict', 'ignore', "
                         "or 'replace'.")
    else:
        ENCODING_ERRORS = encoding_errors
    # Init in Python 3 expects bytes, not strings.
    if is_python3 and isinstance(data_dir, str):
        data_dir = _encode(data_dir)
    if is_python3 and isinstance(license_code, str):
        license_code = _encode(license_code)
    if not nlpir.Init(data_dir, encoding_constant, license_code):
        # Init gives no failure reason; check NLPIR's error log for a license
        # problem first so the user gets a specific message when possible.
        _attempt_to_raise_license_error(data_dir)
        raise RuntimeError("NLPIR function 'NLPIR_Init' failed.")
    else:
        logger.debug("NLPIR API initialized.")
def close():
    """Exits the NLPIR API and frees allocated memory.

    This calls the function :func:`~pynlpir.nlpir.Exit`.
    """
    logger.debug("Exiting the NLPIR API.")
    if nlpir.Exit():
        logger.debug("NLPIR API exited.")
    else:
        logger.warning("NLPIR function 'NLPIR_Exit' failed.")
def _attempt_to_raise_license_error(data_dir):
    """Raise an error if NLPIR has detected a missing or expired license.

    :param str data_dir: The directory containing NLPIR's `Data` directory.
    :raises LicenseError: The NLPIR license appears to be missing or expired.
    """
    if isinstance(data_dir, bytes):
        data_dir = _decode(data_dir)
    data_dir = os.path.join(data_dir, 'Data')
    # NLPIR names its error log after the current date, e.g. '20240131.err'.
    current_date = dt.date.today().strftime('%Y%m%d')
    # NOTE(review): lines are matched on a second-precision timestamp, so only
    # entries logged in the same second as this check are inspected — an entry
    # written a second earlier is silently missed. Verify this is intended.
    timestamp = dt.datetime.today().strftime('[%Y-%m-%d %H:%M:%S]')
    data_files = os.listdir(data_dir)
    for f in data_files:
        if f == (current_date + '.err'):
            file_name = os.path.join(data_dir, f)
            # fopen is the saved builtin open(); this module shadows open().
            with fopen(file_name) as error_file:
                for line in error_file:
                    if not line.startswith(timestamp):
                        continue
                    if 'Not valid license' in line:
                        raise LicenseError('Your license appears to have '
                                           'expired. Try running "pynlpir '
                                           'update".')
                    elif 'Can not open License file' in line:
                        raise LicenseError('Your license appears to be '
                                           'missing. Try running "pynlpir '
                                           'update".')
def _decode(s, encoding=None, errors=None):
    """Decodes *s*, using the module-configured codec unless overridden."""
    encoding = ENCODING if encoding is None else encoding
    errors = ENCODING_ERRORS if errors is None else errors
    if isinstance(s, unicode):
        return s
    return s.decode(encoding, errors)
def _encode(s, encoding=None, errors=None):
    """Encodes *s*, using the module-configured codec unless overridden."""
    encoding = ENCODING if encoding is None else encoding
    errors = ENCODING_ERRORS if errors is None else errors
    if isinstance(s, unicode):
        return s.encode(encoding, errors)
    return s
def _to_float(s):
"""Converts *s* to a float if possible; if not, returns `False`."""
try:
f = float(s)
return f
except ValueError:
return False
def _get_pos_name(code, name='parent', english=True, delimiter=':',
                  pos_tags=pos_map.POS_MAP):
    """Gets the part of speech name for *code*.

    Joins the names together with *delimiter* if *name* is ``'all'``.
    See :func:``pynlpir.pos_map.get_pos_name`` for more information.
    """
    resolved = pos_map.get_pos_name(code, name, english, pos_tags=pos_tags)
    if name == 'all':
        return delimiter.join(resolved)
    return resolved
def segment(s, pos_tagging=True, pos_names='parent', pos_english=True,
            pos_tags=pos_map.POS_MAP):
    """Segment Chinese text *s* using NLPIR.

    The segmented tokens are returned as a list. Each item of the list is a
    string if *pos_tagging* is `False`, e.g. ``['我们', '是', ...]``. If
    *pos_tagging* is `True`, then each item is a tuple (``(token, pos)``), e.g.
    ``[('我们', 'pronoun'), ('是', 'verb'), ...]``.

    If *pos_tagging* is `True` and a segmented word is not recognized by
    NLPIR's part of speech tagger, then the part of speech code/name will
    be returned as :data:`None` (e.g. a space returns as ``(' ', None)``).

    This uses the function :func:`~pynlpir.nlpir.ParagraphProcess` to segment
    *s*.

    :param s: The Chinese text to segment. *s* should be Unicode or a UTF-8
        encoded string.
    :param bool pos_tagging: Whether or not to include part of speech tagging
        (defaults to ``True``).
    :param pos_names: What type of part of speech names to return. This
        argument is only used if *pos_tagging* is ``True``. :data:`None`
        means only the original NLPIR part of speech code will be returned.
        Other than :data:`None`, *pos_names* may be one of ``'parent'``,
        ``'child'``, ``'all'``, or ``'raw'``. Defaults to ``'parent'``.
        ``'parent'`` indicates that only the most generic name should be used,
        e.g. ``'noun'`` for ``'nsf'``. ``'child'`` indicates that the most
        specific name should be used, e.g. ``'transcribed toponym'`` for
        ``'nsf'``. ``'all'`` indicates that all names should be used, e.g.
        ``'noun:toponym:transcribed toponym'`` for ``'nsf'``.
        ``'raw'`` indicates that original names should be used.
    :type pos_names: ``str`` or :data:`None`
    :param bool pos_english: Whether to use English or Chinese for the part
        of speech names, e.g. ``'conjunction'`` or ``'连词'``. Defaults to
        ``True``. This is only used if *pos_tagging* is ``True``.
    :param dict pos_tags: Custom part of speech tags to use.
    """
    s = _decode(s)
    s = s.strip()
    logger.debug("Segmenting text with{} POS tagging: {}.".format(
        '' if pos_tagging else 'out', s))
    result = nlpir.ParagraphProcess(_encode(s), pos_tagging)
    result = _decode(result)
    logger.debug("Finished segmenting text: {}.".format(result))
    logger.debug("Formatting segmented text.")
    # Collapse double spaces so splitting on ' ' yields one entry per token;
    # an empty entry then marks a token that was itself a space.
    tokens = result.strip().replace('  ', ' ').split(' ')
    tokens = [' ' if t == '' else t for t in tokens]
    if pos_tagging:
        for i, t in enumerate(tokens):
            # NLPIR appends '/<pos-code>' to each token; split from the right
            # so slashes inside the token text itself survive.
            token = tuple(t.rsplit('/', 1))
            if len(token) == 1:
                # No POS code was attached to this token.
                token = (token[0], None)
            if pos_names is not None and token[1] is not None:
                pos_name = _get_pos_name(token[1], pos_names, pos_english,
                                         pos_tags=pos_tags)
                token = (token[0], pos_name)
            tokens[i] = token
    logger.debug("Formatted segmented text: {}.".format(tokens))
    return tokens
def get_key_words(s, max_words=50, weighted=False):
    """Determines key words in Chinese text *s*.

    The key words are returned in a list. If *weighted* is ``True``,
    then each list item is a tuple: ``(word, weight)``, where
    *weight* is a float. If it's *False*, then each list item is a string.

    This uses the function :func:`~pynlpir.nlpir.GetKeyWords` to determine
    the key words in *s*.

    :param s: The Chinese text to analyze. *s* should be Unicode or a UTF-8
        encoded string.
    :param int max_words: The maximum number of key words to find (defaults to
        ``50``).
    :param bool weighted: Whether or not to return the key words' weights
        (defaults to ``True``).
    """
    s = _decode(s)
    logger.debug("Searching for up to {}{} key words in: {}.".format(
        max_words, ' weighted' if weighted else '', s))
    result = nlpir.GetKeyWords(_encode(s), max_words, weighted)
    result = _decode(result)
    logger.debug("Finished key word search: {}.".format(result))
    logger.debug("Formatting key word search results.")
    # NLPIR returns '#'-separated entries; weighted entries look like
    # 'word/pos/weight'.
    fresult = result.strip('#').split('#') if result else []
    if weighted:
        weights, words = [], []
        for w in fresult:
            result = w.split('/')
            # Entry layout is word/pos/weight; the POS field is discarded.
            word, weight = result[0], result[2]
            weight = _to_float(weight)
            # _to_float returns False on parse failure; store 0.0 instead.
            weights.append(weight or 0.0)
            words.append(word)
        fresult = zip(words, weights)
        if is_python3:
            # Return a list instead of a zip object in Python 3.
            fresult = list(fresult)
    logger.debug("Key words formatted: {}.".format(fresult))
    return fresult
| {
"content_hash": "ce184ac730b994d4c5b20f54c0da3ccc",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 79,
"avg_line_length": 39.36363636363637,
"alnum_prop": 0.6126935249337097,
"repo_name": "tsroten/pynlpir",
"id": "349e74ce531f35410186ca21a5005d4acdd9b05f",
"size": "11732",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pynlpir/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "947"
},
{
"name": "Python",
"bytes": "46765"
}
],
"symlink_target": ""
} |
import sys
import unittest
import getpass
import inspect
import atom.mock_http_core
import gdata.gauth
"""Loads configuration for tests which connect to Google servers.
Settings used in tests are stored in a ConfigCollection instance in this
module called options. If your test needs to get a test related setting,
use
import gdata.test_config
option_value = gdata.test_config.options.get_value('x')
The above will check the command line for an '--x' argument, and if not
found will either use the default value for 'x' or prompt the user to enter
one.
Your test can override the value specified by the user by performing:
gdata.test_config.options.set_value('x', 'y')
If your test uses a new option which you would like to allow the user to
specify on the command line or via a prompt, you can use the register_option
method as follows:
gdata.test_config.options.register(
'option_name', 'Prompt shown to the user', secret=False #As for password.
'This is the description of the option, shown when help is requested.',
'default value, provide only if you do not want the user to be prompted')
"""
class Option(object):
    """A single configurable test setting.

    The value is resolved from the command line (``--name=value`` or
    ``--name value``), then from *default*, and finally from an
    interactive prompt when neither provided a value.
    """

    def __init__(self, name, prompt, secret=False, description=None, default=None):
        self.name = name                # command-line flag name (without '--')
        self.prompt = prompt            # text shown when prompting the user
        self.secret = secret            # if True, hide typed input (passwords)
        self.description = description  # help text used by render_usage
        self.default = default          # value used when no flag was given

    def get(self):
        """Return this option's value, prompting the user only as a last resort."""
        value = self.default
        # Check for a command line parameter in either '--x=1' or '--x 1' form.
        for i, arg in enumerate(sys.argv):
            if arg.startswith('--%s=' % self.name):
                # Split only on the first '=' so values containing '=' survive
                # (the original split on every '=' and truncated the value).
                value = arg.split('=', 1)[1]
            elif arg == '--%s' % self.name and i + 1 < len(sys.argv):
                value = sys.argv[i + 1]
        # If the param was not on the command line, ask the user to input the
        # value. This only happens when the default value is None.
        if value is None:
            prompt = '%s: ' % self.prompt
            if self.secret:
                value = getpass.getpass(prompt)
            else:
                print('You can specify this on the command line using --%s'
                      % self.name)
                # BUG FIX: this used eval(input(...)) — a bad 2to3 conversion
                # of raw_input() that executed arbitrary user input. Plain
                # input() is both safe and the intended behavior.
                value = input(prompt)
        return value
class ConfigCollection(object):
    """Registry of Option objects plus a cache of resolved values."""

    def __init__(self, options=None):
        self.options = options or {}  # option name -> Option
        self.values = {}              # option name -> resolved value

    def register_option(self, option):
        """Add *option* to the collection, keyed by its name."""
        self.options[option.name] = option

    def register(self, *args, **kwargs):
        """Build an Option from the given arguments and register it."""
        self.register_option(Option(*args, **kwargs))

    def get_value(self, option_name):
        """Return the value for *option_name*, resolving and caching it once."""
        if option_name in self.values:
            return self.values[option_name]
        resolved = self.options[option_name].get()
        if resolved is not None:
            self.values[option_name] = resolved
        return resolved

    def set_value(self, option_name, value):
        """Force *option_name* to *value*, bypassing its Option."""
        self.values[option_name] = value

    def render_usage(self):
        """Return a usage string describing every registered option."""
        usage_lines = ['--%s: %s' % (opt_name, option.description)
                       for opt_name, option in self.options.items()]
        return '\n'.join(usage_lines)
# Shared collection all default options are registered into; tests read their
# settings through this object.
options = ConfigCollection()

# Register the default options.
options.register(
    'username',
    'Please enter the email address of your test account',
    description=('The email address you want to sign in with. '
                 'Make sure this is a test account as these tests may edit'
                 ' or delete data.'))
options.register(
    'password',
    'Please enter the password for your test account',
    secret=True, description='The test account password.')
options.register(
    'clearcache',
    'Delete cached data? (enter true or false)',
    description=('If set to true, any temporary files which cache test'
                 ' requests and responses will be deleted.'),
    default='true')
options.register(
    'savecache',
    'Save requests and responses in a temporary file? (enter true or false)',
    description=('If set to true, requests to the server and responses will'
                 ' be saved in temporary files.'),
    default='false')
options.register(
    'runlive',
    'Run the live tests which contact the server? (enter true or false)',
    description=('If set to true, the tests will make real HTTP requests to'
                 ' the servers. This slows down test execution and may'
                 ' modify the users data, be sure to use a test account.'),
    default='true')
options.register(
    'host',
    'Run the live tests against the given host',
    description='Examples: docs.google.com, spreadsheets.google.com, etc.',
    default='')
options.register(
    'ssl',
    'Run the live tests over SSL (enter true or false)',
    description='If set to true, all tests will be performed over HTTPS (SSL)',
    default='false')
options.register(
    'clean',
    'Clean ALL data first before and after each test (enter true or false)',
    description='If set to true, all tests will remove all data (DANGEROUS)',
    default='false')
options.register(
    'appsusername',
    'Please enter the email address of your test Apps domain account',
    description=('The email address you want to sign in with. '
                 'Make sure this is a test account on your Apps domain as '
                 'these tests may edit or delete data.'))
options.register(
    'appspassword',
    'Please enter the password for your test Apps domain account',
    secret=True, description='The test Apps account password.')

# Other options which may be used if needed.
# These are standalone Option objects (not registered above); individual test
# modules register them with `options` only when the service needs them.
BLOG_ID_OPTION = Option(
    'blogid',
    'Please enter the ID of your test blog',
    description=('The blog ID for the blog which should have test posts added'
                 ' to it. Example 7682659670455539811'))
TEST_IMAGE_LOCATION_OPTION = Option(
    'imgpath',
    'Please enter the full path to a test image to upload',
    description=('This test image will be uploaded to a service which'
                 ' accepts a media file, it must be a jpeg.'))
SPREADSHEET_ID_OPTION = Option(
    'spreadsheetid',
    'Please enter the ID of a spreadsheet to use in these tests',
    description=('The spreadsheet ID for the spreadsheet which should be'
                 ' modified by theses tests.'))
APPS_DOMAIN_OPTION = Option(
    'appsdomain',
    'Please enter your Google Apps domain',
    description=('The domain the Google Apps is hosted on or leave blank'
                 ' if n/a'))
SITES_NAME_OPTION = Option(
    'sitename',
    'Please enter name of your Google Site',
    description='The webspace name of the Site found in its URL.')
PROJECT_NAME_OPTION = Option(
    'project_name',
    'Please enter the name of your project hosting project',
    description=('The name of the project which should have test issues added'
                 ' to it. Example gdata-python-client'))
ISSUE_ASSIGNEE_OPTION = Option(
    'issue_assignee',
    'Enter the email address of the target owner of the updated issue.',
    description=('The email address of the user a created issue\'s owner will '
                 ' become. Example testuser2@gmail.com'))
GA_TABLE_ID = Option(
    'table_id',
    'Enter the Table ID of the Google Analytics profile to test',
    description=('The Table ID of the Google Analytics profile to test.'
                 ' Example ga:1174'))
TARGET_USERNAME_OPTION = Option(
    'targetusername',
    'Please enter the username (without domain) of the user which will be'
    ' affected by the tests',
    description=('The username of the user to be tested'))
YT_DEVELOPER_KEY_OPTION = Option(
    'developerkey',
    'Please enter your YouTube developer key',
    description=('The YouTube developer key for your account'))
YT_CLIENT_ID_OPTION = Option(
    'clientid',
    'Please enter your YouTube client ID',
    description=('The YouTube client ID for your account'))
YT_VIDEO_ID_OPTION = Option(
    'videoid',
    'Please enter the ID of a YouTube video you uploaded',
    description=('The video ID of a YouTube video uploaded to your account'))
# Functions to inject a cachable HTTP client into a service client.
def configure_client(client, case_name, service_name, use_apps_auth=False):
    """Sets up a mock client which will reuse a saved session.

    Should be called during setUp of each unit test.

    Handles authentication to allow the GDClient to make requests which
    require an auth header.

    Args:
      client: a gdata.GDClient whose http_client member should be replaced
              with a atom.mock_http_core.MockHttpClient so that repeated
              executions can used cached responses instead of contacting
              the server.
      case_name: str The name of the test case class. Examples: 'BloggerTest',
                 'ContactsTest'. Used to save a session
                 for the ClientLogin auth token request, so the case_name
                 should be reused if and only if the same username, password,
                 and service are being used.
      service_name: str The service name as used for ClientLogin to identify
                    the Google Data API being accessed. Example: 'blogger',
                    'wise', etc.
      use_apps_auth: bool (optional) If set to True, use appsusername and
                     appspassword command-line args instead of username and
                     password respectively.
    """
    # Use a mock HTTP client which will record and replay the HTTP traffic
    # from these tests.
    client.http_client = atom.mock_http_core.MockHttpClient()
    client.http_client.cache_case_name = case_name
    # Getting the auth token only needs to be done once in the course of test
    # runs.
    auth_token_key = '%s_auth_token' % service_name
    if (auth_token_key not in options.values
            and options.get_value('runlive') == 'true'):
        # Record (or replay) the ClientLogin exchange under its own session.
        client.http_client.cache_test_name = 'client_login'
        cache_name = client.http_client.get_cache_file_name()
        if options.get_value('clearcache') == 'true':
            client.http_client.delete_session(cache_name)
        client.http_client.use_cached_session(cache_name)
        if not use_apps_auth:
            username = options.get_value('username')
            password = options.get_value('password')
        else:
            username = options.get_value('appsusername')
            password = options.get_value('appspassword')
        auth_token = client.client_login(username, password, case_name,
                                         service=service_name)
        # Cache the serialized token so later tests skip the login round-trip.
        options.values[auth_token_key] = gdata.gauth.token_to_blob(auth_token)
        if client.alt_auth_service is not None:
            options.values[client.alt_auth_service] = gdata.gauth.token_to_blob(
                client.alt_auth_token)
        client.http_client.close_session()
    # Allow a config auth_token of False to prevent the client's auth header
    # from being modified.
    if auth_token_key in options.values:
        client.auth_token = gdata.gauth.token_from_blob(
            options.values[auth_token_key])
        if client.alt_auth_service is not None:
            client.alt_auth_token = gdata.gauth.token_from_blob(
                options.values[client.alt_auth_service])
    # Optionally redirect all requests at an alternate host.
    if options.get_value('host'):
        client.host = options.get_value('host')
def configure_cache(client, test_name):
    """Loads or begins a cached session to record HTTP traffic.

    Should be called at the beginning of each test method.

    Args:
      client: a gdata.GDClient whose http_client member has been replaced
              with a atom.mock_http_core.MockHttpClient so that repeated
              executions can used cached responses instead of contacting
              the server.
      test_name: str The name of this test method. Examples:
                 'TestClass.test_x_works', 'TestClass.test_crud_operations'.
                 This is used to name the recording of the HTTP requests and
                 responses, so it should be unique to each test method in the
                 test case.
    """
    # Auth token is obtained in configure_client which is called as part of
    # setUp.
    http = client.http_client
    http.cache_test_name = test_name
    session_file = http.get_cache_file_name()
    if options.get_value('clearcache') == 'true':
        http.delete_session(session_file)
    http.use_cached_session(session_file)
def close_client(client):
    """Saves the recoded responses to a temp file if the config file allows.

    This should be called in the unit test's tearDown method.

    Checks to see if the 'savecache' option is set to 'true', to make sure we
    only save sessions to repeat if the user desires.
    """
    if not client:
        return
    if options.get_value('savecache') != 'true':
        return
    # If this was a live request, save the recording.
    client.http_client.close_session()
def configure_service(service, case_name, service_name):
    """Sets up a mock GDataService v1 client to reuse recorded sessions.

    Should be called during setUp of each unit test. This is a duplicate of
    configure_client, modified to handle old v1 service classes.
    """
    service.http_client.v2_http_client = atom.mock_http_core.MockHttpClient()
    service.http_client.v2_http_client.cache_case_name = case_name
    # Getting the auth token only needs to be done once in the course of test
    # runs.
    auth_token_key = 'service_%s_auth_token' % service_name
    if (auth_token_key not in options.values
            and options.get_value('runlive') == 'true'):
        # Record (or replay) the ClientLogin exchange under its own session.
        service.http_client.v2_http_client.cache_test_name = 'client_login'
        cache_name = service.http_client.v2_http_client.get_cache_file_name()
        if options.get_value('clearcache') == 'true':
            service.http_client.v2_http_client.delete_session(cache_name)
        service.http_client.v2_http_client.use_cached_session(cache_name)
        service.ClientLogin(options.get_value('username'),
                            options.get_value('password'),
                            service=service_name, source=case_name)
        # Cache the token string so later tests skip the login round-trip.
        options.values[auth_token_key] = service.GetClientLoginToken()
        service.http_client.v2_http_client.close_session()
    if auth_token_key in options.values:
        service.SetClientLoginToken(options.values[auth_token_key])
def configure_service_cache(service, test_name):
    """Loads or starts a session recording for a v1 Service object.

    Duplicates the behavior of configure_cache, but the target for this
    function is a v1 Service object instead of a v2 Client.
    """
    http = service.http_client.v2_http_client
    http.cache_test_name = test_name
    session_file = http.get_cache_file_name()
    if options.get_value('clearcache') == 'true':
        http.delete_session(session_file)
    http.use_cached_session(session_file)
def close_service(service):
    """Saves the v1 service's recorded session when 'savecache' is enabled."""
    if not service:
        return
    if options.get_value('savecache') != 'true':
        return
    # If this was a live request, save the recording.
    service.http_client.v2_http_client.close_session()
def build_suite(classes):
"""Creates a TestSuite for all unit test classes in the list.
Assumes that each of the classes in the list has unit test methods which
begin with 'test'. Calls unittest.makeSuite.
Returns:
A new unittest.TestSuite containing a test suite for all classes.
"""
suites = [unittest.makeSuite(a_class, 'test') for a_class in classes]
return unittest.TestSuite(suites)
def check_data_classes(test, classes):
    """Sanity-checks XML data classes: docstrings, _qname, and member types.

    Args:
      test: unittest.TestCase instance used for assertions.
      classes: iterable of XML data classes to inspect.
    """
    # Shadows the module-level `import inspect`; kept for compatibility.
    import inspect
    for data_class in classes:
        test.assertTrue(data_class.__doc__ is not None,
                        'The class %s should have a docstring' % data_class)
        if hasattr(data_class, '_qname'):
            # _qname may be a single QName string or a tuple of per-version
            # QNames; normalize to a tuple before checking.
            qname_versions = None
            if isinstance(data_class._qname, tuple):
                qname_versions = data_class._qname
            else:
                qname_versions = (data_class._qname,)
            for versioned_qname in qname_versions:
                test.assertTrue(isinstance(versioned_qname, str),
                                'The class %s has a non-string _qname' % data_class)
                test.assertTrue(not versioned_qname.endswith('}'),
                                'The _qname for class %s is only a namespace' % (
                                    data_class))
        for attribute_name, value in list(data_class.__dict__.items()):
            # Ignore all elements that start with _ (private members)
            if not attribute_name.startswith('_'):
                try:
                    # NOTE(review): atom.core is referenced here but only
                    # atom.mock_http_core is imported at the top of this file;
                    # presumably that import pulls in atom.core — verify.
                    if not (isinstance(value, str) or inspect.isfunction(value)
                            or (isinstance(value, list)
                                and issubclass(value[0], atom.core.XmlElement))
                            or type(value) == property  # Allow properties.
                            or inspect.ismethod(value)  # Allow methods.
                            or inspect.ismethoddescriptor(value)  # Allow method descriptors.
                            # staticmethod et al.
                            or issubclass(value, atom.core.XmlElement)):
                        test.fail(
                            'XmlElement member should have an attribute, XML class,'
                            ' or list of XML classes as attributes.')
                except TypeError:
                    # issubclass raises TypeError for non-class values.
                    test.fail('Element %s in %s was of type %s' % (
                        attribute_name, data_class._qname, type(value)))
def check_clients_with_auth(test, classes):
    """Asserts that each client class declares its auth configuration.

    Every class must expose an api_version, a string auth_service naming the
    ClientLogin service, and a list/tuple of auth_scopes.

    Fixes in this version: the hasattr checks now run before the isinstance
    checks (so a missing attribute fails the assertion instead of raising
    AttributeError), and the redundant weaker isinstance(auth_service,
    (str, int)) check — immediately superseded by the strict str check —
    was removed.

    Args:
      test: unittest.TestCase instance used for assertions.
      classes: iterable of client classes to inspect.
    """
    for client_class in classes:
        test.assertTrue(hasattr(client_class, 'api_version'))
        test.assertTrue(hasattr(client_class, 'auth_service'))
        test.assertTrue(isinstance(client_class.auth_service, str))
        test.assertTrue(hasattr(client_class, 'auth_scopes'))
        test.assertTrue(isinstance(client_class.auth_scopes, (list, tuple)))
| {
"content_hash": "431d9dc5c91d01a2fb5956ab3d09fb4b",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 82,
"avg_line_length": 40.716981132075475,
"alnum_prop": 0.6749304911955515,
"repo_name": "webmedic/booker",
"id": "d6310ea9817c7d65a8fc5c4438770ecf8f758d8e",
"size": "17869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/gdata/test_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5995"
},
{
"name": "CSS",
"bytes": "17900"
},
{
"name": "Python",
"bytes": "4133180"
}
],
"symlink_target": ""
} |
import numpy as np
from net import Controller
from director import vtkAll as vtk
from director.debugVis import DebugData
from director import ioUtils, filterUtils
class MovingObject(object):
    """Moving object.

    Planar object with state [x, y, theta] driven by unicycle dynamics: it
    moves forward at a fixed speed while a subclass-provided control law
    (:meth:`_control`) steers the heading.
    """

    def __init__(self, velocity, polydata):
        """Constructs a MovingObject.

        Args:
            velocity: Velocity.
            polydata: Polydata.
        """
        self._state = np.array([0., 0., 0.])  # [x, y, theta]
        self._velocity = float(velocity)
        self._raw_polydata = polydata  # model at the origin, never mutated
        self._polydata = polydata      # model transformed to the current pose
        self._sensors = []

    @property
    def x(self):
        """X coordinate."""
        return self._state[0]

    @x.setter
    def x(self, value):
        """X coordinate."""
        next_state = self._state.copy()
        next_state[0] = float(value)
        self._update_state(next_state)

    @property
    def y(self):
        """Y coordinate."""
        return self._state[1]

    @y.setter
    def y(self, value):
        """Y coordinate."""
        next_state = self._state.copy()
        next_state[1] = float(value)
        self._update_state(next_state)

    @property
    def theta(self):
        """Yaw in radians."""
        return self._state[2]

    @theta.setter
    def theta(self, value):
        """Yaw in radians."""
        next_state = self._state.copy()
        # Normalize yaw into [0, 2*pi).
        next_state[2] = float(value) % (2 * np.pi)
        self._update_state(next_state)

    @property
    def velocity(self):
        """Velocity."""
        return self._velocity

    @velocity.setter
    def velocity(self, value):
        """Velocity."""
        self._velocity = float(value)

    @property
    def sensors(self):
        """List of attached sensors."""
        return self._sensors

    def attach_sensor(self, sensor):
        """Attaches a sensor.

        Args:
            sensor: Sensor.
        """
        self._sensors.append(sensor)

    def _dynamics(self, state, t, controller=None):
        """Dynamics of the object.

        Args:
            state: Initial condition.
            t: Time.

        Returns:
            Derivative of state at t.
        """
        dqdt = np.zeros_like(state)
        dqdt[0] = self._velocity * np.cos(state[2])
        dqdt[1] = self._velocity * np.sin(state[2])
        dqdt[2] = self._control(state, t)
        # NOTE(review): returns the derivative scaled by t, so _simulate's
        # `state + _dynamics(state, dt)` is one explicit Euler step of size
        # dt — the docstring above slightly understates this.
        return dqdt * t

    def _control(self, state, t):
        """Returns the yaw given state.

        Subclasses must override this with their steering law.

        Args:
            state: State.
            t: Time.

        Returns:
            Yaw.
        """
        raise NotImplementedError

    def _simulate(self, dt):
        """Simulates the object moving.

        Args:
            dt: Time length of step.

        Returns:
            New state.
        """
        return self._state + self._dynamics(self._state, dt)

    def move(self, dt=1.0/30.0):
        """Moves the object by a given time step.

        Args:
            dt: Length of time step.
        """
        state = self._simulate(dt)
        self._update_state(state)

    def _update_state(self, next_state):
        """Updates the moving object's state.

        Args:
            next_state: New state.
        """
        # Re-pose the raw model: translate to (x, y), then rotate by yaw.
        t = vtk.vtkTransform()
        t.Translate([next_state[0], next_state[1], 0.])
        t.RotateZ(np.degrees(next_state[2]))
        self._polydata = filterUtils.transformPolyData(self._raw_polydata, t)
        self._state = next_state
        # Notify every attached sensor of the new pose; list() forces the
        # lazy map to execute.
        list(map(lambda s: s.update(*self._state), self._sensors))

    def to_positioned_polydata(self):
        """Converts object to visualizable poly data.

        Note: Transformations have been already applied to this.
        """
        return self._polydata

    def to_polydata(self):
        """Converts object to visualizable poly data.

        Note: This is centered at (0, 0, 0) and is not rotated.
        """
        return self._raw_polydata
class Robot(MovingObject):
    """Robot.

    Learns to steer toward a target while avoiding obstacles using a
    neural-network controller trained with Q-learning-style utility updates.
    """

    def __init__(self, velocity=25.0, scale=0.15, exploration=0.5,
                 model="A10.obj"):
        """Constructs a Robot.

        Args:
            velocity: Velocity of the robot in the forward direction.
            scale: Scale of the model.
            exploration: Exploration rate.
            model: Object model to use.
        """
        self._target = (0, 0)
        self._exploration = exploration
        t = vtk.vtkTransform()
        t.Scale(scale, scale, scale)
        polydata = ioUtils.readPolyData(model)
        polydata = filterUtils.transformPolyData(polydata, t)
        super(Robot, self).__init__(velocity, polydata)
        self._ctrl = Controller()

    def move(self, dt=1.0/30.0):
        """Moves the object by a given time step.

        Also performs one temporal-difference update of the controller from
        the reward observed for the step just taken.

        Args:
            dt: Length of time step.
        """
        gamma = 0.9  # discount factor for future utility
        prev_xy = self._state[0], self._state[1]
        prev_state = self._get_state()
        prev_utilities = self._ctrl.evaluate(prev_state)
        # The base-class move() calls _control(), which sets _selected_i.
        super(Robot, self).move(dt)
        next_state = self._get_state()
        next_utilities = self._ctrl.evaluate(next_state)
        print("action: {}, utility: {}".format(
            self._selected_i, prev_utilities[self._selected_i]))
        # Terminal states (collisions) take the raw reward; otherwise
        # bootstrap with the discounted utility of the chosen action.
        terminal = self._sensors[0].has_collided()
        curr_reward = self._get_reward(prev_xy)
        total_reward =\
            curr_reward if terminal else \
            curr_reward + gamma * next_utilities[self._selected_i]
        # Only the chosen action's target value changes; the others keep
        # their previous utilities.
        rewards = [total_reward if i == self._selected_i else prev_utilities[i]
                   for i in range(len(next_utilities))]
        self._ctrl.train(prev_state, rewards)

    def set_target(self, target):
        # Target is an (x, y) position in world coordinates.
        self._target = target

    def set_controller(self, ctrl):
        # Swap in a different (e.g. pre-trained) controller.
        self._ctrl = ctrl

    def at_target(self, threshold=3):
        """Return whether the robot has reached its target.

        Args:
            threshold: Target distance threshold.

        Returns:
            True if target is reached.
        """
        return (abs(self._state[0] - self._target[0]) <= threshold and
                abs(self._state[1] - self._target[1]) <= threshold)

    def _get_reward(self, prev_state):
        # Reward shaping: large penalty on collision, bonus at the target,
        # otherwise progress toward the target plus heading alignment plus
        # forward clearance.
        prev_dx = self._target[0] - prev_state[0]
        prev_dy = self._target[1] - prev_state[1]
        prev_distance = np.sqrt(prev_dx ** 2 + prev_dy ** 2)
        new_dx = self._target[0] - self._state[0]
        new_dy = self._target[1] - self._state[1]
        new_distance = np.sqrt(new_dx ** 2 + new_dy ** 2)
        if self._sensors[0].has_collided():
            return -20
        elif self.at_target():
            return 15
        else:
            delta_distance = prev_distance - new_distance
            angle_distance = -abs(self._angle_to_destination()) / 4
            # NOTE(review): distances[8] is presumably the forward-facing
            # ray of the sensor — verify against the sensor implementation.
            obstacle_ahead = self._sensors[0].distances[8] - 1
            return delta_distance + angle_distance + obstacle_ahead

    def _angle_to_destination(self):
        # Signed angle between the robot's heading and the target bearing.
        x, y = self._target[0] - self.x, self._target[1] - self.y
        return self._wrap_angles(np.arctan2(y, x) - self.theta)

    def _wrap_angles(self, a):
        # Wrap an angle into [-pi, pi).
        return (a + np.pi) % (2 * np.pi) - np.pi

    def _get_state(self):
        # Network input: scaled offset to the target, bearing error, and the
        # sensor's distance readings.
        dx, dy = self._target[0] - self.x, self._target[1] - self.y
        curr_state = [dx / 1000, dy / 1000, self._angle_to_destination()]
        return np.hstack([curr_state, self._sensors[0].distances])

    def _control(self, state, t):
        """Returns the yaw given state.

        Args:
            state: State.
            t: Time.

        Returns:
            Yaw.
        """
        # Discrete action set: steer left, go straight, steer right.
        actions = [-np.pi/2, 0., np.pi/2]
        utilities = self._ctrl.evaluate(self._get_state())
        optimal_i = np.argmax(utilities)
        # Epsilon-greedy exploration: sometimes pick a random action.
        if np.random.random() <= self._exploration:
            optimal_i = np.random.choice([0, 1, 2])
        optimal_a = actions[optimal_i]
        # Remember the chosen action index for the learning update in move().
        self._selected_i = optimal_i
        return optimal_a
class Obstacle(MovingObject):
    """A cylindrical moving obstacle that turns around at the arena edges."""

    def __init__(self, velocity, radius, bounds, height=1.0):
        """Constructs an Obstacle.

        Args:
            velocity: Velocity of the obstacle in the forward direction.
            radius: Radius of the obstacle.
            bounds: Arena bounds as (x_min, x_max, y_min, y_max).
            height: Height of the cylinder model.
        """
        self._bounds = bounds
        self._radius = radius
        self._height = height
        model = DebugData()
        # Upright cylinder, centered slightly below the motion plane.
        model.addCylinder([0, 0, height / 2 - 0.5], [0, 0, 1], height, radius)
        super(Obstacle, self).__init__(velocity, model.getPolyData())

    def _control(self, state, t):
        """Returns the yaw given state.

        Args:
            state: State as (x, y, theta).
            t: Time.

        Returns:
            Yaw: pi (turn around) when the obstacle touches any arena
            boundary, otherwise 0 (keep heading).
        """
        x_min, x_max, y_min, y_max = self._bounds
        x, y, _ = state
        hit_edge = (x - self._radius <= x_min or
                    x + self._radius >= x_max or
                    y - self._radius <= y_min or
                    y + self._radius >= y_max)
        return np.pi if hit_edge else 0.
| {
"content_hash": "067b62603c84883f9259625942bc73c8",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 79,
"avg_line_length": 28.38050314465409,
"alnum_prop": 0.540831024930748,
"repo_name": "anassinator/dqn-obstacle-avoidance",
"id": "1ef3500df417636c9ae3b0088446ad11cba3330e",
"size": "9050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moving_object.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27777"
}
],
"symlink_target": ""
} |
from tokit.models import Token
def get_api_key(aKey):
    """Return the valid Token whose key is *aKey*, or None if none exists."""
    try:
        return Token.objects.get(key=aKey, is_valid=True)
    except Token.DoesNotExist:
        return None
def extract_api_key(request):
    """Extract the API key from a request.

    Checks the ``Api-Key`` HTTP header first (Django exposes it as
    ``HTTP_API_KEY`` in ``request.META``), then falls back to an ``api_key``
    parameter in the dict matching the request method (GET/POST).  Returns
    None when no key is found.
    """
    meta = getattr(request, "META", None)
    if meta is not None:
        header_key = meta.get("HTTP_API_KEY")
        if header_key:
            return header_key
    if request.method:
        params = getattr(request, request.method)
        param_key = params.get("api_key")
        if param_key:
            return param_key
    return None
| {
"content_hash": "227bb73826be5e18f482522a073ed6e8",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 89,
"avg_line_length": 30.42105263157895,
"alnum_prop": 0.6072664359861591,
"repo_name": "nfb-onf/tokit",
"id": "23ea3ec555a165421def1f2054848df51416c422",
"size": "691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tokit/key_manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "53652"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function
try:
from astropy.io import fits as pyfits
except ImportError:
try:
import pyfits
except ImportError:
raise ImportError('You need astropy or pyfits modules')
import os
import numpy as np
def TPF2Im(filename, newdir=True):
    """Convert a Kepler/K2 target pixel file (TPF) to per-cadence FITS images.

    Each cadence's flux image is written to ``<OBJECT>-<CADENCENO>.fits``,
    and an index file ``<OBJECT>_filelist.txt`` (filename, time, quality) is
    saved alongside the images.

    Args:
        filename: Path to the TPF FITS file.
        newdir: If True, write into a directory named after the object
            (created if missing); otherwise write into the current directory.
    """
    with pyfits.open(filename) as f:
        objname = f[0].header['OBJECT']
        # Replace spaces so the object name is filesystem friendly.
        objname = objname.replace(" ", "_")
        fluxarr = f[1].data['FLUX']
        cadnum = f[1].data['CADENCENO']
        time = f[1].data['TIME']
        quality = f[1].data['QUALITY']
    if newdir:
        outdir = objname
        # Explicit existence check instead of the old bare "except:" around
        # os.stat(), which could hide unrelated OS errors.
        if not os.path.isdir(outdir):
            os.mkdir(outdir)
    else:
        outdir = '.'
    outname = np.array([], dtype=str)
    for num, image in enumerate(fluxarr):
        hdu = pyfits.PrimaryHDU(image)
        hdulist = pyfits.HDUList([hdu])
        hdulist.writeto('{0}/{1}-{2:05d}.fits'.format(outdir, objname,
                                                      cadnum[num]))
        outname = np.append(outname,
                            '{0}-{1:05d}.fits'.format(objname, cadnum[num]))
    # Columns: output filename, observation time, quality flag.
    # (The old per-column fmt list was unused; everything is written as %s.)
    outarr = np.array([outname, time, quality]).T
    np.savetxt('{0}/{1}_filelist.txt'.format(outdir, objname), outarr, fmt='%s')
if __name__ == '__main__':
    ## test on my local machine if it works
    # Smoke test: convert a sample long-cadence TPF.  The data path is
    # developer-specific; adjust it before running elsewhere.
    path = '../data'
    fn = path + '/kplr060017806-2014044044430_lpd-targ.fits'
    TPF2Im(fn)
| {
"content_hash": "e25a58bd76dccd3a22047e24b9840ee0",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 88,
"avg_line_length": 28,
"alnum_prop": 0.5612244897959183,
"repo_name": "mrtommyb/TPFtoImages",
"id": "446672472794e0304a165b54b6e182ebc5b87165",
"size": "1568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/convertTPFtoImages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1568"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ConfidentialLedgerConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for ConfidentialLedger.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The Azure subscription ID. This is a GUID-formatted string (e.g.
     00000000-0000-0000-0000-000000000000). Required.
    :type subscription_id: str
    :keyword api_version: Api Version. Default value is "2020-12-01-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
        super(ConfidentialLedgerConfiguration, self).__init__(**kwargs)
        api_version: Literal["2020-12-01-preview"] = kwargs.pop("api_version", "2020-12-01-preview")

        # Fail fast on missing required parameters.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = api_version
        # Default OAuth scope for Azure Resource Manager.
        self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
        kwargs.setdefault("sdk_moniker", "mgmt-confidentialledger/{}".format(VERSION))
        self._configure(**kwargs)

    def _configure(self, **kwargs: Any) -> None:
        # Each pipeline policy may be overridden via a keyword argument of
        # the same name; otherwise the azure-core default is used.
        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get("authentication_policy")
        # When a credential is present but no explicit auth policy was given,
        # default to ARM challenge-based authentication.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
                self.credential, *self.credential_scopes, **kwargs
            )
| {
"content_hash": "dfe598b200bf7d90dee0c1ba6cec50ed",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 107,
"avg_line_length": 52.49230769230769,
"alnum_prop": 0.7215709261430246,
"repo_name": "Azure/azure-sdk-for-python",
"id": "377c11fab5174e80515fbcb2fb62d8cdd4b10f84",
"size": "3880",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/confidentialledger/azure-mgmt-confidentialledger/azure/mgmt/confidentialledger/aio/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from flask import render_template, current_app, send_from_directory, abort
from flask.ext.login import login_required, current_user
import os
from .blueprint import *
from ..models import *
@roster.route('/')
@roster.permission_required('view-roster')
@roster.menu('index', None)
def index():
    """Roster landing page (requires the 'view-roster' permission)."""
    return render_template('roster/index.html')
@roster.route('/people')
@roster.permission_required('view-roster')
@roster.menu('people', 'index', icon='icon-book')
def people():
    """People listing page (requires the 'view-roster' permission)."""
    return render_template('roster/people.html')
@roster.route('/badge/<int:id>.jpeg')
@login_required
def badge_photo(id):
    """Serve the badge photo for person *id*.

    A user may always view their own photo; viewing anyone else's requires
    the 'view-photo' permission.
    """
    # Allow current user to see their own photo; otherwise need view-photo.
    if current_user.person_id == id:
        pass
    elif not current_user.has_permissions({'view-photo'}):
        # BUG FIX: set('view-photo') built a set of single characters
        # ({'v', 'i', 'e', ...}), so the permission check could never match
        # the 'view-photo' permission string.
        abort(403)
    dir_path = current_app.config['BADGE_FOLDER']
    fn = '%d.jpeg' % id
    return send_from_directory(dir_path, fn, as_attachment=False)
from .contact import *
from .info import *
from .config import *
from .signin import *
| {
"content_hash": "bb0da7773be55039ae7ef8c93ffdd50c",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 75,
"avg_line_length": 25.414634146341463,
"alnum_prop": 0.7005758157389635,
"repo_name": "team294/surfmanage",
"id": "8cf54138af4b16d47ff1c8029e1bc777f391c8bf",
"size": "1042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "surfmanage/roster/views/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "76796"
},
{
"name": "Python",
"bytes": "142919"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from pool_app.models import *
import json
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout
from django.views.decorators.csrf import csrf_protect
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import logging
logger = logging.getLogger(__name__)
# Create your views here.
@csrf_exempt
def add_user(request):
    """Create a PoolUsers row from a JSON POST body.

    Expected JSON keys: unique_id, age, gender, mobile_number, location.
    Returns a plain-text response indicating success or failure; non-POST
    requests get no response (None), as before.
    """
    if request.method == 'POST':
        data = request.body
        errors = []
        pool_user_data = json.loads(data)
        user_id = pool_user_data['unique_id']
        age = pool_user_data['age']
        gender = pool_user_data['gender']
        mobile_number = pool_user_data['mobile_number']
        location = pool_user_data['location']
        try:
            pool_user = PoolUsers(user_id=user_id, age=age,
                                  gender=gender, mobile=mobile_number,
                                  location=location)
            pool_user.save()
            return HttpResponse('Pool user Created')
        except IntegrityError as message:
            # "except X as y" / message.args[0] instead of the Python-2-only
            # "except X, y" / message[0]; works on Python 2.6+ and 3.
            print("exceptions occured and error is ")
            error_code = message.args[0]
            print(error_code)
            error = ''
            if error_code == 1062:
                # MySQL error 1062: duplicate entry for a unique key.
                error = "uniquie is already existing"
                print(error)
            errors.append(error)
            return HttpResponse('Pool user not created')
@csrf_exempt
def find_hashtag(request):
    """Return "yes" if the posted hash_tag exists in PoolTagMaster, else "no"."""
    if request.method == 'POST':
        payload = json.loads(request.body)
        tag = payload['hash_tag']
        try:
            PoolTagMaster.objects.get(pk=tag)
        except ObjectDoesNotExist:
            return HttpResponse("no")
        return HttpResponse("yes")
@csrf_exempt
def search_question(request):
    """Return the question and its four options for a posted hash_tag.

    Responds with a JSON list containing one dict, or the plain text "no"
    when the tag does not exist.
    """
    if request.method == 'POST':
        data = request.body
        pool_data = json.loads(data)
        pool_tag = pool_data['hash_tag']
        details = []
        try:
            pool_tag_details = PoolTagMaster.objects.get(pk=pool_tag)
            # BUG FIX: options 2-4 previously all echoed option1.
            details.append({"pool_tag": pool_tag_details.pool_tag,
                            "question": pool_tag_details.question,
                            "option1": pool_tag_details.option1,
                            "option2": pool_tag_details.option2,
                            "option3": pool_tag_details.option3,
                            "option4": pool_tag_details.option4,
                            # "pool_start": pool_tag_details.pool_start,
                            # "pool_end": pool_tag_details.pool_end
                            })
            return HttpResponse(json.dumps(details), content_type="application/json")
        except ObjectDoesNotExist:
            return HttpResponse("no")
@csrf_exempt
def create_question(request):
    """Create a PoolTagMaster row (a poll question) from a JSON POST body.

    Responds with the new poll's hash tag as plain text.
    """
    if request.method == 'POST':
        details = json.loads(request.body)
        pool_tag = details['hash_tag']
        # Read for validation; not stored on the model yet.
        result_private = details['result_private']
        new_question = PoolTagMaster(
            pool_tag=pool_tag,
            question=details['question'],
            option1=details['option1'],
            option2=details['option2'],
            option3=details['option3'],
            option4=details['option4'],
            age=details['age'],
            gender=details['gender'],
            user_id=details['unique_id'],
            pool_start=details['start_time'],
            pool_end=details['end_time'],
            option1_count=0,
            option2_count=0,
            option3_count=0,
            option4_count=0,
        )
        new_question.save()
        return HttpResponse(pool_tag)
@csrf_exempt
def answer_this_question(request):
    """Record a user's answer for a poll and increment the chosen option's
    counter.

    Returns "success" or "failure" as plain text.
    """
    if request.method == 'POST':
        data = request.body
        answer_details = json.loads(data)
        pool_tag = answer_details['hash_tag']
        option_selected = answer_details['option_selected']
        option_text = answer_details['option_text']
        user_id = answer_details['unique_id']
        age = answer_details['age']
        gender = answer_details['gender']
        try:
            post_answer = PoolTagAnswer(pool_tag_id=pool_tag,
                                        option_selected=option_selected,
                                        option_text=option_text,
                                        user_id=user_id,
                                        age=age, gender=gender)
            post_answer.save()
            pool_count = PoolTagMaster.objects.get(pk=pool_tag)
            # NOTE(review): this read-modify-write is racy under concurrent
            # votes; a Django F() expression update would be atomic — confirm
            # whether that matters for this deployment.
            if option_selected == 1:
                pool_count.option1_count = pool_count.option1_count + 1
            elif option_selected == 2:
                pool_count.option2_count = pool_count.option2_count + 1
            elif option_selected == 3:
                pool_count.option3_count = pool_count.option3_count + 1
            elif option_selected == 4:
                pool_count.option4_count = pool_count.option4_count + 1
            pool_count.save()
            return HttpResponse("success")
        except Exception:
            # Was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt; still intentionally best-effort otherwise.
            return HttpResponse("failure")
@csrf_exempt
def get_answers(request):
    """Return option texts and vote counts for a posted hash_tag as JSON."""
    if request.method == 'POST':
        data = request.body
        pool_data = json.loads(data)
        pool_tag = pool_data['hash_tag']
        pool_results = []
        try:
            pool_details = PoolTagMaster.objects.get(pk=pool_tag)
            # Response shape: a JSON list holding a single dict.
            pool_results.append({"pool_tag":pool_details.pool_tag,
                                 "option1":pool_details.option1,
                                 "option2":pool_details.option2,
                                 "option3":pool_details.option3,
                                 "option4":pool_details.option4,
                                 "option1_count":pool_details.option1_count,
                                 "option2_count":pool_details.option2_count,
                                 "option3_count":pool_details.option3_count,
                                 "option4_count":pool_details.option4_count
                                 })
            return HttpResponse(json.dumps(pool_results), content_type="application/json")
        except ObjectDoesNotExist:
return HttpResponse("invalid_hash_tag") | {
"content_hash": "36e820abaebab39267a87ad691e788c9",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 81,
"avg_line_length": 32.619883040935676,
"alnum_prop": 0.6964861957690929,
"repo_name": "kantanand/pool",
"id": "4d2605339896b4365124ea54d444db2191c5abc3",
"size": "5578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/api/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "11394"
},
{
"name": "Python",
"bytes": "25152"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import logging
import threading
import spotify
from spotify import ffi, lib, serialized, utils
__all__ = ['Artist', 'ArtistBrowser', 'ArtistBrowserType']
logger = logging.getLogger(__name__)
class Artist(object):
    """A Spotify artist.

    You can get artists from tracks and albums, or you can create an
    :class:`Artist` yourself from a Spotify URI::

        >>> session = spotify.Session()
        # ...
        >>> artist = session.get_artist(
        ...     'spotify:artist:22xRIphSN7IkPVbErICu7s')
        >>> artist.load().name
        u'Rob Dougan'
    """

    def __init__(self, session, uri=None, sp_artist=None, add_ref=True):
        assert uri or sp_artist, 'uri or sp_artist is required'
        self._session = session
        if uri is not None:
            artist = spotify.Link(self._session, uri=uri).as_artist()
            if artist is None:
                raise ValueError(
                    'Failed to get artist from Spotify URI: %r' % uri
                )
            sp_artist = artist._sp_artist
        if add_ref:
            lib.sp_artist_add_ref(sp_artist)
        # ffi.gc ties the libspotify refcount to this wrapper's lifetime.
        self._sp_artist = ffi.gc(sp_artist, lib.sp_artist_release)

    def __repr__(self):
        return 'Artist(%r)' % self.link.uri

    def __eq__(self, other):
        # Equality is identity of the underlying libspotify pointer.
        if isinstance(other, self.__class__):
            return self._sp_artist == other._sp_artist
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self._sp_artist)

    @property
    @serialized
    def name(self):
        """The artist's name.

        Will always return :class:`None` if the artist isn't loaded.
        """
        name = utils.to_unicode(lib.sp_artist_name(self._sp_artist))
        return name if name else None

    @property
    def is_loaded(self):
        """Whether the artist's data is loaded."""
        return bool(lib.sp_artist_is_loaded(self._sp_artist))

    def load(self, timeout=None):
        """Block until the artist's data is loaded.

        After ``timeout`` seconds with no results :exc:`~spotify.Timeout` is
        raised. If ``timeout`` is :class:`None` the default timeout is used.

        The method returns ``self`` to allow for chaining of calls.
        """
        return utils.load(self._session, self, timeout=timeout)

    @serialized
    def portrait(self, image_size=None, callback=None):
        """The artist's portrait :class:`Image`.

        ``image_size`` is an :class:`ImageSize` value, by default
        :attr:`ImageSize.NORMAL`.

        If ``callback`` isn't :class:`None`, it is expected to be a callable
        that accepts a single argument, an :class:`Image` instance, when
        the image is done loading.

        Will always return :class:`None` if the artist isn't loaded or the
        artist has no portrait.
        """
        if image_size is None:
            image_size = spotify.ImageSize.NORMAL
        portrait_id = lib.sp_artist_portrait(self._sp_artist, int(image_size))
        if portrait_id == ffi.NULL:
            return None
        sp_image = lib.sp_image_create(self._session._sp_session, portrait_id)
        # add_ref=False: sp_image_create already returned an owned reference.
        return spotify.Image(
            self._session, sp_image=sp_image, add_ref=False, callback=callback
        )

    def portrait_link(self, image_size=None):
        """A :class:`Link` to the artist's portrait.

        ``image_size`` is an :class:`ImageSize` value, by default
        :attr:`ImageSize.NORMAL`.

        This is equivalent with ``artist.portrait(image_size).link``, except
        that this method does not need to create the artist portrait image
        object to create a link to it.
        """
        if image_size is None:
            image_size = spotify.ImageSize.NORMAL
        sp_link = lib.sp_link_create_from_artist_portrait(
            self._sp_artist, int(image_size)
        )
        return spotify.Link(self._session, sp_link=sp_link, add_ref=False)

    @property
    def link(self):
        """A :class:`Link` to the artist."""
        sp_link = lib.sp_link_create_from_artist(self._sp_artist)
        return spotify.Link(self._session, sp_link=sp_link, add_ref=False)

    def browse(self, type=None, callback=None):
        """Get an :class:`ArtistBrowser` for the artist.

        If ``type`` is :class:`None`, it defaults to
        :attr:`ArtistBrowserType.FULL`.

        If ``callback`` isn't :class:`None`, it is expected to be a callable
        that accepts a single argument, an :class:`ArtistBrowser` instance,
        when the browser is done loading.

        Can be created without the artist being loaded.
        """
        return spotify.ArtistBrowser(
            self._session, artist=self, type=type, callback=callback
        )
class ArtistBrowser(object):
    """An artist browser for a Spotify artist.

    You can get an artist browser from any :class:`Artist` instance by calling
    :meth:`Artist.browse`::

        >>> session = spotify.Session()
        # ...
        >>> artist = session.get_artist(
        ...     'spotify:artist:421vyBBkhgRAOz4cYPvrZJ')
        >>> browser = artist.browse()
        >>> browser.load()
        >>> len(browser.albums)
        7
    """

    def __init__(
        self,
        session,
        artist=None,
        type=None,
        callback=None,
        sp_artistbrowse=None,
        add_ref=True,
    ):
        assert (
            artist or sp_artistbrowse
        ), 'artist or sp_artistbrowse is required'

        self._session = session
        self.loaded_event = threading.Event()

        if sp_artistbrowse is None:
            if type is None:
                type = ArtistBrowserType.FULL
            # Keep the handle alive until the completion callback fires.
            handle = ffi.new_handle((self._session, self, callback))
            self._session._callback_handles.add(handle)

            sp_artistbrowse = lib.sp_artistbrowse_create(
                self._session._sp_session,
                artist._sp_artist,
                int(type),
                _artistbrowse_complete_callback,
                handle,
            )
            add_ref = False

        if add_ref:
            lib.sp_artistbrowse_add_ref(sp_artistbrowse)
        self._sp_artistbrowse = ffi.gc(
            sp_artistbrowse, lib.sp_artistbrowse_release
        )

    loaded_event = None
    """:class:`threading.Event` that is set when the artist browser is loaded.
    """

    def __repr__(self):
        if self.is_loaded:
            return 'ArtistBrowser(%r)' % self.artist.link.uri
        else:
            return 'ArtistBrowser(<not loaded>)'

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self._sp_artistbrowse == other._sp_artistbrowse
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self._sp_artistbrowse)

    @property
    def is_loaded(self):
        """Whether the artist browser's data is loaded."""
        return bool(lib.sp_artistbrowse_is_loaded(self._sp_artistbrowse))

    def load(self, timeout=None):
        """Block until the artist browser's data is loaded.

        After ``timeout`` seconds with no results :exc:`~spotify.Timeout` is
        raised. If ``timeout`` is :class:`None` the default timeout is used.

        The method returns ``self`` to allow for chaining of calls.
        """
        return utils.load(self._session, self, timeout=timeout)

    @property
    def error(self):
        """An :class:`ErrorType` associated with the artist browser.

        Check to see if there was problems creating the artist browser.
        """
        return spotify.ErrorType(
            lib.sp_artistbrowse_error(self._sp_artistbrowse)
        )

    @property
    def backend_request_duration(self):
        """The time in ms that was spent waiting for the Spotify backend to
        create the artist browser.

        Returns ``-1`` if the request was served from local cache. Returns
        :class:`None` if the artist browser isn't loaded yet.
        """
        if not self.is_loaded:
            return None
        return lib.sp_artistbrowse_backend_request_duration(
            self._sp_artistbrowse
        )

    @property
    @serialized
    def artist(self):
        """Get the :class:`Artist` the browser is for.

        Will always return :class:`None` if the artist browser isn't loaded.
        """
        sp_artist = lib.sp_artistbrowse_artist(self._sp_artistbrowse)
        if sp_artist == ffi.NULL:
            return None
        return Artist(self._session, sp_artist=sp_artist, add_ref=True)

    @serialized
    def portraits(self, callback=None):
        """The artist's portraits.

        Due to limitations in libspotify's API you can't specify the
        :class:`ImageSize` of these images.

        If ``callback`` isn't :class:`None`, it is expected to be a callable
        that accepts a single argument, an :class:`Image` instance, when
        the image is done loading. The callable will be called once for each
        portrait.

        Will always return an empty list if the artist browser isn't loaded.
        """
        if not self.is_loaded:
            return []

        @serialized
        def get_image(sp_artistbrowse, key):
            image_id = lib.sp_artistbrowse_portrait(sp_artistbrowse, key)
            # BUG FIX: sp_image_create() takes the session pointer as its
            # first argument (cf. Artist.portrait above); it was previously
            # called with only the image id.
            sp_image = lib.sp_image_create(
                self._session._sp_session, image_id
            )
            return spotify.Image(
                self._session,
                sp_image=sp_image,
                add_ref=False,
                callback=callback,
            )

        return utils.Sequence(
            sp_obj=self._sp_artistbrowse,
            add_ref_func=lib.sp_artistbrowse_add_ref,
            release_func=lib.sp_artistbrowse_release,
            len_func=lib.sp_artistbrowse_num_portraits,
            getitem_func=get_image,
        )

    @property
    @serialized
    def tracks(self):
        """The artist's tracks.

        Will be an empty list if the browser was created with a ``type`` of
        :attr:`ArtistBrowserType.NO_TRACKS` or
        :attr:`ArtistBrowserType.NO_ALBUMS`.

        Will always return an empty list if the artist browser isn't loaded.
        """
        if not self.is_loaded:
            return []

        @serialized
        def get_track(sp_artistbrowse, key):
            return spotify.Track(
                self._session,
                sp_track=lib.sp_artistbrowse_track(sp_artistbrowse, key),
                add_ref=True,
            )

        return utils.Sequence(
            sp_obj=self._sp_artistbrowse,
            add_ref_func=lib.sp_artistbrowse_add_ref,
            release_func=lib.sp_artistbrowse_release,
            len_func=lib.sp_artistbrowse_num_tracks,
            getitem_func=get_track,
        )

    @property
    @serialized
    def tophit_tracks(self):
        """The artist's top hit tracks.

        Will always return an empty list if the artist browser isn't loaded.
        """
        if not self.is_loaded:
            return []

        @serialized
        def get_track(sp_artistbrowse, key):
            return spotify.Track(
                self._session,
                sp_track=lib.sp_artistbrowse_tophit_track(sp_artistbrowse, key),
                add_ref=True,
            )

        return utils.Sequence(
            sp_obj=self._sp_artistbrowse,
            add_ref_func=lib.sp_artistbrowse_add_ref,
            release_func=lib.sp_artistbrowse_release,
            len_func=lib.sp_artistbrowse_num_tophit_tracks,
            getitem_func=get_track,
        )

    @property
    @serialized
    def albums(self):
        """The artist's albums.

        Will be an empty list if the browser was created with a ``type`` of
        :attr:`ArtistBrowserType.NO_ALBUMS`.

        Will always return an empty list if the artist browser isn't loaded.
        """
        if not self.is_loaded:
            return []

        @serialized
        def get_album(sp_artistbrowse, key):
            return spotify.Album(
                self._session,
                sp_album=lib.sp_artistbrowse_album(sp_artistbrowse, key),
                add_ref=True,
            )

        return utils.Sequence(
            sp_obj=self._sp_artistbrowse,
            add_ref_func=lib.sp_artistbrowse_add_ref,
            release_func=lib.sp_artistbrowse_release,
            len_func=lib.sp_artistbrowse_num_albums,
            getitem_func=get_album,
        )

    @property
    @serialized
    def similar_artists(self):
        """The artist's similar artists.

        Will always return an empty list if the artist browser isn't loaded.
        """
        if not self.is_loaded:
            return []

        @serialized
        def get_artist(sp_artistbrowse, key):
            return spotify.Artist(
                self._session,
                sp_artist=lib.sp_artistbrowse_similar_artist(
                    sp_artistbrowse, key
                ),
                add_ref=True,
            )

        return utils.Sequence(
            sp_obj=self._sp_artistbrowse,
            add_ref_func=lib.sp_artistbrowse_add_ref,
            release_func=lib.sp_artistbrowse_release,
            len_func=lib.sp_artistbrowse_num_similar_artists,
            getitem_func=get_artist,
        )

    @property
    @serialized
    def biography(self):
        """A biography of the artist.

        Will always return an empty string if the artist browser isn't loaded.
        """
        return utils.to_unicode(
            lib.sp_artistbrowse_biography(self._sp_artistbrowse)
        )
@ffi.callback('void(sp_artistbrowse *, void *)')
@serialized
def _artistbrowse_complete_callback(sp_artistbrowse, handle):
    """C-level callback invoked by libspotify when artist browsing finishes.

    ``handle`` is the ffi handle created in :meth:`ArtistBrowser.__init__`,
    wrapping ``(session, artist_browser, callback)``.
    """
    logger.debug('artistbrowse_complete_callback called')
    if handle == ffi.NULL:
        logger.warning(
            'pyspotify artistbrowse_complete_callback called without userdata'
        )
        return
    (session, artist_browser, callback) = ffi.from_handle(handle)
    # The handle kept the tuple alive while libspotify worked; release it.
    session._callback_handles.remove(handle)
    artist_browser.loaded_event.set()
    if callback is not None:
        callback(artist_browser)
@utils.make_enum('SP_ARTISTBROWSE_')
class ArtistBrowserType(utils.IntEnum):
    """Browser type enum; members are generated from libspotify's
    ``SP_ARTISTBROWSE_*`` constants by :func:`utils.make_enum`."""
    pass
| {
"content_hash": "53c7cf58f9648dd7015c7ad8547d6c64",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 80,
"avg_line_length": 31.14161220043573,
"alnum_prop": 0.5831817545823422,
"repo_name": "mopidy/pyspotify",
"id": "867812ae9266ae06917136214cf2dddbd9fcd7b8",
"size": "14294",
"binary": false,
"copies": "1",
"ref": "refs/heads/v2.x/master",
"path": "spotify/artist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "171987"
},
{
"name": "Python",
"bytes": "642108"
}
],
"symlink_target": ""
} |
"""Hardware interfaces for sound output"""
# Authors: Dan McCloy <drmccloy@uw.edu>
# Eric Larson <larsoner@uw.edu>
#
# License: BSD (3-clause)
import sys
import os
import warnings
import numpy as np
import pyglet
_use_silent = (os.getenv('_EXPYFUN_SILENT', '') == 'true')
_opts_dict = dict(linux2=('pulse',),
win32=('directsound',),
darwin=('openal',))
_opts_dict['linux'] = _opts_dict['linux2'] # new name on Py3k
_driver = _opts_dict[sys.platform] if not _use_silent else ('silent',)
pyglet.options['audio'] = _driver
# We might also want this at some point if we hit OSX problems:
# pyglet.options['shadow_window'] = False
# these must follow the above option setting, so PEP8 complains
try:
from pyglet.media import Player, AudioFormat, SourceGroup # noqa
try:
from pyglet.media import StaticMemorySource
except ImportError:
from pyglet.media.sources.base import StaticMemorySource # noqa
except Exception as exp:
warnings.warn('Pyglet could not be imported:\n%s' % exp)
Player = AudioFormat = SourceGroup = StaticMemorySource = object
from ._utils import logger, flush_logger # noqa
def _check_pyglet_audio():
    """Raise SystemError if the configured pyglet audio driver failed to load."""
    if pyglet.media.get_audio_driver() is not None:
        return
    raise SystemError('pyglet audio ("%s") could not be initialized'
                      % pyglet.options['audio'][0])
class SoundPlayer(Player):
    """Pyglet media player wrapping a static in-memory sound buffer."""

    def __init__(self, data, fs, loop=False):
        assert AudioFormat is not None
        super(SoundPlayer, self).__init__()
        _check_pyglet_audio()
        source = _as_static(data, fs)
        sources = SourceGroup(source.audio_format, None)
        sources.loop = bool(loop)
        sources.queue(source)
        self.queue(sources)
        self._ec_duration = source._duration

    def stop(self):
        """Pause playback and rewind to the start."""
        self.pause()
        self.seek(0.)

    @property
    def playing(self):
        """Pyglet has this property, but it doesn't notice when it's finished
        on its own, so also treat reaching the source duration as stopped."""
        if not super(SoundPlayer, self).playing:
            return False
        return not np.isclose(self.time, self._ec_duration)
class PygletSoundController(object):
    """Use pyglet audio capabilities"""

    def __init__(self, ec, stim_fs):
        logger.info('Expyfun: Setting up Pyglet audio')
        assert AudioFormat is not None
        self.fs = stim_fs

        # Need to generate at RMS=1 to match TDT circuit
        noise = np.random.normal(0, 1.0, int(self.fs * 15.))  # 15 secs

        # Low-pass if necessary
        if stim_fs < self.fs:
            # note we can use cheap DFT method here b/c
            # circular convolution won't matter for AWGN (yay!)
            freqs = np.fft.rfftfreq(len(noise), 1. / self.fs)
            noise = np.fft.rfft(noise)
            noise[np.abs(freqs) > stim_fs / 2.] = 0.0
            noise = np.fft.irfft(noise)

        # ensure true RMS of 1.0 (DFT method also lowers RMS, compensate here)
        noise = noise / np.sqrt(np.mean(noise * noise))
        # Two channels: identical noise with inverted polarity on the right.
        self.noise_array = np.array((noise, -1.0 * noise))
        self.noise = SoundPlayer(self.noise_array, self.fs, loop=True)
        self._noise_playing = False
        self.audio = SoundPlayer(np.zeros((2, 1)), self.fs)
        self.ec = ec
        flush_logger()

    def start_noise(self):
        """Start the looping masker noise (no-op if already playing)."""
        if not self._noise_playing:
            self.noise.play()
            self._noise_playing = True

    def stop_noise(self):
        """Stop the masker noise (no-op if not playing)."""
        if self._noise_playing:
            self.noise.stop()
            self._noise_playing = False

    def load_buffer(self, samples):
        """Replace the stimulus player with one holding ``samples``
        (array transposed to (channels, samples) for SoundPlayer)."""
        self.audio.delete()
        self.audio = SoundPlayer(samples.T, self.fs)

    @property
    def playing(self):
        # Whether the stimulus (not the noise) is currently playing.
        return self.audio.playing

    def play(self):
        """Start stimulus playback and stamp a TTL trigger."""
        self.audio.play()
        self.ec._stamp_ttl_triggers([1])

    def stop(self):
        """Stop stimulus playback."""
        self.audio.stop()

    def set_noise_level(self, level):
        """Scale the noise by ``level``, swapping players seamlessly if the
        noise is currently playing."""
        new_noise = SoundPlayer(self.noise_array * level, self.fs, loop=True)
        if self._noise_playing:
            self.stop_noise()
            self.noise.delete()
            self.noise = new_noise
            self.start_noise()
        else:
            self.noise = new_noise

    def halt(self):
        """Stop all playback and release pyglet resources."""
        self.stop()
        self.stop_noise()
        # cleanup pyglet instances
        self.audio.delete()
        self.noise.delete()
def _as_static(data, fs):
    """Helper to get data into the Pyglet audio format.

    Parameters
    ----------
    data : ndarray
        Samples in [-1, 1], shaped (n_samples,) or (n_channels, n_samples).
    fs : int | float
        Sampling frequency (Hz); coerced to int.

    Returns
    -------
    StaticMemorySourceFixed
        16-bit PCM static source suitable for queueing on a Pyglet player.
    """
    fs = int(fs)
    if data.ndim not in (1, 2):
        raise ValueError('Data must have one or two dimensions')
    n_ch = data.shape[0] if data.ndim == 2 else 1
    audio_format = AudioFormat(channels=n_ch, sample_size=16,
                               sample_rate=fs)
    # Interleave channels.  Use np.clip (which returns a new array) rather
    # than in-place masked assignment: for 1D input, .T.ravel('C') can
    # alias the caller's array, and the old code silently clipped it.
    samples = np.clip(data.T.ravel('C'), -1, 1)
    # Clip after scaling as well so a full-scale +1.0 sample becomes 32767
    # instead of wrapping around to -32768 on the int16 cast.
    scaled = np.clip(samples * (2. ** 15), -2 ** 15, 2 ** 15 - 1)
    # tobytes() replaces the numpy-deprecated tostring()
    pcm = scaled.astype('int16').tobytes()
    return StaticMemorySourceFixed(pcm, audio_format)
class StaticMemorySourceFixed(StaticMemorySource):
    """Stupid class to fix old Pyglet bug"""

    def _get_queue_source(self):
        # Return self so this source can be queued directly; presumably a
        # workaround for an old Pyglet StaticMemorySource bug — confirm
        # against the Pyglet version pinned by the project.
        return self
| {
"content_hash": "27e10d4b00f321adc9689f16d8f42cca",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 78,
"avg_line_length": 31.425,
"alnum_prop": 0.6004375497215593,
"repo_name": "rkmaddox/expyfun",
"id": "0558e7ec3d74394335b43208c587c86c4a4a0f21",
"size": "5028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expyfun/_sound_controllers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1006"
},
{
"name": "Python",
"bytes": "462659"
}
],
"symlink_target": ""
} |
"""Test cases for /api/robot, mostly focusing on csv output"""
from unittest.mock import patch
import dateutil.parser
from django.utils import timezone
from guardian.shortcuts import assign_perm
from rest_framework.test import APIClient, APITestCase
from metaci.api.views.robot import RobotTestResultViewSet
from metaci.conftest import (
BranchFactory,
RepositoryFactory,
StaffSuperuserFactory,
TestResultFactory,
UserFactory,
)
from metaci.testresults.models import TestResult
class TestAPIRobot(APITestCase):
    """Tests for /api/robot output (JSON and CSV) and its query filters.

    NOTE: the CSV expectations hardcode result ids (2..7), which depend on
    the creation order in setUpClass — keep the fixture order stable.
    """

    @classmethod
    def setUpClass(cls):
        # Build one fixed dataset for the whole class: a single Apex result
        # (must never appear in /api/robot output) plus six Robot results
        # spread across two repos, two branches, and both outcomes.
        super().setUpClass()
        cls.superuser = StaffSuperuserFactory()
        cls.user = UserFactory()
        cls.client = APIClient()
        repo1 = RepositoryFactory(name="repo1")
        repo2 = RepositoryFactory(name="repo2")
        master = BranchFactory(name="master")
        feature = BranchFactory(name="feature/robot")
        # The default for queries is today's date, so we need to use that
        # when creating results
        time_end = timezone.make_aware(
            dateutil.parser.parse("01:00:00"), timezone.get_current_timezone()
        )
        cls.today = time_end.strftime("%Y-%m-%d %H:%M:%S")
        # One apex test, just to make sure it doesn't appear in any test results
        TestResultFactory(
            build_flow__time_end=time_end,
            method__testclass__test_type="Apex",
            outcome="Pass",
            duration=0.1,
        )
        # ... and several robot tests, some passing, some failing
        # oof. This is one place where I think black made the code much
        # less readable than my hand-edited version.
        for (
            repo,
            source,
            outcome,
            branch,
            test_name,
            tags,
            robot_keyword,
            message,
        ) in (
            (repo1, "file1.robot", "Pass", master, "Passing 1", None, None, None),
            (repo1, "file1.robot", "Pass", master, "Passing 2", None, None, None),
            (
                repo2,
                "file2.robot",
                "Fail",
                feature,
                "Failing 1",
                "",
                "KW1",
                "epic fail",
            ),
            (
                repo2,
                "file2.robot",
                "Fail",
                feature,
                "Failing 2",
                "",
                "KW1",
                "epic fail",
            ),
            (
                repo2,
                "file3.robot",
                "Fail",
                feature,
                "Failing 3",
                "",
                "KW2",
                "epic fail",
            ),
            (
                repo2,
                "file3.robot",
                "Fail",
                feature,
                "Failing 4",
                "t1,t2",
                "KW3",
                "ʃıɐɟ ɔıdǝ",
            ),
        ):
            TestResultFactory(
                method__testclass__test_type="Robot",
                build_flow__build__repo=repo,
                build_flow__build__branch=branch,
                build_flow__time_end=time_end,
                method__name=test_name,
                outcome=outcome,
                source_file=source,
                robot_keyword=robot_keyword,
                duration=0.1,
                robot_tags=tags,
                message=message,
            )

    def test_superuser_access(self):
        """Make sure the superuser can access the API"""
        self.client.force_authenticate(self.superuser)
        response = self.client.get("/api/robot/")
        assert response.status_code == 200

    def test_unauthenticated_user_access(self):
        """Make sure an unauthenticated user cannot access the API"""
        self.client.logout()
        response = self.client.get("/api/robot.json/")
        assert response.status_code == 401

    def test_authenticated_user_access(self):
        """Make sure an authenticated user can access the API"""
        self.client.force_authenticate(self.user)
        response = self.client.get("/api/robot.json/")
        assert response.status_code == 200

    def test_result_returns_only_robot_tests(self):
        """Verify the query doesn't include Apex test results"""
        self.client.force_authenticate(self.superuser)
        response = self.client.get("/api/robot.json")
        data = response.json()
        # we should get 6 robot results, ignoring the one Apex result
        assert (
            TestResult.objects.filter(method__testclass__test_type="Apex").count() > 0
        )
        assert data["count"] == 6

    def test_result_csv_format(self):
        """Verify we can get back csv results"""
        self.client.force_authenticate(self.superuser)
        response = self.client.get("/api/robot.csv")
        expected = [
            "id,outcome,date,duration,repo_name,branch_name,source_file,test_name,robot_tags,robot_keyword,message",
            f"2,Pass,{self.today},0.1,repo1,master,file1.robot,Passing 1,,,",
            f"3,Pass,{self.today},0.1,repo1,master,file1.robot,Passing 2,,,",
            f"4,Fail,{self.today},0.1,repo2,feature/robot,file2.robot,Failing 1,,KW1,epic fail",
            f"5,Fail,{self.today},0.1,repo2,feature/robot,file2.robot,Failing 2,,KW1,epic fail",
            f"6,Fail,{self.today},0.1,repo2,feature/robot,file3.robot,Failing 3,,KW2,epic fail",
            f'7,Fail,{self.today},0.1,repo2,feature/robot,file3.robot,Failing 4,"t1,t2",KW3,ʃıɐɟ ɔıdǝ',
        ]
        actual = response.content.decode().splitlines()
        self.assertCountEqual(expected, actual)

    def test_repo_filter(self):
        """Verify ?repo_name= limits results to that repository"""
        self.client.force_authenticate(self.superuser)
        response = self.client.get("/api/robot.csv?repo_name=repo2")
        expected = [
            "id,outcome,date,duration,repo_name,branch_name,source_file,test_name,robot_tags,robot_keyword,message",
            f"4,Fail,{self.today},0.1,repo2,feature/robot,file2.robot,Failing 1,,KW1,epic fail",
            f"5,Fail,{self.today},0.1,repo2,feature/robot,file2.robot,Failing 2,,KW1,epic fail",
            f"6,Fail,{self.today},0.1,repo2,feature/robot,file3.robot,Failing 3,,KW2,epic fail",
            f'7,Fail,{self.today},0.1,repo2,feature/robot,file3.robot,Failing 4,"t1,t2",KW3,ʃıɐɟ ɔıdǝ',
        ]
        actual = response.content.decode().splitlines()
        self.assertCountEqual(expected, actual)

    def test_branch_filter(self):
        """Verify ?branch_name= limits results to that branch"""
        self.client.force_authenticate(self.superuser)
        response = self.client.get("/api/robot.csv?branch_name=master")
        expected = [
            "id,outcome,date,duration,repo_name,branch_name,source_file,test_name,robot_tags,robot_keyword,message",
            f"2,Pass,{self.today},0.1,repo1,master,file1.robot,Passing 1,,,",
            f"3,Pass,{self.today},0.1,repo1,master,file1.robot,Passing 2,,,",
        ]
        actual = response.content.decode().splitlines()
        self.assertCountEqual(expected, actual)

    def test_outcome_filter(self):
        """Verify ?outcome= limits results to Pass or Fail"""
        self.client.force_authenticate(self.superuser)
        response = self.client.get("/api/robot.csv?outcome=Fail")
        expected = [
            "id,outcome,date,duration,repo_name,branch_name,source_file,test_name,robot_tags,robot_keyword,message",
            f"4,Fail,{self.today},0.1,repo2,feature/robot,file2.robot,Failing 1,,KW1,epic fail",
            f"5,Fail,{self.today},0.1,repo2,feature/robot,file2.robot,Failing 2,,KW1,epic fail",
            f"6,Fail,{self.today},0.1,repo2,feature/robot,file3.robot,Failing 3,,KW2,epic fail",
            f'7,Fail,{self.today},0.1,repo2,feature/robot,file3.robot,Failing 4,"t1,t2",KW3,ʃıɐɟ ɔıdǝ',
        ]
        actual = response.content.decode().splitlines()
        self.assertCountEqual(expected, actual)

    def test_test_name_filter(self):
        """Verify ?test_name= returns only the matching test"""
        self.client.force_authenticate(self.superuser)
        response = self.client.get("/api/robot.csv?test_name=Failing 2")
        expected = [
            "id,outcome,date,duration,repo_name,branch_name,source_file,test_name,robot_tags,robot_keyword,message",
            f"5,Fail,{self.today},0.1,repo2,feature/robot,file2.robot,Failing 2,,KW1,epic fail",
        ]
        actual = response.content.decode().splitlines()
        self.assertCountEqual(expected, actual)

    def test_source_file_filter(self):
        """Verify ?source_file= returns only results from that file"""
        self.client.force_authenticate(self.superuser)
        response = self.client.get("/api/robot.csv?source_file=file3.robot")
        expected = [
            "id,outcome,date,duration,repo_name,branch_name,source_file,test_name,robot_tags,robot_keyword,message",
            f"6,Fail,{self.today},0.1,repo2,feature/robot,file3.robot,Failing 3,,KW2,epic fail",
            f'7,Fail,{self.today},0.1,repo2,feature/robot,file3.robot,Failing 4,"t1,t2",KW3,ʃıɐɟ ɔıdǝ',
        ]
        actual = response.content.decode().splitlines()
        self.assertCountEqual(expected, actual)
class TestAPIRobotDateHandling(APITestCase):
    """Tests for the default date and the from/to query parameters."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.superuser = StaffSuperuserFactory()
        cls.user = UserFactory()
        cls.client = APIClient()
        repo = RepositoryFactory(name="repo1")
        master = BranchFactory(name="master")
        today = timezone.make_aware(
            dateutil.parser.parse("01:00:00"), timezone.get_current_timezone()
        )
        cls.today = today.strftime("%Y-%m-%d %H:%M:%S")
        # create some data that spans several days, plus one for today
        tz = timezone.get_current_timezone()
        # NOTE(review): cls.today already includes " 01:00:00", so the
        # f-string below yields e.g. "2022-01-05 01:00:00 01:00:00" for the
        # first entry; dateutil appears to tolerate this — confirm.
        for date in (
            cls.today,
            "2020-Jan-01",
            "2020-Jan-02",
            "2020-Jan-02",
            "2020-Jan-03",
            "2020-Jan-03",
            "2020-Jan-03",
        ):
            time_end = timezone.make_aware(
                dateutil.parser.parse(f"{date} 01:00:00"), tz
            )
            TestResultFactory(
                method__testclass__test_type="Robot",
                build_flow__build__repo=repo,
                build_flow__build__branch=master,
                build_flow__time_end=time_end,
                method__name="Test 1",
                outcome="Pass",
                source_file="/tmp/example.robot",
                robot_keyword="Some keyword",
                robot_tags="",
                duration=0.1,
                message="",
            )

    def test_date_defaults_to_today(self):
        """Verify that by default we only return tests from today"""
        self.client.force_authenticate(self.superuser)
        response = self.client.get("/api/robot.csv")
        actual = response.content.decode().splitlines()
        expected = [
            "id,outcome,date,duration,repo_name,branch_name,source_file,test_name,robot_tags,robot_keyword,message",
            f"8,Pass,{self.today},0.1,repo1,master,/tmp/example.robot,Test 1,,Some keyword,",
        ]
        self.assertCountEqual(expected, actual)

    def test_date_from_without_to(self):
        """Verify leaving off the "to" parameter defaults to the start date"""
        self.client.force_authenticate(self.superuser)
        response = self.client.get("/api/robot.csv?from=2020-01-02")
        actual = response.content.decode().splitlines()
        expected = [
            "id,outcome,date,duration,repo_name,branch_name,source_file,test_name,robot_tags,robot_keyword,message",
            "10,Pass,2020-01-02 01:00:00,0.1,repo1,master,/tmp/example.robot,Test 1,,Some keyword,",
            "11,Pass,2020-01-02 01:00:00,0.1,repo1,master,/tmp/example.robot,Test 1,,Some keyword,",
        ]
        self.assertCountEqual(expected, actual)

    def test_date_from_to(self):
        """Verify that results are returned between two dates"""
        self.client.force_authenticate(self.superuser)
        response = self.client.get("/api/robot.csv?from=2020-01-02&to=2020-01-03")
        actual = response.content.decode().splitlines()
        expected = [
            "id,outcome,date,duration,repo_name,branch_name,source_file,test_name,robot_tags,robot_keyword,message",
            "10,Pass,2020-01-02 01:00:00,0.1,repo1,master,/tmp/example.robot,Test 1,,Some keyword,",
            "11,Pass,2020-01-02 01:00:00,0.1,repo1,master,/tmp/example.robot,Test 1,,Some keyword,",
            "12,Pass,2020-01-03 01:00:00,0.1,repo1,master,/tmp/example.robot,Test 1,,Some keyword,",
            "13,Pass,2020-01-03 01:00:00,0.1,repo1,master,/tmp/example.robot,Test 1,,Some keyword,",
            "14,Pass,2020-01-03 01:00:00,0.1,repo1,master,/tmp/example.robot,Test 1,,Some keyword,",
        ]
        self.assertCountEqual(expected, actual)
class TestAPIRobotTimePeriods(APITestCase):
    """Verify the named date-range computations are correct."""

    def test_range(self):
        # Expected (start, end) for each named range, given the mocked today
        expected_ranges = {
            "today": ("2020-01-01", "2020-01-02"),
            "yesterday": ("2019-12-31", "2020-01-01"),
            "thisweek": ("2019-12-30", "2020-01-02"),
            "lastweek": ("2019-12-23", "2019-12-30"),
            "thismonth": ("2020-01-01", "2020-02-01"),
            "lastmonth": ("2019-12-01", "2020-01-01"),
        }
        problems = []
        with patch(
            "metaci.api.views.robot.RobotTestResultViewSet._get_today"
        ) as mock_get_today:
            # Note: Monday of the week with this date is Dec 30,
            # chosen to handle the case of last week, last month cross
            # month and year boundaries
            mock_get_today.return_value = dateutil.parser.parse("2020-01-01").date()
            viewset = RobotTestResultViewSet()
            for range_name, (start_str, end_str) in expected_ranges.items():
                expected_start = dateutil.parser.parse(start_str).date()
                expected_end = dateutil.parser.parse(end_str).date()
                actual_start, actual_end = viewset._get_date_range(range_name)
                if expected_start != actual_start:
                    problems.append(
                        f"{range_name}: start expected {expected_start} actual {actual_start}"
                    )
                if expected_end != actual_end:
                    problems.append(
                        f"{range_name}: end expected {expected_end} actual {actual_end}"
                    )
        assert not problems, "date range exceptions\n" + "\n".join(problems)
class TestAPIRobotFilterByUser(APITestCase):
    """Verify per-user object permissions restrict which results are visible."""

    @classmethod
    def setUpClass(cls):
        # Three robot results; the plain user is granted view access to the
        # planrepos of only the first two via guardian object permissions.
        super().setUpClass()
        cls.superuser = StaffSuperuserFactory()
        cls.user = UserFactory()
        cls.client = APIClient()
        TestResultFactory(method__testclass__test_type="Robot")
        TestResultFactory(method__testclass__test_type="Robot")
        TestResultFactory(method__testclass__test_type="Robot")
        testresults = TestResult.objects.all()
        assign_perm(
            "plan.view_builds", cls.user, testresults[0].build_flow.build.planrepo
        )
        assign_perm(
            "plan.view_builds", cls.user, testresults[1].build_flow.build.planrepo
        )

    def test_testresult_filter__as_user(self):
        """Verify user only sees the results they are allowed to see"""
        self.client.force_authenticate(self.user)
        response = self.client.get("/api/robot.json")
        data = response.json()
        assert data["count"] == 2

    def test_testresult_filter__as_superuser(self):
        """Verify superuser sees all results"""
        self.client.force_authenticate(self.superuser)
        response = self.client.get("/api/robot.json")
        data = response.json()
        assert data["count"] == 3
| {
"content_hash": "e24fb60d51d7331d90da514c4ca68f1c",
"timestamp": "",
"source": "github",
"line_count": 380,
"max_line_length": 116,
"avg_line_length": 41.4,
"alnum_prop": 0.5814263920671243,
"repo_name": "SalesforceFoundation/mrbelvedereci",
"id": "7ca3e98aa7b3a5e05816fd6f860813d478bac405",
"size": "15767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metaci/api/tests/test_api_robot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2069"
},
{
"name": "HTML",
"bytes": "123214"
},
{
"name": "JavaScript",
"bytes": "3993"
},
{
"name": "Python",
"bytes": "245560"
},
{
"name": "Shell",
"bytes": "4590"
}
],
"symlink_target": ""
} |
"""Contains the Service and ServiceImage models."""
from django.contrib.gis.db import models as geo_models
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from autoslug import AutoSlugField
from django_markdown.models import MarkdownField
from categories_i18n.models import Category
class Service(geo_models.Model):
    """The service model.

    A geolocated service with categories, a slug derived from its name,
    and markdown additional information.
    """

    categories = models.ManyToManyField(Category)
    # Slug regenerated from the name on every save (always_update=True)
    slug = AutoSlugField(
        populate_from='name', max_length=30, unique=True, editable=False,
        always_update=True)
    name = models.CharField(
        _("Service Name"), max_length=100,
        help_text=_("This name will be used to identify your service."))
    additional_info = MarkdownField(blank=True)
    # Geo Django field to store a point
    location = geo_models.PointField(
        help_text=_("Represented as (longitude, latitude)"),
        default="POINT(0.0 0.0)")
    # You MUST use GeoManager to make Geo Queries
    objects = geo_models.GeoManager()

    def __unicode__(self):
        """Return name representation for service (Python 2)."""
        return self.name

    def __str__(self):
        """Return name representation for service.

        Under Python 3 ``__unicode__`` is ignored, so without this the
        admin and shell would show the default object repr.
        """
        return self.name

    def get_absolute_url(self):
        """Get profile url for link in admin."""
        return reverse('services:profile', kwargs={
            'slug': self.slug
        })

    @property
    def popupContent(self):
        """HTML snippet used for map popups.

        NOTE(review): ``name`` and ``additional_info`` are interpolated
        without HTML escaping; confirm these values are trusted or escape
        them to avoid stored XSS in the popup.
        """
        return '<h2>{}</h2> <p>{}</p>'.format(self.name, self.additional_info)
class ServiceImage(models.Model):
    """Images on the service profile."""

    # Owning service; reverse accessor is ``service.images``
    service = models.ForeignKey(Service, related_name='images')
    image = models.ImageField()
    # Position in the gallery; lower values sort first (see Meta.ordering)
    order = models.PositiveIntegerField(default=1)

    class Meta:
        ordering = ['order']
| {
"content_hash": "b14b7b0fc926415c97f0c61a956f5a23",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 76,
"avg_line_length": 30.526315789473685,
"alnum_prop": 0.6735632183908046,
"repo_name": "empowerhack/HealthMate",
"id": "a99d1ffe95ee58a7b7d6293814da8b5dfbbcaf33",
"size": "1740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "healthmate/services/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "76053"
},
{
"name": "HTML",
"bytes": "13496"
},
{
"name": "JavaScript",
"bytes": "182022"
},
{
"name": "Nginx",
"bytes": "1511"
},
{
"name": "Python",
"bytes": "15921"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
} |
__version__ = '$Id$'
import cgitb; cgitb.enable()
import os, cgi, urllib
#
#(C) 2009 Legoktm, MIT License
#
import monobook
class Page:
    """Legacy Python 2 CGI page renderer producing a MediaWiki-monobook
    styled HTML shell (header, tabbed content area, navbar footer).

    NOTE(review): this module is Python 2 only (``dict.has_key``,
    ``urllib.urlencode``); the HTML below is emitted verbatim.
    """

    def __init__(self, name, repmess = 'status'):
        """Parse the CGI query string and capture environment info.

        ``name`` becomes the page title; ``repmess`` is stored unused here
        (presumably consumed elsewhere — confirm).
        """
        try:
            # Query string is everything after '?' in REQUEST_URI
            self.qstring = dict(cgi.parse_qsl(os.environ['REQUEST_URI'].split('?')[1]))
        except IndexError:
            # No '?' in the URI -> no query parameters
            self.qstring = {}
        except KeyError:
            # REQUEST_URI not set (e.g. run outside a CGI context)
            self.qstring = {}
        self.name = name
        try:
            self.location = os.environ['SCRIPT_FILENAME']
        except KeyError:
            self.location = ''
        try:
            self.urllocation = 'http://toolserver.org' + os.environ['SCRIPT_NAME']
        except KeyError:
            self.urllocation = ''
        self.repmess = repmess

    def getValue(self, name):
        """Return the query-string value for ``name``, or '' if absent."""
        try:
            return self.qstring[name]
        # NOTE(review): bare except also hides unexpected errors; the
        # expected miss here is just KeyError
        except:
            return ''

    def top(self):
        """
        Returns the header with all JS and CSS.
        """
        head = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en" dir="ltr">
<head>
<title>"""+self.name+"""</title>
<link rel="stylesheet" href="/~dispenser/resources/monobook.css" type="text/css" title="Monobook" />
<!--link rel="alternate stylesheet" href="../resources/terminal.css" type="text/css" title="Terminal" />
<link rel="alternate stylesheet" href="//wiki.ts.wikimedia.org/w/skins/chick/main.css" type="text/css" title="Chick" />
<link rel="alternate stylesheet" href="//wiki.ts.wikimedia.org/w/skins/simple/main.css" type="text/css" title="Simple" /-->
<link rel="stylesheet" href="/~dispenser/resources/common.css" type="text/css" />
<!--[if lt IE 5.5000]><style type="text/css">@import "http://en.wikipedia.org/skins-1.5/monobook/IE50Fixes.css?116";</style><![endif]-->
<!--[if IE 5.5000]><style type="text/css">@import "http://en.wikipedia.org/skins-1.5/monobook/IE55Fixes.css?116";</style><![endif]-->
<!--[if IE 6]><style type="text/css">@import "http://en.wikipedia.org/skins-1.5/monobook/IE60Fixes.css?116";</style><![endif]-->
<!--[if IE 7]><style type="text/css">@import "http://en.wikipedia.org/skins-1.5/monobook/IE70Fixes.css?116";</style><![endif]-->
<!--[if lt IE 7]><script type="text/javascript" src="http://en.wikipedia.org/skins-1.5/common/IEFixes.js?116"></script>
<meta http-equiv="imagetoolbar" content="no" /><![endif]-->
</head>
<body class="mediawiki ltr">
<div id="globalWrapper">
<div id="column-content">
<div id="content">
<a name="top" id="top"></a>
<h1 class="firstHeading">"""+self.name+"""</h1>
<div id="bodyContent">
<h3 id="siteSub"></h3>
<div id="contentSub"></div>
"""
        return head

    def body(self, content):
        """
        Returns the content surrounded by comments.
        """
        text = '<!-- Start Content -->'
        # NOTE(review): dict.has_key() is Python 2 only
        if self.qstring.has_key('action'):
            if self.qstring['action'] == 'source':
                # NOTE(review): these replace() calls are no-ops as written;
                # presumably they were &lt;/&gt; escapes mangled in transit
                # — confirm against the original file.
                text += """
<div id="viewsourcetext">You can view and copy the source of this page:</div>
<textarea id="wpTextbox1" name="wpTextbox1" cols="80" rows="25" readonly="readonly">
"""+content.replace('<','<').replace('>','>')+"""
</textarea>
"""
            elif self.qstring['action'] == 'view':
                text += content
            else:
                self.qstring['action'] = 'view' #someone added a weird action
                text += content
        else: #default to action == view
            self.qstring['action'] = 'view'
            text += content
        # Render the Page / View source tabs, marking the active one
        text += """
<!-- tabs -->
<div id="p-cactions" class="portlet" style="top:-1.7em;left:0;">
    <div class="pBody">
        <ul>
"""
        if self.qstring['action'] == 'view':
            del self.qstring['action']
            text += """
            <li class="selected"><a href=\""""+self.urllocation+"""\">Page</a></li>
            <li><a href=\""""+self.urllocation+"""?action=source&"""+urllib.urlencode(self.qstring)+"""\">View source</a></li>
"""
        elif self.qstring['action'] == 'source':
            text += """
            <li><a href=\""""+self.urllocation+"""\">Page</a></li>
            <li class="selected"><a href=\""""+self.urllocation+"""?action=source&"""+urllib.urlencode(self.qstring)+"""\">View source</a></li>
"""
        text += """
        </ul>
    </div>
</div>
"""
        text += '<!-- End Content -->\n'
        return text

    def footer(self):
        """
        Returns the footer and navbar.
        """
        text = """
            <div class="visualClear"></div>
        </div>
    </div>
</div>
<div id="column-one">
    <div class="portlet" id="p-logo">
        <a href="../view/Main_Page"></a>
    </div>
    <div class='portlet' id='p-personal'>
        <h5>Interaction</h5>
        <div class='pBody'>
            <ul>
                <li><a href="/~legoktm/cgi-bin/index.py">Main page</a></li>
                <li><a href="http://code.google.com/p/legobot/source/browse#svn/trunk/toolserver">Subversion</a></li>
                <li><a href="http://code.google.com/p/legobot/issues/list">Bug Tracker</a></li>
            </ul>
        </div>
    </div>
    <div class='portlet'>
        <h5>Tools</h5>
        <div class='pBody'>
            <ul>
                <li><a href="/~legoktm/cgi-bin/count.py">Edit Counter</a></li>
                <li><a href="/~legoktm/cgi-bin/reflinks.py">Template filler</a></li>
            </ul>
        </div>
        <h5>Status</h5>
        <div class='pStatus'>
"""+"""
        </div>
    </div>
    <div class="visualClear"></div>
    <div id="footer">
        <a href="/" id="f-poweredbyico"><img src="/images/wikimedia-toolserver-button.png" alt="Powered by the Wikimedia Toolserver" title="About this server" width="88" height="31" /></a>
        <a href="http://validator.w3.org/check?uri=referer" id="f-copyrightico"><img src="http://www.w3.org/Icons/valid-xhtml10" alt="Valid XHTML 1.0 Transitional" height="31" width="88" title="Validation dependent on wiki code" /></a>
        Maintained by
        <a href="http://en.wikipedia.org/wiki/User:Legoktm" class="extiw">Legoktm</a>
        (<a href="http://en.wikipedia.org/wiki/User_talk:Legoktm" class="extiw">Talk</a>).
    </div>
</div></body>
</html>"""
        return text
| {
"content_hash": "5448b07786eacbac9a51592f4da64770",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 227,
"avg_line_length": 36.11834319526627,
"alnum_prop": 0.5791284403669725,
"repo_name": "legoktm/legobot-old",
"id": "cacc39e9b79a9e0580801fb474f3e8d10607df2a",
"size": "6184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toolserver/public_html/cgi-bin/monobook2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "165"
},
{
"name": "Python",
"bytes": "194337"
}
],
"symlink_target": ""
} |
# Minimal Hazelcast Cloud example: connect via cloud discovery, write one
# map entry, read it back, then shut down the client cleanly.
import hazelcast

client = hazelcast.HazelcastClient(
    # Set up cluster name for authentication
    cluster_name="YOUR_CLUSTER_NAME",
    # Set the token of your cloud cluster
    cloud_discovery_token="YOUR_CLUSTER_DISCOVERY_TOKEN",
    # If you have enabled encryption for your cluster, also configure TLS/SSL for the client.
    # Otherwise, skip options below.
    ssl_enabled=True,
    ssl_cafile="/path/to/ca.pem",
    ssl_certfile="/path/to/cert.pem",
    ssl_keyfile="/path/to/key.pem",
    ssl_password="YOUR_KEY_STORE_PASSWORD",
)

# blocking() wraps the async map proxy with a synchronous API
my_map = client.get_map("map-on-the-cloud").blocking()
my_map.put("key", "value")
print(my_map.get("key"))

client.shutdown()
| {
"content_hash": "a1fb46cdef3fa99c4ea082443c912b28",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 93,
"avg_line_length": 30.545454545454547,
"alnum_prop": 0.6964285714285714,
"repo_name": "hazelcast/hazelcast-python-client",
"id": "de7eca04602d2d054043fb59d43ab0e24ade8158",
"size": "672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/cloud-discovery/hazelcast_cloud_discovery_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2300326"
},
{
"name": "Shell",
"bytes": "1900"
}
],
"symlink_target": ""
} |
import redis
from redis import sentinel
from redis import exceptions
import random
class DisconnectingSentinel(sentinel.Sentinel):
    """Sentinel that disconnects its sentinel connections after each
    master/slave discovery, so no idle sockets are kept open."""

    def disconnect_sentinels(self):
        """Release every pooled connection to the sentinel servers."""
        for node in self.sentinels:
            node.connection_pool.disconnect()

    def discover_master(self, *args, **kwargs):
        """Discover the master, then drop the sentinel connections."""
        master = super(DisconnectingSentinel, self).discover_master(*args,
                                                                   **kwargs)
        self.disconnect_sentinels()
        return master

    def discover_slaves(self, *args, **kwargs):
        """Discover the slaves, then drop the sentinel connections."""
        found = super(DisconnectingSentinel, self).discover_slaves(*args,
                                                                   **kwargs)
        self.disconnect_sentinels()
        return found

    def filter_slaves(self, slaves):
        """Return (ip, port) for slaves that are neither ODOWN nor SDOWN
        and whose master-link-status is 'ok'."""
        usable = []
        for info in slaves:
            if info['is_odown'] or info['is_sdown']:
                continue
            if info['master-link-status'] != 'ok':
                continue
            usable.append((info['ip'], info['port']))
        return usable
class DisconnectRedis(redis.StrictRedis):
    """``redis.StrictRedis`` extended with an explicit ``disconnect()``."""

    def disconnect(self):
        """Release every connection held by this client's pool."""
        self.connection_pool.disconnect()
class TwiceRedis(object):
    """Paired read/write redis clients backed by sentinel connection pools,
    using disconnecting sentinel clients and disconnectable redis clients."""

    generic_error = exceptions.RedisError

    def __init__(self, master_name, sentinels, password=None,
                 check_connection=False, socket_timeout=None,
                 min_other_sentinels=0,
                 pool_kwargs=None, client_kwargs=None):
        pool_kwargs = pool_kwargs if pool_kwargs is not None else {}
        client_kwargs = client_kwargs if client_kwargs is not None else {}
        # NOTE(tr3buchet) always the first sentinel will be (re)used by the
        #                 connection pool unless it fails to provide a
        #                 good master or slaves during dicovery, in which case
        #                 the next in the list is tried. so we shuffle the list
        #                 here to distribute the sentinel load around
        sentinels = list(sentinels)
        random.shuffle(sentinels)

        def build_client(is_master):
            # Each client gets its own sentinel watcher and pool
            watcher = DisconnectingSentinel(sentinels, min_other_sentinels)
            pool = sentinel.SentinelConnectionPool(
                master_name, watcher, is_master=is_master,
                check_connection=check_connection, password=password,
                **pool_kwargs)
            return DisconnectRedis(connection_pool=pool,
                                   socket_timeout=socket_timeout,
                                   **client_kwargs)

        self.write_client = build_client(True)
        self.read_client = build_client(False)

    @property
    def master(self):
        return self.write_client

    @property
    def slave(self):
        return self.read_client

    @property
    def write(self):
        return self.write_client

    @property
    def read(self):
        return self.read_client

    def disconnect(self):
        self.write_client.disconnect()
        self.read_client.disconnect()
| {
"content_hash": "9115cf9b82c086f57b2017b069e7c871",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 79,
"avg_line_length": 36.51923076923077,
"alnum_prop": 0.5726698262243286,
"repo_name": "alanquillin/twiceredis",
"id": "b348d6225d8797e37dca9408133e6ac133876e86",
"size": "4421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twiceredis/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6351"
}
],
"symlink_target": ""
} |
from configparser import ConfigParser, NoOptionError
# End Python 3
import requests
import os
import shutil
import fnmatch
import time
import getpass
import itertools
from ltk import exceptions
from ltk.apicalls import ApiCalls
from ltk.utils import *
from ltk.managers import DocumentManager, FolderManager
from ltk.constants import CONF_DIR, CONF_FN, SYSTEM_FILE, ERROR_FN
import json
from ltk.logger import logger
from ltk.git_auto import Git_Auto
from tabulate import tabulate
class Action:
    def __init__(self, path, watch=False, timeout=60):
        """Load project configuration rooted at (or above) *path*.

        Raises ``exceptions.UninitializedError`` when no project config
        directory is found.
        """
        # Defaults; overwritten from the config file by _initialize_self()
        self.host = ''
        self.access_token = ''
        self.project_id = ''
        self.project_name = ''
        self.path = path
        self.community_id = ''
        self.workflow_id = ''  # default workflow id; MT phase only
        self.locale = ''
        self.clone_option = 'on'
        self.auto_format_option = ''
        self.download_option = 'clone'
        self.download_dir = None  # directory where downloaded translation will be stored
        self.watch_locales = set()  # if specified, add these target locales to any files in the watch folder
        self.git_autocommit = None
        self.git_username = ''
        self.git_password = ''
        self.append_option = 'none'
        self.locale_folders = {}
        # _is_initialized() also normalizes self.path to the config root
        if not self._is_initialized():
            raise exceptions.UninitializedError("This project is not initialized. Please run init command.")
        self._initialize_self()
        self.watch = watch
        self.doc_manager = DocumentManager(self.path)
        self.folder_manager = FolderManager(self.path)
        self.timeout = timeout
        self.api = ApiCalls(self.host, self.access_token, self.watch, self.timeout)
        self.git_auto = Git_Auto(self.path)
        self.error_file_name = os.path.join(self.path, CONF_DIR, ERROR_FN)
def _is_initialized(self):
actual_path = find_conf(self.path)
if not actual_path:
return False
self.path = os.path.join(actual_path, '')
if not is_initialized(self.path):
return False
return True
    def _initialize_self(self):
        """Read the project config file and cache its settings on ``self``.

        Options missing from the file are written back with defaults via
        ``update_config_file()``.  NOTE(review): update_config_file() calls
        this method again, so each missing option triggers a recursive
        re-read; it terminates because the option exists afterwards, but
        the file is re-parsed repeatedly — confirm this is intended.
        """
        config_file_name = os.path.join(self.path, CONF_DIR, CONF_FN)
        conf_parser = ConfigParser()
        conf_parser.read(config_file_name)
        # Required options: raise NoOptionError (caught below) if absent
        self.host = conf_parser.get('main', 'host')
        self.access_token = conf_parser.get('main', 'access_token')
        self.project_id = conf_parser.get('main', 'project_id')
        self.community_id = conf_parser.get('main', 'community_id')
        self.workflow_id = conf_parser.get('main', 'workflow_id')
        self.locale = conf_parser.get('main', 'default_locale')
        # Normalize locale to hyphenated form (en_US -> en-US)
        self.locale = self.locale.replace('_','-')
        try:
            # Optional options: fall back to defaults and persist them
            if conf_parser.has_option('main', 'auto_format'):
                self.auto_format_option = conf_parser.get('main', 'auto_format')
            else:
                self.update_config_file('auto_format', 'on', conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'project_name'):
                self.project_name = conf_parser.get('main', 'project_name')
            if conf_parser.has_option('main', 'download_folder'):
                self.download_dir = conf_parser.get('main', 'download_folder')
            else:
                self.download_dir = None
                self.update_config_file('download_folder', json.dumps(self.download_dir), conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'watch_locales'):
                watch_locales = conf_parser.get('main', 'watch_locales')
                if watch_locales:
                    self.watch_locales = set(watch_locales.split(','))
                else:
                    # there are no watch locales, so set it to an empty set
                    self.watch_locales = set()
            else:
                self.watch_locales = set()
                self.update_config_file('watch_locales', json.dumps(list(self.watch_locales)), conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'locale_folders'):
                self.locale_folders = json.loads(conf_parser.get('main', 'locale_folders'))
                locale_folders = {}
                #for key, value in self.locale_folders.items():
                #    key = key.replace('_', '-');
                #    locale_folders[key] = value
                #self.locale_folders = locale_folders
            else:
                self.locale_folders = {}
                self.update_config_file('locale_folders', json.dumps(self.locale_folders), conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'download_option'):
                self.download_option = conf_parser.get('main', 'download_option')
            else:
                self.download_option = 'clone'
                self.update_config_file('download_option', self.download_option, conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'clone_option'):
                self.clone_option = conf_parser.get('main', 'clone_option')
            else:
                self.clone_option = 'on'
                self.update_config_file('clone_option', self.clone_option, conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'git_autocommit'):
                self.git_autocommit = conf_parser.get('main', 'git_autocommit')
            else:
                self.git_autocommit = ''
                self.update_config_file('git_autocommit', self.git_autocommit, conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'git_username'):
                self.git_username = conf_parser.get('main', 'git_username')
            else:
                self.git_username = ''
                self.update_config_file('git_username', self.git_username, conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'git_password'):
                self.git_password = conf_parser.get('main', 'git_password')
            else:
                self.git_password = ''
                self.update_config_file('git_password', self.git_password, conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'append_option'):
                self.append_option = conf_parser.get('main', 'append_option')
            else:
                self.append_option = 'none'
                self.update_config_file('append_option', self.append_option, conf_parser, config_file_name, "")
        except NoOptionError as e:
            # A required option was missing; backfill the project name from
            # the API if we can
            if not self.project_name:
                self.api = ApiCalls(self.host, self.access_token)
                project_info = self.api.get_project_info(self.community_id)
                self.project_name = project_info[self.project_id]
                config_file_name, conf_parser = self.init_config_file()
                log_info = 'Updated project name'
                self.update_config_file('project_name', self.project_name, conf_parser, config_file_name, log_info)
def _add_document(self, file_name, title, doc_id):
""" adds a document to db """
now = time.time()
# doc_id = json['properties']['id']
full_path = os.path.join(self.path, file_name)
last_modified = os.stat(full_path).st_mtime
self.doc_manager.add_document(title, now, doc_id, last_modified, now, file_name)
def _update_document(self, file_name):
""" updates a document in the db """
now = time.time()
file_path = os.path.join(self.path, file_name)
# sys_last_modified = os.stat(file_name).st_mtime
sys_last_modified = os.stat(file_path).st_mtime
entry = self.doc_manager.get_doc_by_prop('file_name', file_name)
doc_id = entry['id']
self.doc_manager.update_document('last_mod', now, doc_id)
self.doc_manager.update_document('sys_last_mod', sys_last_modified, doc_id)
# whenever a document is updated, it should have new translations
self.doc_manager.update_document('downloaded', [], doc_id)
    def close(self):
        # Close the underlying document database handle.
        self.doc_manager.close_db()
    def open(self):
        # (Re)open the underlying document database handle.
        self.doc_manager.open_db()
def init_config_file(self):
config_file_name = os.path.join(self.path, CONF_DIR, CONF_FN)
conf_parser = ConfigParser()
conf_parser.read(config_file_name)
return config_file_name, conf_parser
def update_config_file(self, option, value, conf_parser, config_file_name, log_info):
try:
conf_parser.set('main', option, value)
with open(config_file_name, 'w') as new_file:
conf_parser.write(new_file)
self._initialize_self()
if (len(log_info)):
logger.info(log_info+"\n")
except IOError as e:
print(e.errno)
print(e)
    def get_relative_path(self, path):
        # Delegate to the module-level helper, anchored at the project root.
        return get_relative_path(self.path, path)
def get_current_path(self, path):
cwd = os.getcwd()
if cwd in path:
path = path.replace(cwd,"")
return path
else:
cwd_relative_path = cwd.replace(self.path,"")
return path.replace(cwd_relative_path+os.sep,"")
def get_current_abs(self, path):
# print("orig path: "+str(path))
cwd = os.getcwd()
if cwd in path:
path = path.replace(cwd,"")
else:
# print("cwd: "+cwd)
# print("self.path: "+self.path)
cwd_relative_path = cwd.replace(self.path,"")
# print("cwd relative path: "+cwd_relative_path)
cwd_path = path.replace(cwd_relative_path+os.sep,"")
# print("cwd path: "+cwd_path)
path = cwd_path
# print("current path: "+path)
# print("abs path: "+os.path.abspath(path))
return os.path.abspath(path)
def norm_path(self, file_location):
# print("original path: "+str(file_location))
if file_location:
file_location = os.path.normpath(file_location)
# abspath=os.path.abspath(file_location)
# print("abspath: "+str(os.path.abspath(os.path.expanduser(file_location))))
# print("self.path: "+self.path)
# print("cwd: "+str(os.getcwd()))
norm_path = os.path.abspath(os.path.expanduser(file_location)).replace(self.path, '')
# print("normalized path: "+norm_path)
# print("joined path: "+str(os.path.join(self.path,file_location)))
# if file_location == ".." and self.path.rstrip('/') in norm_path:
# return norm_path.replace(self.path.rstrip('/'), '')
if file_location is not "." and ".." not in file_location and os.path.exists(os.path.join(self.path,file_location)):
# print("returning original path: "+str(file_location))
return file_location.replace(self.path, '')
elif ".." in file_location and file_location != "..":
# print("returning norm path: "+norm_path)
return norm_path.replace(self.path,'')
if not os.path.exists(os.path.join(self.path,norm_path)) and os.path.exists(os.path.join(self.path,file_location)):
# print("Starting path at project directory: "+file_location.replace(self.path, ''))
return os.path.abspath(os.path.expanduser(file_location.replace(self.path, ''))).replace(self.path, '')
elif file_location == "..":
return os.path.abspath(os.path.expanduser(file_location.replace(self.path, ''))).replace(self.path, '')
return norm_path
else:
return None
def get_docs_in_path(self, path):
files = get_files(path)
db_files = self.doc_manager.get_file_names()
docs = []
if files:
for file in files:
file_name = self.norm_path(file)
if file_name in db_files:
docs.append(self.doc_manager.get_doc_by_prop('file_name',file_name))
return docs
def get_doc_filenames_in_path(self, path):
files = get_files(path)
db_files = self.doc_manager.get_file_names()
docs = []
if files:
for file in files:
file_name = self.norm_path(file)
if file_name in db_files:
docs.append(file_name)
return docs
    def get_doc_locales(self, doc_id, doc_name):
        """Return the target locale codes for a remote document.

        Returns [] when the document has no translations; other non-200
        responses are reported as warnings via raise_error. NOTE(review):
        a KeyError while reading entities returns None rather than a
        list -- callers appear to tolerate this, confirm before changing.
        """
        locales = []
        response = self.api.document_translation_status(doc_id)
        if response.status_code != 200:
            # "No translations exist" is a normal condition, not an error.
            if check_response(response) and response.json()['messages'] and 'No translations exist' in response.json()['messages'][0]:
                return locales
            if doc_name:
                raise_error(response.json(), 'Failed to check target locales for document '+doc_name, True, doc_id)
            else:
                raise_error(response.json(), 'Failed to check target locales for document '+doc_id, True, doc_id)
        try:
            if 'entities' in response.json():
                for entry in response.json()['entities']:
                    locales.append(entry['properties']['locale_code'])
        except KeyError as e:
            print("Error listing translations")
            return
        # return detailed_status
        return locales
def is_locale_folder_taken(self, new_locale, path):
# Python 2
# for locale, folder in self.locale_folders.iteritems():
# End Python 2
# Python 3
for locale, folder in self.locale_folders.items():
# End Python 3
if path == folder and not locale == new_locale:
return locale
return False
    def update_document_action(self, file_name, title=None, **kwargs):
        """Push a changed local file to its existing remote document.

        Looks the file up in the local db, posts the new content (and an
        optional new *title*), then refreshes the db record. Returns True
        on success, None on failure (errors are logged).
        """
        try:
            relative_path = self.norm_path(file_name)
            entry = self.doc_manager.get_doc_by_prop('file_name', relative_path)
            try:
                document_id = entry['id']
            except TypeError as e:
                # entry is None: the file was never added to tracking.
                log_error(self.error_file_name, e)
                logger.error("Document name specified for update doesn't exist: {0}".format(title))
                return
            if title:
                response = self.api.document_update(document_id, file_name, title=title, **kwargs)
            else:
                response = self.api.document_update(document_id, file_name)
            if response.status_code != 202:
                raise_error(response.json(), "Failed to update document {0}".format(file_name), True)
            self._update_document(relative_path)
            return True
        except Exception as e:
            log_error(self.error_file_name, e)
            # Crude detection of a dead/misbehaving TMS endpoint by message text.
            if 'string indices must be integers' in str(e) or 'Expecting value: line 1 column 1' in str(e):
                logger.error("Error connecting to Lingotek's TMS")
            else:
                logger.error("Error on updating document"+str(file_name)+": "+str(e))
def _target_action_db(self, to_delete, locales, document_id):
if to_delete:
curr_locales = self.doc_manager.get_doc_by_prop('id', document_id)['locales']
updated_locales = set(curr_locales) - set(locales)
self.doc_manager.update_document('locales', updated_locales, document_id)
else:
self.doc_manager.update_document('locales', list(locales), document_id)
def update_doc_locales(self, document_id):
try:
locale_map = self.import_locale_info(document_id)
locale_info = list(iter(locale_map))
except exceptions.RequestFailedError as e:
log_error(self.error_file_name, e)
locale_info = []
self.doc_manager.update_document('locales', locale_info, document_id)
def added_folder_of_file(self, file_path):
folders = self.folder_manager.get_file_names()
if not folders:
#print("not folders")
return
for folder in folders:
folder = os.path.join(self.path, folder)
if folder in file_path:
return folder
def get_new_name(self, file_name, curr_path):
i = 1
file_path = os.path.join(curr_path, file_name)
name, extension = os.path.splitext(file_name)
while os.path.isfile(file_path):
new_name = '{name}({i}){ext}'.format(name=name, i=i, ext=extension)
file_path = os.path.join(curr_path, new_name)
i += 1
return file_path
def import_locale_info(self, document_id, poll=False):
locale_progress = {}
response = self.api.document_translation_status(document_id)
if response.status_code != 200:
if poll or response.status_code == 404:
return {}
else:
# raise_error(response.json(), 'Failed to get locale details of document', True)
raise exceptions.RequestFailedError('Failed to get locale details of document')
try:
for entry in response.json()['entities']:
curr_locale = entry['properties']['locale_code']
curr_progress = int(entry['properties']['percent_complete'])
curr_locale = curr_locale.replace('-', '_')
locale_progress[curr_locale] = curr_progress
except KeyError:
pass
return locale_progress
def delete_local(self, title, document_id, message=None):
# print('local delete:', title, document_id)
if not title:
title = document_id
message = '{0} has been deleted locally'.format(title) if not message else message
try:
file_name = self.doc_manager.get_doc_by_prop('id', document_id)['file_name']
except TypeError:
logger.info('Document to remove not found in the local database')
return
try:
os.remove(os.path.join(self.path, file_name))
logger.info(message)
except OSError:
logger.info('Something went wrong trying to delete the local file')
def delete_local_translation(self, file_name):
try:
if not file_name:
logger.info('Please provide a valid file name')
logger.info('{0} (local translation) has been deleted'.format(self.get_relative_path(file_name)))
os.remove(os.path.join(self.path, file_name))
except OSError:
logger.info('Something went wrong trying to download the local translation')
    def delete_local_path(self, path, message=None):
        # Normalize to a project-relative path before deleting.
        path = self.norm_path(path)
        message = '{0} has been deleted locally.'.format(path) if not message else message
        try:
            # NOTE(review): os.remove receives the *relative* path, so it is
            # resolved against the cwd rather than self.path -- confirm this
            # is intended for callers running outside the project root.
            os.remove(path)
            logger.info(message)
        except OSError:
            logger.info('Something went wrong trying to delete the local file')
def raise_error(json, error_message, is_warning=False, doc_id=None, file_name=None):
    """Report an API failure.

    Extracts the first message from the API *json* payload (falling back
    to *error_message* when the payload is empty or vague), substitutes a
    friendlier "file_name (doc_id)" label for the bare id, then raises
    exceptions.RequestFailedError -- or just logs, when *is_warning*.

    Fixes: *file_name* defaults to None and the original called
    .replace() on it unconditionally; the resulting AttributeError was
    swallowed by the fallback handler, discarding the API's real message.
    Likewise `error` was unbound (NameError, uncaught) when *json* was
    falsy; it now falls back to *error_message*.
    """
    try:
        error = error_message
        if json:
            error = json['messages'][0]
            if file_name is not None:
                file_name = file_name.replace("Status of ", "")
                if doc_id is not None:
                    error = error.replace(doc_id, file_name+" ("+doc_id+")")
            # Sometimes api returns vague errors like 'Unknown error'
            if error == 'Unknown error':
                error = error_message
        if not is_warning:
            raise exceptions.RequestFailedError(error)
        logger.error(error)
    except (AttributeError, IndexError):
        if not is_warning:
            raise exceptions.RequestFailedError(error_message)
        logger.error(error_message)
def is_initialized(project_path):
    """True when *project_path* contains a non-empty ltk config file."""
    conf_dir = os.path.join(project_path, CONF_DIR)
    conf_file = os.path.join(conf_dir, CONF_FN)
    return bool(
        os.path.isdir(conf_dir)
        and os.path.isfile(conf_file)
        and os.stat(conf_file).st_size
    )
def choice_mapper(info):
    """Print a numbered table of {uuid: name} choices and return a
    mapping of row index -> {uuid: name}.

    Entries are sorted by name; entries with a falsy uuid or name are
    skipped so that indices stay contiguous. Rows whose values cannot be
    encoded are silently omitted from the printed table (but remain in
    the returned mapping).
    """
    mapper = {}
    import operator
    #sorted_info = sorted(info.iteritems(), key=operator.itemgetter(1))
    sorted_info = sorted(info.items(), key = operator.itemgetter(1))
    index = 0
    for entry in sorted_info:
        if entry[0] and entry[1]:
            mapper[index] = {entry[0]: entry[1]}
            index += 1
    table = []
    for k,v in mapper.items():
        try:
            for values in v:
                table.append({
                    "ID": k,
                    "Name": v[values],
                    "UUID": values
                })
        except UnicodeEncodeError:
            continue
    print(tabulate(table, headers="keys"), "\n")
    return mapper
def find_conf(curr_path):
    """Walk upward from *curr_path* until a directory containing the ltk
    conf folder is found; return that directory, or None once the
    filesystem root is reached without a match."""
    while True:
        if os.path.isdir(os.path.join(curr_path, CONF_DIR)):
            return curr_path
        if curr_path == os.path.abspath(os.sep):
            return None
        curr_path = os.path.abspath(os.path.join(curr_path, os.pardir))
def printResponseMessages(response):
    # Log every message contained in the API response payload.
    for message in response.json()['messages']:
        logger.info(message)
def get_files(patterns):
    """Expand *patterns* into a list of matching files.

    *patterns* may be a single pattern string or a list/tuple of them;
    Unix shell-style wildcards are supported (expanded via
    getRegexFiles). Directories are walked recursively, skipping Windows
    artifacts (desktop.ini, Thumbs.db, ehthumbs.db). Returns None when
    nothing matches.
    """
    cwd = os.getcwd()
    if isinstance(patterns, str):
        patterns = [patterns]

    def expand(pattern):
        # Patterns with a filename component may contain wildcards.
        if os.path.basename(pattern):
            return getRegexFiles(pattern, cwd)
        return [pattern]

    expanded = []
    if isinstance(patterns, (list, tuple)):
        for pattern in patterns:
            expanded.extend(expand(pattern))
    else:
        expanded.extend(expand(patterns))

    junk_markers = ("desktop.ini", "Thumbs.db", "ehthumbs.db")
    matched_files = []
    for pattern in expanded:
        path = os.path.abspath(pattern)
        if not os.path.exists(path):
            continue
        if os.path.isdir(path):
            for root, _subdirs, files in os.walk(path):
                for name in files:
                    if not any(marker in name for marker in junk_markers):
                        matched_files.append(os.path.join(root, name))
        else:
            matched_files.append(path)
    return matched_files if matched_files else None
def getRegexFiles(pattern, path):
    """Expand a wildcard *pattern* (relative to *path*) into file paths.

    A pattern without wildcards is returned as-is. Otherwise the
    directory part of the pattern is joined onto *path* and that tree is
    walked, matching file names with fnmatch.

    Bug fix: file names are now matched against the *basename* of the
    pattern; previously the full pattern (including any directory
    component) was passed to fnmatch.filter, so patterns such as
    "sub/*.txt" could never match the bare file names yielded by os.walk.
    """
    dir_name = os.path.dirname(pattern)
    if dir_name:
        path = os.path.join(path, dir_name)
    pattern_name = os.path.basename(pattern)
    if pattern_name and "*" not in pattern:
        return [pattern]
    matched_files = []
    for root, subdirs, files in os.walk(path):
        for fn in fnmatch.filter(files, pattern_name):
            matched_files.append(os.path.join(root, fn))
    return matched_files
def log_id_names(json):
    """Return parallel lists (ids, titles) from a json entity payload."""
    entities = json['entities']
    ids = [entity['properties']['id'] for entity in entities]
    titles = [entity['properties']['title'] for entity in entities]
    return ids, titles
| {
"content_hash": "4cdca40329bd48bad0ecfa4ab4801842",
"timestamp": "",
"source": "github",
"line_count": 568,
"max_line_length": 168,
"avg_line_length": 43.4137323943662,
"alnum_prop": 0.5776389959041324,
"repo_name": "Lingotek/translation-utility",
"id": "541307ec23476dccb6736955756ba1e9f95dc53a",
"size": "24789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python3/ltk/actions/action.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "192"
},
{
"name": "Python",
"bytes": "125292"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class AutorangeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``layout.scene.xaxis.autorange`` property.

    Accepts True, False, or "reversed"; edits trigger a plot re-draw.
    """
    def __init__(
        self, plotly_name="autorange", parent_name="layout.scene.xaxis", **kwargs
    ):
        super(AutorangeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            implied_edits=kwargs.pop("implied_edits", {}),
            values=kwargs.pop("values", [True, False, "reversed"]),
            **kwargs,
        )
| {
"content_hash": "20e6f705224dc63d86e251ad424e003a",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 81,
"avg_line_length": 37.266666666666666,
"alnum_prop": 0.6028622540250447,
"repo_name": "plotly/plotly.py",
"id": "753bbaa11906a5131051bd2810a73a670d5b0ebd",
"size": "559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/scene/xaxis/_autorange.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import os.path
import shutil
import time
import urlparse
import sys
try:
import engage
except:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))
import engage.tests.test_common as tc
from engage.engine.cmdline_install import APPLICATION_ARCHIVE_PROP
from engage.engine.password import create_password_db
join = os.path.join
# JF 2013-05-20: Django installer is obsolete - not supporting anymore
# installer name -> app name -> operation -> config overrides consumed by
# the generated install/upgrade tests below.
INSTALLERS = {
    'tomcat': {
        'hello': {
            'install': {
                APPLICATION_ARCHIVE_PROP: join(tc.TEST_APP_DIR, 'tomcat_hello.war'),
                'websvr_port': 8080, # for checking response
                'manager_port': 8080,
                'password_map': {'apache-tomcat/admin_password': 'testpass'}
            },
        }
    },
## 'django': {
## 'test-app': {
## 'install': {
## APPLICATION_ARCHIVE_PROP: join(tc.TEST_APP_DIR, 'django_test_app_v1.tgz')},
## 'upgrade': {
## APPLICATION_ARCHIVE_PROP: join(tc.TEST_APP_DIR, 'django_test_app_v2.tgz')}
## }
## }
}
# Canonical execution order: install must always precede upgrade.
OPERATIONS = ['install', 'upgrade']
def app_is_available(config_map):
    """Determine availability by making requests based on url templates"""
    host = config_map['websvr_hostname']
    port = config_map['websvr_port']
    checks = [(tmpl.format(host=host, port=port), ex)
              for (tmpl, ex) in config_map['expected_url_codes']]
    tc.logger.debug('Checking app availability at %s' % checks)
    try:
        results = []
        for (url, expect) in checks:
            _scheme, netloc, path, _query, _fragment = urlparse.urlsplit(url)
            results.append(expect == tc.get_response(netloc, path))
        return all(results)
    except:
        tc.logger.exception('Exception while getting response')
        return False
def run_operations(installer_name, app_name):
    """Run the configured operations (install, then upgrade) for one
    installer/app pair inside a throwaway deployment directory, asserting
    exit codes and app availability after each step.
    """
    deploy_dir = tc.get_randomized_deploy_dir('test_install_')
    master_password_file = join(deploy_dir, 'master_password')
    operations = INSTALLERS[installer_name][app_name].keys()
    for operation in OPERATIONS: # retain correct order
        if operation not in operations:
            continue
        config_map = tc.get_config(INSTALLERS[installer_name][app_name][operation])
        config_map['Installer'] = installer_name
        config_map['Install directory'] = deploy_dir
        if operation == 'install':
            # Fresh install: the target port must be free before starting.
            assert tc.port_is_available(tc.get_netloc(config_map))
            tc.bootstrap(deploy_dir)
            tc.write_master_password(master_password_file)
            config_dir = tc.ensure_subdir(deploy_dir, 'config')
            config_path = tc.write_config_file(config_dir, config_map)
            if 'password_map' in config_map:
                create_password_db(deploy_dir, tc.DEFAULT_MASTER_PASSWORD,
                                   config_map['password_map'])
            exit_code = tc.install(deploy_dir, config_path, master_password_file)
        elif operation == 'upgrade':
            exit_code = tc.upgrade(deploy_dir, tc.ENGAGE_DIR,
                                   config_map[APPLICATION_ARCHIVE_PROP],
                                   master_password_file)
        assert config_map['expected_exit_code'] == exit_code
        time.sleep(1) # allow for delayed start
        assert app_is_available(config_map)
        if operation == 'upgrade' or len(operations) == 1:
            # only shutdown on install if it's the only operation
            tc.stop(tc.get_init_script(config_map), master_password_file)
            assert tc.port_is_available(tc.get_netloc(config_map))
    tc.logger.info('removing %s' % (deploy_dir))
    shutil.rmtree(deploy_dir)
def test_install_upgrade_generator():
    """Generate install+upgrade tests based on INSTALLERS tree"""
    # Nose-style test generator: yields (callable, installer, app) per combo.
    tc.assert_context(tc.ENGAGE_DIR)
    for installer_name in INSTALLERS:
        for app_name in INSTALLERS[installer_name]:
            yield run_operations, installer_name, app_name
# You can run this test file as a main script. By default,
# this will execute all the install_upgrade tests. You can also
# run individual tests via the apps and installer command line options.
if __name__ == "__main__":
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--installer", dest="installer", default="django",
                      help="installer name, default is django (used with --apps option)")
    parser.add_option("--apps", dest="apps", default=None,
                      help="list of apps to test (defaults to all)")
    (opts, args) = parser.parse_args()
    if opts.apps:
        app_list = opts.apps.split(",")
    else:
        # No explicit list: run every app configured for the installer.
        app_list = INSTALLERS[opts.installer].keys()
    for app in app_list:
        run_operations(opts.installer, app)
    sys.exit(0)
| {
"content_hash": "91af51f41f852b05fe302c1c8f9c7570",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 94,
"avg_line_length": 39.128,
"alnum_prop": 0.6150071560008178,
"repo_name": "quaddra/engage",
"id": "2977a19fbef68d8e26a7b06ab9c95e772e1ba5de",
"size": "4891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_pkg/engage/tests/test_install_upgrade.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "13559"
},
{
"name": "Makefile",
"bytes": "8662"
},
{
"name": "OCaml",
"bytes": "193014"
},
{
"name": "Python",
"bytes": "1425458"
},
{
"name": "Shell",
"bytes": "18171"
}
],
"symlink_target": ""
} |
"""
WSGI config for web project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
# Default the settings module so any WSGI server can boot the app without
# extra environment configuration.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "web.settings")
from django.core.wsgi import get_wsgi_application
# Module-level WSGI callable picked up by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| {
"content_hash": "ba2c669cd64f3421fcd90a3060bafbf1",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.214285714285715,
"alnum_prop": 0.7716535433070866,
"repo_name": "mblaauw/pre-publish-predictor",
"id": "2b0f73c3a32995baf11f84090e97efd82c2f9792",
"size": "381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/web/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3352"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import division
from freetype import *
def platform_name(platform_id):
    """Return the symbolic TT_PLATFORMS name for *platform_id*."""
    return next(
        (name for name, value in TT_PLATFORMS.items() if value == platform_id),
        'Unknown platform',
    )
def encoding_name(platform_id, encoding_id):
    """Return the symbolic encoding name for (*platform_id*, *encoding_id*)."""
    if platform_id == TT_PLATFORM_APPLE_UNICODE:
        encodings = TT_APPLE_IDS
    elif platform_id == TT_PLATFORM_MACINTOSH:
        encodings = TT_MAC_IDS
    elif platform_id == TT_PLATFORM_MICROSOFT:
        encodings = TT_MS_IDS
    elif platform_id == TT_PLATFORM_ADOBE:
        encodings = TT_ADOBE_IDS
    else:
        return 'Unknown encoding'
    return next(
        (name for name, value in encodings.items() if value == encoding_id),
        'Unknown encoding',
    )
def language_name(platform_id, language_id):
    """Return the symbolic language name for (*platform_id*, *language_id*)."""
    if platform_id == TT_PLATFORM_MACINTOSH:
        languages = TT_MAC_LANGIDS
    elif platform_id == TT_PLATFORM_MICROSOFT:
        languages = TT_MS_LANGIDS
    else:
        return 'Unknown language'
    return next(
        (name for name, value in languages.items() if value == language_id),
        'Unknown language',
    )
if __name__ == '__main__':
    import os, sys
    if len(sys.argv) < 2:
        print("Usage: %s font_filename" % sys.argv[0])
        sys.exit()
    face = Face(sys.argv[1])
    # Inspect the first SFNT name record: show its platform/encoding/language.
    name = face.get_sfnt_name(0)
    print( 'platform_id:', platform_name(name.platform_id) )
    print( 'encoding_id:', encoding_name(name.platform_id,
                                         name.encoding_id) )
    print( 'language_id:', language_name(name.platform_id,
                                         name.language_id) )
    # Dump every name-table entry, decoding leniently (encodings vary).
    for i in range(face.sfnt_name_count):
        name = face.get_sfnt_name(i).string
        print(i, name.decode('utf-8', 'ignore'))
| {
"content_hash": "16ce4a36cbd7944c7d8739782c5b9935",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 60,
"avg_line_length": 30.3,
"alnum_prop": 0.5962596259625963,
"repo_name": "davidcox/freetype-py",
"id": "ffebc00a5f253952b2a6d9dfaf83f956991743d2",
"size": "2149",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/sfnt-names.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "254640"
}
],
"symlink_target": ""
} |
# Package metadata for wp2github.
__title__ = 'wp2github'
__description__ = 'Convert WordPress plugin readme file to GitHub Flavored Markdown'
__version_info__ = (1, 1, 3)
__version__ = '.'.join(map(str, __version_info__))
__author__ = 'Sergey Storchay'
__author_email__ = 'r8@r8.com.ua'
# Canonical dunder name, consistent with the other metadata fields above.
__url__ = 'https://github.com/r8/wp2github.py'
# Backward-compatible alias: earlier releases exposed the URL under this
# misspelled single-underscore name.
_url_ = __url__
| {
"content_hash": "96f143e6a5301cc67148514cf93a0bcd",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 84,
"avg_line_length": 42.714285714285715,
"alnum_prop": 0.6454849498327759,
"repo_name": "r8/wp2github.py",
"id": "072ec6a01f49b6a963267619491f191008a9c27a",
"size": "299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wp2github/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6273"
}
],
"symlink_target": ""
} |
from setuptools import setup
from codecs import open
# The PyPI long description is taken verbatim from the README
# (codecs.open with an explicit utf-8 encoding).
with open('README.rst', 'r', 'utf-8') as f:
    readme = f.read()
setup(
    name="reflexif",
    packages=['reflexif'],
    version="0.1.0.dev2",
    author="Christoph Schmitt",
    author_email="dev@chschmitt.de",
    description="A library to read, inspect and modify Exif data entirely written in Python",
    long_description=readme,
    license="BSD License",
    package_data={'': ['LICENSE']},
    include_package_data=True,
    keywords="exif",
    url="https://github.com/chschmitt/reflexif",
    package_dir={'reflexif': 'reflexif'},
    test_suite="tests",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: Implementation :: CPython"
    ]
)
| {
"content_hash": "36e6fb0b0e6a2a6ff46eca93e8300c43",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 93,
"avg_line_length": 30.366666666666667,
"alnum_prop": 0.6278814489571899,
"repo_name": "chschmitt/reflexif",
"id": "62e009cf3aa27cac99da743a1f89988da35f511e",
"size": "911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "56372"
}
],
"symlink_target": ""
} |
import unittest
from g1.operations.databases.bases import interfaces
class InterfacesTest(unittest.TestCase):
    """Checks for interfaces.next_key byte-string increment semantics."""
    def test_next_key(self):
        # (input key, expected successor) pairs, including carry cases.
        cases = [
            (b'\x00', b'\x01'),
            (b'\x01', b'\x02'),
            (b'\xfe', b'\xff'),
            (b'\xff', b'\x01\x00'),
            (b'\x01\x00', b'\x01\x01'),
            (b'\x01\xfe', b'\x01\xff'),
            (b'\x01\xff', b'\x02\x00'),
            (b'\xff\xff', b'\x01\x00\x00'),
            (b'\x01\x00\x00\xff', b'\x01\x00\x01\x00'),
        ]
        for key, expected in cases:
            self.assertEqual(interfaces.next_key(key), expected)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "e8c653f7e1781f3d1b2c23016a58ca5a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 75,
"avg_line_length": 37.130434782608695,
"alnum_prop": 0.6358313817330211,
"repo_name": "clchiou/garage",
"id": "b13461d7814234f8d51c3958608fd8caff543f83",
"size": "854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/g1/operations/databases/bases/tests/test_interfaces.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cap'n Proto",
"bytes": "6917"
},
{
"name": "HTML",
"bytes": "113"
},
{
"name": "Java",
"bytes": "61027"
},
{
"name": "Python",
"bytes": "1653733"
},
{
"name": "Shell",
"bytes": "6209"
}
],
"symlink_target": ""
} |
"""
Flask App Setup
:Author: Fabio Scala <fabio.scala@gmail.com>
"""
import os
import sys
import config
basedir = config.basedir
appdir = os.path.abspath(os.path.dirname(__file__))
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.cache import Cache
# Shared extension singletons; bound to the app instance in create_app().
cache = Cache(config={'CACHE_TYPE': 'simple'})
db = SQLAlchemy()
from .routing import osrm
# Rebind the module name to a service instance (shadows the import above).
osrm = osrm.OsrmService()
category_service = None
from flask import Flask, Blueprint
def create_app(config_name):
    """Application factory: build and configure the Flask app for
    *config_name* (a key into config.config)."""
    eff_config = config.config[config_name]
    app = Flask(__name__, static_folder=eff_config.APP_STATIC_FOLDER, static_url_path='')
    app.config.from_object(eff_config)
    eff_config.init_app(app)
    # Bind the module-level extension singletons to this app instance.
    cache.init_app(app)
    db.init_app(app)
    osrm.set_base_url(eff_config.APP_OSRM_URL)
    from app import pois
    pois.set_config_path(os.path.join(basedir, 'providers_config.yaml'))
    from .api import api as blueprint_api_v_1_0
    from .main import main as blueprint_main
    blueprint_main.static_folder = '../' + eff_config.APP_STATIC_FOLDER
    app.register_blueprint(blueprint_main)
    app.register_blueprint(blueprint_api_v_1_0, url_prefix='/api/v1.0')
    return app
| {
"content_hash": "c2efaaf68622c2806440cf949a8b2f5a",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 89,
"avg_line_length": 23.36,
"alnum_prop": 0.7080479452054794,
"repo_name": "fabioscala/poitour-backend",
"id": "b8256ffaf3f11f0e4d35b8b619f92ab5db0b9659",
"size": "1168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Python",
"bytes": "81128"
},
{
"name": "Shell",
"bytes": "8228"
}
],
"symlink_target": ""
} |
import unittest
import numpy
import collections
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.initializer import ConstantInitializer
from paddle.fluid.param_attr import WeightNormParamAttr
class TestWeightNormalization(unittest.TestCase):
    """End-to-end check of weight normalization (w = g * v / ||v||).

    Builds an FC layer whose weight uses WeightNormParamAttr, runs one
    forward/backward pass, and compares g, v and dL/dg against a NumPy
    reference computed by weight_normalize().
    """
    batch_size = 3
    hidden_size = 5
    # One input named 'x' with feature size 10 and LoD level 0 (dense batch).
    data_desc = (['x', [10], 0], )
    @classmethod
    def setUpClass(cls):
        cls.set_program()
    @classmethod
    def set_program(cls):
        """Build the program once: weight-normalized fc, sum loss, backward."""
        data = fluid.layers.data(
            name=cls.data_desc[0][0], shape=cls.data_desc[0][1])
        out = fluid.layers.fc(input=data,
                              size=cls.hidden_size,
                              param_attr=WeightNormParamAttr(
                                  dim=None,
                                  name='weight_norm_param',
                                  initializer=ConstantInitializer(1.0)),
                              bias_attr=False,
                              act=None)
        loss = fluid.layers.reduce_sum(out)
        fluid.backward.append_backward(loss=loss)
        # Fetch g, v, and the gradient of the loss w.r.t. g.
        cls.fetch_list = [
            'weight_norm_param_g', 'weight_norm_param_v',
            'weight_norm_param_g@GRAD'
        ]
    def run_program(self):
        """Run the program on CPU (and GPU when available); keep fetches."""
        outputs = []
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        for place in places:
            self.set_inputs(place)
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            output = exe.run(fluid.default_main_program(),
                             feed=self.inputs,
                             fetch_list=self.fetch_list,
                             return_numpy=False)
            outputs.append(output)
        self.actual_outputs = outputs
    def set_data(self):
        """Generate random input values (and LoD when lod_level > 0)."""
        self.data = collections.OrderedDict()
        for desc in self.data_desc:
            data_name = desc[0]
            data_shape = desc[1]
            data_lod_level = desc[2]
            data_lod = []
            for i in range(data_lod_level):
                lod_level_i = numpy.random.randint(
                    low=1,
                    high=5,
                    size=self.batch_size
                    if i == 0 else sum(lod_level_i)).tolist()
                data_lod.append(lod_level_i)
            data_value = numpy.random.random(
                size=[sum(data_lod[-1]) if data_lod else self.batch_size
                      ] + data_shape).astype('float32')
            self.data[data_name] = (data_value, data_lod)
    def set_inputs(self, place):
        """Wrap the generated data into fluid Tensors on *place*."""
        self.inputs = {}
        for desc in self.data_desc:
            tensor = fluid.Tensor()
            tensor.set(self.data[desc[0]][0], place)
            if self.data[desc[0]][1]:
                tensor.set_recursive_sequence_lengths(self.data[desc[0]][1])
            self.inputs[desc[0]] = tensor
    def weight_normalize(self):
        """NumPy reference: return (g, v, dL/dg) for the same fc + sum loss."""
        v = numpy.ones((self.data[self.data_desc[0][0]][0].shape[-1],
                        self.hidden_size))
        g = numpy.linalg.norm(v, axis=None, keepdims=True)
        w = g * v / numpy.linalg.norm(v, axis=None, keepdims=True)
        x = self.data[self.data_desc[0][0]][0]
        out = numpy.dot(x, w)
        g_grad = (numpy.dot(x.T, numpy.ones_like(out)) * (v / numpy.linalg.norm(
            v, axis=None, keepdims=True))).sum(axis=None, keepdims=True)
        return g, v, g_grad
    def test_weight_normalization(self):
        """Compare framework outputs with the NumPy reference on all places."""
        self.set_data()
        self.run_program()
        expect_output = self.weight_normalize()
        for actual_output in self.actual_outputs:
            [
                self.assertTrue(
                    numpy.allclose(
                        numpy.array(actual), expect, atol=0.001))
                for expect, actual in zip(expect_output, actual_output)
            ]
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "87f07f04c1b61c2e07823aea0fc8e088",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 80,
"avg_line_length": 36.77570093457944,
"alnum_prop": 0.5212198221092758,
"repo_name": "jacquesqiao/Paddle",
"id": "436f9b9f86fb86270e47c8e30c5c0701787ca0f1",
"size": "4548",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_weight_normalization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "274815"
},
{
"name": "C++",
"bytes": "8229685"
},
{
"name": "CMake",
"bytes": "288709"
},
{
"name": "Cuda",
"bytes": "1123627"
},
{
"name": "Dockerfile",
"bytes": "8120"
},
{
"name": "Go",
"bytes": "109508"
},
{
"name": "Perl",
"bytes": "11456"
},
{
"name": "Python",
"bytes": "4194414"
},
{
"name": "Shell",
"bytes": "164656"
}
],
"symlink_target": ""
} |
"""
Manage the output messages for the user.
"""
import sys
OK = "ok"
ANOMALY = "ANOMALY"
SKIPPING = "collecting data"
INITIAL_STATE = (None, -1, -1)
def update_state(prev, skipped, anomaly_detected, timestamp):
    """Advance the detector one tick, echo the status line to stdout and
    return the new (state, streak, last_change) triple."""
    state, streak, last_change = prev
    new_state = get_next_state(state, skipped, anomaly_detected)
    if new_state == state:
        new_streak = streak + 1
    else:
        # State flipped: restart the streak and remember when it happened.
        new_streak = 0
        last_change = timestamp
    print_state(new_state, new_streak, timestamp, last_change)
    return (new_state, new_streak, last_change)
def get_next_state(prev_state, skipped, anomaly_detected):
    """Compute the detector state for the next tick.

    A fresh detector (no previous state) or a skipped tick both report
    SKIPPING; otherwise the anomaly flag selects ANOMALY or OK.
    """
    if prev_state is None or skipped:
        return SKIPPING
    return ANOMALY if anomaly_detected else OK
def print_state(state, streak, timestamp, last_change):
    """Render one status line to stdout.

    Rewrites the current line with '\\r' while the state is unchanged; starts
    a new line when the state changed on this very tick.
    """
    prefix = "\n" if last_change == timestamp else "\r"
    elapsed = (timestamp - last_change) / 1000.
    msg = "{}[{}] :: {} - {}x ({}s)".format(
        prefix, last_change, state, streak, elapsed)
    out = sys.stdout
    out.write(msg)
    out.flush()
| {
"content_hash": "eb8ed2c81fe6c91550c41d8d6db0b9f7",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 70,
"avg_line_length": 26.36,
"alnum_prop": 0.6198786039453718,
"repo_name": "simonrozsival/mff-ai-anomaly-detector",
"id": "18186b18b539caec0222b7675fa96b08884e5718",
"size": "1318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "detector/src/state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9021"
}
],
"symlink_target": ""
} |
import sys
import os
###############################################################################
# monkey patch sphinx to suppress "nonlocal image URI found" warning
# http://stackoverflow.com/questions/12772927/specifying-an-online-image-in-sphinx-restructuredtext-format
# https://github.com/SuperCowPowers/workbench/issues/172
import sphinx.environment
from docutils.utils import get_source_line
def _warn_node(self, msg, node):
if not msg.startswith('nonlocal image URI found:'):
self._warnfunc(msg, '%s:%s' % get_source_line(node))
sphinx.environment.BuildEnvironment.warn_node = _warn_node
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../nphelper'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nphelper'
copyright = u'2015, Stefan Otte'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'nphelperdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'nphelper.tex', u'nphelper Documentation',
u'Stefan Otte', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'nphelper', u'nphelper Documentation',
[u'Stefan Otte'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'nphelper', u'nphelper Documentation',
u'Stefan Otte', 'nphelper', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "c448e63ccc9e81364fc98ff3d6cbdcb1",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 106,
"avg_line_length": 31.863970588235293,
"alnum_prop": 0.7005884389061959,
"repo_name": "sotte/nphelper",
"id": "62331b9d12d661b41f6610e4b3976756e20f68ca",
"size": "9088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21972"
}
],
"symlink_target": ""
} |
import unittest
from hwt.hdl.constants import INTF_DIRECTION, DIRECTION
from hwt.pyUtils.arrayQuery import single, NoValueExc
class BaseSynthesizerTC(unittest.TestCase):
    """Assertion helpers for checking interface and entity-port directions."""

    def assertIsM(self, intf):
        """Assert that *intf* was resolved as a MASTER interface."""
        self.assertEqual(intf._direction, INTF_DIRECTION.MASTER)

    def assertIsS(self, intf):
        """Assert that *intf* was resolved as a SLAVE interface."""
        self.assertEqual(intf._direction, INTF_DIRECTION.SLAVE)

    def assertDir(self, u, portName, direction):
        """Assert that entity port *portName* of unit *u* has *direction*."""
        try:
            port = self.getPort(u._ctx.ent, portName)
        except NoValueExc:  # pragma: no cover
            self.assertTrue(False, f"port {portName:s} exists")
        self.assertEqual(port.direction, direction,
                         f"port {portName:s} should have direction {direction:s}")

    def assertDirIn(self, u, portName):
        """Assert that the named port is an input."""
        self.assertDir(u, portName, DIRECTION.IN)

    def assertDirOut(self, u, portName):
        """Assert that the named port is an output."""
        self.assertDir(u, portName, DIRECTION.OUT)

    @staticmethod
    def getPort(entity, portName):
        """Return the single entity port named *portName*."""
        return single(entity.ports, lambda port: port.name == portName)
| {
"content_hash": "194e4a321ba954fbfa245b22acd77401",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 106,
"avg_line_length": 35.714285714285715,
"alnum_prop": 0.688,
"repo_name": "Nic30/hwtLib",
"id": "9ba06dadae74b285bb0388cdea34672cab1bb5d9",
"size": "1000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hwtLib/tests/synthesizer/interfaceLevel/baseSynthesizerTC.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "41560"
},
{
"name": "Python",
"bytes": "2523349"
},
{
"name": "VHDL",
"bytes": "117346"
},
{
"name": "Verilog",
"bytes": "36444"
}
],
"symlink_target": ""
} |
from subprocess import check_call
from random import randint
from time import sleep
from abc import ABC
from os import makedirs
class captureAgent (ABC):
    '''
    captureAgent uses applescript helper functions to automatically
    take pictures of parts of websites.
    The class that instantiates this abstract base class sets the various
    coordinates and sequence of operations.
    '''
    x = 0  # index of the x coordinate inside a click-point [x, y] list
    y = 1  # index of the y coordinate inside a click-point [x, y] list

    def __init__(self):
        '''
        Subclasses are expected to define attributes along these lines:
            self.dirHelpers = '/usr/local/bin/'
            self.dirTimePeriods = [self.dir1...self.dirn]
            self.filePrefixes = [self.filePrefix1...self.filePrefixn]
            self.url = "https://www.yahoo.com"
            self.openWindow = self.dirHelpers+"mkswindow.scrpt"
            self.argvalueWindow = self.url + " " + str(1200) + " " + str(1524)
            # Locations of interest:
            # These should be variables
            # There will be many coordinate variables
            self.BrowserLoc = [773, 45]  # as an example
            # Click stream to get to first snapshot
            # Uses various coordinate variables to create click stream
            self.getToFirst = [self.BrowserLoc]
            # Click stream to get to weekly array
            self.getToSecond = [self.BrowserLoc]
            self.getTimePeriods = [self.getToFirst, self.getToSecond]
            self.zeroBrowser = [self.HomeLoc]
            self.clickCmd = self.dirHelpers+"clickScreen.scrpt"
            self.snapshotCmd = self.dirHelpers+"screenshot.scrpt"
            self.activateCmd = self.dirHelpers+"activate.scrpt"
            self.randomWaitSecondsLow
            self.randomWaitSecondsHigh
        '''

    def makeRandomWait(self, lowest, highest):
        """Sleep for a randomized, human-looking interval between actions."""
        if randint(0, 1) == 0:
            randomwait = randint(lowest, highest)+randint(2, 100)/100
            print("randomwait + : ", randomwait)
        else:
            # NOTE(review): randint(highest, highest) always returns
            # `highest`; randint(lowest, highest) may have been intended.
            # Left unchanged to preserve existing timing behavior.
            randomwait = randint(highest, highest)-randint(2, 50)/100
            print("randomwait - : ", randomwait)
        sleep(randomwait)

    def createBrowserWindow(self):
        """Open a browser window of known size/position via the AppleScript helper."""
        check_call([self.openWindow, self.argvalueWindow])
        sleep(randint(2, 7))

    def setBrowserToZero(self):
        '''
        This is where we expect everything to start from
        '''
        for click in self.zeroBrowser:
            # Small random offsets make the clicks look less robotic.
            xClick = click[self.x]+randint(0, 5)
            yClick = click[self.y]+randint(0, 5)
            argvalue = str(xClick) + " " + str(yClick)
            print(self.clickCmd, argvalue)
            check_call([self.clickCmd, argvalue])

    def navigateToLocation(self, navLocationList):
        """Replay a click stream, pausing a random interval after each click."""
        for click in navLocationList:
            xClick = click[self.x]+randint(0, 5)
            yClick = click[self.y]+randint(0, 5)
            argvalue = str(xClick) + " " + str(yClick)
            print(self.clickCmd, argvalue)
            check_call([self.clickCmd, argvalue])
            self.makeRandomWait(self.randomWaitSecondsLow,
                                self.randomWaitSecondsHigh)

    def createSnapshot(self, dirName, filePrefix):
        '''
        Create the snapshot
        '''
        path = self.dirArrays+dirName
        argvalue = path + " " + filePrefix + " "
        argvalue += str(self.ArrayTopLeftCap[0]) + " " + \
            str(self.ArrayTopLeftCap[1]) + " "
        argvalue += str(self.BottomRightCap[0]) + " " + \
            str(self.BottomRightCap[1])
        # Possible race condition here. OK for this application.
        makedirs(path, exist_ok=True)
        check_call([self.snapshotCmd, argvalue])

    def getSingleSnapshot(self, dirName, filePrefix):
        '''
        For debugging and one off runs.
        If your browser is at the right spot, just do a screenshot
        Snapshot relevant part of screen to dir, filename
        eg:
        ./% getSingleSnapshot directoryName/ filePrefix
        '''
        # Bring the browser to the foreground before the capture.
        check_call([self.activateCmd, ""])
        # The original rebuilt the snapshot argument string here too, but the
        # value was never used: createSnapshot() constructs it itself.
        self.createSnapshot(dirName, filePrefix)

    def getAllSnapshots(self, timePeriods, dir4data,
                        filePrefixes, lowerRightCoordlist):
        '''
        Get to the right spot in the browser
        Snapshot relevant part of screen to dir, filename
        ./% getAllSnapshots
        '''
        print(timePeriods)
        print(dir4data)
        print(filePrefixes)
        # Each tuple is (click stream, output dir, file prefix, capture corner).
        for navList, dirName, filePrefix, lowerRightCoord in \
                zip(timePeriods,
                    dir4data, filePrefixes, lowerRightCoordlist):
            print(navList, dirName, filePrefix)
            self.BottomRightCap = lowerRightCoord
            self.navigateToLocation(navList)
            self.createSnapshot(dirName, filePrefix)
| {
"content_hash": "47eb338a017555b861a1d457692ef529",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 74,
"avg_line_length": 38,
"alnum_prop": 0.6056588840522359,
"repo_name": "scarrico/BayAreaFresh",
"id": "ef753de7bad2c93114f59b68a3c8a5c9c00e7463",
"size": "5119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "automation/mac/captureAgent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AppleScript",
"bytes": "5074"
},
{
"name": "C++",
"bytes": "7799"
},
{
"name": "Objective-C",
"bytes": "1301"
},
{
"name": "Python",
"bytes": "32687"
},
{
"name": "Shell",
"bytes": "3985"
}
],
"symlink_target": ""
} |
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed in source control here — for any real
# deployment it should be loaded from the environment instead.
SECRET_KEY = '^7!2q_5-opjjp*i8+57%1lb50d6yjua=bqdj#(kp2%ezg9jl+w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must list the served hostnames once DEBUG is False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# NOTE(review): these prefixed source dirs live *inside* STATIC_ROOT; Django's
# collectstatic expects source dirs to be outside the collection target and
# may reject this layout — confirm against the deployment workflow.
STATICFILES_DIRS = (
    ("js", os.path.join(STATIC_ROOT,'js')),
    ("css", os.path.join(STATIC_ROOT,'css')),
    ("images", os.path.join(STATIC_ROOT,'images')),
    ("fonts", os.path.join(STATIC_ROOT,'fonts')),
)
| {
"content_hash": "a260eed7277ee29c1efcf20302ea5372",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 91,
"avg_line_length": 26.05426356589147,
"alnum_prop": 0.6810473073490033,
"repo_name": "sicnarf74/alpine-djangorestframework-docker",
"id": "4606e39a84e49c210539ce4fea268a022768914f",
"size": "3361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/project/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Nginx",
"bytes": "473"
},
{
"name": "Python",
"bytes": "5322"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import absolute_import
from builtins import str
import ctypes
from collections import defaultdict, namedtuple
from . import lib_openal as al
from . import lib_alc as alc
import pyglet
from pyglet.debug import debug_print
from pyglet.media.exceptions import MediaException
_debug_media = debug_print('debug_media')
class OpenALException(MediaException):
    """MediaException carrying an optional OpenAL error code and error string."""

    def __init__(self, message=None, error_code=None, error_string=None):
        self.message = message
        self.error_code = error_code
        self.error_string = error_string

    def __str__(self):
        if self.error_code is None:
            return 'OpenAL Exception: {}'.format(self.message)
        return 'OpenAL Exception [{}: {}]: {}'.format(
            self.error_code, self.error_string, self.message)
class OpenALObject(object):
    """Base class for OpenAL objects."""

    @classmethod
    def _check_error(cls, message=None):
        """Check whether there is an OpenAL error and raise exception if present."""
        code = al.alGetError()
        if code == 0:
            return
        raw = al.alGetString(code)
        # TODO: Fix return type in generated code?
        as_char_p = ctypes.cast(raw, ctypes.c_char_p)
        raise OpenALException(message=message,
                              error_code=code,
                              error_string=str(as_char_p.value))

    @classmethod
    def _raise_error(cls, message):
        """Raise an exception. Try to check for OpenAL error code too."""
        cls._check_error(message)
        raise OpenALException(message)
class OpenALDevice(OpenALObject):
    """OpenAL audio device."""
    def __init__(self, device_name=None):
        # None asks ALC for the default output device.
        self._al_device = alc.alcOpenDevice(device_name)
        self.check_context_error('Failed to open device.')
        if self._al_device is None:
            raise OpenALException('No OpenAL devices.')
    def __del__(self):
        self.delete()
    def delete(self):
        # Close the ALC device; safe to call more than once.
        if self._al_device is not None:
            if alc.alcCloseDevice(self._al_device) == alc.ALC_FALSE:
                self._raise_context_error('Failed to close device.')
            self._al_device = None
    @property
    def is_ready(self):
        # True while the device handle is still open.
        return self._al_device is not None
    def create_context(self):
        """Create a new ALC context on this device and wrap it."""
        al_context = alc.alcCreateContext(self._al_device, None)
        self.check_context_error('Failed to create context')
        return OpenALContext(self, al_context)
    def get_version(self):
        """Return the device's ALC version as a (major, minor) tuple."""
        major = alc.ALCint()
        minor = alc.ALCint()
        alc.alcGetIntegerv(self._al_device, alc.ALC_MAJOR_VERSION,
                           ctypes.sizeof(major), major)
        self.check_context_error('Failed to get version.')
        alc.alcGetIntegerv(self._al_device, alc.ALC_MINOR_VERSION,
                           ctypes.sizeof(minor), minor)
        self.check_context_error('Failed to get version.')
        return major.value, minor.value
    def get_extensions(self):
        """Return the list of ALC extension names supported by this device."""
        extensions = alc.alcGetString(self._al_device, alc.ALC_EXTENSIONS)
        self.check_context_error('Failed to get extensions.')
        # darwin/linux return one space-separated string; other platforms a
        # NUL-separated string list (see _split_nul_strings).
        if pyglet.compat_platform == 'darwin' or pyglet.compat_platform.startswith('linux'):
            return [x.decode('ascii')
                    for x
                    in ctypes.cast(extensions, ctypes.c_char_p).value.split(b' ')]
        else:
            return self._split_nul_strings(extensions)
    @staticmethod
    def _split_nul_strings(s):
        # NUL-separated list of strings, double-NUL-terminated.
        # NOTE(review): this relies on `s` being a ctypes char pointer where
        # s[i] yields a 1-byte bytes object; on a plain Py3 bytes object s[i]
        # would be an int and never compare equal to b'\0' — confirm the
        # return type of alcGetString in the generated bindings.
        nul = False
        i = 0
        while True:
            if s[i] == b'\0':
                if nul:
                    break
                else:
                    nul = True
            else:
                nul = False
            i += 1
        s = s[:i - 1]
        return filter(None, [ss.strip().decode('ascii') for ss in s.split(b'\0')])
    def check_context_error(self, message=None):
        """Check whether there is an OpenAL error and raise exception if present."""
        error_code = alc.alcGetError(self._al_device)
        if error_code != 0:
            error_string = alc.alcGetString(self._al_device, error_code)
            #TODO: Fix return type in generated code?
            error_string = ctypes.cast(error_string, ctypes.c_char_p)
            raise OpenALException(message=message,
                                  error_code=error_code,
                                  error_string=str(error_string.value))
    def _raise_context_error(self, message):
        """Raise an exception. Try to check for OpenAL error code too."""
        self.check_context_error(message)
        raise OpenALException(message)
class OpenALContext(OpenALObject):
    """Wraps an ALC context; makes itself current on creation."""

    def __init__(self, device, al_context):
        self.device = device
        self._al_context = al_context
        self.make_current()

    def __del__(self):
        self.delete()

    def delete(self):
        """Destroy the underlying ALC context (safe to call twice)."""
        if self._al_context is None:
            return
        # TODO: Check if this context is current
        alc.alcMakeContextCurrent(None)
        self.device.check_context_error('Failed to make context no longer current.')
        alc.alcDestroyContext(self._al_context)
        self.device.check_context_error('Failed to destroy context.')
        self._al_context = None

    def make_current(self):
        """Make this context the process-wide current ALC context."""
        alc.alcMakeContextCurrent(self._al_context)
        self.device.check_context_error('Failed to make context current.')

    def create_source(self):
        """Create a new source bound to this context."""
        self.make_current()
        return OpenALSource(self)
class OpenALSource(OpenALObject):
    """Wraps one AL source id plus a buffer pool and queued-buffer bookkeeping."""
    def __init__(self, context):
        self.context = context
        self.buffer_pool = OpenALBufferPool(context)
        self._al_source = al.ALuint()
        al.alGenSources(1, self._al_source)
        self._check_error('Failed to create source.')
        self._state = None
        self._get_state()
        # AL buffer name (int) -> OpenALBuffer currently queued on this source.
        self._owned_buffers = {}
    def __del__(self):
        self.delete()
    def delete(self):
        # Release the AL source and the pooled buffers; safe to call twice.
        if self._al_source is not None:
            al.alDeleteSources(1, self._al_source)
            self._check_error('Failed to delete source.')
            # TODO: delete buffers in use
            self.buffer_pool.clear()
            self._al_source = None
    @property
    def is_initial(self):
        self._get_state()
        return self._state == al.AL_INITIAL
    @property
    def is_playing(self):
        self._get_state()
        return self._state == al.AL_PLAYING
    @property
    def is_paused(self):
        self._get_state()
        return self._state == al.AL_PAUSED
    @property
    def is_stopped(self):
        self._get_state()
        return self._state == al.AL_STOPPED
    # The three factories below build property objects around the typed
    # al{Get,}Source* accessors.  They are deleted from the class namespace
    # after use (see the `del` statements) so they never appear as methods.
    def _int_source_property(attribute):
        return property(lambda self: self._get_int(attribute),
                        lambda self, value: self._set_int(attribute, value))
    def _float_source_property(attribute):
        return property(lambda self: self._get_float(attribute),
                        lambda self, value: self._set_float(attribute, value))
    def _3floats_source_property(attribute):
        return property(lambda self: self._get_3floats(attribute),
                        lambda self, value: self._set_3floats(attribute, value))
    position = _3floats_source_property(al.AL_POSITION)
    velocity = _3floats_source_property(al.AL_VELOCITY)
    gain = _float_source_property(al.AL_GAIN)
    buffers_queued = _int_source_property(al.AL_BUFFERS_QUEUED)
    buffers_processed = _int_source_property(al.AL_BUFFERS_PROCESSED)
    min_gain = _float_source_property(al.AL_MIN_GAIN)
    max_gain = _float_source_property(al.AL_MAX_GAIN)
    reference_distance = _float_source_property(al.AL_REFERENCE_DISTANCE)
    rolloff_factor = _float_source_property(al.AL_ROLLOFF_FACTOR)
    pitch = _float_source_property(al.AL_PITCH)
    max_distance = _float_source_property(al.AL_MAX_DISTANCE)
    direction = _3floats_source_property(al.AL_DIRECTION)
    cone_inner_angle =_float_source_property(al.AL_CONE_INNER_ANGLE)
    cone_outer_angle = _float_source_property(al.AL_CONE_OUTER_ANGLE)
    cone_outer_gain = _float_source_property(al.AL_CONE_OUTER_GAIN)
    sec_offset = _float_source_property(al.AL_SEC_OFFSET)
    sample_offset = _float_source_property(al.AL_SAMPLE_OFFSET)
    byte_offset = _float_source_property(al.AL_BYTE_OFFSET)
    del _int_source_property
    del _float_source_property
    del _3floats_source_property
    def play(self):
        al.alSourcePlay(self._al_source)
        self._check_error('Failed to play source.')
    def pause(self):
        al.alSourcePause(self._al_source)
        self._check_error('Failed to pause source.')
    def stop(self):
        al.alSourceStop(self._al_source)
        self._check_error('Failed to stop source.')
    def get_buffer(self):
        """Return a free buffer from this source's pool."""
        return self.buffer_pool.get_buffer()
    def queue_buffer(self, buf):
        """Queue *buf* on this source and track ownership until unqueued."""
        assert buf.is_valid
        al.alSourceQueueBuffers(self._al_source, 1, ctypes.byref(buf.al_buffer))
        self._check_error('Failed to queue buffer.')
        self._add_buffer(buf)
    def unqueue_buffers(self):
        """Return processed buffers to the pool; returns the processed count."""
        processed = self.buffers_processed
        assert _debug_media("Processed buffer count: {}".format(processed))
        if processed > 0:
            buffers = (al.ALuint * processed)()
            al.alSourceUnqueueBuffers(self._al_source, len(buffers), buffers)
            self._check_error('Failed to unqueue buffers from source.')
            for buf in buffers:
                self.buffer_pool.unqueue_buffer(self._pop_buffer(buf))
        return processed
    def _get_state(self):
        # Refresh the cached AL_SOURCE_STATE; no-op once the source is deleted.
        if self._al_source is not None:
            self._state = self._get_int(al.AL_SOURCE_STATE)
    def _get_int(self, key):
        assert self._al_source is not None
        al_int = al.ALint()
        al.alGetSourcei(self._al_source, key, al_int)
        self._check_error('Failed to get value')
        return al_int.value
    def _set_int(self, key, value):
        assert self._al_source is not None
        al.alSourcei(self._al_source, key, int(value))
        self._check_error('Failed to set value.')
    def _get_float(self, key):
        assert self._al_source is not None
        al_float = al.ALfloat()
        al.alGetSourcef(self._al_source, key, al_float)
        self._check_error('Failed to get value')
        return al_float.value
    def _set_float(self, key, value):
        assert self._al_source is not None
        al.alSourcef(self._al_source, key, float(value))
        self._check_error('Failed to set value.')
    def _get_3floats(self, key):
        assert self._al_source is not None
        x = al.ALfloat()
        y = al.ALfloat()
        z = al.ALfloat()
        al.alGetSource3f(self._al_source, key, x, y, z)
        self._check_error('Failed to get value')
        return x.value, y.value, z.value
    def _set_3floats(self, key, values):
        assert self._al_source is not None
        x, y, z = map(float, values)
        al.alSource3f(self._al_source, key, x, y, z)
        self._check_error('Failed to set value.')
    def _add_buffer(self, buf):
        # Index by the buffer's integer AL name for lookup in _pop_buffer.
        self._owned_buffers[buf.name] = buf
    def _pop_buffer(self, al_buffer):
        buf = self._owned_buffers.pop(al_buffer, None)
        assert buf is not None
        return buf
# Listener orientation as two 3-vectors: 'at' (forward) and 'up'.
OpenALOrientation = namedtuple("OpenALOrientation", ['at', 'up'])
class OpenALListener(OpenALObject):
    """Wraps the global OpenAL listener (position, velocity, gain, orientation)."""
    # Property factories around the typed al{Get,}Listener* accessors.
    # NOTE(review): unlike OpenALSource, these are not `del`'d afterwards and
    # so remain visible as class attributes.
    def _float_source_property(attribute):
        return property(lambda self: self._get_float(attribute),
                        lambda self, value: self._set_float(attribute, value))
    def _3floats_source_property(attribute):
        return property(lambda self: self._get_3floats(attribute),
                        lambda self, value: self._set_3floats(attribute, value))
    position = _3floats_source_property(al.AL_POSITION)
    velocity = _3floats_source_property(al.AL_VELOCITY)
    gain = _float_source_property(al.AL_GAIN)
    @property
    def orientation(self):
        # Returned as OpenALOrientation(at=[x, y, z], up=[x, y, z]).
        values = self._get_float_vector(al.AL_ORIENTATION, 6)
        return OpenALOrientation(values[0:3], values[3:6])
    @orientation.setter
    def orientation(self, values):
        # Accept either two 3-tuples (at, up) or one flat 6-tuple.
        if len(values) == 2:
            actual_values = values[0] + values[1]
        elif len(values) == 6:
            actual_values = values
        else:
            actual_values = []
        if len(actual_values) != 6:
            raise ValueError("Need 2 tuples of 3 or 1 tuple of 6.")
        self._set_float_vector(al.AL_ORIENTATION, actual_values)
    def _get_float(self, key):
        al_float = al.ALfloat()
        al.alGetListenerf(key, al_float)
        self._check_error('Failed to get value')
        return al_float.value
    def _set_float(self, key, value):
        al.alListenerf(key, float(value))
        self._check_error('Failed to set value.')
    def _get_3floats(self, key):
        x = al.ALfloat()
        y = al.ALfloat()
        z = al.ALfloat()
        al.alGetListener3f(key, x, y, z)
        self._check_error('Failed to get value')
        return x.value, y.value, z.value
    def _set_3floats(self, key, values):
        x, y, z = map(float, values)
        al.alListener3f(key, x, y, z)
        self._check_error('Failed to set value.')
    def _get_float_vector(self, key, count):
        al_float_vector = (al.ALfloat * count)()
        al.alGetListenerfv(key, al_float_vector)
        self._check_error('Failed to get value')
        return [x for x in al_float_vector]
    def _set_float_vector(self, key, values):
        al_float_vector = (al.ALfloat * len(values))(*values)
        al.alListenerfv(key, al_float_vector)
        self._check_error('Failed to set value.')
class OpenALBuffer(OpenALObject):
    """Wraps one AL buffer id; knows how to upload PCM audio data."""
    # (channels, sample size in bits) -> AL format enum.
    _format_map = {
        (1, 8): al.AL_FORMAT_MONO8,
        (1, 16): al.AL_FORMAT_MONO16,
        (2, 8): al.AL_FORMAT_STEREO8,
        (2, 16): al.AL_FORMAT_STEREO16,
    }
    def __init__(self, al_buffer, context):
        self._al_buffer = al_buffer
        self.context = context
        assert self.is_valid
    def __del__(self):
        self.delete()
    @property
    def is_valid(self):
        self._check_error('Before validate buffer.')
        if self._al_buffer is None:
            return False
        valid = bool(al.alIsBuffer(self._al_buffer))
        if not valid:
            # Clear possible error due to invalid buffer
            al.alGetError()
        return valid
    @property
    def al_buffer(self):
        # The underlying ctypes buffer id; asserts the buffer is still live.
        assert self.is_valid
        return self._al_buffer
    @property
    def name(self):
        # The integer AL name of this buffer.
        assert self.is_valid
        return self._al_buffer.value
    def delete(self):
        # Release the AL buffer; safe to call twice (is_valid is False after).
        if self.is_valid:
            al.alDeleteBuffers(1, ctypes.byref(self._al_buffer))
            self._check_error('Error deleting buffer.')
            self._al_buffer = None
    def data(self, audio_data, audio_format, length=None):
        """Upload *audio_data* into this buffer; *length* defaults to audio_data.length."""
        assert self.is_valid
        length = length or audio_data.length
        al_format = self._format_map[(audio_format.channels, audio_format.sample_size)]
        al.alBufferData(self._al_buffer,
                        al_format,
                        audio_data.data,
                        length,
                        audio_format.sample_rate)
        self._check_error('Failed to add data to buffer.')
class OpenALBufferPool(OpenALObject):
    """At least Mac OS X doesn't free buffers when a source is deleted; it just
    detaches them from the source. So keep our own recycled queue.
    """

    def __init__(self, context):
        self.context = context
        self._buffers = []  # recycled OpenALBuffer objects free for reuse

    def __del__(self):
        self.clear()

    def __len__(self):
        return len(self._buffers)

    def clear(self):
        """Delete every pooled buffer."""
        while self._buffers:
            self._buffers.pop().delete()

    def get_buffer(self):
        """Convenience for returning one buffer name"""
        return self.get_buffers(1)[0]

    def get_buffers(self, number):
        """Returns an array containing `number` buffer names. The returned list must
        not be modified in any way, and may get changed by subsequent calls to
        get_buffers.
        """
        acquired = []
        remaining = number
        while remaining > 0:
            candidate = self._buffers.pop() if self._buffers else self.create_buffer()
            # Protect against implementations that DO free buffers
            # when they delete a source - silently drop stale entries.
            if candidate.is_valid:
                acquired.append(candidate)
                remaining -= 1
        return acquired

    def unqueue_buffer(self, buf):
        """A buffer has finished playing, free it."""
        if buf.is_valid:
            self._buffers.append(buf)

    def create_buffer(self):
        """Allocate a brand-new buffer from OpenAL."""
        name = al.ALuint()
        al.alGenBuffers(1, name)
        self._check_error('Error allocating buffer.')
        return OpenALBuffer(name, self.context)
| {
"content_hash": "632658e15d3d47dcb3f72436ef6a4391",
"timestamp": "",
"source": "github",
"line_count": 500,
"max_line_length": 92,
"avg_line_length": 34.518,
"alnum_prop": 0.5951097977866621,
"repo_name": "nicememory/pie",
"id": "2c2ce60747e61ba3d27c8277533ef56f5f26057d",
"size": "18981",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyglet/pyglet/media/drivers/openal/interface.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5318"
},
{
"name": "C",
"bytes": "6624"
},
{
"name": "CSS",
"bytes": "1828"
},
{
"name": "HTML",
"bytes": "9229"
},
{
"name": "JavaScript",
"bytes": "6751"
},
{
"name": "Makefile",
"bytes": "5773"
},
{
"name": "PHP",
"bytes": "2190"
},
{
"name": "Python",
"bytes": "9377528"
},
{
"name": "Shell",
"bytes": "664"
},
{
"name": "Vim script",
"bytes": "2952"
}
],
"symlink_target": ""
} |
"""
grid.place_network_ui.py -- Chaco2 and Traits UI interface for PlaceNetwork model class
Subclass of grid.place_network.PlaceNetwork which enables a graphical interface.
Copyright (c) 2007 Columbia University. All rights reserved.
"""
# Library imports
import numpy, time, wx
# Package imports
from .place_network import PlaceNetwork
from .core.view_model import ViewModel, ViewModelHandler
from .tools.images import array_to_rgba
# Traits imports
from enthought.traits.api import Property, Array, Instance, Range, Float, Bool
from enthought.traits.ui.api import View, VSplit, Group, Item, Heading, Include
# Chaco2 imports
from enthought.chaco.api import Plot, PlotLabel, ArrayPlotData, DataRange1D, hot
from enthought.enable.component_editor import ComponentEditor
# Global constants
COL_WIDTH = 240    # pixel width passed to Item(width=...) in the setup dialog
DISP_UNITS = 200   # cap on the number of output units shown in the units plot
# View for model initialization: a modal ('livemodal') dialog shown before the
# simulation starts, split into an Initialization column and a Parameters column.
initialization_view = View(
    Heading('Set values prior to running simulation'),
    Group(
        Group(
            Item(name='desc', label='Description', width=COL_WIDTH),
            Item(name='num_trials', label='Trials'),
            Item(name='traj_type', label='Trajectory'),
            # Dwell factor only applies to non-random-walk trajectories;
            # duration only applies to random walks.
            Item(name='dwell_factor', enabled_when="traj_type!='randwalk'"),
            Item(name='T', label='Duration (s)', enabled_when="traj_type=='randwalk'"),
            Item(name='dt', label='dt (s)'),
            Item(name='monitor_dt', label='Display dt (s)'),
            Item(name='N_CA', label='Output Units', style='readonly'),
            Item(name='N_EC', label='Grid Inputs', style='readonly'),
            Item(name='C_W', label='Connectivity'),
            label='Initialization',
            show_border=True),
        Group(
            Item(name='J0', width=COL_WIDTH),
            Item(name='tau_r', label='Tau (s)'),
            Group(
                Item(name='phi_lambda', label='Threshold'),
                Item(name='phi_sigma', label='Smoothness'),
                label='Field Nonlinearity:'),
            label='Parameters',
            show_border=True),
        orientation='horizontal'),
    Item(name='growl', label='Growl On/Off', style='simple'),
    Item(name='projdir', label='Project Directory'),
    buttons=['Revert', 'Cancel', 'OK'],
    title='Model Setup',
    kind='livemodal',
    resizable=False)
# View for main simulation visualization: a live window with the three data
# plots on top and the parameter sliders plus the nonlinearity plot below.
simulation_view = View(
    VSplit(
        Group(
            Item(name='field_plots', editor=ComponentEditor()),
            Item(name='units_plot', editor=ComponentEditor()),
            Item(name='traj_plot', editor=ComponentEditor()),
            show_border=False,
            show_labels=False,
            orientation='horizontal'),
        Group(
            Group(
                Group(
                    Item(name='J0'),
                    label='Input Gain',
                    show_border=True),
                Group(
                    Item(name='phi_lambda', label='Lambda'),
                    Item(name='phi_sigma', label='Sigma'),
                    label='Field Nonlinearity',
                    show_border=True),
                Group(
                    Item(name='trail_length'),
                    # Program-flow button disabled once the run has finished.
                    Item(name='_program_flow', show_label=False, enabled_when='done==False'),
                    Item(name='_reset_simulation', show_label=False),
                    show_border=False),
                springy=True,
                show_border=False),
            Item(name='phi_plot', editor=ComponentEditor(), width=0.4),
            show_border=False,
            show_labels=False,
            orientation='horizontal'),
        show_labels=False,
        show_border=False),
    title='PlaceNetwork Simulation',
    resizable=True,
    height=1.0,
    width=1.0,
    buttons=['Revert', 'Cancel', 'OK'],
    kind='live')
class PlaceNetworkUI(ViewModel, PlaceNetwork):
    """
    PlaceNetwork model with Traits UI graphical real-time interface

    Traits naming conventions do the wiring here: ``_<trait>_default``
    methods build the plots, ``_get_<trait>`` methods back the Property
    traits, and ``_<trait>_changed`` methods react to slider edits.
    """

    # Simulation starts paused until the user resumes it.
    pause = True

    # View traits
    sim_view = simulation_view
    init_view = initialization_view

    # Plot instances
    field_plots = Instance(Plot)
    units_plot = Instance(Plot)
    traj_plot = Instance(Plot)
    phi_plot = Instance(Plot)

    # Redefine user parameters as Range traits for sliders
    J0 = Range(low=0.0, high=100.0, value=45)

    # Control nonlinearity variables with sliders
    phi_lambda = Range(low=0.0, high=0.5, value=0.04)
    phi_sigma = Range(low=0.001, high=1.0, value=0.02)

    # Field plots tracking data
    h_aff = Property(Float, track=True)
    h_rec = Property(Float, track=True)
    h_sum = Property(Float, track=True)

    # Phi plot data
    h_range = Property(Array)
    phi_sample = Property(Array)
    _phi_updated = Bool(False)   # set when phi sliders move; consumed by plot update
    t0 = Float

    # Add to the simulation timestep
    def run_timestep(self):
        """Run one model timestep, then yield briefly to the GUI."""
        PlaceNetwork.run_timestep(self)
        # NOP to allow GUI to process
        time.sleep(.001)

    # Creating Plot instances as trait default functions
    def _field_plots_default(self):
        """Build the afferent/recurrent/total synaptic-field line plot."""
        zero = numpy.array([0], 'd')
        data = ArrayPlotData(t=zero, h_rec=zero, h_aff=zero, h_sum=zero)
        p = Plot(data)
        p.plot(('t', 'h_aff'), name='Afferent', type='line', line_width=1, color='royalblue')
        p.plot(('t', 'h_rec'), name='Recurrent', type='line', line_width=1, color='tomato')
        p.plot(('t', 'h_sum'), name='Total', type='line', line_width=1, color='sienna')
        p.legend.visible = True
        p.legend.border_visible = False
        p.legend.align = 'ur'
        p.legend.bgcolor = (0.8, 0.8, 1.0, 0.4)
        p.legend.border_padding = 6
        p.legend.labels = ['Afferent', 'Recurrent', 'Total']
        p.y_grid.visible = p.x_grid.visible = False
        p.title = 'Synaptic Fields'
        p.x_axis.title = 'Time (s)'
        p.y_axis.title = 'Field Strength'
        p.bgcolor = 'mintcream'
        return p

    def _units_plot_default(self):
        """Build the per-unit rate/afferent-input scatter plot.

        Only the first min(N_CA, DISP_UNITS) units are displayed.
        """
        N = min([self.N_CA, DISP_UNITS])
        data = ArrayPlotData(i=numpy.arange(N), r=self.r[:N], i_aff=self.i_aff[:N])
        p = Plot(data)
        p.plot(('i', 'r', 'i_aff'), type='cmap_scatter', color_mapper=hot,
            marker='circle', marker_size=3, line_width=0)
        p.title = 'Place Cell Output'
        p.x_axis.title = 'Output Units'
        p.y_axis.title = 'Rate / Iaff'
        p.value_range.set_bounds(0.0, 1.0)
        p.x_grid.visible = p.y_grid.visible = False
        p.bgcolor = 'slategray'
        return p

    def _traj_plot_default(self):
        """Trajectory plot based on TrajectoryView.t_plot in chaco_threading_demo"""
        zero = numpy.array([0], 'd')
        data = ArrayPlotData(x=zero, y=zero)
        h, w = self.traj.Map.H, self.traj.Map.W
        # Seed the head marker at the trajectory's starting position.
        data.set_data('x0', zero + self.traj.Map.x0[0])
        data.set_data('y0', zero + self.traj.Map.x0[1])
        p = Plot(data)
        p.plot(('x', 'y'), name='trail', color='red')
        p.plot(('x0', 'y0'), name='head', type='scatter', marker='circle', color='red')
        p.y_axis.visible = p.x_axis.visible = False
        p.y_grid.visible = p.x_grid.visible = False
        p.border_visible = True
        p.border_width = 2
        p.title = 'Rat Trajectory'
        p.index_range.set_bounds(0, w)
        p.value_range.set_bounds(0, h)
        p.overlays.append(PlotLabel('X (%d cm)'%w, component=p, overlay_position='bottom'))
        p.overlays.append(PlotLabel('Y (%d cm)'%h, component=p, overlay_position='left', angle=90))
        return p

    def _phi_plot_default(self):
        """Build the line plot of the field nonlinearity Phi[h]."""
        data = ArrayPlotData(h=self.h_range, phi=self.phi_sample)
        p = Plot(data)
        p.plot(('h', 'phi'), type='line', name='phi', color='slateblue', line_width=2.7)
        p.x_axis.title = 'h'
        p.y_axis.title = 'Phi[h]'
        p.x_grid.line_color = p.y_grid.line_color = 'slategray'
        p.bgcolor = 'khaki'
        p.title = 'Nonlinearity'
        return p

    # Callback for updating plot data
    def _update_plots(self):
        """Push the latest simulation state into all four plots.

        ``_trails`` presumably returns recent tracked values for the named
        traits (provided by ViewModel) -- confirm against that class.
        """
        # Field plots data trails
        t, h_aff, h_rec, h_sum = self._trails('t', 'h_aff', 'h_rec', 'h_sum')
        if self.t > self.dt:
            self.field_plots.data.set_data('t', t)
            self.field_plots.data.set_data('h_aff', h_aff)
            self.field_plots.data.set_data('h_rec', h_rec)
            self.field_plots.data.set_data('h_sum', h_sum)
        # Trajectory trails
        new_x, new_y = self._trails('x', 'y')
        self.traj_plot.data.set_data('x', new_x)
        self.traj_plot.data.set_data('y', new_y)
        self.traj_plot.data.set_data('x0', numpy.array([new_x[-1]]))
        self.traj_plot.data.set_data('y0', numpy.array([new_y[-1]]))
        # Units plot update
        N = min([self.N_CA, DISP_UNITS])
        self.units_plot.data.set_data('r', self.r[:N])
        self.units_plot.data.set_data('i_aff', self.i_aff[:N])
        # Keep a little headroom above the current max rate.
        self.units_plot.value_range.high_setting = max([1, 1.05*self.r[:N].max()])
        # Phi data update
        self._update_phi_plot()

    def _update_phi_plot(self):
        """Refresh the nonlinearity plot, but only if a slider moved."""
        if self._phi_updated:
            self.phi_plot.data.set_data('h', self.h_range)
            self.phi_plot.data.set_data('phi', self.phi_sample)
            self._phi_updated = False

    # Trajectory changes refresh the plot
    def _stage_changed(self):
        self._refresh_traj_plot()

    def _traj_type_changed(self):
        self._refresh_traj_plot()

    def _refresh_traj_plot(self):
        """Rebuild the trajectory object and its plot from scratch."""
        self.traj = self.new_trajectory()
        self.traj_plot = self._traj_plot_default()

    # Field tracking properties and trait notifications
    def _get_h_aff(self):
        # Mean afferent input across all units.
        return self.i_aff.mean()

    def _get_h_rec(self):
        # Recurrent field term: -J0 times the summed population rate.
        return -self.J0 * self.r.sum()

    def _get_h_sum(self):
        return self.h_aff + self.h_rec

    # Nonlinearity plot automation and line data
    def _get_phi_sample(self):
        # Nonlinearity evaluated over h_range, shifted by the threshold.
        return self.phi_h(self.h_range - self.phi_lambda)

    def _get_h_range(self):
        # Sample grid extends to at least 2.5, scaling with the threshold.
        return numpy.arange(0, max([2.5, 2.5*self.phi_lambda]), 0.02)

    def _phi_lambda_changed(self):
        self._phi_pause_update()

    def _phi_sigma_changed(self):
        self._phi_pause_update()

    def _phi_pause_update(self):
        """Mark phi data dirty; redraw immediately if the sim is paused."""
        self._phi_updated = True
        if self.pause:
            self._update_phi_plot()

    # Convenience functions for calling views
    def setup(self):
        """Show the modal initialization dialog."""
        self.configure_traits(view='init_view')

    def simulation(self):
        """Show the live simulation window."""
        self.configure_traits(view='sim_view', handler=ViewModelHandler())
if __name__ == "__main__":
    # Demo run: build a grid-cell input collection and drive the full UI.
    # NOTE(review): the relative import means this must be run as a module
    # (python -m ...), not as a plain script -- confirm intended usage.
    from .dmec import GridCollection

    # Fix: removed unused `import os` (nothing in this block referenced it).
    EC = GridCollection()
    ca3 = PlaceNetworkUI(EC=EC, C_W=0.33, growl=False, T=300, desc='demo run')
    ca3.setup()
    ca3.simulation()
| {
"content_hash": "a46d0b93646d76aff28adcf9988b554d",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 111,
"avg_line_length": 39.15238095238095,
"alnum_prop": 0.504986621260034,
"repo_name": "jdmonaco/grid-remapping-model",
"id": "0cd614b06160c7c2f6c21a4657b8bf63c064e89b",
"size": "12372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/place_network_ui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "338176"
},
{
"name": "Shell",
"bytes": "7912"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
# Signature of the optional `cls` hook each operation accepts: it is called
# with (pipeline_response, deserialized_body, dict) -- the dict is passed as
# {} by these operations.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

_SERIALIZER = Serializer()
# Disable client-side validation of values handed to the serializer.
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
    resource_group_name: str, proximity_placement_group_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Construct the HTTP PUT request for the CreateOrUpdate operation."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-03-01"))  # type: str
    content_type = kwargs.pop("content_type", headers.pop("Content-Type", None))  # type: Optional[str]
    accept = headers.pop("Accept", "application/json")

    # Fill the URL template with serialized (URL-escaped) path arguments.
    url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}",
    )  # pylint: disable=line-too-long
    path_args = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "proximityPlacementGroupName": _SERIALIZER.url(
            "proximity_placement_group_name", proximity_placement_group_name, "str"
        ),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    url = _format_url_section(url, **path_args)

    # Query string.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Headers: Content-Type only when a body content type was supplied.
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=url, params=params, headers=headers, **kwargs)
def build_update_request(
    resource_group_name: str, proximity_placement_group_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Construct the HTTP PATCH request for the Update operation."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-03-01"))  # type: str
    content_type = kwargs.pop("content_type", headers.pop("Content-Type", None))  # type: Optional[str]
    accept = headers.pop("Accept", "application/json")

    # Fill the URL template with serialized (URL-escaped) path arguments.
    url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}",
    )  # pylint: disable=line-too-long
    path_args = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "proximityPlacementGroupName": _SERIALIZER.url(
            "proximity_placement_group_name", proximity_placement_group_name, "str"
        ),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    url = _format_url_section(url, **path_args)

    # Query string.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Headers: Content-Type only when a body content type was supplied.
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PATCH", url=url, params=params, headers=headers, **kwargs)
def build_delete_request(
    resource_group_name: str, proximity_placement_group_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Construct the HTTP DELETE request (no body, no Accept header)."""
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-03-01"))  # type: str

    # Fill the URL template with serialized (URL-escaped) path arguments.
    url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}",
    )  # pylint: disable=line-too-long
    path_args = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "proximityPlacementGroupName": _SERIALIZER.url(
            "proximity_placement_group_name", proximity_placement_group_name, "str"
        ),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    url = _format_url_section(url, **path_args)

    # Query string.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    return HttpRequest(method="DELETE", url=url, params=params, **kwargs)
def build_get_request(
    resource_group_name: str,
    proximity_placement_group_name: str,
    subscription_id: str,
    *,
    include_colocation_status: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Construct the HTTP GET request for a single proximity placement group."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-03-01"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # Fill the URL template with serialized (URL-escaped) path arguments.
    url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}",
    )  # pylint: disable=line-too-long
    path_args = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "proximityPlacementGroupName": _SERIALIZER.url(
            "proximity_placement_group_name", proximity_placement_group_name, "str"
        ),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    url = _format_url_section(url, **path_args)

    # Query string: the colocation-status flag is optional.
    if include_colocation_status is not None:
        params["includeColocationStatus"] = _SERIALIZER.query(
            "include_colocation_status", include_colocation_status, "str"
        )
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Headers.
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_list_by_subscription_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Construct the HTTP GET request listing groups across a subscription."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-03-01"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # Fill the URL template with the serialized subscription id.
    url = kwargs.pop(
        "template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/proximityPlacementGroups"
    )
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    url = _format_url_section(url, **path_args)

    # Query string.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Headers.
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Construct the HTTP GET request listing groups in one resource group."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-03-01"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # Fill the URL template with serialized (URL-escaped) path arguments.
    url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups",
    )  # pylint: disable=line-too-long
    path_args = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    url = _format_url_section(url, **path_args)

    # Query string.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Headers.
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
class ProximityPlacementGroupsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2021_03_01.ComputeManagementClient`'s
:attr:`proximity_placement_groups` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
    """Bind the shared client, config, and (de)serializers injected by the service client."""
    remaining = list(args)

    def _take(name):
        # Positional arguments win; fall back to the keyword of the same name.
        return remaining.pop(0) if remaining else kwargs.pop(name)

    self._client = _take("client")
    self._config = _take("config")
    self._serialize = _take("serializer")
    self._deserialize = _take("deserializer")
@overload
def create_or_update(
    self,
    resource_group_name: str,
    proximity_placement_group_name: str,
    parameters: _models.ProximityPlacementGroup,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> _models.ProximityPlacementGroup:
    """Create or update a proximity placement group.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param proximity_placement_group_name: The name of the proximity placement group. Required.
    :type proximity_placement_group_name: str
    :param parameters: Parameters supplied to the Create Proximity Placement Group operation.
     Required.
    :type parameters: ~azure.mgmt.compute.v2021_03_01.models.ProximityPlacementGroup
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ProximityPlacementGroup or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2021_03_01.models.ProximityPlacementGroup
    :raises ~azure.core.exceptions.HttpResponseError:
    """

@overload
def create_or_update(
    self,
    resource_group_name: str,
    proximity_placement_group_name: str,
    parameters: IO,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> _models.ProximityPlacementGroup:
    """Create or update a proximity placement group.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param proximity_placement_group_name: The name of the proximity placement group. Required.
    :type proximity_placement_group_name: str
    :param parameters: Parameters supplied to the Create Proximity Placement Group operation.
     Required.
    :type parameters: IO
    :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ProximityPlacementGroup or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2021_03_01.models.ProximityPlacementGroup
    :raises ~azure.core.exceptions.HttpResponseError:
    """

@distributed_trace
def create_or_update(
    self,
    resource_group_name: str,
    proximity_placement_group_name: str,
    parameters: Union[_models.ProximityPlacementGroup, IO],
    **kwargs: Any
) -> _models.ProximityPlacementGroup:
    """Create or update a proximity placement group.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param proximity_placement_group_name: The name of the proximity placement group. Required.
    :type proximity_placement_group_name: str
    :param parameters: Parameters supplied to the Create Proximity Placement Group operation. Is
     either a model type or a IO type. Required.
    :type parameters: ~azure.mgmt.compute.v2021_03_01.models.ProximityPlacementGroup or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ProximityPlacementGroup or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2021_03_01.models.ProximityPlacementGroup
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Default HTTP status -> exception mapping; callers may extend or
    # override it via the `error_map` keyword argument.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.ProximityPlacementGroup]

    content_type = content_type or "application/json"
    _json = None
    _content = None
    # Raw IO/bytes payloads pass through untouched; model objects are
    # serialized to their JSON wire representation.
    if isinstance(parameters, (IO, bytes)):
        _content = parameters
    else:
        _json = self._serialize.body(parameters, "ProximityPlacementGroup")

    request = build_create_or_update_request(
        resource_group_name=resource_group_name,
        proximity_placement_group_name=proximity_placement_group_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        template_url=self.create_or_update.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 200 = updated, 201 = created; anything else is an error.
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize("ProximityPlacementGroup", pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize("ProximityPlacementGroup", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}"}  # type: ignore
@overload
def update(
    self,
    resource_group_name: str,
    proximity_placement_group_name: str,
    parameters: _models.ProximityPlacementGroupUpdate,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> _models.ProximityPlacementGroup:
    """Update a proximity placement group.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param proximity_placement_group_name: The name of the proximity placement group. Required.
    :type proximity_placement_group_name: str
    :param parameters: Parameters supplied to the Update Proximity Placement Group operation.
     Required.
    :type parameters: ~azure.mgmt.compute.v2021_03_01.models.ProximityPlacementGroupUpdate
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ProximityPlacementGroup or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2021_03_01.models.ProximityPlacementGroup
    :raises ~azure.core.exceptions.HttpResponseError:
    """

@overload
def update(
    self,
    resource_group_name: str,
    proximity_placement_group_name: str,
    parameters: IO,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> _models.ProximityPlacementGroup:
    """Update a proximity placement group.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param proximity_placement_group_name: The name of the proximity placement group. Required.
    :type proximity_placement_group_name: str
    :param parameters: Parameters supplied to the Update Proximity Placement Group operation.
     Required.
    :type parameters: IO
    :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ProximityPlacementGroup or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2021_03_01.models.ProximityPlacementGroup
    :raises ~azure.core.exceptions.HttpResponseError:
    """

@distributed_trace
def update(
    self,
    resource_group_name: str,
    proximity_placement_group_name: str,
    parameters: Union[_models.ProximityPlacementGroupUpdate, IO],
    **kwargs: Any
) -> _models.ProximityPlacementGroup:
    """Update a proximity placement group.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param proximity_placement_group_name: The name of the proximity placement group. Required.
    :type proximity_placement_group_name: str
    :param parameters: Parameters supplied to the Update Proximity Placement Group operation. Is
     either a model type or a IO type. Required.
    :type parameters: ~azure.mgmt.compute.v2021_03_01.models.ProximityPlacementGroupUpdate or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ProximityPlacementGroup or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2021_03_01.models.ProximityPlacementGroup
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Default HTTP status -> exception mapping; callers may extend or
    # override it via the `error_map` keyword argument.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.ProximityPlacementGroup]

    content_type = content_type or "application/json"
    _json = None
    _content = None
    # Raw IO/bytes payloads pass through untouched; model objects are
    # serialized to their JSON wire representation.
    if isinstance(parameters, (IO, bytes)):
        _content = parameters
    else:
        _json = self._serialize.body(parameters, "ProximityPlacementGroupUpdate")

    request = build_update_request(
        resource_group_name=resource_group_name,
        proximity_placement_group_name=proximity_placement_group_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        template_url=self.update.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # PATCH succeeds only with 200; anything else is an error.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize("ProximityPlacementGroup", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}"}  # type: ignore
@distributed_trace
def delete(  # pylint: disable=inconsistent-return-statements
    self, resource_group_name: str, proximity_placement_group_name: str, **kwargs: Any
) -> None:
    """Delete a proximity placement group.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param proximity_placement_group_name: The name of the proximity placement group. Required.
    :type proximity_placement_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Default mapping of HTTP status codes to azure-core exception types; a
    # caller-supplied ``error_map`` kwarg may extend or override these.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # The api-version may be overridden through kwargs or query parameters.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Build the HTTP DELETE request from the ARM URL template.
    request = build_delete_request(
        resource_group_name=resource_group_name,
        proximity_placement_group_name=proximity_placement_group_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self.delete.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore
    # Run the request through the client pipeline (synchronous, non-streaming).
    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        # Translate known error status codes into their mapped exceptions,
        # otherwise raise a generic ARM-formatted HttpResponseError.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # A custom ``cls`` callable, if given, may transform the raw response.
    if cls:
        return cls(pipeline_response, None, {})


delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}"}  # type: ignore
@distributed_trace
def get(
    self,
    resource_group_name: str,
    proximity_placement_group_name: str,
    include_colocation_status: Optional[str] = None,
    **kwargs: Any
) -> _models.ProximityPlacementGroup:
    """Retrieves information about a proximity placement group .

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param proximity_placement_group_name: The name of the proximity placement group. Required.
    :type proximity_placement_group_name: str
    :param include_colocation_status: includeColocationStatus=true enables fetching the colocation
     status of all the resources in the proximity placement group. Default value is None.
    :type include_colocation_status: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ProximityPlacementGroup or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2021_03_01.models.ProximityPlacementGroup
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Default mapping of HTTP status codes to azure-core exception types; a
    # caller-supplied ``error_map`` kwarg may extend or override these.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # The api-version may be overridden through kwargs or query parameters.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.ProximityPlacementGroup]
    # Build the HTTP GET request from the ARM URL template.
    request = build_get_request(
        resource_group_name=resource_group_name,
        proximity_placement_group_name=proximity_placement_group_name,
        subscription_id=self._config.subscription_id,
        include_colocation_status=include_colocation_status,
        api_version=api_version,
        template_url=self.get.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore
    # Run the request through the client pipeline (synchronous, non-streaming).
    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # Deserialize the JSON payload into the generated model type.
    deserialized = self._deserialize("ProximityPlacementGroup", pipeline_response)
    # A custom ``cls`` callable, if given, may transform the raw response.
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized


get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}"}  # type: ignore
@distributed_trace
def list_by_subscription(self, **kwargs: Any) -> Iterable["_models.ProximityPlacementGroup"]:
    """Lists all proximity placement groups in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ProximityPlacementGroup or the result of
     cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_03_01.models.ProximityPlacementGroup]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # The api-version may be overridden through kwargs or query parameters.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.ProximityPlacementGroupListResult]
    # Default mapping of HTTP status codes to azure-core exception types; a
    # caller-supplied ``error_map`` kwarg may extend or override these.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        # First page: build the canonical list request from the URL template.
        if not next_link:
            request = build_list_by_subscription_request(
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.list_by_subscription.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
        else:
            # make call to next link with the client's api-version
            _parsed_next_link = urllib.parse.urlparse(next_link)
            _next_request_params = case_insensitive_dict(
                {
                    key: [urllib.parse.quote(v) for v in value]
                    for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                }
            )
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest(
                "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Deserialize one page; return (continuation token, iterator of items).
        deserialized = self._deserialize("ProximityPlacementGroupListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a single page, raising on any non-200 response.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    # ItemPaged lazily drives get_next/extract_data as the caller iterates.
    return ItemPaged(get_next, extract_data)


list_by_subscription.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/proximityPlacementGroups"}  # type: ignore
@distributed_trace
def list_by_resource_group(
    self, resource_group_name: str, **kwargs: Any
) -> Iterable["_models.ProximityPlacementGroup"]:
    """Lists all proximity placement groups in a resource group.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ProximityPlacementGroup or the result of
     cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_03_01.models.ProximityPlacementGroup]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # The api-version may be overridden through kwargs or query parameters.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.ProximityPlacementGroupListResult]
    # Default mapping of HTTP status codes to azure-core exception types; a
    # caller-supplied ``error_map`` kwarg may extend or override these.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        # First page: build the canonical list request from the URL template.
        if not next_link:
            request = build_list_by_resource_group_request(
                resource_group_name=resource_group_name,
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.list_by_resource_group.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
        else:
            # make call to next link with the client's api-version
            _parsed_next_link = urllib.parse.urlparse(next_link)
            _next_request_params = case_insensitive_dict(
                {
                    key: [urllib.parse.quote(v) for v in value]
                    for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                }
            )
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest(
                "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Deserialize one page; return (continuation token, iterator of items).
        deserialized = self._deserialize("ProximityPlacementGroupListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a single page, raising on any non-200 response.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    # ItemPaged lazily drives get_next/extract_data as the caller iterates.
    return ItemPaged(get_next, extract_data)


list_by_resource_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups"}  # type: ignore
| {
"content_hash": "a1131ece934baac27acada6525204f80",
"timestamp": "",
"source": "github",
"line_count": 814,
"max_line_length": 208,
"avg_line_length": 44.853808353808354,
"alnum_prop": 0.6493111664977678,
"repo_name": "Azure/azure-sdk-for-python",
"id": "8f282710cde00c87e21c868c60e008a079ed19c5",
"size": "37011",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_03_01/operations/_proximity_placement_groups_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from oslotest import base as test_base
import testscenarios.testcase
from oslo.i18n import _locale
class LocaleDirVariableTest(testscenarios.testcase.WithScenarios,
                            test_base.BaseTestCase):
    """Check how translation domains map to locale-dir env variable names."""

    scenarios = [
        ('simple', {'domain': 'simple', 'expected': 'SIMPLE_LOCALEDIR'}),
        ('with_dot', {'domain': 'one.two', 'expected': 'ONE_TWO_LOCALEDIR'}),
        ('with_dash', {'domain': 'one-two', 'expected': 'ONE_TWO_LOCALEDIR'}),
    ]

    def test_make_variable_name(self):
        """Each domain yields its upper-cased, underscore-joined variable name."""
        actual = _locale.get_locale_dir_variable_name(self.domain)
        self.assertEqual(self.expected, actual)
| {
"content_hash": "8989d8315f2ca8d27454136be69167ba",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 34.77777777777778,
"alnum_prop": 0.6357827476038339,
"repo_name": "citrix-openstack-build/oslo.i18n",
"id": "ca874829352983a37f1d0e38b344426463e438de",
"size": "1224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_locale_dir_variable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""Define tests for v2 Sensor objects."""
# pylint: disable=unused-argument
import aiohttp
import pytest
from simplipy import API
from simplipy.errors import SimplipyError
from tests.common import TEST_AUTHORIZATION_CODE, TEST_CODE_VERIFIER, TEST_SYSTEM_ID
@pytest.mark.asyncio
async def test_properties_v2(aresponses, v2_server):
    """Verify that v2 sensor properties are exposed correctly."""
    async with aiohttp.ClientSession() as session:
        api = await API.async_from_auth(
            TEST_AUTHORIZATION_CODE, TEST_CODE_VERIFIER, session=session
        )
        systems = await api.async_get_systems()
        system = systems[TEST_SYSTEM_ID]

        keypad = system.sensors["195"]
        assert keypad.data == 0
        assert keypad.settings == 1
        assert not keypad.error
        assert not keypad.low_battery
        # On a V2 system only entry sensors report a "triggered" state; reading
        # the property on any other sensor type must raise.
        with pytest.raises(SimplipyError):
            _ = keypad.triggered

        door = system.sensors["609"]
        assert door.data == 130
        assert door.settings == 1
        assert not door.error
        assert not door.low_battery
        assert not door.trigger_instantly
        assert not door.triggered

        aresponses.assert_plan_strictly_followed()
| {
"content_hash": "cdd69fb4fa098d1db8d85d8ead4c6fe1",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 84,
"avg_line_length": 35.5,
"alnum_prop": 0.6859154929577465,
"repo_name": "w1ll1am23/simplisafe-python",
"id": "9115043df0ab4beb770002f44e644d4115ac2c73",
"size": "1420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sensor/test_v2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "442"
},
{
"name": "Python",
"bytes": "36192"
}
],
"symlink_target": ""
} |
from .fitters import *
from .regions import *
from .optregion import *
from .findbase import *
from .fit import *
from .utils import OptionalAttributeError
| {
"content_hash": "1398b0c7ce8b18aa9065d007f7327445",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 41,
"avg_line_length": 26,
"alnum_prop": 0.7756410256410257,
"repo_name": "hamogu/psfsubtraction",
"id": "6052eb0c63e3acfbf90dd28c30532b8a4e336161",
"size": "156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "psfsubtraction/fitpsf/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "98200"
}
],
"symlink_target": ""
} |
from . someclass import *
| {
"content_hash": "b8684e879472271b41cb1e5eab1836f3",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 25,
"avg_line_length": 26,
"alnum_prop": 0.7307692307692307,
"repo_name": "audy/SomePackage",
"id": "b6a239ab2e0dc4426c830af2d6d43db170ffa501",
"size": "26",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "somepackage/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2764"
}
],
"symlink_target": ""
} |
from .action import *
from .page import *
from .form import *
from .hook import *
@IN.register
def register():
    """Register class members for the Field type.

    instance:
        class    - the class type is assigned directly
        instance - an instance is created and assigned; all objects then
                   share that same member instance
    """
    form_member = {
        'name': 'FieldConfigForm',  # member name
        'instance': 'class',        # type of instance
    }
    return {
        # every field-type class gets a FieldConfigForm member
        'class_members': {
            'Field': {
                'FieldConfigForm': form_member,
            },
        },
    }
| {
"content_hash": "e9721b7b1ae6049a54e3ccc80d071e8b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 96,
"avg_line_length": 26.52173913043478,
"alnum_prop": 0.6295081967213115,
"repo_name": "vinoth3v/In",
"id": "3b6708b21c9042b611463c617e463dca470ddf08",
"size": "610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "In/field/admin/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "33032"
},
{
"name": "Python",
"bytes": "779047"
}
],
"symlink_target": ""
} |
from django import forms
from tinycontent.models import TinyContent
class GIODOMixin(object):
    """Form mixin adding a mandatory GIODO (data-protection) consent checkbox.

    The checkbox label is taken from the 'giodo' TinyContent entry; a
    placeholder label is used when that entry does not exist.
    """

    def __init__(self, *args, **kwargs):
        super(GIODOMixin, self).__init__(*args, **kwargs)
        consent = forms.BooleanField(required=True)
        try:
            consent.label = TinyContent.get_content_by_name('giodo').content
        except TinyContent.DoesNotExist:
            consent.label = 'Lorem ipsum'
        self.fields['giodo'] = consent
| {
"content_hash": "831d7a07e4bdff3d7d07147897d9d66c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 35.38461538461539,
"alnum_prop": 0.65,
"repo_name": "ad-m/django-atom",
"id": "fabd72a4392f94024ffe132283a045f7e026debb",
"size": "460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atom/ext/tinycontent/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1223"
},
{
"name": "Python",
"bytes": "48546"
}
],
"symlink_target": ""
} |
"""Serves a list of stations found via multicast.
Run with:
python -m openhtf.output.servers.dashboard_server
"""
import argparse
import collections
import json
import logging
import socket
import threading
import time
from openhtf.output.servers import pub_sub
from openhtf.output.servers import station_server
from openhtf.output.servers import web_gui_server
from openhtf.output.web_gui import web_launcher
from openhtf.util import data
from openhtf.util import multicast
import sockjs.tornado
import tornado.web
# Module-level logger for the dashboard server.
_LOG = logging.getLogger(__name__)
# Server type identifier returned by DashboardServer._get_config().
DASHBOARD_SERVER_TYPE = 'dashboard'
class StationInfo(
    collections.namedtuple(
        'StationInfo',
        'cell host port station_id status test_description test_name')):
  """Immutable record describing a station server found via discovery."""
def _discover(**kwargs):
    """Yields info about station servers announcing themselves via multicast.

    Args:
      **kwargs: Forwarded to multicast.send() (e.g. address, port, ttl,
        local_only).

    Yields:
      StationInfo tuples, one per well-formed multicast response.
    """
    query = station_server.MULTICAST_QUERY
    for host, response in multicast.send(query, **kwargs):
        try:
            result = json.loads(response)
        except ValueError:
            _LOG.warning('Received bad JSON over multicast from %s: %s', host,
                         response)
            # BUG FIX: without this `continue`, execution fell through and used
            # `result`, which is unbound (first iteration) or stale (later ones).
            continue
        try:
            yield StationInfo(result['cell'], host, result['port'],
                              result['station_id'], 'ONLINE',
                              result.get('test_description'), result['test_name'])
        except KeyError:
            # Distinguish responses from an older station API version from
            # genuinely malformed responses.
            if 'last_activity_time_millis' in result:
                _LOG.debug('Received old station API response on multicast. Ignoring.')
            else:
                _LOG.warning('Received bad multicast response from %s: %s', host,
                             response)
class StationListHandler(tornado.web.RequestHandler):
    """GET endpoint for the list of available stations.

    Returns the same payload that DashboardPubSub pushes to subscribers.
    """

    def get(self):
        station_message = DashboardPubSub.make_message()
        self.write(station_message)
class DashboardPubSub(pub_sub.PubSub):
    """WebSocket endpoint for the list of available stations."""
    _lock = threading.Lock()  # Required by pub_sub.PubSub.
    subscribers = set()  # Required by pub_sub.PubSub.
    # Last message published to subscribers (None until the first publish).
    last_message = None
    # Maps 'host:port' -> StationInfo for every station ever seen.
    station_map = {}
    station_map_lock = threading.Lock()  # Guards station_map.

    def on_subscribe(self, unused_info):
        """Called by the base class when a client connects."""
        # Bring the new subscriber up to date immediately instead of making it
        # wait for the next discovery cycle.
        if self.last_message is not None:
            self.send(self.last_message)

    @classmethod
    def update_stations(cls, station_info_list):
        """Called by the station discovery loop to update the station map."""
        with cls.station_map_lock:
            # By default, assume old stations are unreachable.
            for host_port, station_info in cls.station_map.items():
                cls.station_map[host_port] = station_info._replace(status='UNREACHABLE')
            # Stations that did respond overwrite their UNREACHABLE entry.
            for station_info in station_info_list:
                host_port = '%s:%s' % (station_info.host, station_info.port)
                cls.station_map[host_port] = station_info

    @classmethod
    def publish_if_new(cls):
        """If the station map has changed, publish the new information."""
        message = cls.make_message()
        if message != cls.last_message:
            super(DashboardPubSub, cls).publish(message)
            cls.last_message = message

    @classmethod
    def make_message(cls):
        """Snapshot the station map as JSON-serializable base types."""
        with cls.station_map_lock:
            return data.convert_to_base_types(cls.station_map)
class DashboardServer(web_gui_server.WebGuiServer):
    """Serves a list of known stations and an Angular frontend."""

    def __init__(self, port):
        sockjs_router = sockjs.tornado.SockJSRouter(DashboardPubSub, '/sub/dashboard')
        all_routes = list(sockjs_router.urls)
        all_routes.append(('/station_list', StationListHandler))
        super(DashboardServer, self).__init__(all_routes, port)

    def _get_config(self):
        """Configuration handed to the web GUI frontend."""
        return {'server_type': DASHBOARD_SERVER_TYPE}

    def run(self):
        _LOG.info('Starting dashboard server at:\n' # pylint: disable=logging-format-interpolation
                  ' Local: http://localhost:{port}\n'
                  ' Remote: http://{host}:{port}'.format(
                      host=socket.gethostname(), port=self.port))
        super(DashboardServer, self).run()

    def stop(self):
        _LOG.info('Stopping dashboard server.')
        super(DashboardServer, self).stop()
def main():
    """Entry point: start the dashboard server and run the discovery loop."""
    logging.basicConfig(level=logging.INFO)
    arg_parser = argparse.ArgumentParser(
        description='Serves web GUI for interacting with multiple OpenHTF '
        'stations.')
    arg_parser.add_argument(
        '--discovery-interval-s',
        type=int,
        default=1,
        help='Seconds between station discovery attempts.')
    arg_parser.add_argument(
        '--launch-web-gui',
        default=True,
        action='store_true',
        help='Whether to automatically open web GUI.')
    arg_parser.add_argument(
        '--no-launch-web-gui',
        dest='launch_web_gui',
        action='store_false',
        help='Whether to automatically open web GUI.')
    arg_parser.add_argument(
        '--dashboard-server-port',
        type=int,
        default=12000,
        help='Port on which to serve the dashboard server.')
    # These have default values in openhtf.util.multicast.py.
    arg_parser.add_argument('--station-discovery-address', type=str)
    arg_parser.add_argument('--station-discovery-port', type=int)
    arg_parser.add_argument('--station-discovery-ttl', type=int)
    arg_parser.add_argument(
        '--no-local-only',
        action='store_false',
        default=True,
        dest='station_discovery_local_only',
        help=('Whether to discover only local stations.'))
    opts = arg_parser.parse_args()

    with DashboardServer(opts.dashboard_server_port) as server:
        if opts.launch_web_gui:
            gui_url = 'http://localhost:%s' % (server.port,)
            try:
                web_launcher.launch(gui_url)
            except Exception:  # pylint: disable=broad-except
                _LOG.exception('Problem launching web gui')

        # Make kwargs from command line arguments.
        multicast_kwargs = {}
        for attr in ('address', 'port', 'ttl', 'local_only'):
            value = getattr(opts, 'station_discovery_%s' % attr)
            if value is not None:
                multicast_kwargs[attr] = value

        _LOG.info('Starting station discovery.')
        # Exit on CTRL+C.
        while True:
            discovered = _discover(**multicast_kwargs)
            DashboardPubSub.update_stations(list(discovered))
            DashboardPubSub.publish_if_new()
            time.sleep(opts.discovery_interval_s)
| {
"content_hash": "62c17b6014fdb215b2ff7d51ec533ba1",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 95,
"avg_line_length": 30.690821256038646,
"alnum_prop": 0.6604753659688336,
"repo_name": "google/openhtf",
"id": "a1276dacf2397534ab6d11e4acc80ae25cc44f56",
"size": "6929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openhtf/output/servers/dashboard_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "24871"
},
{
"name": "JavaScript",
"bytes": "11873"
},
{
"name": "Python",
"bytes": "1266905"
},
{
"name": "SCSS",
"bytes": "29020"
},
{
"name": "TypeScript",
"bytes": "154488"
}
],
"symlink_target": ""
} |
import geojson.geometry
from typing import Callable, Tuple
from shapely.geometry import LineString
def geojson_linestring_to_shp_linestring(geojson_linestring: geojson.geometry.LineString,
                                         coordinate_convertor: Callable[[float, float], Tuple[float, float]] = None) -> LineString:
    """Convert a GeoJSON LineString feature into a Shapely LineString.

    GeoJSON stores coordinates as (lon, lat); the resulting points are in
    (lat, lon) order, optionally transformed by ``coordinate_convertor``.

    :param geojson_linestring: GeoJSON object whose ``"geometry"`` member holds
        LineString coordinates. (NOTE(review): despite the annotation, the
        object is indexed like a *feature* — confirm the intended input type.)
    :param coordinate_convertor: optional callable mapping (lat, lon) to a
        projected (x, y) pair; when None, raw (lat, lon) pairs are used.
    :return: Shapely LineString built from the converted points.
    """
    coords = geojson_linestring["geometry"]["coordinates"]
    # Hoist the branch out of the loop and build the point list in one pass.
    if coordinate_convertor is None:
        points = [(lon_lat[1], lon_lat[0]) for lon_lat in coords]
    else:
        points = [coordinate_convertor(lon_lat[1], lon_lat[0]) for lon_lat in coords]
    return LineString(points)
| {
"content_hash": "6fd2dcf16c3bc17f0f8f128d862b5536",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 91,
"avg_line_length": 28.05263157894737,
"alnum_prop": 0.7504690431519699,
"repo_name": "aicenter/roadmap-processing",
"id": "e8b20f4826e919d35265da88a8e42e34846884ab",
"size": "1347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roadmaptools/geojson_shp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "112683"
}
],
"symlink_target": ""
} |
from collections import namedtuple
from io import BytesIO
from operator import attrgetter, itemgetter
from dateutil.relativedelta import relativedelta
from flask import flash, jsonify, redirect, render_template, request, session
from markupsafe import Markup, escape
from marshmallow import fields
from marshmallow_enum import EnumField
from PIL import Image
from sqlalchemy.orm import joinedload, load_only, subqueryload
from sqlalchemy.orm.exc import StaleDataError
from webargs import validate
from werkzeug.exceptions import BadRequest, Forbidden, NotFound
from indico.core import signals
from indico.core.auth import multipass
from indico.core.cache import make_scoped_cache
from indico.core.db import db
from indico.core.db.sqlalchemy.util.queries import get_n_matching
from indico.core.errors import UserValueError
from indico.core.marshmallow import mm
from indico.core.notifications import make_email, send_email
from indico.modules.admin import RHAdminBase
from indico.modules.auth import Identity
from indico.modules.auth.models.registration_requests import RegistrationRequest
from indico.modules.auth.util import register_user
from indico.modules.categories import Category
from indico.modules.events import Event
from indico.modules.events.util import serialize_event_for_ical
from indico.modules.users import User, logger, user_management_settings
from indico.modules.users.forms import (AdminAccountRegistrationForm, AdminsForm, AdminUserSettingsForm, MergeForm,
SearchForm, UserEmailsForm, UserPreferencesForm)
from indico.modules.users.models.affiliations import Affiliation
from indico.modules.users.models.emails import UserEmail
from indico.modules.users.models.users import ProfilePictureSource, UserTitle
from indico.modules.users.operations import create_user
from indico.modules.users.schemas import (AffiliationSchema, BasicCategorySchema, FavoriteEventSchema,
UserPersonalDataSchema)
from indico.modules.users.util import (get_avatar_url_from_name, get_gravatar_for_user, get_linked_events,
get_related_categories, get_suggested_categories, get_unlisted_events,
merge_users, search_users, send_avatar, serialize_user, set_user_avatar)
from indico.modules.users.views import (WPUser, WPUserDashboard, WPUserFavorites, WPUserPersonalData, WPUserProfilePic,
WPUsersAdmin)
from indico.util.date_time import now_utc
from indico.util.i18n import _
from indico.util.images import square
from indico.util.marshmallow import HumanizedDate, Principal, validate_with_message
from indico.util.signals import values_from_signal
from indico.util.string import make_unique_token
from indico.web.args import use_args, use_kwargs
from indico.web.flask.templating import get_template_module
from indico.web.flask.util import send_file, url_for
from indico.web.forms.base import FormDefaults
from indico.web.http_api.metadata import Serializer
from indico.web.rh import RH, RHProtected, allow_signed_url
from indico.web.util import is_legacy_signed_url_valid, jsonify_data, jsonify_form, jsonify_template
# Profile attributes that exist both on a User and on an identity record.
IDENTITY_ATTRIBUTES = {'first_name', 'last_name', 'email', 'affiliation', 'full_name'}
# Record combining the identity attributes with profile/avatar URLs and the
# user object itself.
# NOTE(review): the field order comes from iterating a set union, so it is not
# stable between interpreter runs — access fields by name only.
UserEntry = namedtuple('UserEntry', IDENTITY_ATTRIBUTES | {'profile_url', 'avatar_url', 'user'})
def get_events_in_categories(category_ids, user, limit=10):
    """Get all the user-accessible events in a given set of categories.

    :param category_ids: IDs of the categories whose subtrees are searched
    :param user: the user for whom access checks are performed
    :param limit: maximum number of events returned
    :return: up to ``limit`` upcoming accessible events, ordered by start
        date and id
    """
    tz = session.tzinfo
    today = now_utc(False).astimezone(tz).date()
    query = (Event.query
             .filter(~Event.is_deleted,
                     Event.category_chain_overlaps(category_ids),
                     # Consistency fix: reuse the ``tz`` bound above instead of
                     # re-reading session.tzinfo (same value, clearer intent).
                     Event.start_dt.astimezone(tz) >= today)
             .options(joinedload('category').load_only('id', 'title'),
                      joinedload('series'),
                      joinedload('label'),
                      subqueryload('acl_entries'),
                      load_only('id', 'category_id', 'start_dt', 'end_dt', 'title', 'access_key',
                                'protection_mode', 'series_id', 'series_pos', 'series_count',
                                'label_id', 'label_message'))
             .order_by(Event.start_dt, Event.id))
    # Filter by per-event ACLs in Python; stop once ``limit`` matches are found.
    return get_n_matching(query, limit, lambda x: x.can_access(user))
class RHUserBase(RHProtected):
    """Base RH for endpoints operating on a specific user.

    Resolves the target user from the ``user_id`` view arg (falling back to the
    session user) and enforces the standard modification permission check.
    """

    # Whether to flash warnings about deleted/pending users on plain GET requests.
    flash_user_status = True
    # Whether the special system user may be targeted by this RH.
    allow_system_user = False

    def _process_args(self):
        if not session.user:
            # Not logged in; RHProtected._check_access will reject the request.
            return
        self.user = session.user
        if 'user_id' in request.view_args:
            self.user = User.get(request.view_args['user_id'])
            if self.user is None:
                raise NotFound('This user does not exist')
            elif request.method == 'GET' and not request.is_xhr and self.flash_user_status:
                # Show messages about the user's status if it's a simple GET request
                if self.user.is_deleted:
                    if self.user.merged_into_id is not None:
                        msg = _('This user has been merged into <a href="{url}">another user</a>.')
                        flash(Markup(msg).format(url=url_for(request.endpoint, self.user.merged_into_user)), 'warning')
                    else:
                        flash(_('This user is marked as deleted.'), 'warning')
                if self.user.is_pending:
                    flash(_('This user is marked as pending, i.e. it has been attached to something but never '
                            'logged in.'), 'warning')
        if not self.allow_system_user and self.user.is_system:
            # NOTE(review): returning a redirect from _process_args — presumably
            # honored by the RH dispatch machinery; confirm against indico.web.rh.
            return redirect(url_for('users.user_profile'))

    def _check_access(self):
        RHProtected._check_access(self)
        if not self.user.can_be_modified(session.user):
            raise Forbidden('You cannot modify this user.')
class RHUserDashboard(RHUserBase):
    """Render the user dashboard: related categories plus linked events."""

    # Roles that mark an event as managed by the user.
    management_roles = {'conference_creator', 'conference_chair', 'conference_manager', 'session_manager',
                        'session_coordinator', 'contribution_manager'}
    # Roles that mark an event as reviewed by the user.
    reviewer_roles = {'paper_manager', 'paper_judge', 'paper_content_reviewer', 'paper_layout_reviewer',
                      'contribution_referee', 'contribution_editor', 'contribution_reviewer', 'abstract_reviewer',
                      'track_convener'}
    # Roles that mark an event as attended by the user.
    attendance_roles = {'contributor', 'contribution_submission', 'abstract_submitter', 'abstract_person',
                        'registration_registrant', 'survey_submitter', 'lecture_speaker'}

    def _process(self):
        self.user.settings.set('suggest_categories', True)
        categories = get_related_categories(self.user)
        categories_events = []
        if categories:
            related_ids = {c['categ'].id for c in categories.values()}
            categories_events = get_events_in_categories(related_ids, self.user)
        cutoff = now_utc(False) - relativedelta(weeks=1, hour=0, minute=0, second=0)
        linked_events = []
        for event, roles in get_linked_events(self.user, cutoff, 10).items():
            linked_events.append((event, {
                'management': bool(roles & self.management_roles),
                'reviewing': bool(roles & self.reviewer_roles),
                'attendance': bool(roles & self.attendance_roles),
                'favorited': 'favorited' in roles,
            }))
        return WPUserDashboard.render_template('dashboard.html', 'dashboard',
                                               user=self.user,
                                               categories=categories,
                                               categories_events=categories_events,
                                               suggested_categories=get_suggested_categories(self.user),
                                               linked_events=linked_events,
                                               unlisted_events=get_unlisted_events(self.user))
@allow_signed_url
class RHExportDashboardICS(RHProtected):
    """Export the current user's dashboard events as an iCalendar feed."""
    def _get_user(self):
        # Overridden in RHExportDashboardICSLegacy to resolve the user from
        # the URL instead of the session.
        return session.user
    @use_kwargs({
        'from_': HumanizedDate(data_key='from', load_default=lambda: now_utc(False) - relativedelta(weeks=1)),
        'include': fields.List(fields.Str(), load_default={'linked', 'categories'}),
        'limit': fields.Integer(load_default=100, validate=lambda v: 0 < v <= 500)
    }, location='query')
    def _process(self, from_, include, limit):
        user = self._get_user()
        all_events = set()
        if 'linked' in include:
            all_events |= set(get_linked_events(
                user,
                from_,
                limit=limit,
                load_also=('description', 'own_room_id', 'own_venue_id', 'own_room_name', 'own_venue_name')
            ))
        if 'categories' in include and (categories := get_related_categories(user)):
            category_ids = {c['categ'].id for c in categories.values()}
            all_events |= set(get_events_in_categories(category_ids, user, limit=limit))
        # Each source is limited individually, so apply the limit again to
        # the combined, chronologically sorted result.
        all_events = sorted(all_events, key=lambda e: (e.start_dt, e.id))[:limit]
        response = {'results': [serialize_event_for_ical(event) for event in all_events]}
        serializer = Serializer.create('ics')
        return send_file('event.ics', BytesIO(serializer(response)), 'text/calendar')
class RHExportDashboardICSLegacy(RHExportDashboardICS):
    """Legacy dashboard iCal export (user id + legacy URL signature)."""
    def _get_user(self):
        user = User.get_or_404(request.view_args['user_id'], is_deleted=False)
        # Reject requests whose legacy signature does not match the path.
        if not is_legacy_signed_url_valid(user, request.full_path):
            raise BadRequest('Invalid signature')
        if user.is_blocked:
            raise BadRequest('User blocked')
        return user
    def _check_access(self):
        # disable the usual RHProtected access check; `_get_user` does it all
        pass
class RHPersonalData(RHUserBase):
    """Display the personal-data settings page for a user."""
    allow_system_user = True
    def _process(self):
        # Serialize the linked affiliation (if any) for the client widget.
        affiliation_link = self.user.affiliation_link
        current_affiliation = (AffiliationSchema().dump(affiliation_link)
                               if affiliation_link else None)
        # All selectable titles except the "none" placeholder.
        title_choices = [{'name': title.name, 'title': title.title}
                         for title in UserTitle if title != UserTitle.none]
        has_predefined_affiliations = (Affiliation.query
                                       .filter(~Affiliation.is_deleted)
                                       .has_rows())
        return WPUserPersonalData.render_template(
            'personal_data.html', 'personal_data',
            user=self.user,
            titles=title_choices,
            user_values=UserPersonalDataSchema().dump(self.user),
            current_affiliation=current_affiliation,
            has_predefined_affiliations=has_predefined_affiliations)
class RHPersonalDataUpdate(RHUserBase):
    """Apply partial changes to a user's personal data."""
    allow_system_user = True
    @use_args(UserPersonalDataSchema, partial=True)
    def _process(self, changes):
        logger.info('Profile of user %r updated by %r: %r', self.user, session.user, changes)
        synced_fields = set(changes.pop('synced_fields', self.user.synced_fields))
        # First/last name may only remain synced if the sync source actually
        # provides a non-empty value for them.
        syncable_fields = {k for k, v in self.user.synced_values.items()
                           if v or k not in ('first_name', 'last_name')}
        # we set this first so these fields are skipped below and only
        # get updated in synchronize_data which will flash a message
        # informing the user about the changes made by the sync
        self.user.synced_fields = synced_fields & syncable_fields
        for key, value in changes.items():
            if key not in self.user.synced_fields:
                setattr(self.user, key, value)
        self.user.synchronize_data(refresh=True)
        flash(_('Your personal data was successfully updated.'), 'success')
        return '', 204
class RHSearchAffiliations(RHProtected):
    """Search predefined affiliations by name (autocomplete endpoint)."""
    @use_kwargs({'q': fields.String(load_default='')}, location='query')
    def _process(self, q):
        exact_match = db.func.lower(Affiliation.name) == q.lower()
        # Short queries (<=2 chars) only return exact matches to avoid huge
        # result sets.
        q_filter = Affiliation.name.ilike(f'%{q}%') if len(q) > 2 else exact_match
        res = (
            Affiliation.query
            .filter(~Affiliation.is_deleted, q_filter)
            .order_by(
                # Exact matches first, then prefix matches, then alphabetical.
                exact_match.desc(),
                db.func.lower(Affiliation.name).startswith(q.lower()).desc(),
                db.func.lower(Affiliation.name)
            )
            .all())
        return AffiliationSchema(many=True).jsonify(res)
class RHProfilePicturePage(RHUserBase):
    """Render the page where a user manages their profile picture."""
    def _process(self):
        current_source = self.user.picture_source.name
        return WPUserProfilePic.render_template(
            'profile_picture.html', 'profile_picture',
            user=self.user, source=current_source)
class RHProfilePicturePreview(RHUserBase):
    """Preview the different profile pictures.
    Always serves a freshly generated/fetched picture without any caching.
    """
    @use_kwargs({'source': EnumField(ProfilePictureSource)}, location='view_args')
    def _process(self, source):
        if source == ProfilePictureSource.standard:
            # Generated SVG avatar showing the user's first initial.
            initial = ''
            if self.user.first_name:
                initial = self.user.first_name[0].upper()
            svg = render_template('users/avatar.svg', bg_color=self.user.avatar_bg_color, text=initial)
            return send_file('avatar.svg', BytesIO(svg.encode()), mimetype='image/svg+xml',
                             no_cache=True, inline=True, safe=False)
        if source == ProfilePictureSource.custom:
            # Custom picture uploaded earlier and stored on the user.
            content_type = self.user.picture_metadata['content_type']
            return send_file('avatar.png', BytesIO(self.user.picture), mimetype=content_type,
                             no_cache=True, inline=True)
        # Remaining sources come from gravatar (regular or identicon).
        use_identicon = source == ProfilePictureSource.identicon
        gravatar = get_gravatar_for_user(self.user, use_identicon, size=80)[0]
        return send_file('avatar.png', BytesIO(gravatar), mimetype='image/png')
class RHProfilePictureDisplay(RH):
    """Serve the stored avatar of a given (non-deleted) user."""
    def _process_args(self):
        user_id = request.view_args['user_id']
        self.user = User.get_or_404(user_id, is_deleted=False)
    def _process(self):
        return send_avatar(self.user)
class RHSaveProfilePicture(RHUserBase):
    """Update the user's profile picture."""
    @use_kwargs({
        'source': EnumField(ProfilePictureSource)
    })
    def _process(self, source):
        self.user.picture_source = source
        if source == ProfilePictureSource.standard:
            # The standard avatar is generated on the fly; drop stored data.
            self.user.picture = None
            self.user.picture_metadata = None
            logger.info('Profile picture of user %s removed by %s', self.user, session.user)
            return '', 204
        if source == ProfilePictureSource.custom:
            f = request.files['picture']
            try:
                pic = Image.open(f)
            except OSError:
                # Pillow raises OSError subclasses for unreadable images.
                raise UserValueError(_('You cannot upload this file as profile picture.'))
            if pic.format.lower() not in {'jpeg', 'png', 'gif', 'webp'}:
                raise UserValueError(_('The file has an invalid format ({format}).').format(format=pic.format))
            if pic.mode not in ('RGB', 'RGBA'):
                # Normalize exotic modes (e.g. palette images) first.
                pic = pic.convert('RGB')
            # Crop to a square and cap the size at 256x256 before storing.
            pic = square(pic)
            if pic.height > 256:
                pic = pic.resize((256, 256), resample=Image.BICUBIC)
            image_bytes = BytesIO()
            pic.save(image_bytes, 'PNG')
            image_bytes.seek(0)
            set_user_avatar(self.user, image_bytes.read(), f.filename)
        else:
            # gravatar/identicon: fetch the remote image once and store it.
            content, lastmod = get_gravatar_for_user(self.user, source == ProfilePictureSource.identicon, 256)
            set_user_avatar(self.user, content, source.name, lastmod)
        logger.info('Profile picture of user %s updated by %s', self.user, session.user)
        return '', 204
class RHUserPreferences(RHUserBase):
    """Display and save a user's preferences."""
    def _process(self):
        # Plugins may contribute extra preference sections via signal.
        extra_preferences = [pref(self.user) for pref in values_from_signal(signals.users.preferences.send(self.user))
                             if pref.is_active(self.user)]
        form_class = UserPreferencesForm
        defaults = FormDefaults(**self.user.settings.get_all())
        for pref in extra_preferences:
            form_class = pref.extend_form(form_class)
            pref.extend_defaults(defaults)
        form = form_class(obj=defaults)
        if form.validate_on_submit():
            data = form.data
            for pref in extra_preferences:
                # Let each extra preference extract/transform its own fields.
                pref.process_form_data(data)
            self.user.settings.set_multi(data)
            # Apply language/timezone to the current session immediately.
            session.lang = self.user.settings.get('lang')
            session.timezone = (self.user.settings.get('timezone') if self.user.settings.get('force_timezone')
                                else 'LOCAL')
            flash(_('Preferences saved'), 'success')
            return redirect(url_for('.user_preferences'))
        return WPUser.render_template('preferences.html', 'preferences', user=self.user, form=form)
class RHUserFavorites(RHUserBase):
    """Render the favorites page of a user."""
    def _process(self):
        template_args = {'user': self.user}
        return WPUserFavorites.render_template('favorites.html', 'favorites', **template_args)
class RHUserFavoritesAPI(RHUserBase):
    """REST-style API to list, add and remove favorite users."""
    def _process_args(self):
        RHUserBase._process_args(self)
        self.fav_user = None
        if 'fav_user_id' in request.view_args:
            self.fav_user = User.get_or_404(request.view_args['fav_user_id'])
    def _process_GET(self):
        ids = [fav.id for fav in self.user.favorite_users]
        ids.sort()
        return jsonify(ids)
    def _process_PUT(self):
        self.user.favorite_users.add(self.fav_user)
        return jsonify(self.user.id), 201
    def _process_DELETE(self):
        self.user.favorite_users.discard(self.fav_user)
        return '', 204
class RHUserFavoritesCategoryAPI(RHUserBase):
    """API to list/add/remove a user's favorite categories."""
    def _process_args(self):
        RHUserBase._process_args(self)
        self.category = (
            Category.get_or_404(request.view_args['category_id']) if 'category_id' in request.view_args else None
        )
        # A matching category suggestion (if any) gets removed when the
        # category is explicitly (un)favorited.
        self.suggestion = (
            self.user.suggested_categories.filter_by(category=self.category).first()
            if 'category_id' in request.view_args
            else None
        )
    def _process_GET(self):
        return jsonify({d.id: BasicCategorySchema().dump(d) for d in self.user.favorite_categories})
    def _process_PUT(self):
        if self.category not in self.user.favorite_categories:
            # Only categories the user can access may be favorited.
            if not self.category.can_access(self.user):
                raise Forbidden()
            self.user.favorite_categories.add(self.category)
            if self.suggestion:
                self.user.suggested_categories.remove(self.suggestion)
        return jsonify(success=True)
    def _process_DELETE(self):
        if self.category in self.user.favorite_categories:
            self.user.favorite_categories.discard(self.category)
            try:
                db.session.flush()
            except StaleDataError:
                # Deleted in another transaction
                db.session.rollback()
        suggestion = self.user.suggested_categories.filter_by(category=self.category).first()
        if suggestion:
            self.user.suggested_categories.remove(suggestion)
        return jsonify(success=True)
class RHUserFavoritesEventAPI(RHUserBase):
    """API to list/add/remove a user's favorite events."""
    def _process_args(self):
        RHUserBase._process_args(self)
        self.event = (
            Event.get_or_404(request.view_args['event_id']) if 'event_id' in request.view_args else None
        )
    def _process_GET(self):
        # Deleted events stay in the relationship but are filtered out here.
        return jsonify({e.id: FavoriteEventSchema().dump(e) for e in self.user.favorite_events if not e.is_deleted})
    def _process_PUT(self):
        if self.event not in self.user.favorite_events:
            # Only events the user can access may be favorited.
            if not self.event.can_access(self.user):
                raise Forbidden()
            self.user.favorite_events.add(self.event)
            signals.users.favorite_event_added.send(self.user, event=self.event)
        return jsonify(success=True)
    def _process_DELETE(self):
        if self.event in self.user.favorite_events:
            self.user.favorite_events.discard(self.event)
            try:
                db.session.flush()
            except StaleDataError:
                # Deleted in another transaction
                db.session.rollback()
            signals.users.favorite_event_removed.send(self.user, event=self.event)
        return jsonify(success=True)
class RHUserSuggestionsRemove(RHUserBase):
    """Hide a suggested category so it is no longer offered to the user."""
    def _process(self):
        category_id = request.view_args['category_id']
        suggestion = (self.user.suggested_categories
                      .filter_by(category_id=category_id)
                      .first())
        if suggestion is not None:
            # Keep the row but mark it ignored so it is not suggested again.
            suggestion.is_ignored = True
        return jsonify(success=True)
class RHUserEmails(RHUserBase):
    """Manage a user's email addresses and start email verification."""
    def _send_confirmation(self, email):
        # Store a one-time token (valid 24h) mapping to the address being
        # verified and the requesting user, then mail out the link.
        token_storage = make_scoped_cache('confirm-email')
        data = {'email': email, 'user_id': self.user.id}
        token = make_unique_token(lambda t: not token_storage.get(t))
        token_storage.set(token, data, timeout=86400)
        send_email(make_email(email, template=get_template_module('users/emails/verify_email.txt',
                                                                  user=self.user, email=email, token=token)))
    def _process(self):
        form = UserEmailsForm()
        if form.validate_on_submit():
            self._send_confirmation(form.email.data)
            flash(_('We have sent an email to {email}. Please click the link in that email within 24 hours to '
                    'confirm your new email address.').format(email=form.email.data), 'success')
            return redirect(url_for('.user_emails'))
        return WPUser.render_template('emails.html', 'emails', user=self.user, form=form)
class RHUserEmailsVerify(RHUserBase):
    """Handle the email-verification link sent by `RHUserEmails`."""
    flash_user_status = False
    #: Cache holding the pending verification tokens (see RHUserEmails).
    token_storage = make_scoped_cache('confirm-email')
    def _validate(self, data):
        """Check the token payload; return ``(valid, existing_user)``."""
        if not data:
            flash(_('The verification token is invalid or expired.'), 'error')
            return False, None
        user = User.get(data['user_id'])
        if not user or user != self.user:
            flash(_('This token is for a different Indico user. Please login with the correct account'), 'error')
            return False, None
        existing = UserEmail.query.filter_by(is_user_deleted=False, email=data['email']).first()
        if existing and not existing.user.is_pending:
            # The address already belongs to a real (non-pending) account.
            if existing.user == self.user:
                flash(_('This email address is already attached to your account.'))
            else:
                flash(_('This email address is already in use by another account.'), 'error')
            return False, existing.user
        return True, existing.user if existing else None
    def _process(self):
        token = request.view_args['token']
        data = self.token_storage.get(token)
        valid, existing = self._validate(data)
        if valid:
            self.token_storage.delete(token)
            if existing and existing.is_pending:
                logger.info('Found pending user %s to be merged into %s', existing, self.user)
                # If the pending user has missing names, copy them from the active one
                # to allow it to be marked as not pending and deleted during the merge.
                existing.first_name = existing.first_name or self.user.first_name
                existing.last_name = existing.last_name or self.user.last_name
                merge_users(existing, self.user)
                flash(_("Merged data from existing '{}' identity").format(existing.email))
                existing.is_pending = False
            self.user.secondary_emails.add(data['email'])
            signals.users.email_added.send(self.user, email=data['email'], silent=False)
            flash(_('The email address {email} has been added to your account.').format(email=data['email']), 'success')
        return redirect(url_for('.user_emails'))
class RHUserEmailsDelete(RHUserBase):
    """Remove one of the user's secondary email addresses."""
    def _process(self):
        address = request.view_args['email']
        if address in self.user.secondary_emails:
            self.user.secondary_emails.remove(address)
        return jsonify(success=True)
class RHUserEmailsSetPrimary(RHUserBase):
    """Make one of the user's secondary emails the primary address."""
    def _process(self):
        from .tasks import update_gravatars
        email = request.form['email']
        if email in self.user.secondary_emails:
            self.user.make_email_primary(email)
            # Commit before queueing the task so it sees the new address.
            db.session.commit()
            if self.user.picture_source in (ProfilePictureSource.gravatar, ProfilePictureSource.identicon):
                # Gravatar images are keyed on the email, so refresh them.
                update_gravatars.delay(self.user)
            flash(_('Your primary email was updated successfully.'), 'success')
            if 'email' in self.user.synced_fields:
                # A manual change overrides syncing; disable it and explain.
                self.user.synced_fields = self.user.synced_fields - {'email'}
                flash(_('Email address synchronization has been disabled since you manually changed your primary'
                        ' email address.'), 'warning')
        return redirect(url_for('.user_emails'))
class RHAdmins(RHAdminBase):
    """Show Indico administrators."""
    def _process(self):
        admins = set(User.query
                     .filter_by(is_admin=True, is_deleted=False)
                     .order_by(db.func.lower(User.first_name), db.func.lower(User.last_name)))
        form = AdminsForm(admins=admins)
        if form.validate_on_submit():
            # Grant/revoke based on the diff between form data and current state.
            added = form.admins.data - admins
            removed = admins - form.admins.data
            for user in added:
                user.is_admin = True
                logger.warning('Admin rights granted to %r by %r [%s]', user, session.user, request.remote_addr)
                flash(_('Admin added: {name} ({email})').format(name=user.name, email=user.email), 'success')
            for user in removed:
                user.is_admin = False
                logger.warning('Admin rights revoked from %r by %r [%s]', user, session.user, request.remote_addr)
                flash(_('Admin removed: {name} ({email})').format(name=user.name, email=user.email), 'success')
            return redirect(url_for('.admins'))
        return WPUsersAdmin.render_template('admins.html', 'admins', form=form)
class RHUsersAdmin(RHAdminBase):
    """Admin users overview with an optional user search."""
    def _process(self):
        form = SearchForm(obj=FormDefaults(exact=True))
        form_data = form.data
        search_results = None
        num_of_users = User.query.count()
        num_deleted_users = User.query.filter_by(is_deleted=True).count()
        if form.validate_on_submit():
            search_results = []
            # Pop the behavior flags so only actual search criteria remain.
            exact = form_data.pop('exact')
            include_deleted = form_data.pop('include_deleted')
            include_pending = form_data.pop('include_pending')
            external = form_data.pop('external')
            form_data = {k: v for (k, v) in form_data.items() if v and v.strip()}
            matches = search_users(exact=exact, include_deleted=include_deleted, include_pending=include_pending,
                                   include_blocked=True, external=external, allow_system_user=True, **form_data)
            for entry in matches:
                if isinstance(entry, User):
                    # Local users link directly to their profile page.
                    search_results.append(UserEntry(
                        avatar_url=entry.avatar_url,
                        profile_url=url_for('.user_profile', entry),
                        user=entry,
                        **{k: getattr(entry, k) for k in IDENTITY_ATTRIBUTES}
                    ))
                else:
                    # External identities have no local user/profile page.
                    if not entry.data['first_name'] and not entry.data['last_name']:
                        full_name = '<no name>'
                        initial = '?'
                    else:
                        full_name = f'{entry.data["first_name"]} {entry.data["last_name"]}'.strip()
                        initial = full_name[0]
                    search_results.append(UserEntry(
                        avatar_url=url_for('assets.avatar', name=initial),
                        profile_url=None,
                        user=None,
                        full_name=full_name,
                        **{k: entry.data.get(k) for k in (IDENTITY_ATTRIBUTES - {'full_name'})}
                    ))
            search_results.sort(key=attrgetter('full_name'))
        num_reg_requests = RegistrationRequest.query.count()
        return WPUsersAdmin.render_template('users_admin.html', 'users', form=form, search_results=search_results,
                                            num_of_users=num_of_users, num_deleted_users=num_deleted_users,
                                            num_reg_requests=num_reg_requests)
class RHUsersAdminSettings(RHAdminBase):
    """Manage global user-related settings."""
    def _process(self):
        defaults = FormDefaults(**user_management_settings.get_all())
        form = AdminUserSettingsForm(obj=defaults)
        if not form.validate_on_submit():
            return jsonify_form(form)
        user_management_settings.set_multi(form.data)
        return jsonify_data(flash=False)
class RHUsersAdminCreate(RHAdminBase):
    """Create user (admin)."""
    def _process(self):
        form = AdminAccountRegistrationForm()
        if form.validate_on_submit():
            data = form.data
            if data.pop('create_identity', False):
                identity = Identity(provider='indico', identifier=data.pop('username'), password=data.pop('password'))
            else:
                # No local login identity; drop the unused credential fields.
                identity = None
                data.pop('username', None)
                data.pop('password', None)
            user = create_user(data.pop('email'), data, identity, from_moderation=True)
            # Success message with a link to the freshly created profile.
            msg = Markup('{} <a href="{}">{}</a>').format(
                escape(_('The account has been created.')),
                url_for('users.user_profile', user),
                escape(_('Show details'))
            )
            flash(msg, 'success')
            return jsonify_data()
        return jsonify_template('users/users_admin_create.html', form=form)
def _get_merge_problems(source, target):
    """Collect sanity-check results for merging ``source`` into ``target``.

    Returns:
        tuple: ``(errors, warnings)`` lists of translated messages; any
        entry in ``errors`` should abort the merge.
    """
    errors = []
    warnings = []
    if source == target:
        errors.append(_('Users are the same!'))
    # Compare names case-insensitively, ignoring surrounding whitespace.
    source_name = (source.first_name.strip().lower(), source.last_name.strip().lower())
    target_name = (target.first_name.strip().lower(), target.last_name.strip().lower())
    if source_name != target_name:
        warnings.append(_("Users' names seem to be different!"))
    # Flag suspicious (warning) and disallowed (error) account states.
    checks = (
        (source.is_pending, warnings, _('Source user has never logged in to Indico!')),
        (target.is_pending, warnings, _('Target user has never logged in to Indico!')),
        (source.is_blocked, warnings, _('Source user is blocked!')),
        (target.is_blocked, warnings, _('Target user is blocked!')),
        (source.is_deleted, errors, _('Source user has been deleted!')),
        (target.is_deleted, errors, _('Target user has been deleted!')),
        (source.is_admin, warnings, _('Source user is an administrator!')),
        (target.is_admin, warnings, _('Target user is an administrator!')),
        (source.is_admin and not target.is_admin, errors,
         _("Source user is an administrator but target user isn't!")),
    )
    for triggered, bucket, message in checks:
        if triggered:
            bucket.append(message)
    return errors, warnings
class RHUsersAdminMerge(RHAdminBase):
    """Merge users (admin)."""
    def _process(self):
        form = MergeForm()
        if form.validate_on_submit():
            source = form.source_user.data
            target = form.target_user.data
            # Re-run the sanity checks server-side; the client-side check
            # (RHUsersAdminMergeCheck) is advisory only.
            errors, warnings = _get_merge_problems(source, target)
            if errors:
                raise BadRequest(_('Merge aborted due to failed sanity check'))
            if warnings:
                logger.info('User %s initiated merge of %s into %s (with %d warnings)',
                            session.user, source, target, len(warnings))
            else:
                logger.info('User %s initiated merge of %s into %s', session.user, source, target)
            merge_users(source, target)
            flash(_('The users have been successfully merged.'), 'success')
            return redirect(url_for('.user_profile', user_id=target.id))
        return WPUsersAdmin.render_template('users_merge.html', 'users', form=form)
class RHUsersAdminMergeCheck(RHAdminBase):
    """Dry-run the merge sanity checks and return the outcome as JSON."""
    @use_kwargs({
        'source': Principal(allow_external_users=True, required=True),
        'target': Principal(allow_external_users=True, required=True),
    }, location='query')
    def _process(self, source, target):
        errors, warnings = _get_merge_problems(source, target)
        payload = {'errors': errors,
                   'warnings': warnings,
                   'source': serialize_user(source),
                   'target': serialize_user(target)}
        return jsonify(**payload)
class RHRegistrationRequestList(RHAdminBase):
    """List all pending account registration requests."""
    def _process(self):
        pending = (RegistrationRequest.query
                   .order_by(RegistrationRequest.email)
                   .all())
        return WPUsersAdmin.render_template('registration_requests.html', 'users', pending_requests=pending)
class RHRegistrationRequestBase(RHAdminBase):
    """Base class to process a registration request."""
    def _process_args(self):
        RHAdminBase._process_args(self)
        # NOTE: ``self.request`` is the RegistrationRequest being processed,
        # not the flask request object.
        self.request = RegistrationRequest.get_or_404(request.view_args['request_id'])
class RHAcceptRegistrationRequest(RHRegistrationRequestBase):
    """Accept a registration request."""
    def _process(self):
        # Create the account from the stored request data and notify the
        # requester by email.
        user, identity = register_user(self.request.email, self.request.extra_emails, self.request.user_data,
                                       self.request.identity_data, self.request.settings)
        tpl = get_template_module('users/emails/registration_request_accepted.txt', user=user)
        send_email(make_email(self.request.email, template=tpl))
        flash(_('The request has been approved.'), 'success')
        return jsonify_data()
class RHRejectRegistrationRequest(RHRegistrationRequestBase):
    """Reject a registration request."""
    def _process(self):
        db.session.delete(self.request)
        # The request's attributes remain accessible here since the delete
        # has not been flushed to the database yet.
        tpl = get_template_module('users/emails/registration_request_rejected.txt', req=self.request)
        send_email(make_email(self.request.email, template=tpl))
        flash(_('The request has been rejected.'), 'success')
        return jsonify_data()
class UserSearchResultSchema(mm.SQLAlchemyAutoSchema):
    """Marshmallow schema for users returned by the user search API."""
    #: ID of the predefined affiliation, taken from the affiliation link.
    affiliation_id = fields.Integer(attribute='_affiliation.affiliation_id')
    #: Full affiliation details serialized via `AffiliationSchema`.
    affiliation_meta = fields.Nested(AffiliationSchema, attribute='_affiliation.affiliation_link')
    #: The user's title, serialized from the raw enum attribute.
    title = EnumField(UserTitle, attribute='_title')
    class Meta:
        model = User
        fields = ('id', 'identifier', 'email', 'affiliation', 'affiliation_id', 'affiliation_meta',
                  'full_name', 'first_name', 'last_name', 'avatar_url', 'title')
# Shared schema instance reused by RHUserSearch when serializing users.
search_result_schema = UserSearchResultSchema()
class RHUserSearch(RHProtected):
    """Search for users based on given criteria."""
    def _serialize_pending_user(self, entry):
        """Serialize an external identity that has no Indico user yet."""
        first_name = entry.data.get('first_name') or ''
        last_name = entry.data.get('last_name') or ''
        full_name = f'{first_name} {last_name}'.strip() or 'Unknown'
        affiliation = entry.data.get('affiliation') or ''
        affiliation_data = entry.data.get('affiliation_data')
        email = entry.data['email'].lower()
        ext_id = f'{entry.provider.name}:{entry.identifier}'
        # IdentityInfo from flask-multipass does not have `avatar_url`
        avatar_url = get_avatar_url_from_name(first_name)
        # detailed data to put in redis to create a pending user if needed
        self.externals[ext_id] = {
            'first_name': first_name,
            'last_name': last_name,
            'email': email,
            'affiliation': affiliation,
            'affiliation_data': affiliation_data,
            'phone': entry.data.get('phone') or '',
            'address': entry.data.get('address') or '',
        }
        # simple data for the search results
        return {
            '_ext_id': ext_id,
            'id': None,
            'identifier': f'ExternalUser:{ext_id}',
            'email': email,
            'affiliation': affiliation,
            # -1 marks an affiliation that exists only in the external data
            'affiliation_id': -1 if affiliation_data else None,
            'affiliation_meta': (AffiliationSchema().dump(affiliation_data) | {'id': -1}) if affiliation_data else None,
            'full_name': full_name,
            'first_name': first_name,
            'last_name': last_name,
            'avatar_url': avatar_url
        }
    def _serialize_entry(self, entry):
        """Serialize one search result (Indico user or external identity)."""
        if isinstance(entry, User):
            return search_result_schema.dump(entry)
        else:
            return self._serialize_pending_user(entry)
    def _process_pending_users(self, results):
        """Cache external identities (24h) so they can become pending users."""
        cache = make_scoped_cache('external-user')
        for entry in results:
            # `_ext_id` is internal; strip it from the response payload.
            ext_id = entry.pop('_ext_id', None)
            if ext_id is not None:
                cache.set(ext_id, self.externals[ext_id], timeout=86400)
    @use_kwargs({
        'first_name': fields.Str(validate=validate.Length(min=1)),
        'last_name': fields.Str(validate=validate.Length(min=1)),
        'email': fields.Str(validate=lambda s: len(s) > 3),
        'affiliation': fields.Str(validate=validate.Length(min=1)),
        'exact': fields.Bool(load_default=False),
        'external': fields.Bool(load_default=False),
        'favorites_first': fields.Bool(load_default=False)
    }, validate=validate_with_message(
        lambda args: args.keys() & {'first_name', 'last_name', 'email', 'affiliation'},
        'No criteria provided'
    ), location='query')
    def _process(self, exact, external, favorites_first, **criteria):
        matches = search_users(exact=exact, include_pending=True, external=external, **criteria)
        self.externals = {}
        results = sorted((self._serialize_entry(entry) for entry in matches), key=itemgetter('full_name', 'email'))
        if favorites_first:
            # Stable sort: favorites move first while name/email order is
            # preserved within each group.
            favorites = {u.id for u in session.user.favorite_users}
            results.sort(key=lambda x: x['id'] not in favorites)
        total = len(results)
        # Only the first 10 hits are returned; `total` tells the client how
        # many matched overall.
        results = results[:10]
        self._process_pending_users(results)
        return jsonify(users=results, total=total)
class RHUserSearchInfo(RHProtected):
    """Expose whether any identity provider supports external user search."""
    def _process(self):
        providers = multipass.identity_providers.values()
        searchable = any(provider.supports_search for provider in providers)
        return jsonify(external_users_available=searchable)
class RHUserBlock(RHUserBase):
    """Block or unblock a user account (admins only)."""
    def _check_access(self):
        RHUserBase._check_access(self)
        # On top of the usual modification check, only admins may (un)block.
        if not session.user.is_admin:
            raise Forbidden
    def _process_PUT(self):
        if self.user == session.user:
            raise Forbidden(_('You cannot block yourself'))
        self.user.is_blocked = True
        logger.info('User %s blocked %s', session.user, self.user)
        message = _('{name} has been blocked.').format(name=self.user.name)
        flash(message, 'success')
        return jsonify(success=True)
    def _process_DELETE(self):
        self.user.is_blocked = False
        logger.info('User %s unblocked %s', session.user, self.user)
        message = _('{name} has been unblocked.').format(name=self.user.name)
        flash(message, 'success')
        return jsonify(success=True)
| {
"content_hash": "4db5517298e1d25636888769b701ca57",
"timestamp": "",
"source": "github",
"line_count": 866,
"max_line_length": 120,
"avg_line_length": 45.45842956120092,
"alnum_prop": 0.6167094266771661,
"repo_name": "DirkHoffmann/indico",
"id": "6e1002667f05c9adab9c12a7e58a7c38a4d91684",
"size": "39581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/users/controllers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33249"
},
{
"name": "HTML",
"bytes": "1398354"
},
{
"name": "JavaScript",
"bytes": "2295843"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5426206"
},
{
"name": "SCSS",
"bytes": "496904"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23435"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
"""Console functionality for command line tools."""
import codecs
import getpass
import shutil
import sys
import textwrap
from django.core.exceptions import ValidationError
from django.core.management.color import color_style, no_style
from django.utils import termcolors
# Module-level Console singleton; None until initialized (callers are
# expected to construct it via init_console -- see the Console docstring).
_console = None
class _StyledWrapperIndent:
"""Wraps styled text to help provide textwrap with indent lengths.
:py:mod:`textwrap` calculates the width of the indent strings to figure
out how many characters can fit on a line, but it's completely unaware of
ANSI characters. It reduces the available width on the line by the number
of invisible ANSI control characters.
This helps wrap and style indent text in a way that allows text wrapping
to use the actual visible width of the indent rather than the raw
character width.
Version Added:
5.0
"""
def __init__(self, text, style):
"""Initialize the indent text.
Args:
text (str):
The text to style.
style (callable):
The function used to style the text, or ``None`` if the text
will not be styled.
"""
self._length = len(text)
if style:
self._text = style(text)
else:
self._text = text
def __add__(self, other):
"""Add another string to the contained string.
This will be used when building the content for a line.
Args:
other (str):
The string to add onto the contained stringr.
Returns:
str:
The combined length.
"""
return str(self._text) + other
def __len__(self):
"""Return the visible length of the string.
This will be used for available text width calculations.
Returns:
int:
The visible length of the string.
"""
return self._length
class Console(object):
"""Utilities for displaying output to the console.
This takes care of cleanly outputting various forms of content (text,
notes, warnings, errors, itemized lists, and more) to the console.
Callers should construct this by calling :py:func:`init_console`.
Version Added:
4.0
"""
#: Standard text prompts.
PROMPT_TYPE_TEXT = 'text'
#: Password prompts.
PROMPT_TYPE_PASSWORD = 'password'
#: Yes/No prompts.
PROMPT_TYPE_YES_NO = 'yes_no'
    def __init__(self, allow_color=False, stdout=sys.stdout,
                 stderr=sys.stderr, default_text_padding=0):
        """Initialize the console.

        Args:
            allow_color (bool, optional):
                Whether to use color in any console output. This applies to
                headers, notes, warnings, errors, and progress steps.

            stdout (io.IOBase):
                The stream to output standard text to.

            stderr (io.IOBase):
                The stream to output errors to.

            default_text_padding (int, optional):
                Default number of spaces used to left-pad unwrapped lines
                of text in :py:meth:`print`.
        """
        # All output is UTF-8 encoded regardless of the stream's default.
        writer = codecs.getwriter('utf-8')
        self._allow_color = allow_color
        try:
            # sys.stdout with Python 3.
            self.stdout = writer(stdout.buffer)
        except AttributeError:
            # sys.stdout with Python 2, or some other stream.
            self.stdout = writer(stdout)
        try:
            # sys.stderr with Python 3.
            self.stderr = writer(stderr.buffer)
        except AttributeError:
            # sys.stderr with Python 2, or some other stream.
            self.stderr = writer(stderr)
        self.default_text_padding = default_text_padding
        # Get the terminal width in order to best fit wrapped content.
        term_width = 79
        if hasattr(shutil, 'get_terminal_size'):
            try:
                # get_terminal_size can report 0, 0 if run from pseudo-terminal
                term_width = shutil.get_terminal_size()[0] or term_width
            except OSError:
                pass
        self.term_width = term_width
        # Em-dash separator spanning the full terminal width.
        self.header_sep = '\u2014' * self.term_width
        # Build the color-dependent styling state and text wrappers.
        self._restyle_console()
    @property
    def allow_color(self):
        """Whether to use color for output.

        Type:
            bool
        """
        return self._allow_color
    @allow_color.setter
    def allow_color(self, allow_color):
        """Set whether to use color, rebuilding styles if it changed."""
        # Only restyle on an actual change to avoid redundant rebuilds.
        if self._allow_color is not allow_color:
            self._allow_color = allow_color
            self._restyle_console()
def make_text_wrapper(self, prefix='', prefix_style=None, left_padding=0,
right_padding=None):
"""Return a new TextWrapper.
The resulting :py:class:`textwrap.TextWrapper` will be tailored to the
terminal width, and will make use of any provided prefix, style, and
padding.
Args:
prefix (unicode, optional):
A prefix for the first line in the wrapped content.
prefix_style (callable, optional):
The style function used to style the prefix.
left_padding (int, optional):
Padding to apply to the left of all lines.
right_padding (int, optional):
Padding to apply to the right of all lines. This defaults to
the value of ``left_padding``.
Returns:
textwrap.TextWrapper:
The resulting text wrapper.
"""
left_indent_len = left_padding + len(prefix)
if right_padding is None:
right_padding = left_padding
return textwrap.TextWrapper(
initial_indent=_StyledWrapperIndent(
'%s%s' % (' ' * left_padding, prefix),
prefix_style),
subsequent_indent=' ' * left_indent_len,
break_long_words=False,
width=self.term_width - right_padding)
    def wrap_text(self, text, indent=None, wrapper=None):
        """Return a paragraph of text wrapped to the terminal width.

        Args:
            text (unicode):
                The text to wrap.

            indent (unicode, optional):
                A custom indentation string.

            wrapper (textwrap.TextWrapper, optional):
                A specific text wrapper to use. Defaults to the standard
                text wrapper for the console.

        Returns:
            unicode:
            The wrapped text.
        """
        if wrapper is None:
            wrapper = self.text_wrapper
        if indent is None:
            result = wrapper.fill(text)
        else:
            # Temporarily reconfigure the (possibly shared) wrapper for the
            # custom indent, then restore its previous settings so other
            # callers are unaffected.
            old_initial_indent = wrapper.initial_indent
            old_subsequent_indent = wrapper.subsequent_indent
            old_width = wrapper.width
            wrapper.initial_indent = indent
            wrapper.subsequent_indent = indent
            wrapper.width = self.term_width
            result = wrapper.fill(text)
            wrapper.initial_indent = old_initial_indent
            wrapper.subsequent_indent = old_subsequent_indent
            wrapper.width = old_width
        return result
def print(self, text='', wrap=True, wrapper=None, style=None,
trailing_newline=True):
"""Display a block of text to the user.
Args:
text (unicode):
The text to display.
wrap (bool, optional):
Whether to wrap the text. Any newlines will result in new
paragraphs.
wrapper (textwrap.TextWrapper, optional):
A specific text wrapper to use. Defaults to the standard
text wrapper for the console.
style (callable, optional):
The style function used to style the text.
trailing_newline (bool, optional):
Whether to include a trailing newline at the end.
"""
if style is None:
style = self._plain_style
if wrap:
if wrapper is None:
wrapper = self.text_wrapper
indent = None
for i, paragraph in enumerate(text.strip().splitlines()):
if i > 0:
self.stdout.write('\n\n')
indent = wrapper.subsequent_indent
self.stdout.write(style(self.wrap_text(paragraph,
wrapper=wrapper,
indent=indent)))
else:
for line in text.splitlines(True):
self.stdout.write('%s%s' % (' ' * self.default_text_padding,
style(line)))
if trailing_newline:
self.stdout.write('\n')
def note(self, text, leading_newlines=True, trailing_newlines=True):
"""Display a block containing an important note.
Args:
text (unicode):
The text to display.
leading_newlines (bool, optional):
Whether to show 2 newlines before the text.
trailing_newlines (bool, optional):
Whether to show 1 newline after the text.
"""
if leading_newlines:
self.print()
self.print(text,
wrapper=self.note_wrapper)
if trailing_newlines:
self.print()
def warning(self, text, leading_newlines=True, trailing_newlines=True):
"""Display a block containing a warning.
Args:
text (unicode):
The text to display.
leading_newlines (bool, optional):
Whether to show 2 newlines before the text.
trailing_newlines (bool, optional):
Whether to show 1 newline after the text.
"""
if leading_newlines:
self.print()
self.print(text,
wrapper=self.warning_wrapper)
if trailing_newlines:
self.print()
def error(self, text, leading_newlines=True, trailing_newlines=True):
"""Display a block containing a warning.
Args:
text (unicode):
The text to display.
leading_newlines (bool, optional):
Whether to show 2 newlines before the text.
trailing_newlines (bool, optional):
Whether to show 1 newline after the text.
"""
if leading_newlines:
self.print()
self.print(text,
wrapper=self.error_wrapper)
if trailing_newlines:
self.print()
def header(self, title, leading_newlines=True, trailing_newlines=True):
"""Display a header.
Args:
title (unicode):
The header title.
leading_newlines (bool, optional):
Whether to show 2 newlines before the header.
trailing_newlines (bool, optional):
Whether to show 1 newline after the header.
"""
if leading_newlines:
self.print()
self.print()
self.print(self.header_sep,
style=self.header_sep_style,
wrapper=self.header_sep_wrapper)
self.print(title,
style=self.header_sep_style,
wrapper=self.header_wrapper)
self.print(self.header_sep,
style=self.header_sep_style,
wrapper=self.header_sep_wrapper)
if trailing_newlines:
self.print()
    def prompt_input(self, prompt, prompt_type=PROMPT_TYPE_TEXT,
                     default=None, optional=False, strip=True,
                     validate_func=None):
        """Prompt the user for input.
        Args:
            prompt (unicode):
                The text prompting for input.
            prompt_type (unicode, optional):
                The type of input to prompt for. This is one of:
                * :py:attr:`PROMPT_TYPE_TEXT`
                * :py:attr:`PROMPT_TYPE_PASSWORD`
                * :py:attr:`PROMPT_TYPE_YES_NO`
            default (bool or unicode, optional):
                The default value to show and use, if an explicit value isn't
                provided by the user.
                For yes/no prompts, this should be a boolean. For all else,
                a string.
            optional (bool, optional):
                Whether the prompt is optional and can be skipped by omitting
                a value.
            strip (bool, optional):
                Whether to strip the provided input.
            validate_func (callable, optional):
                An optional function for determining if input is valid. This
                takes the input as a parameter and raises a
                :py:class:`django.core.exceptions.ValidationError` if invalid.
            .. code-block:: python
                def _is_valid(value):
                    if value != 'abc':
                        raise ValidationError('bad!')
        Returns:
            unicode:
                The resulting inputted value.
        """
        # Decorate the prompt to advertise the default/optional behavior.
        if prompt_type == self.PROMPT_TYPE_YES_NO:
            if default is True:
                prompt = '%s [Y/n]' % prompt
            elif default is False:
                prompt = '%s [y/N]' % prompt
                # NOTE(review): this reassignment is redundant (default is
                # already False in this branch).
                default = False
            else:
                prompt = '%s [y/n]' % prompt
        elif default:
            self.print()
            self.print('The default is "%s"' % default)
            prompt = '%s [%s]' % (prompt, default)
        elif optional:
            prompt = '%s (optional)' % prompt
        self.print()
        prompt = self.prompt_style('%s: ' % prompt)
        value = None
        # Keep prompting until we have an accepted value (or an optional
        # prompt is skipped).
        while not value:
            self.print(prompt, trailing_newline=False)
            self.stdout.flush()
            if prompt_type == self.PROMPT_TYPE_PASSWORD:
                # getpass suppresses echo; the prompt itself was already
                # written above, so an empty prompt string is passed here.
                value = getpass.getpass(str(''),
                                        stream=self.stdout)
            else:
                value = input()
            if strip:
                value = value.strip()
            if not value:
                if default:
                    # Fall back to the advertised default value.
                    value = default
                elif optional:
                    # Optional prompt skipped; returns the empty value.
                    break
            if validate_func is not None:
                try:
                    validate_func(value)
                except ValidationError as e:
                    # Show every validation message, then re-prompt.
                    for error in e.messages:
                        self.error(error)
                    value = None
                    continue
            if prompt_type == self.PROMPT_TYPE_YES_NO:
                if isinstance(value, bool):
                    # This came from the 'default' value.
                    norm_value = value
                else:
                    assert isinstance(value, str)
                    norm_value = value.lower()
                if norm_value not in (True, False, 'y', 'n', 'yes', 'no'):
                    self.error('Must specify one of Y/y/yes or N/n/no.')
                    value = None
                    continue
                else:
                    # Normalize the answer to a boolean result.
                    value = norm_value in (True, 'y', 'yes')
                    break
            elif not value:
                self.error('An answer is required.')
        return value
    def prompt_choice(self, prompt, choices):
        """Prompt the user for a choice from a list.
        Args:
            prompt (unicode):
                The text prompting for a choice.
            choices (list of dict):
                The list of choices to present. Each entry is a dictionary
                with the following keys:
                ``text`` (:py:class:`unicode`):
                    The text for the choice.
                ``description`` (:py:class:`unicode`, optional):
                    A description of the choice.
                ``enabled`` (:py:class:`bool`, optional):
                    Whether the option is enabled/visible.
        Returns:
            object:
                The resulting choice.
        """
        self.print()
        self.print('You can type either the name or the number from the '
                   'list below.')
        self.print()
        prompt_style = self.prompt_style
        valid_choices = []
        i = 0
        # Show only the enabled choices, numbering them starting at 1.
        for choice in choices:
            if choice.get('enabled', True):
                text = choice['text']
                self.print(
                    '%s %s %s\n'
                    % (prompt_style('(%d)' % (i + 1)),
                       text,
                       choice.get('description', '')))
                valid_choices.append(text)
                i += 1
        self.print()
        prompt = self.prompt_style('%s: ' % prompt)
        choice = None
        # Keep prompting until either a choice name or a valid 1-based
        # number from the displayed list is entered.
        while not choice:
            self.print(prompt, trailing_newline=False)
            choice = input()
            if choice not in valid_choices:
                try:
                    i = int(choice) - 1
                    if 0 <= i < len(valid_choices):
                        # A valid number was given; map it to its text.
                        choice = valid_choices[i]
                        break
                except ValueError:
                    # Not a number either; falls through to the error below.
                    pass
                self.error("'%s' is not a valid option." % choice)
                choice = None
        return choice
def itemized_list(self, items, title=''):
"""Display a list of items.
Args:
items (list of unicode):
The list of items to show.
title (unicode, optional):
An optional title to show above the list.
"""
self.print()
if title:
self.print('%s:' % title)
self.print()
wrapper = self.item_wrapper
for item in items:
self.print(item, wrapper=wrapper)
def progress_step(self, text, func, step_num=None, total_steps=None):
"""Display one step of a multi-step operation.
This will indicate when it's starting and when it's complete.
If both ``step_num`` and ``total_steps`` are provided, the step
text will include a prefix showing what step it's on and how many
there are total.
Args:
text (unicode):
The step text to display.
func (callable):
The function to call to execute the step.
step_num (int, optional):
The 1-based step number.
total_steps (int, optional):
The total number of steps.
"""
assert callable(func)
if step_num is not None and total_steps is not None:
text = '[%s/%s] %s' % (step_num, total_steps, text)
self.print('%s ... ' % text,
trailing_newline=False,
wrap=False)
try:
func()
self.stdout.write(self.style.SUCCESS('OK'))
except Exception as e:
self.stdout.write('%s %s' % (self.style.ERROR('ERROR:'), e))
self.stdout.write('\n')
def _plain_style(self, text):
"""Return text as-is, without any styling.
Args:
text (unicode):
The text to "style".
Returns:
unicode:
The provided text.
"""
return text
    def _restyle_console(self):
        """Restyle console output.
        This will create/re-create the output styles, based on the terminal
        size and whether color is allowed.
        """
        # Recompute the styles, based on whether color is allowed.
        if self.allow_color:
            self.style = color_style()
            self.header_style = termcolors.make_style(fg='yellow',
                                                      bg='black',
                                                      opts=('bold',))
            self.header_sep_style = termcolors.make_style(fg='yellow',
                                                          bg='black')
            self.prompt_style = termcolors.make_style(opts=('bold',))
        else:
            # Color disabled: every style becomes a pass-through.
            self.style = no_style()
            plain_style = self._plain_style
            self.header_style = plain_style
            self.header_sep_style = plain_style
            self.prompt_style = plain_style
        # Rebuild the text wrappers. This must happen after the styles
        # above are set, since the note/warning/error wrappers reference
        # self.style for their prefix styling.
        text_padding = self.default_text_padding
        self.header_wrapper = self.make_text_wrapper(
            left_padding=1,
            right_padding=1)
        self.header_sep_wrapper = self.make_text_wrapper()
        self.text_wrapper = self.make_text_wrapper(
            left_padding=text_padding,
            right_padding=text_padding)
        self.note_wrapper = self.make_text_wrapper(
            prefix='Note: ',
            prefix_style=self.style.WARNING,
            left_padding=text_padding,
            right_padding=text_padding)
        self.warning_wrapper = self.make_text_wrapper(
            prefix='Warning: ',
            prefix_style=self.style.WARNING,
            left_padding=text_padding,
            right_padding=text_padding)
        self.error_wrapper = self.make_text_wrapper(
            prefix='[!] ',
            prefix_style=self.style.ERROR,
            left_padding=text_padding,
            right_padding=text_padding)
        self.item_wrapper = self.make_text_wrapper(
            prefix='* ',
            left_padding=text_padding,
            right_padding=text_padding)
def init_console(*args, **kwargs):
    """Initialize the console.
    This can only be called once.
    Args:
        *args (tuple):
            Positional arguments to pass to :py:class:`Console`.
        **kwargs (dict):
            Keyword arguments to pass to :py:class:`Console`.
    Returns:
        Console:
            The resulting console instance.
    """
    global _console
    assert _console is None, 'init_console() was already called.'
    _console = Console(*args, **kwargs)
    return _console
def uninit_console():
    """Uninitialize the console, allowing init_console() to be called again."""
    global _console
    assert _console is not None, 'init_console() was never called.'
    _console = None
def get_console():
    """Return the console instance.
    Returns:
        Console:
            The initialized console, or ``None`` if not yet initialized.
    """
    return _console
| {
"content_hash": "661b769353326e7041ba4290b28dfe4f",
"timestamp": "",
"source": "github",
"line_count": 733,
"max_line_length": 79,
"avg_line_length": 30.289222373806275,
"alnum_prop": 0.5234213133951896,
"repo_name": "reviewboard/reviewboard",
"id": "42b30513db6f10af19f86d451bdb14ec13362cb8",
"size": "22202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/cmdline/utils/console.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10167"
},
{
"name": "Dockerfile",
"bytes": "7721"
},
{
"name": "HTML",
"bytes": "226489"
},
{
"name": "JavaScript",
"bytes": "3991608"
},
{
"name": "Less",
"bytes": "438017"
},
{
"name": "Python",
"bytes": "9186415"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from rest_framework.test import APIClient
from django.core.urlresolvers import reverse
from .models import Company, Team
from users.models import User
class CompanyTest(TestCase):
    """Tests for the team and company REST API endpoints."""

    def setUp(self):
        """Create a fresh API client for each test."""
        self.client = APIClient()

    def test_api_can_create_team(self):
        """Creating a team through the API increases the team count."""
        old_count = Team.objects.count()
        team_data = {"name": "Imaginary team"}
        self.response = self.client.post(
            reverse("teams", kwargs={"company_id": 0}),
            team_data,
            format="json"
        )
        self.assertEqual(self.response.status_code, 201)
        new_count = Team.objects.count()
        self.assertNotEqual(old_count, new_count)

    def test_api_can_delete_team(self):
        """Deleting a team through the API removes it again."""
        old_count = Team.objects.count()

        # Creating an admin user.
        user_data = {"full_name": "Imaginary user", "email": "test_users@email.com",
                     "is_admin": True}
        self.response = self.client.post(
            reverse("admins"),
            user_data,
            format="json"
        )
        self.assertEqual(self.response.status_code, 201)

        # Creating a company.
        company_data = {"full_name": "company", "mock_owner": 0}
        self.response = self.client.post(
            reverse("companies"),
            company_data,
            format="json"
        )
        self.assertEqual(self.response.status_code, 201)

        # Creating a team.
        team_data = {"name": "Imaginary team", "company": 0}
        self.response = self.client.post(
            reverse("teams"),
            team_data,
            format="json"
        )

        # Deleting the team.
        before_delete_count = Team.objects.count()
        self.assertNotEqual(old_count, before_delete_count)
        self.response = self.client.delete(
            reverse("get_team"),
            team_data,
            format="json"
        )
        self.assertEqual(self.response.status_code, 200)
        after_delete_count = Team.objects.count()
        # NOTE(review): after a successful delete the team count would
        # normally drop back to old_count, which would make this assertion
        # fail -- confirm the intended post-delete expectation.
        self.assertNotEqual(old_count, after_delete_count)

    def test_api_can_delete_company(self):
        """Deleting a company through the API removes it again."""
        # BUG FIX: this method previously lacked the ``self`` parameter, so
        # every ``self.*`` reference below raised a NameError at runtime.

        # Checking the current amount of companies.
        old_count = Company.objects.count()

        # Creating the owner and editor users directly through the ORM.
        mock_owner = User(full_name="mock_owner", is_admin=True)
        mock_owner.save()
        mock_editor = User(full_name="mock_editor", is_admin=True)
        mock_editor.save()

        # Creating a company.
        company_data = {"name": "Imaginary company",
                       "owner_id": "1",
                       "editor_id": "2"}
        self.response = self.client.post(
            reverse("companies"),
            # BUG FIX: previously posted the undefined name ``team_data``.
            company_data,
            format="json"
        )
        # NOTE(review): the create-team test expects 201 on creation; this
        # one expects 200 -- confirm which status the endpoint returns.
        self.assertEqual(self.response.status_code, 200)
        new_count = Company.objects.count()
        self.assertNotEqual(old_count, new_count)

        self.response = self.client.delete(
            reverse("get_company", kwargs={"company_id": 1})
        )
        # Get the amount of companies after the delete.
        after_delete_count = Company.objects.count()
        self.assertNotEqual(new_count, after_delete_count)
        self.assertEqual(self.response.status_code, 204)
| {
"content_hash": "c662cbcbb7a65b94b4d6bdef58a54fd8",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 84,
"avg_line_length": 30.7,
"alnum_prop": 0.5537459283387622,
"repo_name": "teamworkquality/twq-app",
"id": "190968a80e782a54ebb23262b6ba39f789367615",
"size": "3377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/companies/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "172"
},
{
"name": "HTML",
"bytes": "204"
},
{
"name": "JavaScript",
"bytes": "18302"
},
{
"name": "Python",
"bytes": "68605"
},
{
"name": "Shell",
"bytes": "170"
},
{
"name": "Vue",
"bytes": "15713"
}
],
"symlink_target": ""
} |
from django.core import management
# Entry point: dispatch to Django's management command-line handler
# (the auto-generated django-admin.py stub).
if __name__ == "__main__":
    management.execute_from_command_line()
| {
"content_hash": "5a355b345b39bd138baf4546c987f9a8",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 42,
"avg_line_length": 26.5,
"alnum_prop": 0.6792452830188679,
"repo_name": "boooka/GeoPowerOff",
"id": "208a0fa7ddaaae718bce93e13ce7048ac1a4b397",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/bin/django-admin.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "38253"
},
{
"name": "CSS",
"bytes": "102019"
},
{
"name": "JavaScript",
"bytes": "121188"
},
{
"name": "Python",
"bytes": "7232605"
},
{
"name": "Shell",
"bytes": "3777"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
from unittest import mock
from senlin.common import consts
from senlin.common import exception as exc
from senlin.common import scaleutils as su
from senlin.engine import cluster as cm
from senlin.policies import base as pb
from senlin.policies import region_placement as rp
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
class TestRegionPlacementPolicy(base.SenlinTestCase):
    """Unit tests for senlin's region placement policy."""
    def setUp(self):
        """Set up a dummy context and a baseline 4-region policy spec."""
        super(TestRegionPlacementPolicy, self).setUp()
        self.context = utils.dummy_context()
        self.spec = {
            'type': 'senlin.policy.region_placement',
            'version': '1.0',
            'properties': {
                'regions': [
                    {'name': 'R1', 'weight': 100, 'cap': 50},
                    {'name': 'R2', 'weight': 50, 'cap': 50},
                    {'name': 'R3', 'weight': 30, 'cap': -1},
                    {'name': 'R4', 'weight': 20, 'cap': -1}
                ]
            }
        }
    def test_policy_init(self):
        """The spec's region list is parsed into the regions mapping."""
        policy = rp.RegionPlacementPolicy('test-policy', self.spec)
        self.assertIsNone(policy.id)
        # NOTE(review): stray space after the dot below; valid Python, kept
        # byte-identical here.
        self.assertIsNone(policy. _keystoneclient)
        self.assertEqual('test-policy', policy.name)
        self.assertEqual('senlin.policy.region_placement-1.0', policy.type)
        expected = {
            'R1': {
                'weight': 100,
                'cap': 50
            },
            'R2': {
                'weight': 50,
                'cap': 50,
            },
            'R3': {
                'weight': 30,
                'cap': -1,
            },
            'R4': {
                'weight': 20,
                'cap': -1,
            }
        }
        self.assertEqual(expected, policy.regions)
    @mock.patch.object(pb.Policy, 'validate')
    def test_validate_okay(self, mock_base_validate):
        """validate() succeeds when keystone reports all regions exist."""
        policy = rp.RegionPlacementPolicy('test-policy', self.spec)
        kc = mock.Mock()
        kc.validate_regions.return_value = ['R1', 'R2', 'R3', 'R4']
        policy._keystoneclient = kc
        ctx = mock.Mock(user='U1', project='P1')
        res = policy.validate(ctx, True)
        self.assertTrue(res)
        mock_base_validate.assert_called_once_with(ctx, True)
        kc.validate_regions.assert_called_once_with(['R1', 'R2', 'R3', 'R4'])
    @mock.patch.object(pb.Policy, 'validate')
    def test_validate_no_validate_props(self, mock_base_validate):
        """validate() skips the region check when validate_props is False."""
        policy = rp.RegionPlacementPolicy('test-policy', self.spec)
        ctx = mock.Mock(user='U1', project='P1')
        res = policy.validate(ctx, False)
        self.assertTrue(res)
        mock_base_validate.assert_called_once_with(ctx, False)
    @mock.patch.object(pb.Policy, 'validate')
    def test_validate_region_not_found(self, mock_base_validate):
        """validate() raises InvalidSpec when regions are unknown."""
        policy = rp.RegionPlacementPolicy('test-policy', self.spec)
        kc = mock.Mock()
        kc.validate_regions.return_value = ['R2', 'R4']
        policy._keystoneclient = kc
        ctx = mock.Mock(user='U1', project='P1')
        ex = self.assertRaises(exc.InvalidSpec,
                               policy.validate,
                               ctx, True)
        mock_base_validate.assert_called_once_with(ctx, True)
        kc.validate_regions.assert_called_once_with(['R1', 'R2', 'R3', 'R4'])
        self.assertEqual("The specified regions '['R1', 'R3']' could not "
                         "be found.", str(ex))
    def test_create_plan(self):
        """_create_plan() distributes creations/deletions across regions."""
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        regions = policy.regions
        # Expansion with all regions already populated.
        current = {'R1': 2, 'R2': 2, 'R3': 2, 'R4': 1}
        result = policy._create_plan(current, regions, 5, True)
        expected = {'R1': 4, 'R2': 1}
        self.assertEqual(expected, result)
        # Expansion with one empty region.
        current = {'R1': 2, 'R2': 2, 'R3': 0, 'R4': 1}
        plan = policy._create_plan(current, regions, 5, True)
        answer = {'R1': 3, 'R2': 1, 'R3': 1}
        self.assertEqual(answer, plan)
        # Shrinkage (expand=False).
        current = {'R1': 2, 'R2': 2, 'R3': 0, 'R4': 1}
        plan = policy._create_plan(current, regions, 3, False)
        answer = {'R2': 2, 'R4': 1}
        self.assertEqual(answer, plan)
        current = {'R1': 4, 'R2': 2, 'R3': 1, 'R4': 1}
        plan = policy._create_plan(current, regions, 3, False)
        answer = {'R2': 1, 'R3': 1, 'R4': 1}
        self.assertEqual(answer, plan)
    def test_get_count_node_create_no_region(self):
        """NODE_CREATE without a region in the profile counts as 1."""
        x_profile = mock.Mock(CONTEXT='context', properties={'context': {}})
        x_node = mock.Mock(rt={'profile': x_profile})
        action = mock.Mock(action=consts.NODE_CREATE, entity=x_node)
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        res = policy._get_count('FOO', action)
        self.assertEqual(1, res)
    def test_get_count_node_create_region_specified(self):
        """NODE_CREATE with an explicit region in the profile counts as 0."""
        x_profile = mock.Mock(CONTEXT='context',
                              properties={'context': {'region_name': 'foo'}})
        x_node = mock.Mock(rt={'profile': x_profile})
        action = mock.Mock(action=consts.NODE_CREATE, entity=x_node)
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        res = policy._get_count('FOO', action)
        self.assertEqual(0, res)
    def test_get_count_resize_deletion(self):
        """CLUSTER_RESIZE with deletion data yields a negative count."""
        action = mock.Mock(action=consts.CLUSTER_RESIZE,
                           data={'deletion': {'count': 3}})
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        res = policy._get_count('FOO', action)
        self.assertEqual(-3, res)
    def test_get_count_resize_creation(self):
        """CLUSTER_RESIZE with creation data yields a positive count."""
        action = mock.Mock(action=consts.CLUSTER_RESIZE,
                           data={'creation': {'count': 3}})
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        res = policy._get_count('FOO', action)
        self.assertEqual(3, res)
    @mock.patch.object(su, 'parse_resize_params')
    def test_get_count_resize_parse_error(self, mock_parse):
        """A resize-params parse error is recorded on the action data."""
        x_cluster = mock.Mock()
        x_cluster.nodes = [mock.Mock(), mock.Mock()]
        action = mock.Mock(action=consts.CLUSTER_RESIZE, data={})
        action.entity = x_cluster
        mock_parse.return_value = (pb.CHECK_ERROR, 'Something wrong.')
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        res = policy._get_count('FOO', action)
        self.assertEqual(0, res)
        self.assertEqual(pb.CHECK_ERROR, action.data['status'])
        mock_parse.assert_called_once_with(action, x_cluster, 2)
        self.assertEqual('Something wrong.', action.data['reason'])
    @mock.patch.object(su, 'parse_resize_params')
    def test_get_count_resize_parse_creation(self, mock_parse):
        """Creation data produced by the parser yields a positive count."""
        def fake_parse(action, cluster, current):
            action.data = {'creation': {'count': 3}}
            return pb.CHECK_OK, ''
        x_cluster = mock.Mock()
        x_cluster.nodes = []
        action = mock.Mock(action=consts.CLUSTER_RESIZE, data={})
        action.entity = x_cluster
        mock_parse.side_effect = fake_parse
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        res = policy._get_count('FOO', action)
        self.assertEqual(3, res)
        mock_parse.assert_called_once_with(action, x_cluster, 0)
    @mock.patch.object(su, 'parse_resize_params')
    def test_get_count_resize_parse_deletion(self, mock_parse):
        """Deletion data produced by the parser yields a negative count."""
        def fake_parse(action, cluster, current):
            action.data = {'deletion': {'count': 3}}
            return pb.CHECK_OK, ''
        x_cluster = mock.Mock()
        x_cluster.nodes = [mock.Mock(), mock.Mock(), mock.Mock()]
        action = mock.Mock(action=consts.CLUSTER_RESIZE, data={})
        action.entity = x_cluster
        mock_parse.side_effect = fake_parse
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        res = policy._get_count('FOO', action)
        self.assertEqual(-3, res)
        mock_parse.assert_called_once_with(action, x_cluster, 3)
    def test_get_count_scale_in_with_data(self):
        """SCALE_IN uses the deletion count from action data."""
        action = mock.Mock(action=consts.CLUSTER_SCALE_IN,
                           data={'deletion': {'count': 3}})
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        res = policy._get_count('FOO', action)
        self.assertEqual(-3, res)
    def test_get_count_scale_in_with_no_data(self):
        """SCALE_IN defaults to -1 when the data lacks a 'count' key."""
        action = mock.Mock(action=consts.CLUSTER_SCALE_IN,
                           data={'deletion': {'num': 3}})
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        res = policy._get_count('FOO', action)
        self.assertEqual(-1, res)
    def test_get_count_scale_in_with_inputs(self):
        """SCALE_IN falls back to the 'count' input when data is empty."""
        action = mock.Mock(action=consts.CLUSTER_SCALE_IN, data={},
                           inputs={'count': 3})
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        res = policy._get_count('FOO', action)
        self.assertEqual(-3, res)
    def test_get_count_scale_in_with_incorrect_inputs(self):
        """SCALE_IN defaults to -1 when inputs lack a 'count' key."""
        action = mock.Mock(action=consts.CLUSTER_SCALE_IN, data={},
                           inputs={'num': 3})
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        res = policy._get_count('FOO', action)
        self.assertEqual(-1, res)
    def test_get_count_scale_out_with_data(self):
        """SCALE_OUT uses the creation count from action data."""
        action = mock.Mock(action=consts.CLUSTER_SCALE_OUT,
                           data={'creation': {'count': 3}})
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        res = policy._get_count('FOO', action)
        self.assertEqual(3, res)
    def test_get_count_scale_out_with_no_data(self):
        """SCALE_OUT defaults to 1 when the data lacks a 'count' key."""
        action = mock.Mock(action=consts.CLUSTER_SCALE_OUT,
                           data={'creation': {'num': 3}})
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        res = policy._get_count('FOO', action)
        self.assertEqual(1, res)
    def test_get_count_scale_out_with_inputs(self):
        """SCALE_OUT falls back to the 'count' input when data is empty."""
        action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, data={},
                           inputs={'count': 3})
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        res = policy._get_count('FOO', action)
        self.assertEqual(3, res)
    def test_get_count_scale_out_with_incorrect_inputs(self):
        """SCALE_OUT defaults to 1 when inputs lack a 'count' key."""
        action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, data={},
                           inputs={'num': 3})
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        res = policy._get_count('FOO', action)
        self.assertEqual(1, res)
    @mock.patch.object(cm.Cluster, 'load')
    def test_pre_op(self, mock_load):
        # test pre_op method whether returns the correct action.data
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        regions = policy.regions
        kc = mock.Mock()
        kc.validate_regions.return_value = regions.keys()
        policy._keystoneclient = kc
        plan = {'R1': 1, 'R3': 2}
        self.patchobject(policy, '_create_plan', return_value=plan)
        action = mock.Mock()
        action.context = self.context
        action.action = 'CLUSTER_SCALE_OUT'
        action.inputs = {}
        action.data = {
            'creation': {
                'count': 3,
            }
        }
        cluster = mock.Mock()
        current_dist = {'R1': 0, 'R2': 0, 'R3': 0, 'R4': 0}
        cluster.get_region_distribution.return_value = current_dist
        mock_load.return_value = cluster
        res = policy.pre_op('FAKE_CLUSTER', action)
        self.assertIsNone(res)
        self.assertEqual(3, action.data['creation']['count'])
        dist = action.data['creation']['regions']
        self.assertEqual(2, len(dist))
        self.assertEqual(1, dist['R1'])
        self.assertEqual(2, dist['R3'])
        mock_load.assert_called_once_with(action.context, 'FAKE_CLUSTER')
        kc.validate_regions.assert_called_once_with(regions.keys())
        cluster.get_region_distribution.assert_called_once_with(regions.keys())
        policy._create_plan.assert_called_once_with(
            current_dist, regions, 3, True)
    @mock.patch.object(cm.Cluster, 'load')
    def test_pre_op_count_from_inputs(self, mock_load):
        # test pre_op method whether returns the correct action.data
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        regions = policy.regions
        kc = mock.Mock()
        kc.validate_regions.return_value = regions.keys()
        policy._keystoneclient = kc
        cluster = mock.Mock()
        current_dist = {'R1': 0, 'R2': 0, 'R3': 0, 'R4': 0}
        cluster.get_region_distribution.return_value = current_dist
        mock_load.return_value = cluster
        plan = {'R1': 1, 'R3': 2}
        self.patchobject(policy, '_create_plan', return_value=plan)
        action = mock.Mock()
        action.context = self.context
        action.action = 'CLUSTER_SCALE_OUT'
        action.inputs = {'count': 3}
        action.data = {}
        res = policy.pre_op('FAKE_CLUSTER', action)
        self.assertIsNone(res)
        self.assertEqual(3, action.data['creation']['count'])
        dist = action.data['creation']['regions']
        self.assertEqual(2, len(dist))
        self.assertEqual(1, dist['R1'])
        self.assertEqual(2, dist['R3'])
    @mock.patch.object(cm.Cluster, 'load')
    def test_pre_op_no_regions(self, mock_load):
        # test pre_op method whether returns the correct action.data
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        kc = mock.Mock()
        kc.validate_regions.return_value = []
        policy._keystoneclient = kc
        action = mock.Mock()
        action.action = 'CLUSTER_SCALE_OUT'
        action.context = self.context
        action.data = {'creation': {'count': 3}}
        cluster = mock.Mock()
        mock_load.return_value = cluster
        res = policy.pre_op('FAKE_CLUSTER', action)
        self.assertIsNone(res)
        self.assertEqual('ERROR', action.data['status'])
        self.assertEqual('No region is found usable.', action.data['reason'])
    @mock.patch.object(cm.Cluster, 'load')
    def test_pre_op_no_feasible_plan(self, mock_load):
        # test pre_op method whether returns the correct action.data
        policy = rp.RegionPlacementPolicy('p1', self.spec)
        regions = policy.regions
        kc = mock.Mock()
        kc.validate_regions.return_value = regions.keys()
        policy._keystoneclient = kc
        self.patchobject(policy, '_create_plan', return_value=None)
        action = mock.Mock()
        action.action = 'CLUSTER_SCALE_OUT'
        action.context = self.context
        action.inputs = {}
        action.data = {'creation': {'count': 3}}
        cluster = mock.Mock()
        current_dist = {'R1': 0, 'R2': 0, 'R3': 0, 'R4': 0}
        cluster.get_region_distribution.return_value = current_dist
        mock_load.return_value = cluster
        res = policy.pre_op('FAKE_CLUSTER', action)
        self.assertIsNone(res)
        self.assertEqual('ERROR', action.data['status'])
        self.assertEqual('There is no feasible plan to handle all nodes.',
                         action.data['reason'])
        mock_load.assert_called_once_with(action.context, 'FAKE_CLUSTER')
        kc.validate_regions.assert_called_once_with(regions.keys())
        cluster.get_region_distribution.assert_called_once_with(regions.keys())
        policy._create_plan.assert_called_once_with(
            current_dist, regions, 3, True)
| {
"content_hash": "5cd94fcb7e3aefce56457c1acec2c686",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 79,
"avg_line_length": 36.97584541062802,
"alnum_prop": 0.5796968905147635,
"repo_name": "stackforge/senlin",
"id": "4e994f928e2103b1afcc3f094cb3d0e63335c1be",
"size": "15857",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "senlin/tests/unit/policies/test_region_placement.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2145946"
},
{
"name": "Shell",
"bytes": "18730"
}
],
"symlink_target": ""
} |
import argparse
import boto
import boto.ec2
from boto_cli import configure_logging
from boto_cli.ec2 import *
import logging
log = logging.getLogger('boto_cli')
from pprint import pprint
# configure command line argument parsing
# NOTE: -f/--filter values are later parsed as KEY=VALUE pairs (see the
# dict(filter.split('=')) call below).
parser = argparse.ArgumentParser(description='Create snapshots of EBS volumes in all/some available EC2 regions')
parser.add_argument("-f", "--filter", action="append", help="An EBS volume filter. [can be used multiple times]")
parser.add_argument("-i", "--id", dest="resource_ids", action="append", help="An EBS volume id. [can be used multiple times]")
parser.add_argument("-d", "--description", help="A description for the EBS snapshot [default: <provided>]")
# NOTE(review): the help string below is missing its closing bracket
# ("[default: 'default'"); it is a runtime string, so left untouched here.
parser.add_argument("-bs", "--backup_set", default=DEFAULT_BACKUP_SET, help="A backup set name (determines retention correlation). [default: 'default'")
parser.add_argument("-r", "--region", help="A region substring selector (e.g. 'us-west')")
parser.add_argument("--access_key_id", dest='aws_access_key_id', help="Your AWS Access Key ID")
parser.add_argument("--secret_access_key", dest='aws_secret_access_key', help="Your AWS Secret Access Key")
parser.add_argument("-l", "--log", dest='log_level', default='WARNING',
                    choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                    help="The logging level to use. [default: WARNING]")
args = parser.parse_args()
configure_logging(log, args.log_level)
def isSelected(region):
    """Return True if the region's name contains the --region substring."""
    return args.region in region.name
# execute business logic
# NOTE: this script uses Python 2 syntax (print statement, comma-form
# except clause) and will not run under Python 3.
credentials = {'aws_access_key_id': args.aws_access_key_id, 'aws_secret_access_key': args.aws_secret_access_key}
heading = "Snapshotting EBS volumes"
regions = boto.ec2.regions()
if args.region:
    heading += " (filtered by region '" + args.region + "')"
    regions = filter(isSelected, regions)
filters = None
if args.filter:
    # Parse each -f argument as a KEY=VALUE pair into a filters dict.
    # NOTE(review): the comprehension variable shadows the builtin
    # `filter` used just above (harmless in Py2 but worth renaming).
    filters = dict([filter.split('=') for filter in args.filter])
    log.info(args.filter)
    log.debug(filters)
log.info(args.resource_ids)
backup_set = args.backup_set if args.backup_set else DEFAULT_BACKUP_SET
log.debug(backup_set)
print heading + ":"
# Snapshot matching volumes region by region; a failure in one region is
# logged and does not abort the remaining regions.
for region in regions:
    try:
        ec2 = boto.connect_ec2(region=region, **credentials)
        volumes = ec2.get_all_volumes(volume_ids=args.resource_ids, filters=filters)
        print region.name + ": " + str(len(volumes)) + " volumes"
        create_snapshots(ec2, volumes, backup_set, args.description)
    except boto.exception.BotoServerError, e:
        log.error(e.error_message)
| {
"content_hash": "6b52728aab63ac2241e0c3048c7f761c",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 152,
"avg_line_length": 45.96363636363636,
"alnum_prop": 0.7021360759493671,
"repo_name": "cityindex-attic/ec2-clock-accuracy-research",
"id": "a71a91a2d9c3b42fe86d2197839fd9ed45266e3a",
"size": "2546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infrastructure/scripts/create-snapshots.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "90852"
},
{
"name": "Ruby",
"bytes": "12902"
},
{
"name": "Shell",
"bytes": "154"
}
],
"symlink_target": ""
} |
"""@package src.wi.utils.messages_ajax
@author Piotr Wójcik
@date 23.11.2010
"""
import json
from functools import wraps

from django.http import HttpResponse
from django.utils.translation import ugettext as _

from wi.utils.exceptions import RestErrorException
def success(message, status=0):
    """
    Return a json-encoded ajax response carrying ``message``.

    ``status`` 0 denotes success; non-zero values denote errors.
    """
    payload = {'status': status, 'data': message}
    return HttpResponse(content=json.dumps(payload),
                        content_type="application/json")
def error(message):
    """
    Returns json encoded ajax response marked as an error (status 8000).
    """
    return success(message, status=8000)
def success_with_key(message, filename, name, status=0):
    """
    Return a json-encoded ajax response that also carries a file reference.
    """
    payload = {'status': status,
               'data': message,
               'file': filename,
               'name': name}
    return HttpResponse(content=json.dumps(payload),
                        content_type="application/json")
def ajax_request(view_func):
    """
    Decorator checking whether request is an AJAX request.

    Non-AJAX requests receive a json error response. A RestErrorException
    raised by the wrapped view is converted into a json error response too.
    """
    # IMPROVEMENT: functools.wraps preserves the wrapped view's metadata
    # (__name__, __doc__), which matters for debugging and URL resolvers.
    @wraps(view_func)
    def wrap(request, *args, **kwds):
        """
        Returned decorated function.
        """
        if not request.is_ajax():
            return error(_('Not AJAX request!'))
        try:
            return view_func(request, *args, **kwds)
        except RestErrorException as ex:
            return error(ex.value)
    return wrap
| {
"content_hash": "e68d40af1c6f514c6c6e29071abbff38",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 145,
"avg_line_length": 25.84,
"alnum_prop": 0.6509287925696594,
"repo_name": "cc1-cloud/cc1",
"id": "756f9b6774bde24b9404f81a37981c458aab36cc",
"size": "1991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/wi/utils/messages_ajax.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "63829"
},
{
"name": "HTML",
"bytes": "323260"
},
{
"name": "JavaScript",
"bytes": "458924"
},
{
"name": "Python",
"bytes": "1466456"
},
{
"name": "Shell",
"bytes": "14317"
}
],
"symlink_target": ""
} |
import wrappers
import piqi
import piq
import piqi_of_json
# config
#
# TODO: make configurable
# When True, scalars are coerced between compatible types during piq
# parsing (e.g. numbers and booleans accepted where strings are expected).
piq_relaxed_parsing = True
# state
#
# TODO, XXX: wrap in a Parser class?
# Current parse nesting depth; incremented around records/lists and
# recorded on ParseError so try-parsing can backtrack (see try_parse_field).
_depth = 0
class ParseError(Exception):
    """Parse failure at a known location within a piq document."""

    def __init__(self, loc, error):
        self.loc = loc
        self.error = error
        # snapshot of the module-level parse depth at failure time;
        # consulted when try-parsing decides whether to backtrack
        self.depth = _depth
def make_scalar(x, loc):
    # Wrap a plain Python value as a piqi scalar tagged with location *loc*.
    if isinstance(x, piq.ObjectProxy):
        # prevent leaking piq-wrapped objects into piqi objects, it could lead
        # to all sorts of problems, including json.dumps() crashing on such
        # values
        assert False
    return piqi.make_scalar(x, loc)
# top-level call
def parse(typename, x):
    """Parse input *x* as a piq value of type *typename*.

    Both piq-level and piqi-level failures are reported uniformly as
    piqi.ParseError.
    """
    # init parsing state
    global _depth
    _depth = 0
    # XXX: convert piq.ParseError into piqi.ParseError
    try:
        piq_ast = piq.parse(x, expand_splices=True, expand_names=True)
    except piq.ParseError as e:
        raise piqi.ParseError(e.loc, e.error)
    # convert .ParseError into piqi.ParseError
    try:
        return parse_obj(typename, piq_ast)
    except ParseError as e:
        raise piqi.ParseError(e.loc, e.error)
def parse_obj(typename, x, try_mode=False, nested_variant=False, labeled=False):
    """Parse piq AST node *x* against the type named *typename*.

    try_mode       -- parse speculatively; failures may be backtracked
    nested_variant -- *x* is being matched inside an enclosing variant
    labeled        -- *x* appeared as the value of a named field
    """
    piqi_type = piqi.get_piqi_type(typename)
    if piqi_type:  # one of built-in types
        if piqi_type == 'bool':
            return parse_bool(x)
        elif piqi_type == 'int':
            return parse_int(x)
        elif piqi_type == 'float':
            return parse_float(x)
        elif piqi_type == 'string':
            return parse_string(x)
        elif piqi_type == 'binary':
            return parse_binary(x)
        elif piqi_type == 'any':
            return parse_any(x)
        else:
            assert False
    else:  # user-defined type
        type_tag, typedef = piqi.resolve_type(typename)
        if type_tag == 'record':
            return parse_record(typedef, x, labeled=labeled)
        elif type_tag == 'list':
            return parse_list(typedef, x)
        elif type_tag == 'variant':
            return parse_variant(typedef, x, try_mode=try_mode, nested_variant=nested_variant)
        elif type_tag == 'enum':
            return parse_enum(typedef, x, try_mode=try_mode, nested_variant=nested_variant)
        elif type_tag == 'alias':
            return parse_alias(typedef, x, try_mode=try_mode, nested_variant=nested_variant, labeled=labeled)
        else:
            assert False
def parse_list(t, x):
    """Parse *x* against list typedef *t*.

    Raises ParseError when *x* is not a piq list form.
    """
    if isinstance(x, piq.List):
        # TODO: fix this ugliness, for parse_record too
        global _depth
        _depth += 1
        # bug fix: the original passed an undefined name 'l' here, raising
        # NameError for every list; the list node's items are what must be
        # parsed (cf. parse_record, which uses x.items the same way)
        res = do_parse_list(t, x.items, loc=x.loc)
        _depth -= 1
        return res
    else:
        raise ParseError(x.loc, 'list expected')
def do_parse_list(t, l, loc=None):
    """Parse each element of *l* against the list's item type."""
    parsed = [parse_obj(t['type'], elem) for elem in l]
    return piqi.make_list(parsed, t['name'], loc)
def parse_record(t, x, labeled=False):
    """Parse *x* as record typedef *t*.

    Accepts a piq list; a bare (non-list) value is accepted only for a
    labeled record that opts in via 'piq_allow_unnesting'.
    """
    if isinstance(x, piq.List):
        l = x.items
        loc = x.loc
    elif labeled and t.get('piq_allow_unnesting'):
        # allow field unnesting for a labeled record
        l = [x]
        loc = x.loc
    else:
        raise ParseError(x.loc, 'list expected')
    global _depth
    _depth += 1
    # NOTE: pass locating information as a separate parameter since empty
    # list is unboxed and doesn't provide correct location information
    res = do_parse_record(t, l, loc=loc)
    _depth -= 1;
    return res
def do_parse_record(t, l, loc=None):
    """Parse the fields of record typedef *t* from piq elements *l*.

    Required fields are consumed before optional/repeated ones; any
    element left unconsumed afterwards is reported as an unknown field.
    """
    # parse required fields first
    required = [f for f in t['field'] if f['mode'] == 'required']
    others = [f for f in t['field'] if f['mode'] != 'required']
    parsed_fields = []
    for field_spec in required + others:
        value, l = parse_field(field_spec, l, loc=loc)
        parsed_fields.append((piqi.make_field_name(field_spec), value))
    for leftover in l:
        raise ParseError(leftover.loc, 'unknown field: ' + str(leftover))
    return piqi.make_record(parsed_fields, t['name'], loc)
def parse_field(t, l, loc=None):
    """Parse one field spec *t* out of the element list *l*.

    A spec without a type is a flag; otherwise it is a regular field.
    Returns (parsed value, remaining elements).
    """
    if not t.get('type'):
        return do_parse_flag(t, l, loc=loc)
    return do_parse_field(t, l, loc=loc)
def maybe_report_duplicate_field(name, l):
    """Raise ParseError at the second occurrence when *l* holds duplicates."""
    # TODO: warnings on several duplicates fields
    if len(l) > 1:
        duplicate = l[1]
        raise ParseError(duplicate.loc, 'duplicate field ' + quote(name))
def quote(name):
    """Return *name* wrapped in single quotes for error messages."""
    return "'%s'" % (name,)
def do_parse_flag(t, l, loc=None):
    """Parse flag *t* (a typeless field) out of elements *l*.

    A flag is true when present as a bare name or named boolean true,
    false when absent or named boolean false. Returns
    (bool scalar, remaining elements).
    """
    name = piqi.name_of_field(t)
    # NOTE: flags can't be positional so we only have to look for them by name
    res, rem = find_flags(name, t.get('piq_alias'), l)
    if res == []:
        # missing flag implies False value
        return make_scalar(False, loc), rem
    else:
        x = res[0]
        maybe_report_duplicate_field(name, res)
        if isinstance(x, piq.Name) or (isinstance(x, piq.Named) and isinstance(x.value, piq.Scalar) and x.value.value == True):
            # flag is considered as present when it is represented either as name
            # w/o value or named boolean true value
            return make_scalar(True, loc), rem
        elif isinstance(x, piq.Named) and isinstance(x.value, piq.Scalar) and x.value.value == False:
            # flag is considered missing/unset when its value is false
            return make_scalar(False, loc), rem
        else:
            # there are no other possible representations of flags
            assert False
def do_parse_field(t, l, loc=None):
    """Dispatch field parsing on the spec's mode (required/optional/repeated)."""
    name = piqi.name_of_field(t)
    field_type = t['type']
    mode = t['mode']
    if mode == 'required':
        return parse_required_field(t, name, field_type, l, loc=loc)
    if mode == 'optional':
        return parse_optional_field(t, name, field_type, t.get('default'), l)
    if mode == 'repeated':
        return parse_repeated_field(t, name, field_type, l)
    assert False
def parse_required_field(t, name, field_type, l, loc=None):
    """Find and parse required field *name* among elements *l*.

    Labeled occurrences are tried first, then positional (unlabeled)
    matching; raises ParseError when the field cannot be found at all.
    Returns (parsed value, remaining elements).
    """
    res, rem = find_fields(name, t.get('piq_alias'), field_type, l)
    if res == []:
        # try finding the first field which is successfully parsed by
        # 'parse_obj' for a given field type
        res, rem = find_first_parsed_field(t, field_type, l)
        if res is None:
            raise ParseError(loc, 'missing field ' + quote(name))
        else:
            return res, rem
    else:
        x = res[0]
        maybe_report_duplicate_field(name, res)
        obj = parse_obj(field_type, x, labeled=True)
        return obj, rem
def parse_optional_field(t, name, field_type, default, l):
    """Find and parse optional field *name* among elements *l*.

    Labeled occurrences are tried first, then positional matching; when
    the field is absent, falls back to the declared *default* (or None).
    Returns (parsed value or None, remaining elements).
    """
    res, rem = find_fields(name, t.get('piq_alias'), field_type, l)
    if res == []:
        # try finding the first field which is successfully parsed by
        # 'parse_obj' for a given field type
        res, rem = find_first_parsed_field(t, field_type, l)
        if res is None:
            res = parse_default(field_type, default)
            return res, l
        else:
            return res, rem
    else:
        x = res[0]
        maybe_report_duplicate_field(name, res)
        obj = parse_obj(field_type, x, labeled=True)
        return obj, rem
def parse_repeated_field(t, name, field_type, l):
    """Collect and parse all occurrences of repeated field *name* in *l*.

    If no labeled occurrence exists, falls back to collecting every
    element that try-parses as the field's type. Returns
    (list of parsed values, remaining elements).
    """
    res, rem = find_fields(name, t.get('piq_alias'), field_type, l)
    if res == []:
        # XXX: ignore errors occurring when unknown element is present in the
        # list allowing other fields to find their members among the list of
        # elements
        res, rem = find_all_parsed_fields(t, field_type, l)
        return res, rem
    else:
        # use strict parsing
        res = [parse_obj(field_type, x, labeled=True) for x in res]
        return res, rem
def parse_default(field_type, default):
    """Parse the JSON-encoded *default* for *field_type*; None when absent."""
    if default is None:
        return None
    # TODO, XXX: parse default in piqic-python instead of runtime
    return piqi_of_json.parse_default(field_type, default)
def find_first_parsed_field(t, field_type, l):
    """Return the first element of *l* that try-parses as *field_type*.

    Returns (parsed value or None, elements not consumed).
    """
    found = None
    rem = []
    for x in l:
        if found is None:
            parsed = try_parse_field(t, field_type, x)
            if parsed:
                found = parsed
                continue
        # already matched, or this element didn't parse: keep it
        rem.append(x)
    return found, rem
def find_all_parsed_fields(t, field_type, l):
    """Split *l* into (values that try-parse as *field_type*, the rest)."""
    parsed_values = []
    rem = []
    for elem in l:
        value = try_parse_field(t, field_type, elem)
        if value:
            parsed_values.append(value)
        else:
            rem.append(elem)
    return parsed_values, rem
def try_parse_field(field_spec, field_type, x):
    """Speculatively parse *x* as a positional (unlabeled) field.

    Returns the parsed value, or None when *x* cannot be this field.
    Parse errors raised at the current nesting depth are swallowed
    (backtracking); errors from deeper records/lists propagate.
    """
    type_tag, typedef = piqi.unalias(field_type)
    piq_positional = field_spec.get('piq_positional')
    if piq_positional == False:
        # this field must be always labeled according to the explicit
        # ".piq-positional false"
        return None
    elif not piq_positional and type_tag in ('record', 'list'):
        # all records and lists should be labeled (i.e. can't be positional)
        # unless explicitly overridden in the piqi spec by ".piq-positional
        # true"
        return None
    elif type_tag == 'any' and not field_type.get('name'):
        # NOTE, XXX: try-parsing of labeled any always failes
        return None
        # NOTE, XXX: try-parsing of unlabeled `any always succeeds
    else:
        global _depth
        depth = _depth
        try:
            return parse_obj(field_type, x, try_mode=True)
        except ParseError as e:
            # ignore errors which occur at the same parse depth, i.e. when
            # parsing everything except for lists and records which increment
            # depth
            if e.depth == depth:
                # restore the original depth
                _depth = depth
                return None
# find field by name, return found fields and remaining fields
def find_fields(name, alt_name, field_type, l):
    """Collect elements of *l* labeled *name* (or *alt_name*).

    A bare name labeling a boolean-typed field is treated as an implicit
    'true' value. Returns (matched values, remaining elements).
    """
    def name_matches(n):
        return (n == name or n == alt_name)
    res = []
    rem = []
    for x in l:
        if isinstance(x, piq.Named) and name_matches(x.name):
            res.append(x.value)
        elif isinstance(x, piq.Name) and name_matches(x.name):
            type_tag, typedef = piqi.unalias(field_type)
            if type_tag == 'bool':
                # allow omitting boolean constant for a boolean field by
                # interpreting the missing value as "true"
                piq_ast = piq.Scalar(True, x.loc)
                res.append(piq_ast)
            else:
                raise ParseError(x.loc, 'value must be specified for field ' + quote(x.name))
        else:
            rem.append(x)
    return res, rem
# find flags by name, return found flags and remaining fields
def find_flags(name, alt_name, l):
    """Collect elements of *l* that name flag *name* (or *alt_name*).

    Accepts both bare names and names with a boolean value; any other
    value for the flag is an error. Returns (matches, remaining elements).
    """
    def name_matches(n):
        return (n == name or n == alt_name)
    res = []
    rem = []
    for x in l:
        if isinstance(x, piq.Name) and name_matches(x.name):
            res.append(x)
        elif isinstance(x, piq.Named) and name_matches(x.name):
            # allow specifying true or false as flag values: true will be
            # interpreted as flag presence, false is treated as if the flag was
            # missing
            if isinstance(x.value.value, bool):
                res.append(x)
            else:
                raise ParseError(x.loc, 'only true and false can be used as values for flag ' + quote(x.name))
        else:
            rem.append(x)
    return res, rem
def parse_variant(t, x, try_mode=False, nested_variant=False):
    """Parse *x* against variant typedef *t*; returns a piqi variant object."""
    tag, value = parse_options(t['option'], x, try_mode=try_mode, nested_variant=nested_variant)
    return piqi.make_variant(tag, value, t['name'], x.loc)
def parse_enum(t, x, try_mode=False, nested_variant=False):
    """Parse *x* against enum typedef *t*; the option value is discarded."""
    tag, _value = parse_options(t['option'], x, try_mode=try_mode, nested_variant=nested_variant)
    return piqi.make_enum(tag, t['name'], x.loc)
class UnknownVariant(Exception):
    """Internal signal: no option of a nested variant matched the input."""
def parse_options(option_spec_list, x, try_mode=False, nested_variant=False):
    """Try each option spec in turn; return (tag, value) for the first match.

    Raises UnknownVariant for nested variants so the caller can continue
    searching, and ParseError at the top level.
    """
    for option_spec in option_spec_list:
        res = parse_option(option_spec, x, try_mode=try_mode)
        if res is None:
            # recurse into nameless nested variants/enums
            res = parse_nested_option(option_spec, x, try_mode=try_mode)
        if res is not None:  # success
            return res
    # none of the options matches
    if nested_variant:
        raise UnknownVariant
    raise ParseError(x.loc, 'unknown variant: ' + str(x))
def parse_option(t, x, try_mode=False):
    """Dispatch on the piq form of *x*: bare name, named value, or typed value."""
    if isinstance(x, piq.Name):
        return parse_name_option(t, x.name, loc=x.loc)
    if isinstance(x, piq.Named):
        return parse_named_option(t, x.name, x.value, loc=x.loc)
    return parse_option_by_type(t, x, try_mode=try_mode)
# recursively descent into non-terminal (i.e. nameless variant and enum) options
#
# NOTE: recurse into aliased nested variants as well
def parse_nested_option(t, x, try_mode=False):
    """Match *x* against a nameless option whose type is itself a variant/enum.

    Returns (tag, value) on success; None when the option is not a
    nested variant or none of its options matched.
    """
    option_name = piqi.name_of_option(t)
    option_type = t.get('type')
    if t.get('name') is None and option_type:
        type_tag, typedef = piqi.unalias(option_type)
        is_nested_variant = (type_tag == 'variant' or type_tag == 'enum')
        if is_nested_variant:
            try:
                tag = option_name
                value = parse_obj(option_type, x, try_mode=try_mode, nested_variant=True)
                return tag, value
            except UnknownVariant:
                pass
    return None
def parse_name_option(t, name, loc=None):
    """Match a bare piq name against option spec *t*.

    Returns (tag, None) on a match, None when the name does not match.
    Raises ParseError when the matched option actually requires a value.
    """
    option_name = piqi.name_of_option(t)
    if name != option_name and name != t.get('piq_alias'):
        return None
    if t.get('type'):
        raise ParseError(loc, 'value expected for option ' + quote(name))
    return option_name, None
def parse_named_option(t, name, x, loc=None):
    """Match a piq named value against option spec *t*.

    Returns (tag, parsed value) on a name match, None otherwise.
    Raises ParseError when the option takes no value but one was given.
    """
    option_name = piqi.name_of_option(t)
    if name == option_name or name == t.get('piq_alias'):
        option_type = t.get('type')
        if not option_type:
            # bug fix: the message and the quoted name were passed as two
            # separate arguments, which made ParseError.__init__ itself
            # crash with a TypeError instead of reporting the real error
            raise ParseError(loc, 'value can not be specified for option ' + quote(name))
        else:
            tag = option_name
            value = parse_obj(option_type, x, labeled=True)
            return tag, value
    else:
        return None
def parse_option_by_type(t, x, try_mode=False):
    """Match *x* against option spec *t* positionally, i.e. by value type.

    Returns (tag, parsed value) on success, None when the value's type
    does not fit the option. Constant-only options may also be matched
    by a bare word in relaxed parsing mode.
    """
    option_name = t.get('name')
    option_type = t.get('type')
    if option_name and not option_type:
        # try parsing word as a name, but only when the label is exact, i.e.
        # try_mode = false
        #
        # by doing this, we allow using --foo bar instead of --foo.bar in
        # relaxed piq parsing and getopt modes
        if isinstance(x, piq.Scalar) and isinstance(x.value, basestring):
            word = x.value
            if (word == option_name or word == t.get('piq_alias')) and piq_relaxed_parsing and not try_mode:
                tag = option_name
                value = None
                return tag, value
            else:
                return None
        else:
            return None
    elif option_type:
        parse = False
        type_tag, typedef = piqi.unalias(option_type)
        if isinstance(x, piq.Scalar):
            if type_tag == 'bool' and isinstance(x.value, bool):
                parse = True
            elif type_tag == 'int' and isinstance(x.value, int):
                parse = True
            elif type_tag == 'float' and isinstance(x.value, (int, float)):
                parse = True
            elif type_tag == 'string' and isinstance(x.value, basestring):
                parse = True
            elif type_tag == 'string' and isinstance(x.value, (int, float, bool)) and piq_relaxed_parsing:
                # bug fix: the original tested isinstance(..., (int, uint, float, bool));
                # 'uint' is not a Python name, so relaxed matching of a numeric
                # scalar against a string option raised NameError
                parse = True
            elif type_tag == 'binary' and isinstance(x.value, basestring):
                parse = True
        elif type_tag in ('record', 'list') and isinstance(x, piq.List):
            parse = True
        if parse:
            tag = piqi.name_of_option(t)
            value = parse_obj(option_type, x)
            return tag, value
        else:
            return None
    else:
        assert False
def parse_alias(t, x, try_mode=False, nested_variant=False, labeled=False):
    """Parse *x* against the aliased type, forwarding all parse flags."""
    return parse_obj(t['type'], x,
                     try_mode=try_mode,
                     nested_variant=nested_variant,
                     labeled=labeled)
def parse_bool(x):
    """Parse a piq boolean scalar."""
    if not (isinstance(x, piq.Scalar) and isinstance(x.value, bool)):
        raise ParseError(x.loc, 'bool constant expected')
    return make_scalar(x.value, x.loc)
def parse_int(x):
    """Parse a piq integer scalar."""
    if not (isinstance(x, piq.Scalar) and isinstance(x.value, int)):
        raise ParseError(x.loc, 'int constant expected')
    return make_scalar(x.value, x.loc)
def parse_float(x):
    """Parse a piq float scalar; integer literals are promoted to float."""
    if isinstance(x, piq.Scalar):
        if isinstance(x.value, float):
            return make_scalar(x.value, x.loc)
        if isinstance(x.value, int):
            # promote an integer literal to a float value
            return make_scalar(x.value * 1.0, x.loc)
    raise ParseError(x.loc, 'float constant expected')
def parse_string(x):
    """Parse a piq string scalar.

    In relaxed mode, numeric and boolean scalars are coerced to strings;
    booleans become 'true'/'false'.
    """
    if isinstance(x, piq.Scalar) and isinstance(x.value, basestring):
        # TODO: check for correct unicode
        return make_scalar(x.value, x.loc)
    elif isinstance(x, piq.Scalar) and isinstance(x.value, bool) and piq_relaxed_parsing:
        # bug fix: bool is a subclass of int, so this branch must be tested
        # before the generic numeric coercion below -- in the original it
        # came after and was unreachable, so booleans were rendered as
        # str(True)/'True' instead of the intended 'true'/'false'
        if x.value:
            return make_scalar('true', x.loc)
        else:
            return make_scalar('false', x.loc)
    elif isinstance(x, piq.Scalar) and isinstance(x.value, (int, float)) and piq_relaxed_parsing:
        return make_scalar(str(x.value), x.loc)
    else:
        raise ParseError(x.loc, 'string expected')
def parse_binary(x):
    """Parse a piq binary scalar (represented as a string)."""
    if not (isinstance(x, piq.Scalar) and isinstance(x.value, basestring)):
        raise ParseError(x.loc, 'binary expected')
    # TODO: check for 8-bit characters
    return make_scalar(x.value, x.loc)
def parse_any(x):
    # Parsing of the built-in 'any' type is intentionally unimplemented;
    # reaching this is a programming error, not a user input error.
    # TODO: not supported yet
    assert False
| {
"content_hash": "e4b069d39fa69c2732acc386944d0914",
"timestamp": "",
"source": "github",
"line_count": 560,
"max_line_length": 127,
"avg_line_length": 32.68928571428572,
"alnum_prop": 0.591554681525183,
"repo_name": "alavrik/piqi-python",
"id": "ab95c13612d9d557faa82de06774b71ee39e7b40",
"size": "18306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "piqi_of_piq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96426"
}
],
"symlink_target": ""
} |
from ._wrapper import *
from ._utils import *
from ._kernels import *
from ._settings import *
| {
"content_hash": "5af51ad085dcd168d1c98755e8ee8e10",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 24,
"avg_line_length": 23.75,
"alnum_prop": 0.7052631578947368,
"repo_name": "johnnygreco/hugs",
"id": "968a2389ba74916eb0f7191ce3895cfc656ab4b4",
"size": "95",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hugs/sextractor/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1396968"
},
{
"name": "Python",
"bytes": "168162"
}
],
"symlink_target": ""
} |
import ctypes
lib = ctypes.cdll.LoadLibrary("./libijson.so");
def compact(value):
    # Compact a JSON string via the C function ijson_compact() from
    # libijson. Compacted JSON can never be longer than its input, so an
    # input-sized output buffer is sufficient.
    out = ctypes.create_string_buffer(len(value))
    # NOTE(review): ijson_compact's restype is not declared, so ctypes
    # defaults to c_int; presumably it returns the number of bytes
    # written -- confirm against the C header.
    out_len = lib.ijson_compact(value, len(value), out)
    return out.raw[:out_len]
| {
"content_hash": "a71effb9c1e2c04cfc1795f6e894d346",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 55,
"avg_line_length": 27.375,
"alnum_prop": 0.6894977168949772,
"repo_name": "NegativeMjark/indolent-json",
"id": "dd4d0c4fa92c5bd95328895b7de86e94bd15bcc1",
"size": "219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ijson.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15250"
},
{
"name": "C++",
"bytes": "955"
},
{
"name": "Python",
"bytes": "219"
}
],
"symlink_target": ""
} |
from userprofile.models import UserProfile
from social_auth.backends.facebook import FacebookBackend
from social_auth.backends.twitter import TwitterBackend
def update_user_social_data(backend, details, response, social_user, uid,
                            user, *args, **kwargs):
    """Update the information for the user profile

    social-auth pipeline step: the first time a user comes through a
    provider, persist the provider-specific identifier on the local
    UserProfile.
    """
    user_profile, created = UserProfile.objects.get_or_create(user=user)
    if created:
        if backend.__class__ == FacebookBackend:
            # 'id' is Facebook's numeric user id in the OAuth response
            user_profile.facebook_uid = response['id']
        elif backend.__class__ == TwitterBackend:
            user_profile.twitter = response['screen_name']
        user_profile.save()
| {
"content_hash": "65f281d09e7c2270a53134ce9f1c0b95",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 73,
"avg_line_length": 33.9,
"alnum_prop": 0.668141592920354,
"repo_name": "andresfcardenas/marketing-platform",
"id": "bbcacdeb4efa180909c9c4cd15e1e5e49e591f0f",
"size": "725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userprofile/pipeline.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "470943"
},
{
"name": "JavaScript",
"bytes": "165430"
},
{
"name": "Python",
"bytes": "161259"
}
],
"symlink_target": ""
} |
from malscrape.misc import validate_media
from bs4 import BeautifulSoup as soup
import requests
def extract_info(html):
    """Parse a MyAnimeList media page and return its scraped fields as a dict."""
    page_data = soup(html, 'html.parser')
    return {
        'members': _extract_members(page_data),
        'name': _extract_mal_title(page_data),
        'eng_name': _extract_english_name(page_data),
        'tags': _extract_genres(page_data),
        'portrait_cover': _extract_cover_url(page_data),
        'authors': _extract_authors(page_data),
        'publishing_type': _extract_publishing_type(page_data),
        'related_media': _extract_all_related_media(page_data),
        'closely_related_media': _extract_closely_related_media(page_data),
    }
def _extract_members(page):
    """Return the 'members' (popularity) count as an int."""
    raw = page.find('span', 'numbers members').strong.text
    # the number may contain thousands separators; keep digits only
    return int(''.join(ch for ch in raw if ch.isdigit()))
def _extract_mal_title(page):
    """Return the page's canonical title."""
    title_tag = page.find('span', itemprop='name')
    return title_tag.text
def _extract_genres(page):
    """Return the list of genre names, or None when the section is absent."""
    try:
        genres_div = page.find('span', string='Genres:').parent
        return [a.attrs['title'] for a in genres_div.find_all('a')]
    except (AttributeError, KeyError):
        # AttributeError: no 'Genres:' label on the page (find() -> None);
        # KeyError: an anchor without a title attribute.
        # Narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt/SystemExit.
        return None
def _extract_english_name(page):
    """Return the English title, or None when not listed."""
    label = page.find('span', string='English:')
    return label.next_sibling.strip() if label else None
def _extract_authors(page):
    """Return author names, or None when the 'Authors:' section is absent."""
    try:
        authors_div = page.find('span', string='Authors:').parent
        return [a.text for a in authors_div.find_all('a')]
    except AttributeError:
        # no 'Authors:' label on the page (find() returned None).
        # Narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt/SystemExit.
        return None
def _extract_cover_url(page):
    """Return the portrait cover image URL, or None when missing."""
    try:
        return page.find(
            'img',
            itemprop='image',
            class_='ac'
        ).attrs['src']
    except (AttributeError, KeyError):
        # AttributeError: no matching <img> (find() -> None);
        # KeyError: an <img> without a src attribute.
        # Narrowed from a bare 'except:'.
        return None
def _extract_publishing_type(page):
    """Return the publishing type (the 'Type:' entry), or None when absent."""
    try:
        previous_tag = page.find('span', string='Type:')
        try:
            # the type is usually a link next to the label
            return previous_tag.parent.a.text.strip()
        except AttributeError:
            # no anchor: fall back to the plain text after the label
            pass
        return previous_tag.next_sibling.strip()
    except AttributeError:
        # no 'Type:' label at all, or no usable sibling text.
        # Both handlers narrowed from bare 'except:' clauses.
        return None
def __split_related_media_href(href):
    """Split an MAL href like '/manga/123/Title' into (media_type, media_id).

    Returns None when the path is not a valid media link.
    """
    # renamed locals: the original shadowed the builtins 'type' and 'id'
    _, media_type, media_id, *_ = href.split('/')
    try:
        # validate_media raises ValueError for unknown media types;
        # int() raises ValueError for a non-numeric id segment
        validate_media(media_type), int(media_id)
    except ValueError:
        return None
    return media_type, media_id
def _extract_all_related_media(page_data):
    """Return (type, id) pairs for every entry in the 'related' table."""
    try:
        table = page_data.find(
            'table',
            class_='anime_detail_related_anime')
        parsed = [
            __split_related_media_href(a.attrs['href'])
            for a in table.find_all('a')
        ]
        # drop links that were not valid media references
        return [media for media in parsed if media]
    except Exception:
        return None
def _extract_closely_related_media(page_data):
    """Return (type, id) pairs for tightly related entries only.

    'Closely related' means side stories, sequels, prequels, summaries
    and parent stories; adaptations and alternative versions are excluded.
    """
    close_relations = [
        'Side story:',
        #'Alternative version:',
        'Sequel:',
        'Prequel:',
        'Summary:',
        'Parent story:',
    ]
    table = page_data.find('table', class_='anime_detail_related_anime')
    try:
        tbody_elems = list(table.next_element.descendants)
    except Exception:
        return None
    series = []
    # walk adjacent (label, content) pairs: a label string names the
    # relation, and the element that follows holds that relation's links
    for elem_A, elem_B in zip(tbody_elems, tbody_elems[1:]):
        if elem_A not in close_relations:
            continue
        related_urls = (a.attrs['href'] for a in elem_B.find_all('a'))
        series += [__split_related_media_href(url) for url in related_urls]
    # drop links that were not valid media references
    return [media for media in series if media]
def media_url(id, type):
    """Build the MyAnimeList page URL for the media with this id and type."""
    return f'https://myanimelist.net/{type}/{id}'
def fetch_info(id, type):
    """
    Returns a dictionary with information about the media(id, type).
    Currently only fetches 'members'(popularity count).

    Returns None on connection/parse failures and [] for a 404 page.
    """
    validate_media(type)
    url = media_url(id, type)
    # bug fix: 'response' must exist before the try block -- if
    # requests.get itself raised, the generic handler below crashed with
    # NameError while reading response.status_code
    response = None
    try:
        response = requests.get(url)
        html = response.content
        return extract_info(html)
    except requests.ConnectionError:
        print(f"Timed out on fetching {type}:{id} info")
        return None
    except Exception as err:
        status = response.status_code if response is not None else None
        print(id, type, '-', status, '-', err)
        if status == 404:
            return []
        return None
# Advertise which fields fetch_info can currently populate.
fetch_info.FIELDS_IMPLEMENTED = [
    'members',
    'name',
    'eng_name',
    'tags',
    'portrait_cover',
    'authors',
    'publishing_type',
    'related_media',
    'closely_related_media',
]
if __name__ == '__main__':
from pprint import pprint as print
z=fetch_info(2, 'manga')
print(z) | {
"content_hash": "3379924f53092cc561f5e283bd95669c",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 81,
"avg_line_length": 26.12280701754386,
"alnum_prop": 0.5845086187597941,
"repo_name": "Arctice/anime-birb-uk",
"id": "93035370ed8bf6e36fa382e1b1e6ef0c218244e3",
"size": "4467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mal_recs/malscrape/media_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "294"
},
{
"name": "CSS",
"bytes": "13665"
},
{
"name": "Clojure",
"bytes": "6358"
},
{
"name": "HTML",
"bytes": "13531"
},
{
"name": "Python",
"bytes": "40436"
},
{
"name": "Shell",
"bytes": "137"
}
],
"symlink_target": ""
} |
import dynet
import numpy as np
class CRF():
    def __init__(self, model, id_to_tag):
        """Add CRF transition parameters to the dynet *model*.

        Two extra tag ids are reserved beyond the real tag set: b_id for
        the sequence-start symbol and e_id for the sequence-end symbol.
        """
        self.id_to_tag = id_to_tag
        self.tag_to_id = {tag: id for id, tag in id_to_tag.items()}
        self.n_tags = len(self.id_to_tag)
        self.b_id = len(self.tag_to_id)
        self.e_id = len(self.tag_to_id) + 1
        # transitions[j][i] scores a move from tag i to tag j
        # (see score_sentence: pick(transitions[next_tag], prev_tag))
        self.transitions = model.add_lookup_parameters((self.n_tags+2,
                                                        self.n_tags+2),
                                                       name="transitions")
    def score_sentence(self, observations, tags):
        """Return the CRF score of tag sequence *tags* for *observations*.

        Sums, per step, the transition score from the previous tag plus
        the emission score of the chosen tag, and finishes with the
        transition into the end symbol.
        """
        assert len(observations) == len(tags)
        # NOTE(review): score_seq only records intermediate .value() calls
        # (forcing evaluation); it is never returned -- debugging aid?
        score_seq = [0]
        score = dynet.scalarInput(0)
        # prepend the start symbol so a transition is defined for step 0
        tags = [self.b_id] + tags
        for i, obs in enumerate(observations):
            # transitions[next_tag][prev_tag] + emission score of next_tag
            score = score \
                + dynet.pick(self.transitions[tags[i + 1]], tags[i])\
                + dynet.pick(obs, tags[i + 1])
            score_seq.append(score.value())
        score = score + dynet.pick(self.transitions[self.e_id], tags[-1])
        return score
    def viterbi_loss(self, observations, tags):
        """Structured hinge-style loss: Viterbi score minus gold score.

        Returns (loss expression, predicted tag sequence); the loss is
        zero when the Viterbi path already equals *tags*.
        """
        # pad each emission vector with -1e10 scores for the two reserved
        # (start/end) tags so they are never predicted
        observations = [dynet.concatenate([obs, dynet.inputVector([-1e10, -1e10])], d=0) for obs in
                        observations]
        viterbi_tags, viterbi_score = self.viterbi_decoding(observations)
        if viterbi_tags != tags:
            gold_score = self.score_sentence(observations, tags)
            return (viterbi_score - gold_score), viterbi_tags
        else:
            return dynet.scalarInput(0), viterbi_tags
    def neg_log_loss(self, observations, tags):
        """CRF negative log-likelihood: log-partition minus gold score."""
        # pad each emission vector with -1e10 scores for the two reserved
        # (start/end) tags so they are never chosen
        observations = [dynet.concatenate([obs, dynet.inputVector([-1e10, -1e10])], d=0) for obs in observations]
        gold_score = self.score_sentence(observations, tags)
        forward_score = self.forward(observations)
        return forward_score - gold_score
    def forward(self, observations):
        """Compute the log-partition: log sum over all tag paths of exp(score).

        Standard CRF forward algorithm carried out in log space.
        """
        def log_sum_exp(scores):
            # numerically stable log-sum-exp over the (n_tags+2) tag scores:
            # subtract the max before exponentiating, then add it back
            npval = scores.npvalue()
            argmax_score = np.argmax(npval)
            max_score_expr = dynet.pick(scores, argmax_score)
            max_score_expr_broadcast = dynet.concatenate([max_score_expr] * (self.n_tags+2))
            return max_score_expr + dynet.log(
                dynet.sum_dims(dynet.transpose(dynet.exp(scores - max_score_expr_broadcast)), [1]))

        # alphas start at ~-inf except for the start symbol
        init_alphas = [-1e10] * (self.n_tags + 2)
        init_alphas[self.b_id] = 0
        for_expr = dynet.inputVector(init_alphas)
        for idx, obs in enumerate(observations):
            alphas_t = []
            for next_tag in range(self.n_tags+2):
                obs_broadcast = dynet.concatenate([dynet.pick(obs, next_tag)] * (self.n_tags + 2))
                # previous alphas + transition into next_tag + its emission
                next_tag_expr = for_expr + self.transitions[next_tag] + obs_broadcast
                alphas_t.append(log_sum_exp(next_tag_expr))
            for_expr = dynet.concatenate(alphas_t)
        # final transition into the end symbol
        terminal_expr = for_expr + self.transitions[self.e_id]
        alpha = log_sum_exp(terminal_expr)
        return alpha
    def viterbi_decoding(self, observations):
        """Return (best tag path, its score expression) via Viterbi decoding."""
        backpointers = []
        init_vvars = [-1e10] * (self.n_tags + 2)
        init_vvars[self.b_id] = 0  # <Start> has all the probability
        for_expr = dynet.inputVector(init_vvars)
        trans_exprs = [self.transitions[idx] for idx in range(self.n_tags + 2)]
        for obs in observations:
            bptrs_t = []
            vvars_t = []
            for next_tag in range(self.n_tags + 2):
                # best previous tag for each candidate next tag
                next_tag_expr = for_expr + trans_exprs[next_tag]
                next_tag_arr = next_tag_expr.npvalue()
                best_tag_id = np.argmax(next_tag_arr)
                bptrs_t.append(best_tag_id)
                vvars_t.append(dynet.pick(next_tag_expr, best_tag_id))
            # add emission scores once per step, after the max over transitions
            for_expr = dynet.concatenate(vvars_t) + obs
            backpointers.append(bptrs_t)
        # Perform final transition to terminal
        terminal_expr = for_expr + trans_exprs[self.e_id]
        terminal_arr = terminal_expr.npvalue()
        best_tag_id = np.argmax(terminal_arr)
        path_score = dynet.pick(terminal_expr, best_tag_id)
        # Reverse over the backpointers to get the best path
        best_path = [best_tag_id]  # Start with the tag that was best for terminal
        for bptrs_t in reversed(backpointers):
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(best_tag_id)
        start = best_path.pop()  # Remove the start symbol
        best_path.reverse()
        assert start == self.b_id
        # Return best path and best path's score
return best_path, path_score | {
"content_hash": "3fa0b093003ac98a558bd233875ef4e4",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 113,
"avg_line_length": 43.12820512820513,
"alnum_prop": 0.5661910424098295,
"repo_name": "onurgu/ner-tagger-tensorflow",
"id": "5c88233baeb93a072bd0368628f3e0248b191dad",
"size": "5119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "12728"
},
{
"name": "Python",
"bytes": "91338"
}
],
"symlink_target": ""
} |
from setuptools import setup
import version
# Package definition for distci; the version string is derived from git
# metadata (see version.py) rather than hard-coded.
setup(
    name = 'distci',
    version = version.git_version(),
    packages = [ 'distci',
                 'distci.frontend',
                 'distci.distcilib',
                 'distci.worker',
                 'distci.worker.build_control',
                 'distci.worker.git_checkout',
                 'distci.worker.execute_shell',
                 'distci.worker.publish_artifacts',
                 'distci.worker.copy_artifacts',
                 'distci.cli' ],
    package_dir = { 'distci': 'src/distci' },
    # console entry points: one CLI plus one daemon per worker type
    entry_points = {
        'console_scripts': [
            'distci = distci.cli.__main__:main_entry',
            'distci-build-control-worker = distci.worker.build_control.__main__:main_entry',
            'distci-git-checkout-worker = distci.worker.git_checkout.__main__:main_entry',
            'distci-execute-shell-worker = distci.worker.execute_shell.__main__:main_entry',
            'distci-publish-artifacts-worker = distci.worker.publish_artifacts.__main__:main_entry',
            'distci-copy-artifacts-worker = distci.worker.copy_artifacts.__main__:main_entry'
        ]
    },
    author = 'Heikki Nousiainen',
    author_email = 'Heikki.Nousiainen@F-Secure.com',
    url = 'http://github.com/F-Secure/distci',
    # static web UI assets installed alongside the frontend package
    data_files = [('distci/frontend/ui',
                   ['src/ui/index.html']),
                  ('distci/frontend/ui/js',
                   ['src/ui/js/app.js',
                    'src/ui/js/controllers.js']),
                  ('distci/frontend/ui/html',
                   ['src/ui/html/jobbuildstate.html',
                    'src/ui/html/jobbuilds.html',
                    'src/ui/html/jobs.html']),
                  ('distci/frontend/ui/css',
                   ['src/ui/css/app.css'])]
)
| {
"content_hash": "a344fc06534635a3235284912a6e82fa",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 100,
"avg_line_length": 42.395348837209305,
"alnum_prop": 0.521667580910587,
"repo_name": "F-Secure/distci",
"id": "67844a532e1d3779b80f67b6c3db48a51a8c2f94",
"size": "1845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "366"
},
{
"name": "JavaScript",
"bytes": "1855"
},
{
"name": "Python",
"bytes": "206984"
},
{
"name": "Shell",
"bytes": "10335"
}
],
"symlink_target": ""
} |
import pytest
from six import PY2
from datadog_checks.dev.tooling.configuration.consumers.example import DESCRIPTION_LINE_LENGTH_LIMIT
from ..utils import get_example_consumer, normalize_yaml
def test_option_no_section():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: ad_identifiers
overrides:
value.example:
- httpd
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param ad_identifiers - list of strings - required
## A list of container identifiers that are used by Autodiscovery to identify
## which container the check should be run against. For more information, see:
## https://docs.datadoghq.com/agent/guide/ad_identifiers/
#
ad_identifiers:
- httpd
"""
)
def test_section_with_option():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: init_config
options:
- name: foo
description: foo words
value:
type: string
- name: bar
description: bar words
value:
type: number
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## All options defined here are available to all instances.
#
init_config:
## @param foo - string - optional
## foo words
#
# foo: <FOO>
## @param bar - number - optional
## bar words
#
# bar: <BAR>
"""
)
def test_section_with_option_hidden():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: init_config
options:
- name: foo
description: foo words
value:
type: string
- name: bar
description: bar words
hidden: true
value:
type: number
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## All options defined here are available to all instances.
#
init_config:
## @param foo - string - optional
## foo words
#
# foo: <FOO>
"""
)
# A whole section marked `hidden: true` is omitted entirely; only the
# remaining visible section is rendered.
def test_section_hidden():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: init_config
hidden: true
options:
- name: foo
description: foo words
value:
type: string
- template: instances
options:
- name: bar
description: bar words
required: true
value:
type: string
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## Every instance is scheduled independently of the others.
#
instances:
## @param bar - string - required
## bar words
#
- bar: <BAR>
"""
)
# Options are rendered in descending `display_priority` order (default 0),
# regardless of their declaration order.
def test_section_with_option_display_priority():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: init_config
options:
- name: fourth
description: fourth words
display_priority: -5
value:
type: string
- name: fifth
description: fifth words
display_priority: -50
value:
type: string
- name: third
description: third words
# default display_priority: 0
value:
type: string
- name: first
description: first words
display_priority: 100
value:
type: number
- name: second
description: second words
display_priority: 10
value:
type: number
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## All options defined here are available to all instances.
#
init_config:
## @param first - number - optional
## first words
#
# first: <FIRST>
## @param second - number - optional
## second words
#
# second: <SECOND>
## @param third - string - optional
## third words
#
# third: <THIRD>
## @param fourth - string - optional
## fourth words
#
# fourth: <FOURTH>
## @param fifth - string - optional
## fifth words
#
# fifth: <FIFTH>
"""
)
# An option with an explicit `example` but no `value` schema renders the
# example verbatim with no `@param` header.
def test_section_example():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: foo words
example: here
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## foo words
#
# foo: here
"""
)
# The `logs` template's structured example is rendered commented out,
# preserving the key order and indentation of the mapping entries.
@pytest.mark.skipif(PY2, reason='Dictionary key order is not guaranteed in Python 2')
def test_section_example_indent():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: init_config
options:
- name: foo
description: foo words
value:
type: string
- template: logs
example:
- type: file
path: /var/log/apache2/access.log
source: apache
service: apache
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## All options defined here are available to all instances.
#
init_config:
## @param foo - string - optional
## foo words
#
# foo: <FOO>
## Log Section
##
## type - required - Type of log input source (tcp / udp / file / windows_event).
## port / path / channel_path - required - Set port if type is tcp or udp.
## Set path if type is file.
## Set channel_path if type is windows_event.
## source - required - Attribute that defines which integration sent the logs.
## encoding - optional - For file specifies the file encoding. Default is utf-8. Other
## possible values are utf-16-le and utf-16-be.
## service - optional - The name of the service that generates the log.
## Overrides any `service` defined in the `init_config` section.
## tags - optional - Add tags to the collected logs.
##
## Discover Datadog log collection: https://docs.datadoghq.com/logs/log_collection/
#
# logs:
# - type: file
# path: /var/log/apache2/access.log
# source: apache
# service: apache
"""
)
# Same as test_section_example_indent, but `required: true` on the logs
# template makes its example render uncommented.
@pytest.mark.skipif(PY2, reason='Dictionary key order is not guaranteed in Python 2')
def test_section_example_indent_required():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: init_config
options:
- name: foo
description: foo words
value:
type: string
- template: logs
required: true
example:
- type: file
path: /var/log/apache2/access.log
source: apache
service: apache
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## All options defined here are available to all instances.
#
init_config:
## @param foo - string - optional
## foo words
#
# foo: <FOO>
## Log Section
##
## type - required - Type of log input source (tcp / udp / file / windows_event).
## port / path / channel_path - required - Set port if type is tcp or udp.
## Set path if type is file.
## Set channel_path if type is windows_event.
## source - required - Attribute that defines which integration sent the logs.
## encoding - optional - For file specifies the file encoding. Default is utf-8. Other
## possible values are utf-16-le and utf-16-be.
## service - optional - The name of the service that generates the log.
## Overrides any `service` defined in the `init_config` section.
## tags - optional - Add tags to the collected logs.
##
## Discover Datadog log collection: https://docs.datadoghq.com/logs/log_collection/
#
logs:
- type: file
path: /var/log/apache2/access.log
source: apache
service: apache
"""
)
# With both sections present, a required instance option is rendered
# uncommented on the first line after the list dash.
def test_section_multiple_required():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: init_config
options:
- name: foo
description: foo words
value:
type: string
- template: instances
options:
- name: bar
description: bar words
required: true
value:
type: string
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## All options defined here are available to all instances.
#
init_config:
## @param foo - string - optional
## foo words
#
# foo: <FOO>
## Every instance is scheduled independently of the others.
#
instances:
## @param bar - string - required
## bar words
#
- bar: <BAR>
"""
)
# When no instance option is required, a bare dash placeholder introduces
# the instance and all options render commented out.
def test_section_multiple_no_required():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: init_config
options:
- name: foo
description: foo words
value:
type: string
- template: instances
options:
- name: bar
description: bar words
value:
type: string
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## All options defined here are available to all instances.
#
init_config:
## @param foo - string - optional
## foo words
#
# foo: <FOO>
## Every instance is scheduled independently of the others.
#
instances:
-
## @param bar - string - optional
## bar words
#
# bar: <BAR>
"""
)
# A required option that is not first in the instance still renders
# uncommented, after the commented optional one. (Errors are ignored here.)
def test_section_multiple_required_not_first():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: init_config
options:
- name: foo
description: foo words
value:
type: string
- template: instances
options:
- name: foo
description: foo words
value:
type: string
- name: bar
description: bar words
required: true
value:
type: string
"""
)
files = consumer.render()
contents, _ = files['test.yaml.example']
assert contents == normalize_yaml(
"""
## All options defined here are available to all instances.
#
init_config:
## @param foo - string - optional
## foo words
#
# foo: <FOO>
## Every instance is scheduled independently of the others.
#
instances:
-
## @param foo - string - optional
## foo words
#
# foo: <FOO>
## @param bar - string - required
## bar words
#
bar: <BAR>
"""
)
# An `object`-typed value is documented as `mapping` and its example
# mapping is rendered commented out.
def test_option_object_type():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
value:
type: object
example:
bar: it
properties:
- name: bar
type: string
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param foo - mapping - optional
## words
#
# foo:
# bar: it
"""
)
# An array of arrays is documented as `list of lists` and its nested
# example is rendered commented out.
def test_option_array_type_array():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
value:
type: array
example:
- - 0
- 1
- - 2
- 3
items:
type: array
items:
type: integer
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param foo - list of lists - optional
## words
#
# foo:
# - - 0
# - 1
# - - 2
# - 3
"""
)
# An array of objects is documented as `list of mappings`.
def test_option_array_type_object():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
value:
type: array
example:
- bar: it
items:
type: object
properties:
- name: bar
type: string
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param foo - list of mappings - optional
## words
#
# foo:
# - bar: it
"""
)
# A boolean example is surfaced as the documented default value.
def test_option_boolean_type():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
value:
type: boolean
example: true
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param foo - boolean - optional - default: true
## words
#
# foo: true
"""
)
# A number example is surfaced as the documented default value.
def test_option_number_type():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
value:
type: number
example: 5
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param foo - number - optional - default: 5
## words
#
# foo: 5
"""
)
# With no example, a number option renders an uppercase placeholder and
# no default is documented.
def test_option_number_type_default():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
value:
type: number
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param foo - number - optional
## words
#
# foo: <FOO>
"""
)
# A string example is surfaced both as the documented default and as the
# rendered example value.
def test_option_string_type_not_default():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
value:
type: string
example: something
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param foo - string - optional - default: something
## words
#
# foo: something
"""
)
# `display_default: null` suppresses the documented default while keeping
# the example value.
def test_option_string_type_not_default_example_default_value_none():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
value:
type: string
example: something
display_default: null
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param foo - string - optional
## words
#
# foo: something
"""
)
# NOTE(review): this fixture is identical to
# test_option_string_type_not_default_example_default_value_none above;
# presumably one variant was meant to use a different null spelling — confirm.
def test_option_string_type_not_default_example_default_value_null():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
value:
type: string
example: something
display_default: null
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param foo - string - optional
## words
#
# foo: something
"""
)
# A section description exceeding DESCRIPTION_LINE_LENGTH_LIMIT produces an
# error naming the section and the overflow size.
def test_section_description_length_limit():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: {}
options:
- name: bar
description: words
value:
type: string
""".format(
'a' * DESCRIPTION_LINE_LENGTH_LIMIT
)
)
files = consumer.render()
_, errors = files['test.yaml.example']
assert 'Description line length of section `foo` was over the limit by 3 characters' in errors
# An option description exceeding DESCRIPTION_LINE_LENGTH_LIMIT produces an
# error naming the option and the overflow size.
def test_option_description_length_limit():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: {}
value:
type: string
""".format(
'a' * DESCRIPTION_LINE_LENGTH_LIMIT
)
)
files = consumer.render()
_, errors = files['test.yaml.example']
assert 'Description line length of option `foo` was over the limit by 3 characters' in errors
# A trailing `/noqa` marker suppresses the description-length error and is
# stripped from the rendered description.
def test_option_description_length_limit_with_noqa():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: {}
value:
type: string
example: something
""".format(
'a' * DESCRIPTION_LINE_LENGTH_LIMIT + ' /noqa'
)
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param foo - string - optional - default: something
## {}
#
# foo: something
""".format(
'a' * DESCRIPTION_LINE_LENGTH_LIMIT
)
)
# Deprecation metadata is rendered as a `<<< DEPRECATED >>>` banner with
# each key/value pair, preserving multi-line values' alignment.
@pytest.mark.skipif(PY2, reason='Dictionary key order is not guaranteed in Python 2')
def test_deprecation():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: init_config
options:
- name: foo
description: foo words
deprecation:
Agent version: 8.0.0
Migration: |
do this
and that
value:
type: string
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## All options defined here are available to all instances.
#
init_config:
## @param foo - string - optional
## foo words
##
## <<< DEPRECATED >>>
##
## Agent version: 8.0.0
## Migration: do this
## and that
#
# foo: <FOO>
"""
)
# The `instances/global` template expands to the shared per-instance
# options (min_collection_interval, empty_default_hostname).
def test_template():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: instances
options:
- name: foo
description: words
required: true
value:
type: string
- template: instances/global
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## Every instance is scheduled independently of the others.
#
instances:
## @param foo - string - required
## words
#
- foo: <FOO>
## @param min_collection_interval - number - optional - default: 15
## This changes the collection interval of the check. For more information, see:
## https://docs.datadoghq.com/developers/write_agent_check/#collection-interval
#
# min_collection_interval: 15
## @param empty_default_hostname - boolean - optional - default: false
## This forces the check to send metrics with no hostname.
##
## This is useful for cluster-level checks.
#
# empty_default_hostname: false
"""
)
# `instances/default` expands recursively into the full default option set
# (tags, service, global options, metric_patterns).
def test_template_recursion():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: instances
options:
- name: foo
description: words
required: true
value:
type: string
- template: instances/default
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## Every instance is scheduled independently of the others.
#
instances:
## @param foo - string - required
## words
#
- foo: <FOO>
## @param tags - list of strings - optional
## A list of tags to attach to every metric and service check emitted by this instance.
##
## Learn more about tagging at https://docs.datadoghq.com/tagging
#
# tags:
# - <KEY_1>:<VALUE_1>
# - <KEY_2>:<VALUE_2>
## @param service - string - optional
## Attach the tag `service:<SERVICE>` to every metric, event, and service check emitted by this integration.
##
## Overrides any `service` defined in the `init_config` section.
#
# service: <SERVICE>
## @param min_collection_interval - number - optional - default: 15
## This changes the collection interval of the check. For more information, see:
## https://docs.datadoghq.com/developers/write_agent_check/#collection-interval
#
# min_collection_interval: 15
## @param empty_default_hostname - boolean - optional - default: false
## This forces the check to send metrics with no hostname.
##
## This is useful for cluster-level checks.
#
# empty_default_hostname: false
## @param metric_patterns - mapping - optional
## A mapping of metrics to include or exclude, with each entry being a regular expression.
##
## Metrics defined in `exclude` will take precedence in case of overlap.
#
# metric_patterns:
# include:
# - <INCLUDE_REGEX>
# exclude:
# - <EXCLUDE_REGEX>
"""
)
# Empty option lists render bare section headers, with `- {}` as the
# placeholder instance.
def test_no_options():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: init_config
options: []
- template: instances
options: []
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## All options defined here are available to all instances.
#
init_config:
## Every instance is scheduled independently of the others.
#
instances:
- {}
"""
)
# `compact_example: true` renders list items in flow style (inline lists
# and mappings, quoted strings) instead of block style.
def test_compact_example():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
value:
type: array
compact_example: true
example:
- - 0
- 1
- foo
- foo: bar
bar: baz
- - 2
- 3
items:
type: array
items:
type: integer
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param foo - list of lists - optional
## words
#
# foo:
# - [0, 1]
# - "foo"
# - {foo: bar, bar: baz}
# - [2, 3]
"""
)
# Compact rendering keeps a long flow-style list on a single line rather
# than wrapping it.
def test_compact_example_long_line():
long_str = "This string is very long and has 50 chars in it !!"
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
value:
type: array
compact_example: true
example:
- - {0}
- {0}
- {0}
- {0}
items:
type: array
items:
type: string
""".format(
long_str
)
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param foo - list of lists - optional
## words
#
# foo:
# - [{0}, {0}, {0}, {0}]
""".format(
long_str
)
)
# Compact rendering also applies when the option is nested inside the
# `instances` section.
def test_compact_example_nested():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: instances
options:
- name: foo
description: words
value:
type: array
compact_example: true
example:
- - 0
- 1
- foo
- foo: bar
bar: baz
- - 2
- 3
items:
type: array
items:
type: integer
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## Every instance is scheduled independently of the others.
#
instances:
-
## @param foo - list of lists - optional
## words
#
# foo:
# - [0, 1]
# - "foo"
# - {foo: bar, bar: baz}
# - [2, 3]
"""
)
# NOTE(review): same fixture/expectation as the earlier
# test_option_string_type_not_default_example_default_value_* tests — the
# suite exercises `display_default: null` three times; consider deduplicating.
def test_option_default_example_override_null():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
value:
type: string
example: something
display_default: null
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param foo - string - optional
## words
#
# foo: something
"""
)
# A string `display_default` replaces the example as the documented default.
def test_option_default_example_override_string():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
value:
type: string
example: something
display_default: bar
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param foo - string - optional - default: bar
## words
#
# foo: something
"""
)
# A non-string `display_default` is rendered via its Python repr.
def test_option_default_example_override_non_string():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
value:
type: string
example: something
display_default:
foo: [bar, baz]
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## @param foo - string - optional - default: {'foo': ['bar', 'baz']}
## words
#
# foo: something
"""
)
# `enabled` controls commenting independently of `required`: an enabled
# optional renders uncommented, a disabled required renders commented.
def test_enabled_override_required():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: init_config
options:
- name: foo
description: foo words
required: false
enabled: true
value:
type: string
- template: instances
options:
- name: bar
description: bar words
required: true
enabled: false
value:
type: string
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## All options defined here are available to all instances.
#
init_config:
## @param foo - string - optional
## foo words
#
foo: <FOO>
## Every instance is scheduled independently of the others.
#
instances:
-
## @param bar - string - required
## bar words
#
# bar: <BAR>
"""
)
# `anyOf` value types are documented joined with "or".
def test_option_multiple_types():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: instances
options:
- name: foo
description: words
value:
anyOf:
- type: string
- type: array
items:
type: string
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## Every instance is scheduled independently of the others.
#
instances:
-
## @param foo - string or list of strings - optional
## words
#
# foo: <FOO>
"""
)
# Nested `anyOf` inside array items is documented with parentheses around
# the inner alternatives.
def test_option_multiple_types_nested():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: instances
options:
- name: foo
description: words
value:
anyOf:
- type: string
- type: array
items:
anyOf:
- type: string
- type: object
properties:
- name: foo
type: string
required:
- foo
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## Every instance is scheduled independently of the others.
#
instances:
-
## @param foo - string or (list of string or mapping) - optional
## words
#
# foo: <FOO>
"""
)
# With `multiple_instances_defined: true`, each named instance renders its
# own description block (multi-line descriptions preserved) above its dash.
def test_option_multiple_instances_defined():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: instances
multiple_instances_defined: true
options:
- name: instance_1
description: Description of the first instance
options:
- name: foo
description: words
value:
type: string
- name: instance_2
description: |
Description of the second instance
Multiple lines
options:
- name: bar
description: description
value:
type: string
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## Every instance is scheduled independently of the others.
#
instances:
## Description of the first instance
-
## @param foo - string - optional
## words
#
# foo: <FOO>
## Description of the second instance
## Multiple lines
-
## @param bar - string - optional
## description
#
# bar: <BAR>
"""
)
# A disabled parent option renders itself and all sub-options commented out.
def test_parent_option_disabled():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: instances
options:
- name: enabled_option
required: true
description: Description of enabled option
value:
type: boolean
example: true
- name: parent_option
description: Description of parent option
options:
- name: sub_option_1
description: words
value:
type: boolean
example: true
- name: sub_option_2
description: words
value:
type: string
example: foo.bar_none
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## Every instance is scheduled independently of the others.
#
instances:
## @param enabled_option - boolean - required
## Description of enabled option
#
- enabled_option: true
## Description of parent option
#
# parent_option:
## @param sub_option_1 - boolean - optional - default: true
## words
#
# sub_option_1: true
## @param sub_option_2 - string - optional - default: foo.bar_none
## words
#
# sub_option_2: foo.bar_none
"""
)
# An enabled parent renders uncommented; each sub-option's own `enabled`
# flag then decides whether it is commented.
def test_parent_option_enabled():
consumer = get_example_consumer(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- template: instances
options:
- name: enabled_option
required: true
description: Description of enabled option
value:
type: boolean
example: true
- name: parent_option
enabled: true
description: Description of parent option
options:
- name: enabled_sub_option
enabled: true
description: words
value:
type: boolean
example: true
- name: disabled_sub_option
description: words
value:
type: string
example: foo.bar_none
"""
)
files = consumer.render()
contents, errors = files['test.yaml.example']
assert not errors
assert contents == normalize_yaml(
"""
## Every instance is scheduled independently of the others.
#
instances:
## @param enabled_option - boolean - required
## Description of enabled option
#
- enabled_option: true
## Description of parent option
#
parent_option:
## @param enabled_sub_option - boolean - optional - default: true
## words
#
enabled_sub_option: true
## @param disabled_sub_option - string - optional - default: foo.bar_none
## words
#
# disabled_sub_option: foo.bar_none
"""
)
| {
"content_hash": "325b5f398d697ae6d9ff96f9af2529c7",
"timestamp": "",
"source": "github",
"line_count": 1711,
"max_line_length": 120,
"avg_line_length": 25.26300409117475,
"alnum_prop": 0.465748987854251,
"repo_name": "DataDog/integrations-core",
"id": "dc39461eb83ad2670003fb661b542dac544a700c",
"size": "43340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datadog_checks_dev/tests/tooling/configuration/consumers/test_example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
"""
sockless
========
A friendlier interface to `socket`.
``sockless`` emulates file-like objects, allowing you to call familiar methods
& iterate over the lines. It also includes a non-blocking variant
(``NonBlockingSocket``) built on ``select``.
Usage::
import sockless
with sockless.open('irc.freenode.net:6665', mode='rw') as sock:
# Writing.
sock.write('NICK atestbot\r\n')
sock.write('USER atestbot bot@aserver.com unused :atestbot\r\n')
sock.write('JOIN #testbot\r\n')
# Reading lines from a socket.
for line in sock:
if not line:
break
if 'End of /NAMES list' in line:
print "Successfully connected & joined. Quitting."
break
print line.strip()
"""
import contextlib
import select
import socket
__author__ = 'Daniel Lindsley'
__license__ = 'BSD'
__version__ = (0, 9, 1)
DEFAULT_TIMEOUT = 60
DEFAULT_MAX_CONNS = 5
class SocklessException(Exception): pass
class TimedOut(SocklessException): pass
class AddressNotFound(SocklessException): pass
class BrokenConnection(SocklessException): pass
class NotConnected(SocklessException): pass
def split_address(address):
    """Split a ``'host:port'`` string into a ``(host, port)`` tuple.

    The port is returned as an ``int``. Splitting happens on the *last*
    colon, so hosts that themselves contain colons (e.g. ``'[::1]:80'``)
    are handled; the original ``split(':')`` raised ``ValueError`` there.
    """
    host, port = address.rsplit(':', 1)
    return host, int(port)
class Socket(object):
    """A blocking TCP connection with a file-like interface.

    ``address`` is a ``'host:port'`` string; ``timeout`` is in seconds.
    Call :meth:`open` before using the read/write methods, and
    :meth:`close` when done (or use the module-level ``open`` context
    manager).
    """

    def __init__(self, address, timeout=DEFAULT_TIMEOUT):
        self.address = address
        self.timeout = timeout
        # ``closed`` tracks connection state; updated by open()/close().
        # (Previously it was initialized but never maintained.)
        self.closed = True
        self._conn = None
        self._conn_file = None
        self._readable = True
        self._writable = False

    def split_address(self, address):
        """Return ``(host, port)`` parsed from an ``'host:port'`` string."""
        return split_address(address)

    def open(self, mode='rw'):
        """Connect to ``self.address``.

        ``mode`` is ``'r'``, ``'w'`` or ``'rw'`` and controls both the
        readable/writable flags and the mode of the underlying file object.

        Raises:
            AddressNotFound: if the host can not be resolved.
            TimedOut: if the connection attempt times out.
        """
        host, port = self.split_address(self.address)

        if mode == 'r':
            self._set_readable()
        elif mode == 'w':
            self._set_writable()
        elif mode == 'rw':
            self._set_read_write()

        try:
            self._conn = socket.create_connection(
                (host, port),
                timeout=self.timeout
            )
        except socket.gaierror:
            # Fixed message: previously read "Could connect to".
            raise AddressNotFound("Could not connect to {}:{}".format(
                host,
                port
            ))
        except socket.timeout:
            raise TimedOut("Connection to {}:{} timed out".format(
                host,
                port
            ))

        self._conn_file = self._conn.makefile(mode)
        self.closed = False

    def _set_readable(self):
        self._readable = True
        self._writable = False

    def _set_writable(self):
        self._readable = False
        self._writable = True

    def _set_read_write(self):
        self._readable = True
        self._writable = True

    def _send(self, data):
        # Flush immediately so callers see write-then-read semantics.
        self._conn_file.write(data)
        self._conn_file.flush()

    # File-like methods

    def _check_conn(self):
        """Raise ``NotConnected`` unless :meth:`open` has been called."""
        if not self._conn or not self._conn_file:
            raise NotConnected("Not connected to {}".format(
                self.address
            ))

    def close(self):
        self._check_conn()
        # Need to close both to be doing the right thing.
        self._conn_file.close()
        self._conn.close()
        self.closed = True

    def readable(self):
        return self._readable

    def writable(self):
        return self._writable

    def read(self, size=-1):
        """Read ``size`` bytes, or everything when ``size`` is negative."""
        if size <= -1:
            return self.readall()

        self._check_conn()
        return self._conn_file.read(size)

    def readall(self):
        self._check_conn()
        return self._conn_file.read()

    def readline(self):
        self._check_conn()
        return self._conn_file.readline()

    def readlines(self):
        self._check_conn()
        return self._conn_file.readlines()

    def write(self, data):
        return self._send(data)

    def __iter__(self):
        return self

    def __next__(self):
        # NOTE(review): returns '' at EOF instead of raising StopIteration,
        # so iteration never terminates on its own — callers must break on
        # empty lines (see the module docstring example).
        return self.readline()

    def next(self):
        # Python 2 iterator protocol shim.
        return self.__next__()

    # Socket-specific methods

    @property
    def hostname(self):
        return socket.gethostname()

    @property
    def fully_qualified_domain_name(self):
        return socket.getfqdn()

    fqdn = fully_qualified_domain_name

    @property
    def remote_ip(self):
        self._check_conn()
        return self._conn.getpeername()[0]

    @property
    def remote_port(self):
        self._check_conn()
        return self._conn.getpeername()[1]

    @property
    def local_ip(self):
        self._check_conn()
        return self._conn.getsockname()[0]

    @property
    def local_port(self):
        self._check_conn()
        return self._conn.getsockname()[1]

    def resolve_dns(self, address=None):
        """Return the ``(ip, port)`` pairs DNS resolves ``address`` to.

        Defaults to ``self.address`` when no address is given.
        """
        if address is None:
            address = self.address

        host, port = self.split_address(address)
        bits = socket.getaddrinfo(host, port)
        return [bit[4] for bit in bits]
class NonBlockingSocket(object):
    """A non-blocking TCP connection driven by ``select``.

    Reads accumulate into an internal line buffer; ``read``/``readline``/
    ``readlines`` only consume data when ``select`` reports the socket
    readable, so they never block.
    """

    def __init__(self, address):
        self.address = address
        self._conn = None
        # NOTE(review): the buffer is a ``str`` — on Python 3 ``recv``
        # returns ``bytes``, so this class looks Python-2-only; confirm.
        self._buffer = ''
        self._readable = False
        self._writable = False

    def split_address(self, address):
        """Return ``(host, port)`` parsed from an ``'host:port'`` string."""
        return split_address(address)

    def _check_conn(self):
        """Raise ``NotConnected`` unless :meth:`open` has been called."""
        if not self._conn:
            raise NotConnected("Not connected to {}".format(
                self.address
            ))

    def open(self, mode='rw'):
        """Connect to ``self.address`` and switch the socket to non-blocking.

        ``mode`` ('r', 'w' or 'rw') sets which directions ``select`` will
        be polled for.

        Raises:
            AddressNotFound: if the host can not be resolved.
            TimedOut: if the connection attempt times out.
        """
        host, port = self.split_address(self.address)

        if mode == 'rw':
            self._readable = True
            self._writable = True
        elif mode == 'r':
            self._readable = True
        elif mode == 'w':
            self._writable = True

        try:
            self._conn = socket.create_connection((host, port))
            self._conn.setblocking(0)
        except socket.gaierror:
            # Fixed message: previously read "Could connect to".
            raise AddressNotFound("Could not connect to {}:{}".format(
                host,
                port
            ))
        except socket.timeout:
            raise TimedOut("Connection to {}:{} timed out".format(
                host,
                port
            ))

    def close(self):
        self._check_conn()
        self._conn.close()

    def select(self):
        """Return ``select.select``'s ``(rlist, wlist, xlist)`` for this socket."""
        self._check_conn()
        rlist = []
        wlist = []

        if self._readable:
            rlist.append(self._conn)

        if self._writable:
            wlist.append(self._conn)

        return select.select(rlist, wlist, [])

    def readable(self):
        """True if a read would find data right now."""
        rlist, wlist, xlist = self.select()
        return len(rlist) > 0

    def writable(self):
        """True if a write would be accepted right now."""
        rlist, wlist, xlist = self.select()
        return len(wlist) > 0

    def read(self, size=4096):
        """Drain all currently-available data into the internal buffer.

        Returns the number of bytes read (0 when nothing was pending).
        """
        rlist, wlist, xlist = self.select()
        amount_read = 0

        if not rlist:
            return amount_read

        rsock = rlist[0]

        while rsock:
            received = rsock.recv(size)
            # Unfortunately, we can't detect broken connections here, since
            # this method doesn't block until the send *actually* happens.
            # Lots of false positivites trying to detect zero conditions.
            amount_read += len(received)
            self._buffer += received
            rlist, wlist, xlist = self.select()

            if rlist:
                rsock = rlist[0]
            else:
                rsock = None

        return amount_read

    def readline(self):
        """Return the next buffered line, or ``None`` when none is complete."""
        lines = self.readlines(limit=1)

        if not lines:
            return None

        return lines[0]

    def readlines(self, limit=-1):
        """Return up to ``limit`` complete lines (all of them when negative).

        Incomplete trailing data stays in the buffer for the next call.
        """
        if limit == 0:
            return []

        # Pull everything the socket currently has before splitting.
        while True:
            rsize = self.read()

            if not rsize:
                break

        if not self._buffer:
            return []

        if limit >= 0:
            if '\n' not in self._buffer:
                # NOTE(review): returns None here but [] above — callers
                # (readline) treat both as falsy, so behavior is preserved.
                return None

            bits = self._buffer.split('\n', 1)

            if len(bits) < 2:
                self._buffer = ''
            else:
                self._buffer = bits[1]

            return [bits[0]]

        lines = self._buffer.split('\n')

        if not self._buffer.endswith('\n'):
            # Keep the partial last line buffered.
            self._buffer = lines.pop()
        else:
            self._buffer = ''

        return lines

    def write(self, data):
        """Send ``data`` if the socket is writable; return ``False`` otherwise."""
        rlist, wlist, xlist = self.select()

        if not wlist:
            # FIXME: Not sure this should just fail. Should I buffer instead?
            return False

        wsock = wlist[0]
        amount_sent = wsock.sendall(data)
        return amount_sent

    def __iter__(self):
        return self

    def __next__(self):
        return self.readline()
@contextlib.contextmanager
def open(address, timeout=DEFAULT_TIMEOUT, mode='rw'):
    """Context manager yielding a connected ``Socket`` for ``address``.

    The socket is opened with the given ``mode`` and is always closed when
    the ``with`` block exits, even on error.
    """
    conn = Socket(address, timeout=timeout)
    conn.open(mode)
    try:
        yield conn
    finally:
        conn.close()
| {
"content_hash": "5df48ddcb5d6daad8d055a4d904ea881",
"timestamp": "",
"source": "github",
"line_count": 375,
"max_line_length": 78,
"avg_line_length": 23.266666666666666,
"alnum_prop": 0.5365042979942694,
"repo_name": "toastdriven/sockless",
"id": "78274cf6c8989b57e1a06c3080a8fb4bdfad934f",
"size": "8725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sockless.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10096"
}
],
"symlink_target": ""
} |
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Build the JSON-RPC proxy to the local daemon; credentials are embedded in
# the URL only when a password has been configured above.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:2627")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:2627")
# The first command-line argument selects which RPC command to run.
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| {
"content_hash": "c0fdb619fbea29fd914add096f3a9541",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 101,
"avg_line_length": 28.673590504451038,
"alnum_prop": 0.5684570009313877,
"repo_name": "zeurocoin-dev/zeurocoin",
"id": "7bf8016e58e03f9f7b317eadda93446a277336c9",
"size": "9663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/bitrpc/bitrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "1108288"
},
{
"name": "C++",
"bytes": "5511626"
},
{
"name": "CSS",
"bytes": "122224"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "140730"
},
{
"name": "Makefile",
"bytes": "220015"
},
{
"name": "Objective-C",
"bytes": "7279"
},
{
"name": "Objective-C++",
"bytes": "7236"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "713547"
},
{
"name": "QMake",
"bytes": "27026"
},
{
"name": "Roff",
"bytes": "21607"
},
{
"name": "Shell",
"bytes": "433891"
}
],
"symlink_target": ""
} |
"""Parser for NWChem output files"""
import itertools
import re
import numpy
from . import logfileparser
from . import utils
class NWChem(logfileparser.Logfile):
"""An NWChem log file."""
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(NWChem, self).__init__(logname="NWChem", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "NWChem log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'NWChem("%s")' % (self.filename)
def normalisesym(self, label):
"""Use standard symmetry labels instead of NWChem labels.
To normalise:
(1) If label is one of [SG, PI, PHI, DLTA], replace by [sigma, pi, phi, delta]
(2) replace any G or U by their lowercase equivalent
>>> sym = NWChem("dummyfile").normalisesym
>>> labels = ['A1', 'AG', 'A1G', "SG", "PI", "PHI", "DLTA", 'DLTU', 'SGG']
>>> map(sym, labels)
['A1', 'Ag', 'A1g', 'sigma', 'pi', 'phi', 'delta', 'delta.u', 'sigma.g']
"""
# FIXME if necessary
return label
name2element = lambda self, lbl: "".join(itertools.takewhile(str.isalpha, str(lbl)))
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
#extract the version number first
if "Northwest Computational" in line:
self.metadata["package_version"] = line.split()[5]
# This is printed in the input module, so should always be the first coordinates,
# and contains some basic information we want to parse as well. However, this is not
# the only place where the coordinates are printed during geometry optimization,
# since the gradients module has a separate coordinate printout, which happens
# alongside the coordinate gradients. This geometry printout happens at the
# beginning of each optimization step only.
if line.strip() == 'Geometry "geometry" -> ""' or line.strip() == 'Geometry "geometry" -> "geometry"':
self.skip_lines(inputfile, ['dashes', 'blank', 'units', 'blank', 'header', 'dashes'])
if not hasattr(self, 'atomcoords'):
self.atomcoords = []
line = next(inputfile)
coords = []
atomnos = []
while line.strip():
# The column labeled 'tag' is usually empty, but I'm not sure whether it can have spaces,
# so for now assume that it can and that there will be seven columns in that case.
if len(line.split()) == 6:
index, atomname, nuclear, x, y, z = line.split()
else:
index, atomname, tag, nuclear, x, y, z = line.split()
coords.append(list(map(float, [x, y, z])))
atomnos.append(int(float(nuclear)))
line = next(inputfile)
self.atomcoords.append(coords)
self.set_attribute('atomnos', atomnos)
# If the geometry is printed in XYZ format, it will have the number of atoms.
if line[12:31] == "XYZ format geometry":
self.skip_line(inputfile, 'dashes')
natom = int(next(inputfile).strip())
self.set_attribute('natom', natom)
if line.strip() == "NWChem Geometry Optimization":
self.skip_lines(inputfile, ['d', 'b', 'b', 'b', 'b', 'title', 'b', 'b'])
line = next(inputfile)
while line.strip():
if "maximum gradient threshold" in line:
gmax = float(line.split()[-1])
if "rms gradient threshold" in line:
grms = float(line.split()[-1])
if "maximum cartesian step threshold" in line:
xmax = float(line.split()[-1])
if "rms cartesian step threshold" in line:
xrms = float(line.split()[-1])
line = next(inputfile)
self.set_attribute('geotargets', [gmax, grms, xmax, xrms])
# NWChem does not normally print the basis set for each atom, but rather
# chooses the concise option of printing Gaussian coefficients for each
# atom type/element only once. Therefore, we need to first parse those
# coefficients and afterwards build the appropriate gbasis attribute based
# on that and atom types/elements already parsed (atomnos). However, if atom
# are given different names (number after element, like H1 and H2), then NWChem
# generally prints the gaussian parameters for all unique names, like this:
#
# Basis "ao basis" -> "ao basis" (cartesian)
# -----
# O (Oxygen)
# ----------
# Exponent Coefficients
# -------------- ---------------------------------------------------------
# 1 S 1.30709320E+02 0.154329
# 1 S 2.38088610E+01 0.535328
# (...)
#
# H1 (Hydrogen)
# -------------
# Exponent Coefficients
# -------------- ---------------------------------------------------------
# 1 S 3.42525091E+00 0.154329
# (...)
#
# H2 (Hydrogen)
# -------------
# Exponent Coefficients
# -------------- ---------------------------------------------------------
# 1 S 3.42525091E+00 0.154329
# (...)
#
# This current parsing code below assumes all atoms of the same element
# use the same basis set, but that might not be true, and this will probably
# need to be considered in the future when such a logfile appears.
if line.strip() == """Basis "ao basis" -> "ao basis" (cartesian)""":
self.skip_line(inputfile, 'dashes')
gbasis_dict = {}
line = next(inputfile)
while line.strip():
atomname = line.split()[0]
atomelement = self.name2element(atomname)
gbasis_dict[atomelement] = []
self.skip_lines(inputfile, ['d', 'labels', 'd'])
shells = []
line = next(inputfile)
while line.strip() and line.split()[0].isdigit():
shell = None
while line.strip():
nshell, type, exp, coeff = line.split()
nshell = int(nshell)
assert len(shells) == nshell - 1
if not shell:
shell = (type, [])
else:
assert shell[0] == type
exp = float(exp)
coeff = float(coeff)
shell[1].append((exp, coeff))
line = next(inputfile)
shells.append(shell)
line = next(inputfile)
gbasis_dict[atomelement].extend(shells)
gbasis = []
for i in range(self.natom):
atomtype = self.table.element[self.atomnos[i]]
gbasis.append(gbasis_dict[atomtype])
self.set_attribute('gbasis', gbasis)
# Normally the indexes of AOs assigned to specific atoms are also not printed,
# so we need to infer that. We could do that from the previous section,
# it might be worthwhile to take numbers from two different places, hence
# the code below, which builds atombasis based on the number of functions
# listed in this summary of the AO basis. Similar to previous section, here
# we assume all atoms of the same element have the same basis sets, but
# this will probably need to be revised later.
# The section we can glean info about aonmaes looks like:
#
# Summary of "ao basis" -> "ao basis" (cartesian)
# ------------------------------------------------------------------------------
# Tag Description Shells Functions and Types
# ---------------- ------------------------------ ------ ---------------------
# C sto-3g 3 5 2s1p
# H sto-3g 1 1 1s
#
# However, we need to make sure not to match the following entry lines:
#
# * Summary of "ao basis" -> "" (cartesian)
# * Summary of allocated global arrays
#
# Unfortantely, "ao basis" isn't unique because it can be renamed to anything for
# later reference: http://www.nwchem-sw.org/index.php/Basis
# It also appears that we have to handle cartesian vs. spherical
if line[1:11] == "Summary of":
match = re.match(' Summary of "([^\"]*)" -> "([^\"]*)" \((.+)\)', line)
if match and match.group(1) == match.group(2):
self.skip_lines(inputfile, ['d', 'title', 'd'])
self.shells = {}
self.shells["type"] = match.group(3)
atombasis_dict = {}
line = next(inputfile)
while line.strip():
atomname, desc, shells, funcs, types = line.split()
atomelement = self.name2element(atomname)
self.metadata["basis_set"] = desc
self.shells[atomname] = types
atombasis_dict[atomelement] = int(funcs)
line = next(inputfile)
last = 0
atombasis = []
for atom in self.atomnos:
atomelement = self.table.element[atom]
nfuncs = atombasis_dict[atomelement]
atombasis.append(list(range(last, last+nfuncs)))
last = atombasis[-1][-1] + 1
self.set_attribute('atombasis', atombasis)
# This section contains general parameters for Hartree-Fock calculations,
# which do not contain the 'General Information' section like most jobs.
if line.strip() == "NWChem SCF Module":
# If the calculation doesn't have a title specified, there
# aren't as many lines to skip here.
self.skip_lines(inputfile, ['d', 'b', 'b'])
line = next(inputfile)
if line.strip():
self.skip_lines(inputfile, ['b', 'b', 'b'])
line = next(inputfile)
while line.strip():
if line[2:8] == "charge":
charge = int(float(line.split()[-1]))
self.set_attribute('charge', charge)
if line[2:13] == "open shells":
unpaired = int(line.split()[-1])
self.set_attribute('mult', 2*unpaired + 1)
if line[2:7] == "atoms":
natom = int(line.split()[-1])
self.set_attribute('natom', natom)
if line[2:11] == "functions":
nfuncs = int(line.split()[-1])
self.set_attribute("nbasis", nfuncs)
line = next(inputfile)
# This section contains general parameters for DFT calculations, as well as
# for the many-electron theory module.
if line.strip() == "General Information":
if hasattr(self, 'linesearch') and self.linesearch:
return
while line.strip():
if "No. of atoms" in line:
self.set_attribute('natom', int(line.split()[-1]))
if "Charge" in line:
self.set_attribute('charge', int(line.split()[-1]))
if "Spin multiplicity" in line:
mult = line.split()[-1]
if mult == "singlet":
mult = 1
self.set_attribute('mult', int(mult))
if "AO basis - number of function" in line:
nfuncs = int(line.split()[-1])
self.set_attribute('nbasis', nfuncs)
# These will be present only in the DFT module.
if "Convergence on energy requested" in line:
target_energy = self.float(line.split()[-1])
if "Convergence on density requested" in line:
target_density = self.float(line.split()[-1])
if "Convergence on gradient requested" in line:
target_gradient = self.float(line.split()[-1])
line = next(inputfile)
# Pretty nasty temporary hack to set scftargets only in the SCF module.
if "target_energy" in dir() and "target_density" in dir() and "target_gradient" in dir():
if not hasattr(self, 'scftargets'):
self.scftargets = []
self.scftargets.append([target_energy, target_density, target_gradient])
#DFT functional information
if "XC Information" in line:
line = next(inputfile)
line = next(inputfile)
self.metadata["functional"] = line.split()[0]
# If the full overlap matrix is printed, it looks like this:
#
# global array: Temp Over[1:60,1:60], handle: -996
#
# 1 2 3 4 5 6
# ----------- ----------- ----------- ----------- ----------- -----------
# 1 1.00000 0.24836 -0.00000 -0.00000 0.00000 0.00000
# 2 0.24836 1.00000 0.00000 -0.00000 0.00000 0.00030
# 3 -0.00000 0.00000 1.00000 0.00000 0.00000 -0.00014
# ...
if "global array: Temp Over[" in line:
self.set_attribute('nbasis', int(line.split('[')[1].split(',')[0].split(':')[1]))
self.set_attribute('nmo', int(line.split(']')[0].split(',')[1].split(':')[1]))
aooverlaps = []
while len(aooverlaps) < self.nbasis:
self.skip_line(inputfile, 'blank')
indices = [int(i) for i in inputfile.next().split()]
assert indices[0] == len(aooverlaps) + 1
self.skip_line(inputfile, "dashes")
data = [inputfile.next().split() for i in range(self.nbasis)]
indices = [int(d[0]) for d in data]
assert indices == list(range(1, self.nbasis+1))
for i in range(1, len(data[0])):
vector = [float(d[i]) for d in data]
aooverlaps.append(vector)
self.set_attribute('aooverlaps', aooverlaps)
if line.strip() in ("The SCF is already converged", "The DFT is already converged"):
if self.linesearch:
return
self.scftargets.append(self.scftargets[-1])
self.scfvalues.append(self.scfvalues[-1])
# The default (only?) SCF algorithm for Hartree-Fock is a preconditioned conjugate
# gradient method that apparently "always" converges, so this header should reliably
# signal a start of the SCF cycle. The convergence targets are also printed here.
if line.strip() == "Quadratically convergent ROHF":
if hasattr(self, 'linesearch') and self.linesearch:
return
while not "Final" in line:
# Only the norm of the orbital gradient is used to test convergence.
if line[:22] == " Convergence threshold":
target = float(line.split()[-1])
if not hasattr(self, "scftargets"):
self.scftargets = []
self.scftargets.append([target])
# This is critical for the stop condition of the section,
# because the 'Final Fock-matrix accuracy' is along the way.
# It would be prudent to find a more robust stop condition.
while list(set(line.strip())) != ["-"]:
line = next(inputfile)
if line.split() == ['iter', 'energy', 'gnorm', 'gmax', 'time']:
values = []
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
while line.strip():
it, energy, gnorm, gmax, time = line.split()
gnorm = self.float(gnorm)
values.append([gnorm])
try:
line = next(inputfile)
# Is this the end of the file for some reason?
except StopIteration:
self.logger.warning('File terminated before end of last SCF! Last gradient norm: {}'.format(gnorm))
break
if not hasattr(self, 'scfvalues'):
self.scfvalues = []
self.scfvalues.append(values)
try:
line = next(inputfile)
except StopIteration:
self.logger.warning('File terminated?')
break
# The SCF for DFT does not use the same algorithm as Hartree-Fock, but always
# seems to use the following format to report SCF convergence:
# convergence iter energy DeltaE RMS-Dens Diis-err time
# ---------------- ----- ----------------- --------- --------- --------- ------
# d= 0,ls=0.0,diis 1 -382.2544324446 -8.28D+02 1.42D-02 3.78D-01 23.2
# d= 0,ls=0.0,diis 2 -382.3017298534 -4.73D-02 6.99D-03 3.82D-02 39.3
# d= 0,ls=0.0,diis 3 -382.2954343173 6.30D-03 4.21D-03 7.95D-02 55.3
# ...
if line.split() == ['convergence', 'iter', 'energy', 'DeltaE', 'RMS-Dens', 'Diis-err', 'time']:
if hasattr(self, 'linesearch') and self.linesearch:
return
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
values = []
while line.strip():
# Sometimes there are things in between iterations with fewer columns,
# and we want to skip those lines, most probably. An exception might
# unrestricted calcualtions, which show extra RMS density and DIIS
# errors, although it is not clear yet whether these are for the
# beta orbitals or somethine else. The iterations look like this in that case:
# convergence iter energy DeltaE RMS-Dens Diis-err time
# ---------------- ----- ----------------- --------- --------- --------- ------
# d= 0,ls=0.0,diis 1 -382.0243202601 -8.28D+02 7.77D-03 1.04D-01 30.0
# 7.68D-03 1.02D-01
# d= 0,ls=0.0,diis 2 -382.0647539758 -4.04D-02 4.64D-03 1.95D-02 59.2
# 5.39D-03 2.36D-02
# ...
if len(line[17:].split()) == 6:
iter, energy, deltaE, dens, diis, time = line[17:].split()
val_energy = self.float(deltaE)
val_density = self.float(dens)
val_gradient = self.float(diis)
values.append([val_energy, val_density, val_gradient])
try:
line = next(inputfile)
# Is this the end of the file for some reason?
except StopIteration:
self.logger.warning('File terminated before end of last SCF! Last error: {}'.format(diis))
break
if not hasattr(self, 'scfvalues'):
self.scfvalues = []
self.scfvalues.append(values)
# These triggers are supposed to catch the current step in a geometry optimization search
# and determine whether we are currently in the main (initial) SCF cycle of that step
# or in the subsequent line search. The step is printed between dashes like this:
#
# --------
# Step 0
# --------
#
# and the summary lines that describe the main SCF cycle for the frsit step look like this:
#
#@ Step Energy Delta E Gmax Grms Xrms Xmax Walltime
#@ ---- ---------------- -------- -------- -------- -------- -------- --------
#@ 0 -379.76896249 0.0D+00 0.04567 0.01110 0.00000 0.00000 4.2
# ok ok
#
# However, for subsequent step the format is a bit different:
#
# Step Energy Delta E Gmax Grms Xrms Xmax Walltime
# ---- ---------------- -------- -------- -------- -------- -------- --------
#@ 2 -379.77794602 -7.4D-05 0.00118 0.00023 0.00440 0.01818 14.8
# ok
#
# There is also a summary of the line search (which we don't use now), like this:
#
# Line search:
# step= 1.00 grad=-1.8D-05 hess= 8.9D-06 energy= -379.777955 mode=accept
# new step= 1.00 predicted energy= -379.777955
#
if line[10:14] == "Step":
self.geostep = int(line.split()[-1])
self.skip_line(inputfile, 'dashes')
self.linesearch = False
if line[0] == "@" and line.split()[1] == "Step":
at_and_dashes = next(inputfile)
line = next(inputfile)
assert int(line.split()[1]) == self.geostep == 0
gmax = float(line.split()[4])
grms = float(line.split()[5])
xrms = float(line.split()[6])
xmax = float(line.split()[7])
if not hasattr(self, 'geovalues'):
self.geovalues = []
self.geovalues.append([gmax, grms, xmax, xrms])
self.linesearch = True
if line[2:6] == "Step":
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
assert int(line.split()[1]) == self.geostep
if self.linesearch:
#print(line)
return
gmax = float(line.split()[4])
grms = float(line.split()[5])
xrms = float(line.split()[6])
xmax = float(line.split()[7])
if not hasattr(self, 'geovalues'):
self.geovalues = []
self.geovalues.append([gmax, grms, xmax, xrms])
self.linesearch = True
# There is a clear message when the geometry optimization has converged:
#
# ----------------------
# Optimization converged
# ----------------------
#
if line.strip() == "Optimization converged":
self.skip_line(inputfile, 'dashes')
if not hasattr(self, 'optdone'):
self.optdone = []
self.optdone.append(len(self.geovalues) - 1)
if "Failed to converge" in line and hasattr(self, 'geovalues'):
if not hasattr(self, 'optdone'):
self.optdone = []
# extract the theoretical method
if "Total SCF energy" in line:
self.metadata["methods"].append("HF")
if "Total DFT energy" in line:
self.metadata["methods"].append("DFT")
# The line containing the final SCF energy seems to be always identifiable like this.
if "Total SCF energy" in line or "Total DFT energy" in line:
# NWChem often does a line search during geometry optimization steps, reporting
# the SCF information but not the coordinates (which are not necessarily 'intermediate'
# since the step size can become smaller). We want to skip these SCF cycles,
# unless the coordinates can also be extracted (possibly from the gradients?).
if hasattr(self, 'linesearch') and self.linesearch:
return
if not hasattr(self, "scfenergies"):
self.scfenergies = []
energy = float(line.split()[-1])
energy = utils.convertor(energy, "hartree", "eV")
self.scfenergies.append(energy)
# The final MO orbitals are printed in a simple list, but apparently not for
# DFT calcs, and often this list does not contain all MOs, so make sure to
# parse them from the MO analysis below if possible. This section will be like this:
#
# Symmetry analysis of molecular orbitals - final
# -----------------------------------------------
#
# Numbering of irreducible representations:
#
# 1 ag 2 au 3 bg 4 bu
#
# Orbital symmetries:
#
# 1 bu 2 ag 3 bu 4 ag 5 bu
# 6 ag 7 bu 8 ag 9 bu 10 ag
# ...
if line.strip() == "Symmetry analysis of molecular orbitals - final":
self.skip_lines(inputfile, ['d', 'b', 'numbering', 'b', 'reps', 'b', 'syms', 'b'])
if not hasattr(self, 'mosyms'):
self.mosyms = [[None]*self.nbasis]
line = next(inputfile)
while line.strip():
ncols = len(line.split())
assert ncols % 2 == 0
for i in range(ncols//2):
index = int(line.split()[i*2]) - 1
sym = line.split()[i*2+1]
sym = sym[0].upper() + sym[1:]
if self.mosyms[0][index]:
if self.mosyms[0][index] != sym:
self.logger.warning("Symmetry of MO %i has changed" % (index+1))
self.mosyms[0][index] = sym
line = next(inputfile)
# The same format is used for HF and DFT molecular orbital analysis. We want to parse
# the MO energies from this section, although it is printed already before this with
# less precision (might be useful to parse that if this is not available). Also, this
# section contains coefficients for the leading AO contributions, so it might also
# be useful to parse and use those values if the full vectors are not printed.
#
# The block looks something like this (two separate alpha/beta blocks in the unrestricted case):
#
# ROHF Final Molecular Orbital Analysis
# -------------------------------------
#
# Vector 1 Occ=2.000000D+00 E=-1.104059D+01 Symmetry=bu
# MO Center= 1.4D-17, 0.0D+00, -6.5D-37, r^2= 2.1D+00
# Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
# ----- ------------ --------------- ----- ------------ ---------------
# 1 0.701483 1 C s 6 -0.701483 2 C s
#
# Vector 2 Occ=2.000000D+00 E=-1.104052D+01 Symmetry=ag
# ...
# Vector 12 Occ=2.000000D+00 E=-1.020253D+00 Symmetry=bu
# MO Center= -1.4D-17, -5.6D-17, 2.9D-34, r^2= 7.9D+00
# Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
# ----- ------------ --------------- ----- ------------ ---------------
# 36 -0.298699 11 C s 41 0.298699 12 C s
# 2 0.270804 1 C s 7 -0.270804 2 C s
# 48 -0.213655 15 C s 53 0.213655 16 C s
# ...
#
if "Final" in line and "Molecular Orbital Analysis" in line:
# Unrestricted jobs have two such blocks, for alpha and beta orbitals, and
# we need to keep track of which one we're parsing (always alpha in restricted case).
unrestricted = ("Alpha" in line) or ("Beta" in line)
alphabeta = int("Beta" in line)
self.skip_lines(inputfile, ['dashes', 'blank'])
nvectors = []
mooccnos = []
energies = []
symmetries = [None]*self.nbasis
line = next(inputfile)
while line[:7] == " Vector":
# Note: the vector count starts from 1 in NWChem.
nvector = int(line[7:12])
nvectors.append(nvector)
# A nonzero occupancy for SCF jobs means the orbital is occupied.
mooccno = int(self.float(line[18:30]))
mooccnos.append(mooccno)
# If the printout does not start from the first MO, assume None for all previous orbitals.
if len(energies) == 0 and nvector > 1:
for i in range(1, nvector):
energies.append(None)
energy = self.float(line[34:47])
energy = utils.convertor(energy, "hartree", "eV")
energies.append(energy)
# When symmetry is not used, this part of the line is missing.
if line[47:58].strip() == "Symmetry=":
sym = line[58:].strip()
sym = sym[0].upper() + sym[1:]
symmetries[nvector-1] = sym
line = next(inputfile)
if "MO Center" in line:
line = next(inputfile)
if "Bfn." in line:
line = next(inputfile)
if "-----" in line:
line = next(inputfile)
while line.strip():
line = next(inputfile)
line = next(inputfile)
self.set_attribute('nmo', nvector)
if not hasattr(self, 'moenergies') or (len(self.moenergies) > alphabeta):
self.moenergies = []
self.moenergies.append(energies)
if not hasattr(self, 'mosyms') or (len(self.mosyms) > alphabeta):
self.mosyms = []
self.mosyms.append(symmetries)
if not hasattr(self, 'homos') or (len(self.homos) > alphabeta):
self.homos = []
nvector_index = mooccnos.index(0) - 1
if nvector_index > -1:
self.homos.append(nvectors[nvector_index] - 1)
else:
self.homos.append(-1)
# If this was a restricted open-shell calculation, append
# to HOMOs twice since only one Molecular Orbital Analysis
# section is in the output file.
if (not unrestricted) and (1 in mooccnos):
nvector_index = mooccnos.index(1) - 1
if nvector_index > -1:
self.homos.append(nvectors[nvector_index] - 1)
else:
self.homos.append(-1)
# This is where the full MO vectors are printed, but a special directive is needed for it:
#
# Final MO vectors
# ----------------
#
#
# global array: alpha evecs[1:60,1:60], handle: -995
#
# 1 2 3 4 5 6
# ----------- ----------- ----------- ----------- ----------- -----------
# 1 -0.69930 -0.69930 -0.02746 -0.02769 -0.00313 -0.02871
# 2 -0.03156 -0.03135 0.00410 0.00406 0.00078 0.00816
# 3 0.00002 -0.00003 0.00067 0.00065 -0.00526 -0.00120
# ...
#
if line.strip() == "Final MO vectors":
if not hasattr(self, 'mocoeffs'):
self.mocoeffs = []
self.skip_lines(inputfile, ['d', 'b', 'b'])
# The columns are MOs, rows AOs, but that's an educated guess since no
# atom information is printed alongside the indices. This next line gives
# the dimensions, which we can check if they were set before this. Also, this line
# specifies whether we are dealing with alpha or beta vectors.
array_info = next(inputfile)
while ("global array" in array_info):
alphabeta = int(line.split()[2] == "beta")
size = array_info.split('[')[1].split(']')[0]
nbasis = int(size.split(',')[0].split(':')[1])
nmo = int(size.split(',')[1].split(':')[1])
self.set_attribute('nbasis', nbasis)
self.set_attribute('nmo', nmo)
self.skip_line(inputfile, 'blank')
mocoeffs = []
while len(mocoeffs) < self.nmo:
nmos = list(map(int, next(inputfile).split()))
assert len(mocoeffs) == nmos[0] - 1
for n in nmos:
mocoeffs.append([])
self.skip_line(inputfile, 'dashes')
for nb in range(nbasis):
line = next(inputfile)
index = int(line.split()[0])
assert index == nb+1
coefficients = list(map(float, line.split()[1:]))
assert len(coefficients) == len(nmos)
for i, c in enumerate(coefficients):
mocoeffs[nmos[i]-1].append(c)
self.skip_line(inputfile, 'blank')
self.mocoeffs.append(mocoeffs)
array_info = next(inputfile)
# For Hartree-Fock, the atomic Mulliken charges are typically printed like this:
#
# Mulliken analysis of the total density
# --------------------------------------
#
# Atom Charge Shell Charges
# ----------- ------ -------------------------------------------------------
# 1 C 6 6.00 1.99 1.14 2.87
# 2 C 6 6.00 1.99 1.14 2.87
# ...
if line.strip() == "Mulliken analysis of the total density":
if not hasattr(self, "atomcharges"):
self.atomcharges = {}
self.skip_lines(inputfile, ['d', 'b', 'header', 'd'])
charges = []
line = next(inputfile)
while line.strip():
index, atomname, nuclear, atom = line.split()[:4]
shells = line.split()[4:]
charges.append(float(atom)-float(nuclear))
line = next(inputfile)
self.atomcharges['mulliken'] = charges
# Note that the 'overlap population', as printed in the Mulliken population analysis,
# is not the same thing as the 'overlap matrix'. In fact, it is the overlap matrix
# multiplied elementwise times the density matrix.
#
# ----------------------------
# Mulliken population analysis
# ----------------------------
#
# ----- Total overlap population -----
#
# 1 2 3 4 5 6 7
#
# 1 1 C s 2.0694818227 -0.0535883400 -0.0000000000 -0.0000000000 -0.0000000000 -0.0000000000 0.0000039991
# 2 1 C s -0.0535883400 0.8281341291 0.0000000000 -0.0000000000 0.0000000000 0.0000039991 -0.0009906747
# ...
#
# DFT does not seem to print the separate listing of Mulliken charges
# by default, but they are printed by this module later on. They are also printed
# for Hartree-Fock runs, though, so in that case make sure they are consistent.
if line.strip() == "Mulliken population analysis":
self.skip_lines(inputfile, ['d', 'b', 'total_overlap_population', 'b'])
overlaps = []
line = next(inputfile)
while all([c.isdigit() for c in line.split()]):
# There is always a line with the MO indices printed in this block.
indices = [int(i)-1 for i in line.split()]
for i in indices:
overlaps.append([])
# There is usually a blank line after the MO indices, but
# there are exceptions, so check if line is blank first.
line = next(inputfile)
if not line.strip():
line = next(inputfile)
# Now we can iterate over atomic orbitals.
for nao in range(self.nbasis):
data = list(map(float, line.split()[4:]))
for i, d in enumerate(data):
overlaps[indices[i]].append(d)
line = next(inputfile)
line = next(inputfile)
# This header should be printed later, before the charges are print, which of course
# are just sums of the overlaps and could be calculated. But we just go ahead and
# parse them, make sure they're consistent with previously parsed values and
# use these since they are more precise (previous precision could have been just 0.01).
while "Total gross population on atoms" not in line:
line = next(inputfile)
self.skip_line(inputfile, 'blank')
charges = []
for i in range(self.natom):
line = next(inputfile)
iatom, element, ncharge, epop = line.split()
iatom = int(iatom)
ncharge = float(ncharge)
epop = float(epop)
assert iatom == (i+1)
charges.append(epop-ncharge)
if not hasattr(self, 'atomcharges'):
self.atomcharges = {}
if not "mulliken" in self.atomcharges:
self.atomcharges['mulliken'] = charges
else:
assert max(self.atomcharges['mulliken'] - numpy.array(charges)) < 0.01
self.atomcharges['mulliken'] = charges
# NWChem prints the dipole moment in atomic units first, and we could just fast forward
# to the values in Debye, which are also printed. But we can also just convert them
# right away and so parse a little bit less. Note how the reference point is print
# here within the block nicely, as it is for all moment later.
#
# -------------
# Dipole Moment
# -------------
#
# Center of charge (in au) is the expansion point
# X = 0.0000000 Y = 0.0000000 Z = 0.0000000
#
# Dipole moment 0.0000000000 Debye(s)
# DMX 0.0000000000 DMXEFC 0.0000000000
# DMY 0.0000000000 DMYEFC 0.0000000000
# DMZ -0.0000000000 DMZEFC 0.0000000000
#
# ...
#
if line.strip() == "Dipole Moment":
self.skip_lines(inputfile, ['d', 'b'])
reference_comment = next(inputfile)
assert "(in au)" in reference_comment
reference = next(inputfile).split()
self.reference = [reference[-7], reference[-4], reference[-1]]
self.reference = numpy.array([float(x) for x in self.reference])
self.reference = utils.convertor(self.reference, 'bohr', 'Angstrom')
self.skip_line(inputfile, 'blank')
magnitude = next(inputfile)
assert magnitude.split()[-1] == "A.U."
dipole = []
for i in range(3):
line = next(inputfile)
dipole.append(float(line.split()[1]))
dipole = utils.convertor(numpy.array(dipole), "ebohr", "Debye")
if not hasattr(self, 'moments'):
self.moments = [self.reference, dipole]
else:
self.moments[1] == dipole
# The quadrupole moment is pretty straightforward to parse. There are several
# blocks printed, and the first one called 'second moments' contains the raw
# moments, and later traceless values are printed. The moments, however, are
# not in lexicographical order, so we need to sort them. Also, the first block
# is in atomic units, so remember to convert to Buckinghams along the way.
#
# -----------------
# Quadrupole Moment
# -----------------
#
# Center of charge (in au) is the expansion point
# X = 0.0000000 Y = 0.0000000 Z = 0.0000000
#
# < R**2 > = ********** a.u. ( 1 a.u. = 0.280023 10**(-16) cm**2 )
# ( also called diamagnetic susceptibility )
#
# Second moments in atomic units
#
# Component Electronic+nuclear Point charges Total
# --------------------------------------------------------------------------
# XX -38.3608511210 0.0000000000 -38.3608511210
# YY -39.0055467347 0.0000000000 -39.0055467347
# ...
#
if line.strip() == "Quadrupole Moment":
self.skip_lines(inputfile, ['d', 'b'])
reference_comment = next(inputfile)
assert "(in au)" in reference_comment
reference = next(inputfile).split()
self.reference = [reference[-7], reference[-4], reference[-1]]
self.reference = numpy.array([float(x) for x in self.reference])
self.reference = utils.convertor(self.reference, 'bohr', 'Angstrom')
self.skip_lines(inputfile, ['b', 'units', 'susc', 'b'])
line = next(inputfile)
assert line.strip() == "Second moments in atomic units"
self.skip_lines(inputfile, ['b', 'header', 'd'])
# Parse into a dictionary and then sort by the component key.
quadrupole = {}
for i in range(6):
line = next(inputfile)
quadrupole[line.split()[0]] = float(line.split()[-1])
lex = sorted(quadrupole.keys())
quadrupole = [quadrupole[key] for key in lex]
quadrupole = utils.convertor(numpy.array(quadrupole), "ebohr2", "Buckingham")
# The checking of potential previous values if a bit more involved here,
# because it turns out NWChem has separate keywords for dipole, quadrupole
# and octupole output. So, it is perfectly possible to print the quadrupole
# and not the dipole... if that is the case set the former to None and
# issue a warning. Also, a regression has been added to cover this case.
if not hasattr(self, 'moments') or len(self.moments) < 2:
self.logger.warning("Found quadrupole moments but no previous dipole")
self.moments = [self.reference, None, quadrupole]
else:
if len(self.moments) == 2:
self.moments.append(quadrupole)
else:
assert self.moments[2] == quadrupole
# The octupole moment is analogous to the quadrupole, but there are more components
# and the checking of previously parsed dipole and quadrupole moments is more involved,
# with a corresponding test also added to regressions.
#
# ---------------
# Octupole Moment
# ---------------
#
# Center of charge (in au) is the expansion point
# X = 0.0000000 Y = 0.0000000 Z = 0.0000000
#
# Third moments in atomic units
#
# Component Electronic+nuclear Point charges Total
# --------------------------------------------------------------------------
# XXX -0.0000000000 0.0000000000 -0.0000000000
# YYY -0.0000000000 0.0000000000 -0.0000000000
# ...
#
if line.strip() == "Octupole Moment":
self.skip_lines(inputfile, ['d', 'b'])
reference_comment = next(inputfile)
assert "(in au)" in reference_comment
reference = next(inputfile).split()
self.reference = [reference[-7], reference[-4], reference[-1]]
self.reference = numpy.array([float(x) for x in self.reference])
self.reference = utils.convertor(self.reference, 'bohr', 'Angstrom')
self.skip_line(inputfile, 'blank')
line = next(inputfile)
assert line.strip() == "Third moments in atomic units"
self.skip_lines(inputfile, ['b', 'header', 'd'])
octupole = {}
for i in range(10):
line = next(inputfile)
octupole[line.split()[0]] = float(line.split()[-1])
lex = sorted(octupole.keys())
octupole = [octupole[key] for key in lex]
octupole = utils.convertor(numpy.array(octupole), "ebohr3", "Debye.ang2")
if not hasattr(self, 'moments') or len(self.moments) < 2:
self.logger.warning("Found octupole moments but no previous dipole or quadrupole moments")
self.moments = [self.reference, None, None, octupole]
elif len(self.moments) == 2:
self.logger.warning("Found octupole moments but no previous quadrupole moments")
self.moments.append(None)
self.moments.append(octupole)
else:
if len(self.moments) == 3:
self.moments.append(octupole)
else:
assert self.moments[3] == octupole
if "Total MP2 energy" in line:
self.metadata["methods"].append("MP2")
mpenerg = float(line.split()[-1])
if not hasattr(self, "mpenergies"):
self.mpenergies = []
self.mpenergies.append([])
self.mpenergies[-1].append(utils.convertor(mpenerg, "hartree", "eV"))
if "CCSD total energy / hartree" in line or "total CCSD energy:" in line:
self.metadata["methods"].append("CCSD")
ccenerg = float(line.split()[-1])
if not hasattr(self, "ccenergies"):
self.ccenergies = []
self.ccenergies.append([])
self.ccenergies[-1].append(utils.convertor(ccenerg, "hartree", "eV"))
if "CCSD(T) total energy / hartree" in line:
self.metadata["methods"].append("CCSD(T)")
ccenerg = float(line.split()[-1])
if not hasattr(self, "ccenergies"):
self.ccenergies = []
self.ccenergies.append([])
self.ccenergies[-1].append(utils.convertor(ccenerg, "hartree", "eV"))
# Static and dynamic polarizability.
if "Linear Response polarizability / au" in line:
if not hasattr(self, "polarizabilities"):
self.polarizabilities = []
polarizability = []
line = next(inputfile)
assert line.split()[0] == "Frequency"
line = next(inputfile)
assert line.split()[0] == "Wavelength"
self.skip_lines(inputfile, ['coordinates', 'd'])
for _ in range(3):
line = next(inputfile)
polarizability.append(line.split()[1:])
self.polarizabilities.append(numpy.array(polarizability))
def after_parsing(self):
    """NWChem-specific routines run after parsing the file.

    Currently expands the per-element shell descriptions collected in
    ``self.shells`` (strings such as ``"2s1p"``) into explicit atomic
    orbital names in ``self.aonames`` (e.g. ``"C1_1S"``, ``"C1_2PX"``).
    If shell information is missing for any atom, ``self.aonames`` is
    discarded entirely rather than left partially populated.
    """
    # Set up a few necessary things, including a regular expression
    # for matching the shells. The pattern must be a raw string so that
    # "\d" reaches the regex engine verbatim; in a plain string literal
    # it is an invalid escape sequence, which raises a SyntaxWarning on
    # Python 3.12+ (DeprecationWarning before that).
    table = utils.PeriodicTable()
    elements = [table.element[x] for x in self.atomnos]
    pattern = re.compile(r"(\ds)+(\dp)*(\dd)*(\df)*(\dg)*")

    # Label templates per shell type; "%i" is filled with the shell number.
    labels = {}
    labels['s'] = ["%iS"]
    labels['p'] = ["%iPX", "%iPY", "%iPZ"]
    if self.shells['type'] == 'spherical':
        labels['d'] = ['%iD-2', '%iD-1', '%iD0', '%iD1', '%iD2']
        labels['f'] = ['%iF-3', '%iF-2', '%iF-1', '%iF0',
                       '%iF1', '%iF2', '%iF3']
        labels['g'] = ['%iG-4', '%iG-3', '%iG-2', '%iG-1', '%iG0',
                       '%iG1', '%iG2', '%iG3', '%iG4']
    elif self.shells['type'] == 'cartesian':
        labels['d'] = ['%iDXX', '%iDXY', '%iDXZ',
                       '%iDYY', '%iDYZ',
                       '%iDZZ']
        labels['f'] = ['%iFXXX', '%iFXXY', '%iFXXZ',
                       '%iFXYY', '%iFXYZ', '%iFXZZ',
                       '%iFYYY', '%iFYYZ', '%iFYZZ',
                       '%iFZZZ']
        labels['g'] = ['%iGXXXX', '%iGXXXY', '%iGXXXZ',
                       '%iGXXYY', '%iGXXYZ', '%iGXXZZ',
                       '%iGXYYY', '%iGXYYZ', '%iGXYZZ',
                       '%iGXZZZ', '%iGYYYY', '%iGYYYZ',
                       '%iGYYZZ', '%iGYZZZ', '%iGZZZZ']
    else:
        self.logger.warning("Found a non-standard aoname representation type.")
        return

    # Now actually build aonames, which involves expanding strings like
    # "2s1p" into the appropriate orbital labels for every atom.
    self.aonames = []
    for i, element in enumerate(elements):
        try:
            shell_text = self.shells[element]
        except KeyError:
            # A partially filled aonames would be misleading, so drop it.
            del self.aonames
            msg = "Cannot determine aonames for at least one atom."
            self.logger.warning(msg)
            break
        prefix = "%s%i_" % (element, i + 1)  # (e.g. C1_)

        matches = pattern.match(shell_text)
        for j, group in enumerate(matches.groups()):
            if group is None:
                continue

            count = int(group[:-1])
            label = group[-1]

            for k in range(count):
                # NOTE(review): the shell number is computed as j+k+1,
                # i.e. each earlier shell *type* bumps the index by one
                # (so "2s1p" yields 1S, 2S, 2PX, ...) — confirm this
                # matches NWChem's numbering for larger basis sets.
                temp = [x % (j + k + 1) for x in labels[label]]
                self.aonames.extend([prefix + x for x in temp])
if __name__ == "__main__":
    # Run this parser module's doctests when executed as a script.
    import doctest
    import nwchemparser

    doctest.testmod(nwchemparser, verbose=False)
| {
"content_hash": "2b9e34233e2874f60e127a495c75980b",
"timestamp": "",
"source": "github",
"line_count": 1114,
"max_line_length": 137,
"avg_line_length": 46.76750448833034,
"alnum_prop": 0.4770341081402714,
"repo_name": "Schamnad/cclib",
"id": "112bacd730a041ee5da868e60d073dffec10bd2b",
"size": "52302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cclib/parser/nwchemparser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arc",
"bytes": "18395"
},
{
"name": "DIGITAL Command Language",
"bytes": "21581"
},
{
"name": "Python",
"bytes": "836753"
},
{
"name": "Shell",
"bytes": "867"
},
{
"name": "TeX",
"bytes": "29388"
}
],
"symlink_target": ""
} |
import os
import re
from setuptools import setup, find_packages
# Package metadata consumed by the setup() call below.
__description__ = 'Pagar.me Python'
__long_description__ = 'Python library for Pagar.me API'
__author__ = 'Murilo Henrique, Victor Messina'
__author_email__ = 'suporte@pagar.me'
__special_things__ = 'Derek Stavis, Rodrigo Amaral'
# Extra dependencies, installable via `pip install pagarme-python[testing]`.
testing_extras = [
    'pytest',
    'pytest-cov',
]
def _find_version():
filename = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'pagarme/sdk.py'
)
with open(filename) as f:
data = f.read()
match = re.search(r"VERSION = '(.+)'", data)
return match.groups()[0]
# Resolve the version once at import time from pagarme/sdk.py.
__version__ = _find_version()

# Mirror the runtime dependencies declared in requirements.txt. Use a
# context manager so the file handle is closed promptly instead of
# leaking until garbage collection (the original used a bare open()).
with open('requirements.txt') as _requirements_file:
    install_requires = _requirements_file.read().strip().split('\n')
# Package definition; the dunder metadata values are defined near the top
# of this file and the version is extracted from pagarme/sdk.py.
setup(
    name='pagarme-python',
    version=__version__,
    author=__author__,
    author_email=__author_email__,
    packages=find_packages(),
    license='MIT',
    description=__description__,
    long_description=__long_description__,
    # NOTE(review): 'special_things' is not a standard setuptools keyword,
    # so setuptools emits an "Unknown distribution option" warning.
    # Presumably kept as a contributors credit -- confirm before removing.
    special_things=__special_things__,
    url='https://github.com/pagarme/pagarme-python',
    keywords='Payment, pagarme',
    include_package_data=True,
    zip_safe=False,
    install_requires=install_requires,
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: OS Independent',
        'Topic :: Software Development',
        'Environment :: Web Environment',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'License :: OSI Approved :: MIT License',
    ],
    tests_require=['pytest'],
    extras_require={
        'testing': testing_extras,
    },
)
| {
"content_hash": "f7f78e24947f2decd246e7392a5bf59d",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 70,
"avg_line_length": 26.863636363636363,
"alnum_prop": 0.6153412295544275,
"repo_name": "pagarme/pagarme-python",
"id": "514d6d5aaca3b23ef4af69ffad1e4b99fd19c64c",
"size": "1797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57259"
}
],
"symlink_target": ""
} |
import itertools
from ..task import DecisionTask, ActivityTask
from ..events import ProcessStartedEvent
from ..defaults import Defaults
class Manager(object):
    """
    Handles the creation and execution of workflows.

    # Start a new process
    mgr = manager(backend, workflows)
    process = Process(workflow=FooWorkflow, tags=["foo", "bar"])
    mgr.start_process(process)

    # Find the process
    mgr.processes(workflow=FooWorkflow, tag="foo")

    # Query an activity, execute and commit result
    task = mgr.next()
    result = activity(task)
    mgr.complete_task(task, result)
    """

    def __init__(self, backend, workflows=None):
        """Wrap `backend`, indexing the given workflows and their activities
        by name and registering both with the backend.

        `workflows` defaults to None (treated as an empty sequence) rather
        than a shared mutable default list.
        """
        workflows = list(workflows) if workflows else []
        self._backend = backend

        self._workflows = dict((w.name, w) for w in workflows)

        activities = list(itertools.chain.from_iterable(w.activities for w in workflows))
        self._activities = dict((a.name, a) for a in activities)

        # Register with explicit loops. The previous implementation called
        # map() purely for its side effects; under Python 3 map() returns a
        # lazy iterator, so those registrations would silently never run.
        for workflow in workflows:
            self._register_workflow_with_backend(workflow)
        for activity in activities:
            self._register_activity_with_backend(activity)

    def _register_workflow_with_backend(self, workflow):
        # Forward the workflow's category and timeouts to the backend registry.
        kwargs = {
            'category': workflow.category,
            'timeout': workflow.timeout,
            'decision_timeout': workflow.decision_timeout
        }
        self._backend.register_workflow(workflow.name, **kwargs)

    def _register_activity_with_backend(self, activity):
        # Forward the activity's category and timeouts to the backend registry.
        kwargs = {
            'category': activity.category,
            'scheduled_timeout': activity.scheduled_timeout,
            'execution_timeout': activity.execution_timeout,
            'heartbeat_timeout': activity.heartbeat_timeout
        }
        self._backend.register_activity(activity.name, **kwargs)

    def start_process(self, process):
        """Start a new process on the backend and return the backend's result."""
        return self._backend.start_process(process)

    def signal_process(self, process_or_id, signal):
        """Deliver `signal` to a process; accepts a Process object or an id."""
        process_id = getattr(process_or_id, 'id', process_or_id)
        self._backend.signal_process(process_id, signal.name, signal.data)

    def cancel_process(self, process_or_id, details=None):
        """Cancel a process; accepts a Process object or an id."""
        process_id = getattr(process_or_id, 'id', process_or_id)
        self._backend.cancel_process(process_id, details=details)

    def heartbeat(self, task):
        """Report liveness for an in-progress activity task."""
        self._backend.heartbeat_activity_task(task)

    def process_by_id(self, process_id):
        """Look up a single process by its id."""
        return self._backend.process_by_id(process_id)

    def processes(self, workflow=None, tag=None):
        """Query processes, optionally filtered by workflow (class, instance
        or name) and/or tag."""
        workflow_name = None
        if workflow:
            workflow_name = workflow.name if hasattr(workflow, 'name') else str(workflow)
        return self._backend.processes(workflow=workflow_name, tag=tag)

    def next_decision(self, identity=None, category=Defaults.DECISION_CATEGORY):
        """Poll the backend for the next available decision task."""
        return self._backend.poll_decision_task(identity=identity, category=category)

    def next_activity(self, identity=None, category=Defaults.ACTIVITY_CATEGORY):
        """Poll the backend for the next available activity task."""
        return self._backend.poll_activity_task(identity=identity, category=category)

    def workflow_for_task(self, task):
        """Instantiate the workflow class that `task`'s process belongs to."""
        workflow_cls = self._workflows[task.process.workflow]
        return workflow_cls()

    def activity_for_task(self, task, monitor=None):
        """Instantiate the activity class for `task`'s activity execution."""
        activity_cls = self._activities[task.activity_execution.activity]
        return activity_cls(task=task, monitor=monitor)

    def complete_task(self, task, result):
        """Commit `result` for a decision or activity task.

        Raises:
            ValueError: if `task` is neither a DecisionTask nor an ActivityTask.
        """
        if isinstance(task, DecisionTask):
            self._backend.complete_decision_task(task, result)
        elif isinstance(task, ActivityTask):
            self._backend.complete_activity_task(task, result)
        else:
            raise ValueError('unsupported task type')

    def copy_with_backend(self, backend):
        """Return a new Manager with the same workflows but a different backend."""
        return Manager(backend, self._workflows.values())

    def __repr__(self):
        return 'Manager(%s)' % self._backend.__class__.__name__
"content_hash": "d6e9eaee5ad4187f19db2bd8c730d577",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 89,
"avg_line_length": 37.398058252427184,
"alnum_prop": 0.6612149532710281,
"repo_name": "pyworkflow/pyworkflow",
"id": "0b23651366fc177f5104d8393aa51c8e8021bcdf",
"size": "3852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyworkflow/managed/manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88469"
}
],
"symlink_target": ""
} |
"""
relatrix.py
A class for relating residues using Rosetta numbering, PDB ATOM numbering, SEQRES/FASTA sequences, and UniParc sequences.
Created by Shane O'Connor 2013
"""
import types
import traceback
from .fasta import FASTA
from .pdb import PDB, PDBMissingMainchainAtomsException, ROSETTA_HACKS_residues_to_remove
from .pdbml import PDBML
from .clustalo import PDBUniParcSequenceAligner, MultipleAlignmentException
from klab import colortext
from .basics import Sequence, SequenceMap, UniParcPDBSequenceMap
from .sifts import SIFTS, MissingSIFTSRecord, BadSIFTSMapping, NoSIFTSPDBUniParcMapping
use_seqres_sequence_for_fasta_sequence = set(['1A2C', '4CPA', '2ATC', '1OLR'])
use_fasta_sequence_for_seqres_sequence = set(['1DEQ'])
# In these cases, use the SIFTS mapping for the SEQRES sequence instead of the Clustal alignment.
use_SIFTS_match_for_seqres_sequence = set([
('1AAR', 'A'), ('1AAR', 'B'), # not surprising since this is Polyubiquitin-C
('1AR1', 'D'), # for seqres to uniparc, chain D, Clustal maps 94->None, 95->None, 96->95, 97->96 whereas SIFTS maps 94->94, 95->95, 96->None, 97->96. Either mapping seems acceptable on a purely sequential level (although SIFTS seems better) but I am assuming that the SIFTS mapping makes more sense.
('1BF4', 'A'), # for seqres to uniparc, chain A, Clustal maps 35->36, 36->None, 37->None, 38->38 whereas SIFTS maps 35->36, 36->37, 37->38, 38->None. Either mapping seems acceptable on a purely sequential level (although SIFTS seems better) but I am assuming that the SIFTS mapping makes more sense.
('1CPM', 'A'), # DBREF / UniProt says UPI000012BD97, SIFTS says UPI000005F74B. Either fits from a purely sequential standpoint as these two proteins have the same sequence apart from the 38th residue which is R in the former and P in the latter
('1DEQ', 'C'), ('1DEQ', 'F'), ('1DEQ', 'P'), ('1DEQ', 'S'), # Clustal maps 222->247, SIFTS maps 222->245. Either one is correct - the sequence in GTG where one G is mapped to and the TG (or GT) is unmapped.
('1OTR', 'B'), # Poly-ubiquitin. Clustal matches at position 153 (3rd copy), SIFTS matches at position 305 (5th and last copy).
('1UBQ', 'A'), # Poly-ubiquitin. Clustal matches at position 305 (5th copy), SIFTS matches at position 609 (9th and last full copy). Clustal also maps the final residue weirdly (to the final residue of the 6th copy rather than the 5th copy)
('1SCE', 'A'), ('1SCE', 'B'), ('1SCE', 'C'), ('1SCE', 'D'), # SIFTS has a better mapping
('1ORC', 'A'), # Both matches are valid but neither are ideal (I would have done 53->53,...56->56,...62->57, 63->58,... skipping the inserted residues with ATOM IDs 56A-56E). I'll go with SIFTS since that is somewhat standard.
])
known_bad_clustal_to_sifts_mappings = set([
('1AAR', 'A'), ('1AAR', 'B'),
('1BF4', 'A'),
('1CPM', 'A'),
('1DEQ', 'C'), ('1DEQ', 'F'), ('1DEQ', 'P'), ('1DEQ', 'S'), # the residue types actually agree but the key check will fail
('1SCE', 'A'), ('1SCE', 'B'), ('1SCE', 'C'), ('1SCE', 'D'),
('487D', 'H'), ('487D', 'I'), ('487D', 'J'), ('487D', 'K'), ('487D', 'L'), ('487D', 'M'), ('487D', 'N'), # The mapping SIFTS gets (from UniProt) is not as close as the DBREF entries
('1ORC', 'A'), # the residue types actually agree but the key check will fail
])
do_not_use_SIFTS_for_these_chains = set([
('1HGH', 'A'), # The DBREF match (UPI000012C4CD) is better in terms of sequence and the reference also states that the A/Aichi/2/1968 (H3N2) isolate is under study
('1URK', 'A'), # The DBREF/UniProt match (UPI00002BDB5) is a super-sequence of the SIFTS match (UPI000002C604)
('2ATC', 'B'), # Clustal maps the K129 lysine deletion correctly, mapping RRADD->R(K)RAND. SIFTS maps RRADD->RKR(A)ND
('487D', 'H'), ('487D', 'I'), ('487D', 'J'), ('487D', 'K'), ('487D', 'L'), ('487D', 'M'), ('487D', 'N'), # The mapping SIFTS gets (from UniProt) is not as close as the DBREF entries
('1E6N', 'A'), ('1E6N', 'B'), # The DBREF match (UPI0000000F1C) matches the sequence. The UniProt SIFTS match (UPI00000B96E8) is not exact but is close. I don't know which is the better match but I chose the DBREF one.
])
pdbs_with_do_not_use_SIFTS_for_these_chains = set([p[0] for p in do_not_use_SIFTS_for_these_chains])
do_not_use_the_sequence_aligner = set([
'1CBW',
'1KJ1', # The sequences are too close for Clustal to match properly
'1M7T', # Chimera. SIFTS maps this properly
'1Z1I', # This (306 residues) maps to P0C6U8/UPI000018DB89 (Replicase polyprotein 1a, 4,382 residues) A 3241-3546, the '3C-like proteinase' region
# and P0C6X7/UPI000019098F (Replicase polyprotein 1ab, 7,073 residues) A 3241-3546, the '3C-like proteinase' region
# which gives our Clustal sequence aligner an ambiguous mapping. Both are valid mappings. SIFTS chooses the the longer one, UPI000019098F.
])
class ResidueRelatrix(object):
''' A class for relating residue IDs from different schemes.
Note: we assume throughout that there is one map from SEQRES to UniParc. This is not always true e.g. Polyubiquitin-C (UPI000000D74D) has 9 copies of the ubiquitin sequence.'''
schemes = ['rosetta', 'atom', 'seqres', 'fasta', 'uniparc']
def __init__(self, pdb_id, rosetta_scripts_path, rosetta_database_path = None, chains_to_keep = [], min_clustal_cut_off = 80, cache_dir = None, silent = False, acceptable_sequence_percentage_match = 80.0, acceptable_sifts_sequence_percentage_match = None, starting_clustal_cut_off = 100, bio_cache = None): # keep_HETATMS = False
''' acceptable_sequence_percentage_match is used when checking whether the SEQRES sequences have a mapping. Usually
90.00% works but some cases e.g. 1AR1, chain C, have a low matching score mainly due to extra residues. I set
this to 80.00% to cover most cases.'''
# todo: add an option to not use the Clustal sequence aligner and only use the SIFTS mapping. This could be useful for a web interface where we do not want to have to fix things manually.
if acceptable_sifts_sequence_percentage_match == None:
acceptable_sifts_sequence_percentage_match = acceptable_sequence_percentage_match
assert(0.0 <= acceptable_sequence_percentage_match <= 100.0)
assert(0.0 <= acceptable_sifts_sequence_percentage_match <= 100.0)
if not((type(pdb_id) == bytes or type(pdb_id) == type('')) and len(pdb_id) == 4 and pdb_id.isalnum()):
raise Exception("Expected an 4-character long alphanumeric PDB identifer. Received '%s'." % str(pdb_id))
self.pdb_id = pdb_id.upper()
self.silent = silent
self.rosetta_scripts_path = rosetta_scripts_path
self.rosetta_database_path = rosetta_database_path
self.bio_cache = bio_cache
self.cache_dir = cache_dir
if (not self.cache_dir) and self.bio_cache:
self.cache_dir = self.bio_cache.cache_dir
self.alignment_cutoff = None
self.acceptable_sequence_percentage_match = acceptable_sequence_percentage_match
self.acceptable_sifts_sequence_percentage_match = acceptable_sifts_sequence_percentage_match
self.replacement_pdb_id = None
self.FASTA = None
self.pdb = None
self.pdbml = None
self.PDB_UniParc_SA = None
self.sifts = None
self.uniparc_sequences = None
self.fasta_sequences = None
self.seqres_sequences = None
self.atom_sequences = None
self.rosetta_sequences = None
self.pdb_to_rosetta_residue_map_error = False
self.rosetta_to_atom_sequence_maps = None
self.atom_to_seqres_sequence_maps = None
self.seqres_to_uniparc_sequence_maps = None
self.atom_to_rosetta_sequence_maps = None
self.seqres_to_atom_sequence_maps = None
self.uniparc_to_seqres_sequence_maps = None # This map is indexed by PDB chain IDs
self.pdbml_atom_to_seqres_sequence_maps = None
self.clustal_seqres_to_uniparc_sequence_maps = None
self.sifts_atom_to_seqres_sequence_maps = None
self.sifts_seqres_to_uniparc_sequence_maps = None
self.sifts_atom_to_uniparc_sequence_maps = None
self.pdb_chain_to_uniparc_chain_mapping = {}
self._create_objects(chains_to_keep, starting_clustal_cut_off, min_clustal_cut_off, True) # todo: at present, we always strip HETATMs. We may want to change this in the future.
self._create_sequences()
self._create_sequence_maps()
self._merge_sifts_maps()
self._prune_maps_to_sequences()
self._validate()
self._create_inverse_maps()
    ### API functions ###
def convert(self, chain_id, residue_id, from_scheme, to_scheme):
'''The API conversion function. This converts between the different residue ID schemes.'''
# At the cost of three function calls, we ignore the case of the scheme parameters to be more user-friendly.
from_scheme = from_scheme.lower()
to_scheme = to_scheme.lower()
assert(from_scheme in ResidueRelatrix.schemes)
assert(to_scheme in ResidueRelatrix.schemes)
return self._convert(chain_id, residue_id, from_scheme, to_scheme)
def _convert(self, chain_id, residue_id, from_scheme, to_scheme):
'''The actual 'private' conversion function.'''
# There are 12 valid combinations but rather than write them all out explicitly, we will use recursion, sacrificing speed for brevity
if from_scheme == 'rosetta':
atom_id = self.rosetta_to_atom_sequence_maps.get(chain_id, {})[residue_id]
if to_scheme == 'atom':
return atom_id
else:
return self._convert(chain_id, atom_id, 'atom', to_scheme)
if from_scheme == 'atom':
if to_scheme == 'rosetta':
return self.atom_to_rosetta_sequence_maps.get(chain_id, {})[residue_id]
else:
seqres_id = self.atom_to_seqres_sequence_maps.get(chain_id, {})[residue_id]
if to_scheme == 'seqres':
return seqres_id
return self.convert(chain_id, seqres_id, 'seqres', to_scheme)
if from_scheme == 'seqres':
if to_scheme == 'uniparc':
return self.seqres_to_uniparc_sequence_maps.get(chain_id, {})[residue_id]
else:
atom_id = self.seqres_to_atom_sequence_maps.get(chain_id, {})[residue_id]
if to_scheme == 'atom':
return atom_id
return self.convert(chain_id, atom_id, 'atom', to_scheme)
if from_scheme == 'uniparc':
seqres_id = self.uniparc_to_seqres_sequence_maps.get(chain_id, {})[residue_id]
if to_scheme == 'seqres':
return seqres_id
else:
return self._convert(chain_id, seqres_id, 'seqres', to_scheme)
raise Exception("We should never reach this line.")
def convert_from_rosetta(self, residue_id, to_scheme):
'''A simpler conversion function to convert from Rosetta numbering without requiring the chain identifier.'''
assert(type(residue_id) == int)
# Find the chain_id associated with the residue_id
# Scan *all* sequences without breaking out to make sure that we do not have any duplicate maps
chain_id = None
for c, sequence in self.rosetta_sequences.items():
for id, r in sequence:
if r.ResidueID == residue_id:
assert(chain_id == None)
chain_id = c
if chain_id:
return self.convert(chain_id, residue_id, 'rosetta', to_scheme)
else:
return None
### Private validation methods ###
def _validate(self):
'''Validate the mappings.'''
self._validate_fasta_vs_seqres()
self._validate_mapping_signature()
self._validate_id_types()
self._validate_residue_types()
def _validate_fasta_vs_seqres(self):
'''Check that the FASTA and SEQRES sequences agree (they sometimes differ)'''
pdb_id = self.pdb_id
for chain_id, sequence in self.pdb.seqres_sequences.items():
if str(sequence) != self.FASTA[pdb_id][chain_id]:
if self.pdb_id in use_seqres_sequence_for_fasta_sequence:
self.FASTA.replace_sequence(self.pdb_id, chain_id, str(sequence))
elif self.pdb_id in use_fasta_sequence_for_seqres_sequence:
self.pdb.seqres_sequences[chain_id] = Sequence.from_sequence(chain_id, self.FASTA[pdb_id][chain_id], self.sequence_types[chain_id])
sequence = self.FASTA[pdb_id][chain_id]
if str(sequence) != self.FASTA[pdb_id][chain_id]:
raise colortext.Exception("The SEQRES and FASTA sequences disagree for chain %s in %s. This can happen but special-case handling (use_seqres_sequence_for_fasta_sequence) should be added to the file containing the %s class." % (chain_id, pdb_id, self.__class__.__name__))
    def _validate_mapping_signature(self):
        '''Make sure the domains and ranges of the SequenceMaps match the Sequences.

        Checks, per chain: (i) every Rosetta residue maps to an existing ATOM residue, injectively;
        (ii) every ATOM residue maps to an existing SEQRES residue, injectively; (iii) enough SEQRES
        residues map to existing UniParc residues, injectively.'''
        # rosetta_to_atom_sequence_maps
        for chain_id, sequence_map in self.rosetta_to_atom_sequence_maps.items():
            # Check that all Rosetta residues have a mapping
            assert(sorted(sequence_map.keys()) == sorted(self.rosetta_sequences[chain_id].ids()))
            # Check that all ATOM residues in the mapping exist and that the mapping is injective
            rng = set(sequence_map.values())
            atom_residue_ids = set(self.atom_sequences[chain_id].ids())
            assert(rng.intersection(atom_residue_ids) == rng)
            assert(len(rng) == len(list(sequence_map.values())))
        # atom_to_seqres_sequence_maps
        for chain_id, sequence_map in self.atom_to_seqres_sequence_maps.items():
            # Check that all ATOM residues have a mapping
            #print(sorted(sequence_map.keys()))
            #print(sorted(self.atom_sequences[chain_id].ids()))
            assert(sorted(sequence_map.keys()) == sorted(self.atom_sequences[chain_id].ids()))
            # Check that all SEQRES residues in the mapping exist and that the mapping is injective
            rng = set(sequence_map.values())
            seqres_residue_ids = set(self.seqres_sequences[chain_id].ids())
            assert(rng.intersection(seqres_residue_ids) == rng)
            assert(len(rng) == len(list(sequence_map.values())))
        # seqres_to_uniparc_sequence_maps
        for chain_id, sequence_map in self.seqres_to_uniparc_sequence_maps.items():
            # Check that acceptable_sequence_percentage_match% of all SEQRES residues have a mapping (there may have been
            # insertions or bad mismatches i.e. low BLOSUM62/PAM250 scores). I chose 80% arbitrarily but this can be overridden
            # with the acceptable_sequence_percentage_match argument to the constructor.
            if self.sequence_types[chain_id] == 'Protein' or self.sequence_types[chain_id] == 'Protein skeleton':
                if sequence_map:
                    mapped_SEQRES_residues = set(sequence_map.keys())
                    all_SEQRES_residues = set(self.seqres_sequences[chain_id].ids())
                    # Only enforce the threshold on chains of a meaningful length (>= 20 residues).
                    if len(all_SEQRES_residues) >= 20:
                        match_percentage = 100.0 * (float(len(mapped_SEQRES_residues))/float((len(all_SEQRES_residues))))
                        if not (self.acceptable_sequence_percentage_match <= match_percentage <= 100.0):
                            if not set(list(str(self.seqres_sequences[chain_id]))) == set(['X']):
                                # Skip cases where all residues are unknown e.g. 1DEQ, chain M
                                raise Exception("Chain %s in %s only had a match percentage of %0.2f%%" % (chain_id, self.pdb_id, match_percentage))
            # Check that all UniParc residues in the mapping exist and that the mapping is injective
            if self.pdb_chain_to_uniparc_chain_mapping.get(chain_id):
                # The map values are (UniParc ID, residue index) pairs; compare against the residue indices.
                rng = set([v[1] for v in list(sequence_map.values())])
                uniparc_chain_id = self.pdb_chain_to_uniparc_chain_mapping[chain_id]
                uniparc_residue_ids = set(self.uniparc_sequences[uniparc_chain_id].ids())
                assert(rng.intersection(uniparc_residue_ids) == rng)
                if len(rng) != len(list(sequence_map.values())):
                    # Not injective: find the first duplicated range element and dump all three
                    # seqres->uniparc maps (merged, Clustal, SIFTS) to aid debugging.
                    rng_vals = set()
                    for x in list(sequence_map.values()):
                        if x[1] in rng_vals:
                            err_msg = ['The SEQRES to UniParc map is not injective for %s, chain %s; the element %s occurs more than once in the range.' % (self.pdb_id, chain_id, str(x))]
                            err_msg.append(colortext.make('The seqres_to_uniparc_sequence_maps mapping is:', color = 'green'))
                            for k, v in sequence_map.map.items():
                                err_msg.append(' %s -> %s' % (str(k).ljust(7), str(v).ljust(20)))
                            err_msg.append(colortext.make('The clustal_seqres_to_uniparc_sequence_maps mapping is:', color = 'green'))
                            for k, v in self.clustal_seqres_to_uniparc_sequence_maps[chain_id].map.items():
                                err_msg.append(' %s -> %s' % (str(k).ljust(7), str(v).ljust(20)))
                            err_msg.append(colortext.make('The sifts_seqres_to_uniparc_sequence_maps mapping is:', color = 'green'))
                            for k, v in self.sifts_seqres_to_uniparc_sequence_maps[chain_id].map.items():
                                err_msg.append(' %s -> %s' % (str(k).ljust(7), str(v).ljust(20)))
                            raise Exception('\n'.join(err_msg))
                        rng_vals.add(x[1])
def _validate_id_types(self):
'''Check that the ID types are integers for Rosetta, SEQRES, and UniParc sequences and 6-character PDB IDs for the ATOM sequences.'''
for sequences in [self.uniparc_sequences, self.fasta_sequences, self.seqres_sequences, self.rosetta_sequences]:
for chain_id, sequence in sequences.items():
sequence_id_types = set(map(type, sequence.ids()))
if sequence_id_types:
assert(len(sequence_id_types) == 1)
assert(sequence_id_types.pop() == int)
for chain_id, sequence in self.atom_sequences.items():
sequence_id_types = set(map(type, sequence.ids()))
assert(len(sequence_id_types) == 1)
sequence_id_type = sequence_id_types.pop()
assert(sequence_id_type == bytes or sequence_id_type == str)
    def _validate_residue_types(self):
        '''Make sure all the residue types map through translation.

        For every mapped pair of residues (Rosetta->ATOM, ATOM->SEQRES, and the exact-match subset
        of SEQRES->UniParc), assert that both sides have the same one-letter residue type.'''
        for chain_id, sequence_map in self.rosetta_to_atom_sequence_maps.items():
            rosetta_sequence = self.rosetta_sequences[chain_id]
            atom_sequence = self.atom_sequences[chain_id]
            for rosetta_id, atom_id, _ in sequence_map:
                assert(rosetta_sequence[rosetta_id].ResidueAA == atom_sequence[atom_id].ResidueAA)
        for chain_id, sequence_map in self.atom_to_seqres_sequence_maps.items():
            atom_sequence = self.atom_sequences[chain_id]
            seqres_sequence = self.seqres_sequences[chain_id]
            for atom_id, seqres_id, _ in sorted(sequence_map):
                assert(atom_sequence[atom_id].ResidueAA == seqres_sequence[seqres_id].ResidueAA)
        for chain_id, sequence_map in self.seqres_to_uniparc_sequence_maps.items():
            # Only chains that actually matched a UniParc entry can be checked.
            if self.pdb_chain_to_uniparc_chain_mapping.get(chain_id):
                seqres_sequence = self.seqres_sequences[chain_id]
                uniparc_sequence = self.uniparc_sequences[self.pdb_chain_to_uniparc_chain_mapping[chain_id]]
                for seqres_id, uniparc_id_resid_pair, substitution_match in sequence_map:
                    # Map values are (UniParc ID, residue index) pairs.
                    uniparc_id = uniparc_id_resid_pair[1]
                    # Some of the matches may not be identical but all the '*' Clustal Omega matches should be identical
                    if substitution_match and substitution_match.clustal == 1:
                        assert(seqres_sequence[seqres_id].ResidueAA == uniparc_sequence[uniparc_id].ResidueAA)
### Private Sequence and SequenceMap collection functions ###
def _create_inverse_maps(self):
'''Create the inverse mappings (UniParc -> SEQRES -> ATOM -> Rosetta).'''
# We have already determined that the inverse maps are well-defined (the normal maps are injective). The inverse maps will be partial maps in general.
self.atom_to_rosetta_sequence_maps = {}
for chain_id, sequence_map in self.rosetta_to_atom_sequence_maps.items():
s = SequenceMap()
for k, v, substitution_match in sequence_map:
s.add(v, k, substitution_match)
self.atom_to_rosetta_sequence_maps[chain_id] = s
self.seqres_to_atom_sequence_maps = {}
for chain_id, sequence_map in self.atom_to_seqres_sequence_maps.items():
s = SequenceMap()
for k, v, substitution_match in sequence_map:
s.add(v, k, substitution_match)
self.seqres_to_atom_sequence_maps[chain_id] = s
# This map uses PDB chain IDs as PDB chains may map to zero or one UniParc IDs whereas UniParc IDs may map to many PDB chains
self.uniparc_to_seqres_sequence_maps = {}
for chain_id, sequence_map in self.seqres_to_uniparc_sequence_maps.items():
s = UniParcPDBSequenceMap()
for k, v, substitution_match in sequence_map:
s.add(v, k, substitution_match)
self.uniparc_to_seqres_sequence_maps[chain_id] = s
def _create_sequence_maps(self):
'''Get all of the SequenceMaps - Rosetta->ATOM, ATOM->SEQRES/FASTA, SEQRES->UniParc.'''
if self.sifts:
self.sifts_atom_to_seqres_sequence_maps = self.sifts.atom_to_seqres_sequence_maps
self.sifts_seqres_to_uniparc_sequence_maps = self.sifts.seqres_to_uniparc_sequence_maps
self.sifts_atom_to_uniparc_sequence_maps = self.sifts.atom_to_uniparc_sequence_maps
if self.pdb_id in pdbs_with_do_not_use_SIFTS_for_these_chains:
for chain_id in list(self.sifts_atom_to_seqres_sequence_maps.keys()) + list(self.sifts_seqres_to_uniparc_sequence_maps.keys()) + list(self.sifts_atom_to_uniparc_sequence_maps.keys()):
if (self.pdb_id, chain_id) in do_not_use_SIFTS_for_these_chains:
self.sifts_atom_to_seqres_sequence_maps[chain_id] = SequenceMap()
self.sifts_seqres_to_uniparc_sequence_maps = SequenceMap()
self.sifts_atom_to_uniparc_sequence_maps = SequenceMap()
if self.pdb_to_rosetta_residue_map_error:
self.rosetta_to_atom_sequence_maps = {}
for c in list(self.atom_sequences.keys()):
self.rosetta_to_atom_sequence_maps[c] = SequenceMap()
else:
self.rosetta_to_atom_sequence_maps = self.pdb.rosetta_to_atom_sequence_maps
# If we removed atoms from the PDB file, we need to remove them from the maps so that our validations hold later on
self.pdbml_atom_to_seqres_sequence_maps = self.pdbml.atom_to_seqres_sequence_maps
if self.pdb_id in ROSETTA_HACKS_residues_to_remove:
for residue_to_remove in ROSETTA_HACKS_residues_to_remove[self.pdb_id]:
chain_id = residue_to_remove[0]
self.pdbml_atom_to_seqres_sequence_maps[chain_id].remove(residue_to_remove)
#if self.sifts:
# self.sifts_atom_to_seqres_sequence_maps[chain_id].remove(residue_to_remove)
if self.pdb_id not in do_not_use_the_sequence_aligner:
self.clustal_seqres_to_uniparc_sequence_maps = self.PDB_UniParc_SA.seqres_to_uniparc_sequence_maps
    def _merge_sifts_maps(self):
        ''' Make sure that the pdbml_atom_to_seqres_sequence_maps and clustal_seqres_to_uniparc_sequence_maps agree with SIFTS and merge the maps.
            SIFTS may have more entries since we discard PDB residues which break Rosetta.
            SIFTS may have less entries for some cases e.g. 1AR1, chain C where SIFTS does not map ATOMs 99-118.
            SIFTS does not seem to contain ATOM to SEQRES mappings for (at least some) DNA chains e.g. 1APL, chain A
            Because of these cases, we just assert that the overlap agrees so that we can perform a gluing of maps.'''
        if self.pdb_id in do_not_use_the_sequence_aligner:
            # Clustal was skipped for this entry: use the SIFTS maps verbatim.
            assert(self.sifts)
            self.atom_to_seqres_sequence_maps = self.sifts_atom_to_seqres_sequence_maps
            self.seqres_to_uniparc_sequence_maps = self.sifts_seqres_to_uniparc_sequence_maps
        elif self.sifts:
            # Both sources exist: cross-validate on the overlap and glue the maps together.
            self.atom_to_seqres_sequence_maps = {}
            self.seqres_to_uniparc_sequence_maps = {}
            for c, seqmap in sorted(self.pdbml_atom_to_seqres_sequence_maps.items()):
                if self.sequence_types[c] == 'Protein' or self.sequence_types[c] == 'Protein skeleton':
                    try:
                        if self.sifts_atom_to_seqres_sequence_maps.get(c):
                            # The PDBML and SIFTS maps must agree on their common domain before merging.
                            assert(self.pdbml_atom_to_seqres_sequence_maps[c].matches(self.sifts_atom_to_seqres_sequence_maps[c]))
                            self.atom_to_seqres_sequence_maps[c] = self.pdbml_atom_to_seqres_sequence_maps[c] + self.sifts_atom_to_seqres_sequence_maps[c]
                        else:
                            self.atom_to_seqres_sequence_maps[c] = self.pdbml_atom_to_seqres_sequence_maps[c]
                    except Exception as e:
                        raise colortext.Exception("Mapping cross-validation failed checking atom to seqres sequence maps between PDBML and SIFTS in %s, chain %s: %s" % (self.pdb_id, c, str(e)))
                else:
                    # Non-protein chains (e.g. DNA) take the PDBML map as-is.
                    self.atom_to_seqres_sequence_maps[c] = seqmap
            for c, seqmap in sorted(self.clustal_seqres_to_uniparc_sequence_maps.items()):
                if self.sequence_types[c] == 'Protein' or self.sequence_types[c] == 'Protein skeleton':
                    if (self.pdb_id, c) in use_SIFTS_match_for_seqres_sequence:
                        # This chain is registered as one where the SIFTS match should replace the
                        # Clustal match; verify compatibility first, then adopt the SIFTS map.
                        #assert(seqres_sequence[seqres_id].ResidueAA == uniparc_sequence[uniparc_id].ResidueAA)
                        if (self.pdb_id, c) not in known_bad_clustal_to_sifts_mappings:
                            # Flag cases for manual inspection
                            assert(list(self.clustal_seqres_to_uniparc_sequence_maps[c].keys()) == list(self.sifts_seqres_to_uniparc_sequence_maps[c].keys()))
                        for k in list(self.clustal_seqres_to_uniparc_sequence_maps[c].keys()):
                            v_1 = self.clustal_seqres_to_uniparc_sequence_maps[c][k]
                            v_2 = self.sifts_seqres_to_uniparc_sequence_maps[c][k]
                            if (self.pdb_id, c) not in known_bad_clustal_to_sifts_mappings and v_2:
                                # Make sure the UniParc IDs agree
                                assert(v_1[0] == v_2[0])
                            if (self.pdb_id, c) not in known_bad_clustal_to_sifts_mappings:
                                # Make sure the residue types agree
                                assert(self.uniparc_sequences[v_1[0]][v_1[1]].ResidueAA == self.uniparc_sequences[v_1[0]][v_2[1]].ResidueAA)
                            # Copy the substitution scores over. Since the residue types agree, this is valid
                            self.sifts_seqres_to_uniparc_sequence_maps[c].substitution_scores[k] = self.clustal_seqres_to_uniparc_sequence_maps[c].substitution_scores[k]
                        self.clustal_seqres_to_uniparc_sequence_maps[c] = self.sifts_seqres_to_uniparc_sequence_maps[c]
                    try:
                        if self.sifts_seqres_to_uniparc_sequence_maps.get(c):
                            if not self.clustal_seqres_to_uniparc_sequence_maps[c].matches(self.sifts_seqres_to_uniparc_sequence_maps[c]):
                                # NOTE(review): mismatched_keys is computed but never used below -- presumably
                                # intended for the exception message. Confirm before changing.
                                mismatched_keys = self.clustal_seqres_to_uniparc_sequence_maps[c].get_mismatches(self.sifts_seqres_to_uniparc_sequence_maps[c])
                                raise Exception("self.clustal_seqres_to_uniparc_sequence_maps[c].matches(self.sifts_seqres_to_uniparc_sequence_maps[c])")
                            self.seqres_to_uniparc_sequence_maps[c] = self.clustal_seqres_to_uniparc_sequence_maps[c] + self.sifts_seqres_to_uniparc_sequence_maps[c]
                        else:
                            self.seqres_to_uniparc_sequence_maps[c] = self.clustal_seqres_to_uniparc_sequence_maps[c]
                    except Exception as e:
                        colortext.warning(traceback.format_exc())
                        colortext.error(str(e))
                        raise colortext.Exception("Mapping cross-validation failed checking atom to seqres sequence maps between Clustal and SIFTS in %s, chain %s." % (self.pdb_id, c))
                else:
                    # NOTE(review): this assigns the chain's own map back to itself (a no-op) and leaves
                    # self.seqres_to_uniparc_sequence_maps without an entry for non-protein chains,
                    # unlike the first loop's else branch which populates the merged map. Possibly this
                    # was meant to be self.seqres_to_uniparc_sequence_maps[c] = seqmap -- confirm.
                    self.clustal_seqres_to_uniparc_sequence_maps[c] = seqmap
        else:
            # No SIFTS data available: fall back entirely to the PDBML and Clustal maps.
            self.atom_to_seqres_sequence_maps = self.pdbml_atom_to_seqres_sequence_maps
            self.seqres_to_uniparc_sequence_maps = self.clustal_seqres_to_uniparc_sequence_maps
def _prune_maps_to_sequences(self):
''' When we merge the SIFTS maps, we can extend the sequence maps such that they have elements in their domain that we removed
from the sequence e.g. 1A2P, residue 'B 3 ' is removed because Rosetta barfs on it. Here, we prune the maps so that their
domains do not have elements that were removed from sequences.'''
for c, seq in self.atom_sequences.items():
res_ids = [r[0] for r in seq]
for_removal = []
for k, _, _ in self.atom_to_seqres_sequence_maps[c]:
if k not in res_ids:
for_removal.append(k)
for res_id in for_removal:
self.atom_to_seqres_sequence_maps[c].remove(res_id)
#print(self.fasta_sequences)
#print(self.seqres_sequences)
#self.atom_to_seqres_sequence_maps = None
#self.seqres_to_uniparc_sequence_maps = None
#self.pdbml_atom_to_seqres_sequence_maps = None
#self.clustal_seqres_to_uniparc_sequence_maps = None
#self.sifts_atom_to_seqres_sequence_maps = None
#self.sifts_seqres_to_uniparc_sequence_maps = None
#self.sifts_atom_to_uniparc_sequence_maps = None
    def _create_sequences(self):
        '''Get all of the Sequences - Rosetta, ATOM, SEQRES, FASTA, UniParc.

        Also determines which UniParc entry each PDB chain maps to (pdb_chain_to_uniparc_chain_mapping)
        and propagates the SEQRES chain types onto the UniParc and FASTA sequences.'''
        # Create the Rosetta sequences and the maps from the Rosetta sequences to the ATOM sequences
        try:
            self.pdb.construct_pdb_to_rosetta_residue_map(self.rosetta_scripts_path, rosetta_database_path = self.rosetta_database_path, cache_dir = self.cache_dir)
        except PDBMissingMainchainAtomsException:
            # Remember the failure; empty Rosetta sequences/maps are substituted below and in
            # _create_sequence_maps.
            self.pdb_to_rosetta_residue_map_error = True
        # Get all the Sequences
        if self.pdb_id not in do_not_use_the_sequence_aligner:
            self.uniparc_sequences = self.PDB_UniParc_SA.uniparc_sequences
        else:
            self.uniparc_sequences = self.sifts.get_uniparc_sequences()
        self.fasta_sequences = self.FASTA.get_sequences(self.pdb_id)
        self.seqres_sequences = self.pdb.seqres_sequences
        self.atom_sequences = self.pdb.atom_sequences
        if self.pdb_to_rosetta_residue_map_error:
            self.rosetta_sequences = {}
            for c in list(self.atom_sequences.keys()):
                self.rosetta_sequences[c] = Sequence()
        else:
            self.rosetta_sequences = self.pdb.rosetta_sequences
        # Update the chain types for the UniParc sequences
        uniparc_pdb_chain_mapping = {}
        if self.pdb_id not in do_not_use_the_sequence_aligner:
            # Invert the Clustal matches: group the PDB chains by the UniParc entry they matched.
            for pdb_chain_id, matches in self.PDB_UniParc_SA.clustal_matches.items():
                if matches:
                    # we are not guaranteed to have a match e.g. the short chain J in 1A2C, chimeras, etc.
                    uniparc_chain_id = list(matches.keys())[0]
                    assert(len(matches) == 1)
                    uniparc_pdb_chain_mapping[uniparc_chain_id] = uniparc_pdb_chain_mapping.get(uniparc_chain_id, [])
                    uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id)
        else:
            # No Clustal data for this entry: derive the grouping from the SIFTS chain map instead.
            for pdb_chain_id, uniparc_chain_ids in self.sifts.get_pdb_chain_to_uniparc_id_map().items():
                for uniparc_chain_id in uniparc_chain_ids:
                    uniparc_pdb_chain_mapping[uniparc_chain_id] = uniparc_pdb_chain_mapping.get(uniparc_chain_id, [])
                    uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id)
        for uniparc_chain_id, pdb_chain_ids in uniparc_pdb_chain_mapping.items():
            # All PDB chains matching one UniParc entry must agree on their sequence type.
            sequence_type = set([self.seqres_sequences[p].sequence_type for p in pdb_chain_ids])
            assert(len(sequence_type) == 1)
            sequence_type = sequence_type.pop()
            # The type must not have been set before; each UniParc entry is typed exactly once.
            assert(self.uniparc_sequences[uniparc_chain_id].sequence_type == None)
            self.uniparc_sequences[uniparc_chain_id].set_type(sequence_type)
            for p in pdb_chain_ids:
                self.pdb_chain_to_uniparc_chain_mapping[p] = uniparc_chain_id
        # Update the chain types for the FASTA sequences
        for chain_id, sequence in self.seqres_sequences.items():
            self.fasta_sequences[chain_id].set_type(sequence.sequence_type)
### Private object creation functions ###
    def _create_objects(self, chains_to_keep, starting_clustal_cut_off, min_clustal_cut_off, strip_HETATMS):
        '''Create the FASTA, PDB, PDBML, SIFTS and PDBUniParcSequenceAligner source objects.

        :param chains_to_keep: If non-empty, strip the PDB down to just these chains.
        :param starting_clustal_cut_off: Highest Clustal cut-off (percent) to try first.
        :param min_clustal_cut_off: Lowest Clustal cut-off (percent) to fall back to.
        :param strip_HETATMS: If True, remove HETATM records from the PDB object.
        :raises colortext.Exception: if any of the source objects fails to build or the
            chain-to-UniParc mapping is ambiguous at every cut-off level.
        '''
        pdb_id = self.pdb_id
        assert(20 <= min_clustal_cut_off <= starting_clustal_cut_off <= 100)
        # Create the FASTA object
        if not self.silent:
            colortext.message("Creating the FASTA object.")
        try:
            if self.bio_cache:
                self.FASTA = self.bio_cache.get_fasta_object(pdb_id)
            else:
                self.FASTA = FASTA.retrieve(pdb_id, cache_dir = self.cache_dir)
        except:
            raise colortext.Exception("Relatrix construction failed creating the FASTA object for %s.\n%s" % (pdb_id, traceback.format_exc()))
        # Create the PDB object
        if not self.silent:
            colortext.message("Creating the PDB object.")
        try:
            if self.bio_cache:
                self.pdb = self.bio_cache.get_pdb_object(pdb_id)
            else:
                self.pdb = PDB.retrieve(pdb_id, cache_dir = self.cache_dir)
            if chains_to_keep:
                self.pdb.strip_to_chains(chains_to_keep)
            if strip_HETATMS:
                self.pdb.strip_HETATMs()
        except:
            raise colortext.Exception("Relatrix construction failed creating the PDB object for %s.\n%s" % (pdb_id, traceback.format_exc()))
        # Copy PDB properties
        if self.pdb.deprecated:
            self.replacement_pdb_id = self.pdb.replacement_pdb_id
        self.sequence_types = self.pdb.chain_types
        # todo: benchmark why PDBML creation is slow for some files e.g. 3ZKB.xml (lots of repeated chains)
        # Create the PDBML object
        if not self.silent:
            colortext.message("Creating the PDBML object.")
        try:
            if self.bio_cache:
                self.pdbml = self.bio_cache.get_pdbml_object(pdb_id)
            else:
                self.pdbml = PDBML.retrieve(pdb_id, cache_dir = self.cache_dir, bio_cache = self.bio_cache)
        except:
            raise colortext.Exception("Relatrix construction failed creating the PDBML object for %s.\n%s" % (pdb_id, traceback.format_exc()))
        # Copy PDBML properties
        if self.pdbml.deprecated:
            # The PDB and PDBML records must agree on the replacement entry if both flag deprecation.
            if self.replacement_pdb_id:
                assert(self.replacement_pdb_id == self.pdbml.replacement_pdb_id)
            else:
                self.replacement_pdb_id = self.pdbml.replacement_pdb_id
        # Create the SIFTS object. Failures here are non-fatal: self.sifts stays None and the
        # rest of the pipeline falls back to the Clustal-derived maps alone.
        try:
            if self.bio_cache:
                self.sifts = self.bio_cache.get_sifts_object(pdb_id, acceptable_sequence_percentage_match = self.acceptable_sifts_sequence_percentage_match)
            else:
                self.sifts = SIFTS.retrieve(pdb_id, cache_dir = self.cache_dir, acceptable_sequence_percentage_match = self.acceptable_sifts_sequence_percentage_match)
        except MissingSIFTSRecord:
            colortext.warning("No SIFTS entry was found for %s." % pdb_id)
        except BadSIFTSMapping:
            colortext.warning("The SIFTS mapping for %s was considered a bad mapping at the time of writing." % pdb_id)
        except NoSIFTSPDBUniParcMapping:
            colortext.warning("The PDB file %s has a known bad SIFTS mapping at the time of writing." % pdb_id)
        # Create the PDBUniParcSequenceAligner object. We try the best alignment at first (100%) and then fall back to more relaxed alignments down to min_clustal_cut_off percent.
        if not self.silent:
            colortext.message("Creating the PDB to UniParc SequenceAligner object.")
        cut_off = 0
        try:
            matched_chains = set()
            matched_all_chains = False
            self.PDB_UniParc_SA = None
            if self.pdb_id not in do_not_use_the_sequence_aligner:
                cut_off = None
                # Walk the cut-off down one percent at a time until every protein chain matches.
                for x in range(starting_clustal_cut_off, min_clustal_cut_off - 1, -1):
                    cut_off = x
                    if not self.silent:
                        colortext.warning("\tTrying to align sequences with a cut-off of %d%%." % cut_off)
                    if not self.PDB_UniParc_SA:
                        # Initialize the PDBUniParcSequenceAligner the first time through
                        self.PDB_UniParc_SA = PDBUniParcSequenceAligner(pdb_id, cache_dir = self.cache_dir, cut_off = cut_off, sequence_types = self.sequence_types, replacement_pdb_id = self.replacement_pdb_id, added_uniprot_ACs = self.pdb.get_UniProt_ACs())
                    else:
                        # We have already retrieved the UniParc entries. We just need to try the mapping again. This saves
                        # lots of time for entries with large numbers of UniProt entries e.g. 1HIO even if disk caching is used.
                        # We also stop trying to match a chain once a match has been found in a previous iteration.
                        # This speeds up the matching in multiple ways. First, we do not waste time by recreating the same UniParcEntry.
                        # Next, we do not waste time rematching chains we previously matched. Finally, we only match equivalence
                        # classes of chains where the equivalence is defined as having an identical sequence.
                        # For example we sped up:
                        #    matching 1YGV (3 protein chains, 2 identical) starting at 100% by 67% (down from 86s to 28.5s with a match at 85%); (this case may be worth profiling)
                        #       speed ups at each stage; not recreating PDB_UniParc_SA (due to low match%), only matching chains once (as A, C are found at 95%), and skipping sequence-equivalent chains (as A and C have the same sequence)
                        #    matching 1HIO (4 protein chains, all unique) starting at 100% by 83% down from 33s to 5.5s (match at 95%);
                        #       main speed-up due to not recreating PDB_UniParc_SA
                        #    matching 1H38 (4 identical protein chains) starting at 100% by 5% down from 57s to 54s (match at 100%);
                        #       no real speed-up since the match is at 100%
                        #    matching the extreme case 487D (7 protein chains, all unique) starting at 100% by 94% down from 1811s to 116s (with a min_clustal_cut_off of 71%). A lot of the time in this case is in constructing the UniParcEntry object.
                        #       main speed-up due to not recreating PDB_UniParc_SA
                        #    matching 3ZKB (16 protein chains, all identical) starting at 100% by 90% (down from 31s to 3s with a match at 98%); (this case may be worth profiling)
                        #       a lot of time was spent in PDBML creation (another optimization problem) so I only profiled this PDB_UniParc_SA section
                        #       minor speed-up (31s to 27s) by not recreating PDB_UniParc_SA (match at 98%), main speed-up due to skipping sequence-equivalent chain (we only have to match one sequence)
                        self.PDB_UniParc_SA.realign(cut_off, chains_to_skip = matched_chains)
                    # We only care about protein chain matches so early out as soon as we have them all matched
                    protein_chain_matches = {}
                    for _c, _st in self.sequence_types.items():
                        if _st == 'Protein' or _st == 'Protein skeleton':
                            protein_chain_matches[_c] = self.PDB_UniParc_SA.clustal_matches[_c]
                            if protein_chain_matches[_c]:
                                matched_chains.add(_c)
                    num_matches_per_chain = set(map(len, list(protein_chain_matches.values())))
                    if len(num_matches_per_chain) == 1 and num_matches_per_chain.pop() == 1:
                        # We have exactly one match per protein chain. Early out.
                        if not self.silent:
                            colortext.message("\tSuccessful match with a cut-off of %d%%." % cut_off)
                        matched_all_chains = True
                        self.alignment_cutoff = cut_off
                        break
                    else:
                        # We have ambiguity - more than one match per protein chain. Exception.
                        if [n for n in num_matches_per_chain if n > 1]:
                            raise MultipleAlignmentException("Too many matches found at cut-off %d." % cut_off)
                if not matched_all_chains:
                    # Report which chains matched and record a partial alignment cut-off if we at
                    # least matched some chains uniquely.
                    protein_chains = [c for c in self.sequence_types if self.sequence_types[c].startswith('Protein')]
                    if not self.silent:
                        colortext.warning('\nNote: Not all chains were matched:')
                        for c in protein_chains:
                            if protein_chain_matches.get(c):
                                colortext.message(' %s matched %s' % (c, protein_chain_matches[c]))
                            else:
                                colortext.warning(' %s was not matched' % c)
                        print('')
                    num_matches_per_chain = set(map(len, list(self.PDB_UniParc_SA.clustal_matches.values())))
                    if num_matches_per_chain == set([0, 1]):
                        # We got matches but are missing chains
                        self.alignment_cutoff = cut_off
        except MultipleAlignmentException as e:
            # todo: this will probably fail with DNA or RNA so do not include those in the alignment
            raise colortext.Exception("Relatrix construction failed creating the PDBUniParcSequenceAligner object for %s. The cut-off level reached %d%% without finding a match for all chains but at that level, the mapping from chains to UniParc IDs was not injective.\n%s" % (pdb_id, cut_off, str(e)))
        except:
            raise colortext.Exception("Relatrix construction failed creating the PDBUniParcSequenceAligner object for %s.\n%s" % (pdb_id, traceback.format_exc()))
| {
"content_hash": "6b7f8d3b44a60b7f87e9151daee7b681",
"timestamp": "",
"source": "github",
"line_count": 711,
"max_line_length": 333,
"avg_line_length": 61.77074542897328,
"alnum_prop": 0.6137890206971925,
"repo_name": "Kortemme-Lab/klab",
"id": "2bf710d3158af54869b62d545a2912397070148e",
"size": "43955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "klab/bio/relatrix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "62782"
},
{
"name": "Python",
"bytes": "2074156"
},
{
"name": "R",
"bytes": "4487"
},
{
"name": "Shell",
"bytes": "4382"
},
{
"name": "TeX",
"bytes": "2107"
}
],
"symlink_target": ""
} |
import unittest
import os
from pprint import pprint
from docx import fullyQualifiedName
from docx import DocxProcessor
from document import Document
from fragments import *
class DocxTests(unittest.TestCase):
    """Tests for the DocxProcessor .docx import pipeline.

    Exercises XML name qualification, paragraph style mapping, formatting
    fragment extraction and text normalisation (trimming, in-paragraph line
    breaks, line blocks, tables of contents, odd run formatting).
    """

    def _samplePath(self, fileName):
        # Fixtures live under ./samples/docx relative to the directory the
        # test runner is started from.
        return os.path.join(os.getcwd(), 'samples', 'docx', fileName)

    def _document(self, fileName):
        # Run a sample file through the processor and return the resulting
        # Document model.
        return DocxProcessor(self._samplePath(fileName)).document()

    def _expectedDocument(self, name):
        # Expected outcomes are serialized Document files stored next to
        # the samples.
        path = os.path.join(os.getcwd(), 'samples', 'expected outcome',
                            'docx', name)
        return Document().initWithFile(path)

    def _assertMatchesExpected(self, sampleFileName, expectedName):
        # Compare both the plain-text content and the formatting fragments
        # against the stored expectation.
        document = self._document(sampleFileName)
        expected = self._expectedDocument(expectedName)
        self.assertEqual(expected.content(), document.content())
        self.assertEqual(expected.formatting(), document.formatting())

    def fragment(self, lineNumber, charRange, text, attributes):
        """Build a fragment-identifier fixture.

        A line-chunk fragment is produced when *charRange* is given,
        otherwise a whole-line fragment spanning just *lineNumber*.
        """
        # NOTE: parameter renamed from `range` to avoid shadowing the
        # builtin; every call site in this class passes it positionally.
        if charRange:
            return LineChunkFragmentIdentifier().initWithDetails(
                FragmentIdentifier.getNextIdentifier(), lineNumber,
                charRange, text, attributes)
        return WholeLineFragmentIdentifier().initWithDetails(
            FragmentIdentifier.getNextIdentifier(),
            (lineNumber, lineNumber), text, attributes)

    def test_fullyQualifiedName(self):
        self.assertEqual(
            fullyQualifiedName('w', 'pPr'),
            '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}pPr')

    def test_paragraphStyleReading(self):
        # Reach into the raw WordprocessingML tree and check the style
        # reference of the first paragraph.
        docxProcessor = DocxProcessor(
            self._samplePath('02_paragraph_styles.docx'))
        document = docxProcessor._word_document
        paragraphs = [element for element in document.iter()
                      if element.tag == fullyQualifiedName('w', 'p')]
        paragraph = paragraphs[0]
        pStyle = paragraph.find(
            './' + fullyQualifiedName('w', 'pPr') +
            '/' + fullyQualifiedName('w', 'pStyle'))
        style = pStyle.get(fullyQualifiedName('w', 'val'))
        self.assertEqual(style, 'style1')

    def test_paragraphStyleMapping(self):
        docxProcessor = DocxProcessor(
            self._samplePath('02_paragraph_styles.docx'))
        styleMapping = docxProcessor.paragraphStyleMapping()
        self.assertEqual(len(styleMapping), 9)
        self.assertIn('style0', styleMapping.keys())
        self.assertEqual('Default Style', styleMapping['style0'])
        self.assertIn('style1', styleMapping.keys())
        self.assertEqual('Heading 1', styleMapping['style1'])

    def test_formattingFragmentSorting(self):
        document = self._document('03_text_formatting.docx')
        expectedFormatting = [
            self.fragment(1, None, "I can do that\n", {'style': 'Heading 1'}),
            self.fragment(3, None, "I gotta piss\n", {'style': 'Heading 1'}),
            self.fragment(4, (133, 158), "Do you believe that shit?", {'formattings': ['underline']}),
            self.fragment(5, None, "We happy?\n", {'style': 'Heading 1'}),
            self.fragment(6, (118, 269), "Do you see a little Asian child with a blank expression on his face sitting outside on a mechanical helicopter that shakes when you put quarters in it?", {'formattings': ['bold']}),
            self.fragment(7, None, "Uuummmm, this is a tasty burger!\n", {'style': 'Heading 1'}),
            self.fragment(8, (335, 347), "motherfucker", {'formattings': ['italic']}),
            self.fragment(8, (443, 455), "motherfucker", {'formattings': ['italic']}),
            self.fragment(9, None, "I'm serious as a heart attack\n", {'style': 'Heading 1'}),
            self.fragment(10, (182, 194), "Motherfucker", {'formattings': ['italic']})
        ]
        self.assertIsInstance(document.formatting()[0], FragmentIdentifier)
        self.assertEqual(expectedFormatting, document.formatting())

    def test_textTrimming(self):
        document = self._document('07_trim.docx')
        self.assertEqual('Paragraph with trailing and closing spaces',
                         document.content()[0])
        self.assertEqual('Paragraph with trailing and closing spaces',
                         document.formatting()[0].text)

    def test_textWithLineBreak(self):
        document = self._document('06_new_line_same_paragraph.docx')
        self.assertEqual('Title', document.content()[0])
        self.assertEqual('Regular paragraph.\rNew line on the same paragraph.',
                         document.content()[1])
        self.assertEqual('New paragraph.', document.content()[2])

    def test_textWithLineBlocks(self):
        self._assertMatchesExpected('08_line_blocks.docx', 'test_08')

    def test_toc(self):
        self._assertMatchesExpected('09_toc.docx', 'test_09')

    def test_textWithLineBreaks(self):
        self._assertMatchesExpected('10_line-break.docx', 'test_10')

    def test_textWithWeirdFormatting(self):
        self._assertMatchesExpected('11_weird_formatting.docx', 'test_11')
| {
"content_hash": "a0c179521074eaa26277c2223b990395",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 214,
"avg_line_length": 46.41269841269841,
"alnum_prop": 0.7323871409028728,
"repo_name": "simplicissimus/metadata-processor",
"id": "1427395a157e7f81797eacf94665ef672ad10af9",
"size": "6393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_docx.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "352"
},
{
"name": "Python",
"bytes": "122546"
}
],
"symlink_target": ""
} |
from ..libs.pluginbase import PluginBase


class Plugin(PluginBase):
    """GLM screen plugin that displays a single static image.

    Loads the image named 'background' (presumably resolved relative to
    ``data_dir`` — confirm against PluginBase.load_image) and shows it as
    a full-screen layer.
    """

    # NOTE(review): the original defined an __init__ that only forwarded
    # its arguments to super().__init__; the inherited constructor is
    # identical, so the override was removed.

    def _plugin_info(self):
        # Metadata consumed by the plugin loader / web interface.
        self.data_dir = "load_image"
        self.version = "0.11.0"
        self.name = "Image Loader"
        self.author = "Infected"

    def _make_layout(self):
        # A static image needs no fast refresh; 5 fps keeps CPU use low.
        self.background = self.load_image('background')
        self.screen.set_fps(5)
        self.screen.add(self.background, 'background')

    def _event_loop(self, event):
        # No interactive behaviour for a static image.
        pass

    def _start(self):
        # Nothing to initialise beyond the layout.
        pass
| {
"content_hash": "66caaf886a97f3b954ff64ac988792e4",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 55,
"avg_line_length": 25.727272727272727,
"alnum_prop": 0.5848056537102474,
"repo_name": "tomsimonart/GLM-web-interface",
"id": "bbaa296e0d65e8ea05a32075daf14685374ecd5c",
"size": "566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GLM/source/plugins/load_image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5167"
},
{
"name": "Python",
"bytes": "10171"
},
{
"name": "Shell",
"bytes": "78"
}
],
"symlink_target": ""
} |
import unittest
from defcon import Font, Glyph, Contour, Component, Anchor, Guideline, Layer
from defcon.test.testTools import getTestFontPath
class GlyphTest(unittest.TestCase):
    """Tests for defcon's Glyph object.

    Covers identifier bookkeeping, naming, unicode values, metrics
    (bounds, margins, width/height, vertical origin), contour/component/
    anchor/guideline management, component decomposition, data copying
    and geometry queries.  Most tests load the shared test font fixture.
    """

    @staticmethod
    def _contourData(glyph):
        # Snapshot of every outline point as (x, y, segmentType, name).
        # NOTE: the original recorded point.x twice, so y coordinates were
        # never actually compared; fixed here.
        return [[(point.x, point.y, point.segmentType, point.name)
                 for point in contour]
                for contour in glyph]

    def test_identifiers(self):
        """Identifiers are unique per glyph and released on removal."""
        glyph = Glyph()
        pointPen = glyph.getPointPen()
        pointPen.beginPath(identifier="contour 1")
        pointPen.addPoint((0, 0), identifier="point 1")
        pointPen.addPoint((0, 0), identifier="point 2")
        pointPen.endPath()
        pointPen.beginPath(identifier="contour 2")
        pointPen.endPath()
        pointPen.addComponent("A", (1, 1, 1, 1, 1, 1),
                              identifier="component 1")
        pointPen.addComponent("A", (1, 1, 1, 1, 1, 1),
                              identifier="component 2")
        guideline = Guideline()
        guideline.identifier = "guideline 1"
        glyph.appendGuideline(guideline)
        guideline = Guideline()
        guideline.identifier = "guideline 2"
        glyph.appendGuideline(guideline)
        self.assertEqual([contour.identifier for contour in glyph],
                         ["contour 1", "contour 2"])
        self.assertEqual([point.identifier for point in glyph[0]],
                         ["point 1", "point 2"])
        self.assertEqual(
            [component.identifier for component in glyph.components],
            ["component 1", "component 2"])
        # Reusing an existing identifier must be rejected.
        with self.assertRaises(AssertionError):
            pointPen.beginPath(identifier="contour 1")
            pointPen.endPath()
        pointPen.beginPath()
        pointPen.addPoint((0, 0))
        with self.assertRaises(AssertionError):
            pointPen.addPoint((0, 0), identifier="point 1")
        pointPen.endPath()
        with self.assertRaises(AssertionError):
            pointPen.addComponent("A", (1, 1, 1, 1, 1, 1),
                                  identifier="component 1")
        g = Guideline()
        g.identifier = "guideline 1"
        with self.assertRaises(AssertionError):
            glyph.appendGuideline(g)
        self.assertEqual(
            sorted(glyph.identifiers),
            ["component 1", "component 2", "contour 1", "contour 2",
             "guideline 1", "guideline 2", "point 1", "point 2"])
        # Removing objects releases their identifiers.
        glyph.removeContour(glyph[0])
        self.assertEqual(
            sorted(glyph.identifiers),
            ["component 1", "component 2", "contour 2",
             "guideline 1", "guideline 2"])
        glyph.removeComponent(glyph.components[0])
        self.assertEqual(
            sorted(glyph.identifiers),
            ["component 2", "contour 2", "guideline 1", "guideline 2"])
        glyph.removeGuideline(glyph.guidelines[0])
        self.assertEqual(
            sorted(glyph.identifiers),
            ["component 2", "contour 2", "guideline 2"])

    def test_name_set(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.name = "RenamedGlyph"
        self.assertEqual(glyph.name, "RenamedGlyph")
        self.assertEqual(sorted(font.keys()), ["B", "C", "RenamedGlyph"])
        # Setting the same name again must not dirty the glyph.
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.name = "A"
        self.assertFalse(glyph.dirty)

    def test_name_get(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.name, "A")

    def test_unicodes_get(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.unicodes, [65])

    def test_unicodes_set(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.unicodes = [123, 456]
        self.assertEqual(glyph.unicodes, [123, 456])
        self.assertTrue(glyph.dirty)

    def test_unicode_get(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.unicode, 65)

    def test_unicode_set(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.unicode = 123
        self.assertEqual(glyph.unicodes, [123])
        glyph.unicode = 456
        self.assertEqual(glyph.unicodes, [456])
        self.assertTrue(glyph.dirty)

    def test_bounds(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.bounds, (0, 0, 700, 700))
        glyph = font["B"]
        self.assertEqual(glyph.bounds, (0, 0, 700, 700))
        glyph = font["C"]
        self.assertEqual(glyph.bounds, (0.0, 0.0, 700.0, 700.0))

    def test_controlPointBounds(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.controlPointBounds, (0, 0, 700, 700))
        glyph = font["B"]
        self.assertEqual(glyph.controlPointBounds, (0, 0, 700, 700))
        glyph = font["C"]
        self.assertEqual(glyph.controlPointBounds, (0.0, 0.0, 700.0, 700.0))

    def test_leftMargin_get(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.leftMargin, 0)
        glyph = font["B"]
        self.assertEqual(glyph.leftMargin, 0)

    def test_leftMargin_set(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.leftMargin = 100
        self.assertEqual(glyph.leftMargin, 100)
        # Growing the left margin widens the glyph.
        self.assertEqual(glyph.width, 800)
        self.assertTrue(glyph.dirty)

    def test_rightMargin_get(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.rightMargin, 0)

    def test_rightMargin_set(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.rightMargin = 100
        self.assertEqual(glyph.rightMargin, 100)
        self.assertEqual(glyph.width, 800)
        self.assertTrue(glyph.dirty)

    def test_bottomMargin_get(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.bottomMargin, 0)
        glyph = font["B"]
        self.assertEqual(glyph.bottomMargin, 0)
        # A glyph without an outline has no margins.
        glyph = font.newGlyph("D")
        self.assertIsNone(glyph.bottomMargin)

    def test_bottomMargin_set(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.bottomMargin = 100
        self.assertEqual(glyph.bottomMargin, 100)
        self.assertEqual(glyph.height, 600)
        self.assertEqual(glyph.verticalOrigin, 500)
        self.assertTrue(glyph.dirty)
        # Once verticalOrigin is defined, setting again keeps it fixed.
        glyph.bottomMargin = 50
        self.assertEqual(glyph.bottomMargin, 50)
        self.assertEqual(glyph.height, 550)
        self.assertEqual(glyph.verticalOrigin, 500)
        self.assertTrue(glyph.dirty)
        # Setting the margin on an empty glyph is a no-op.
        glyph = font.newGlyph("D")
        glyph.dirty = False
        glyph.bottomMargin = 10
        self.assertIsNone(glyph.bottomMargin)
        self.assertEqual(glyph.height, 0)
        self.assertIsNone(glyph.verticalOrigin)
        self.assertFalse(glyph.dirty)

    def test_topMargin_get(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.topMargin, -200)
        # A glyph without an outline has no margins.
        glyph = font.newGlyph("D")
        self.assertIsNone(glyph.topMargin)

    def test_topMargin_set(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.topMargin = 100
        self.assertEqual(glyph.topMargin, 100)
        self.assertEqual(glyph.height, 800)
        self.assertEqual(glyph.verticalOrigin, 800)
        self.assertTrue(glyph.dirty)
        # Once verticalOrigin is defined, height and origin move together.
        glyph.topMargin = 50
        self.assertEqual(glyph.topMargin, 50)
        self.assertEqual(glyph.height, 750)
        self.assertEqual(glyph.verticalOrigin, 750)
        self.assertTrue(glyph.dirty)
        # Setting the margin on an empty glyph is a no-op.
        glyph = font.newGlyph("D")
        glyph.dirty = False
        glyph.topMargin = 10
        self.assertIsNone(glyph.topMargin)
        self.assertEqual(glyph.height, 0)
        self.assertIsNone(glyph.verticalOrigin)
        self.assertFalse(glyph.dirty)

    def test_width_get(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.width, 700)

    def test_width_set(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.width = 100
        self.assertEqual(glyph.width, 100)
        self.assertTrue(glyph.dirty)

    def test_height_get(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.height, 500)

    def test_height_set(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.height = 100
        self.assertEqual(glyph.height, 100)
        self.assertEqual(glyph.verticalOrigin, None)
        self.assertTrue(glyph.dirty)

    def test_markColor(self):
        font = Font()
        font.newGlyph("A")
        glyph = font["A"]
        self.assertIsNone(glyph.markColor)
        glyph.markColor = "1,0,1,0"
        self.assertEqual(glyph.markColor, "1,0,1,0")
        # Re-setting the same value keeps it.
        glyph.markColor = "1,0,1,0"
        self.assertEqual(glyph.markColor, "1,0,1,0")
        glyph.markColor = None
        self.assertIsNone(glyph.markColor)

    def test_verticalOrigin(self):
        # On an empty glyph, verticalOrigin never affects height (0).
        font = Font()
        font.newGlyph("A")
        glyph = font["A"]
        self.assertIsNone(glyph.verticalOrigin)
        self.assertEqual(glyph.height, 0)
        glyph.verticalOrigin = 1000
        self.assertEqual(glyph.verticalOrigin, 1000)
        self.assertEqual(glyph.height, 0)
        glyph.verticalOrigin = 0
        self.assertEqual(glyph.verticalOrigin, 0)
        self.assertEqual(glyph.height, 0)
        glyph.verticalOrigin = -10
        self.assertEqual(glyph.verticalOrigin, -10)
        self.assertEqual(glyph.height, 0)
        glyph.verticalOrigin = None
        self.assertIsNone(glyph.verticalOrigin)
        self.assertEqual(glyph.height, 0)
        # On a drawn glyph, height is independent of verticalOrigin too.
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertIsNone(glyph.verticalOrigin)
        self.assertEqual(glyph.height, 500)
        glyph.verticalOrigin = 1000
        self.assertEqual(glyph.verticalOrigin, 1000)
        self.assertEqual(glyph.height, 500)
        glyph.verticalOrigin = 0
        self.assertEqual(glyph.verticalOrigin, 0)
        self.assertEqual(glyph.height, 500)
        glyph.verticalOrigin = -10
        self.assertEqual(glyph.verticalOrigin, -10)
        self.assertEqual(glyph.height, 500)
        glyph.verticalOrigin = None
        self.assertIsNone(glyph.verticalOrigin)
        self.assertEqual(glyph.height, 500)

    def test_appendContour(self):
        glyph = Glyph()
        glyph.dirty = False
        contour = Contour()
        glyph.appendContour(contour)
        self.assertEqual(len(glyph), 1)
        self.assertTrue(glyph.dirty)
        self.assertEqual(contour.getParent(), glyph)

    def test_removeContour(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        contour = glyph[0]
        glyph.removeContour(contour)
        self.assertFalse(contour in glyph._contours)
        self.assertIsNone(contour.getParent())

    def test_contourIndex(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        contour = glyph[0]
        self.assertEqual(glyph.contourIndex(contour), 0)
        contour = glyph[1]
        self.assertEqual(glyph.contourIndex(contour), 1)

    def test_clearContours(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.clearContours()
        self.assertEqual(len(glyph), 0)

    def test_components(self):
        font = Font(getTestFontPath())
        glyph = font["C"]
        self.assertEqual(len(glyph.components), 2)

    def test_appendComponent(self):
        glyph = Glyph()
        glyph.dirty = False
        component = Component()
        glyph.appendComponent(component)
        self.assertEqual(len(glyph.components), 1)
        self.assertTrue(glyph.dirty)
        self.assertEqual(component.getParent(), glyph)

    def test_removeComponent(self):
        font = Font(getTestFontPath())
        glyph = font["C"]
        component = glyph.components[0]
        glyph.removeComponent(component)
        self.assertFalse(component in glyph.components)
        self.assertIsNone(component.getParent())

    def test_componentIndex(self):
        font = Font(getTestFontPath())
        glyph = font["C"]
        component = glyph.components[0]
        self.assertEqual(glyph.componentIndex(component), 0)
        component = glyph.components[1]
        self.assertEqual(glyph.componentIndex(component), 1)

    def test_clearComponents(self):
        font = Font(getTestFontPath())
        glyph = font["C"]
        glyph.clearComponents()
        self.assertEqual(len(glyph.components), 0)

    def test_decomposeComponent(self):
        """Decomposition replaces components with the base glyph outline,
        preserving identifiers from the base glyph's contours/points."""
        font = Font()
        font.newGlyph("baseGlyph")
        baseGlyph = font["baseGlyph"]
        pointPen = baseGlyph.getPointPen()
        pointPen.beginPath(identifier="contour1")
        pointPen.addPoint((0, 0), "move", identifier="point1")
        pointPen.addPoint((0, 100), "line")
        pointPen.addPoint((100, 100), "line")
        pointPen.addPoint((100, 0), "line")
        pointPen.addPoint((0, 0), "line")
        pointPen.endPath()
        font.newGlyph("referenceGlyph")
        referenceGlyph = font["referenceGlyph"]
        pointPen = referenceGlyph.getPointPen()
        pointPen.addComponent("baseGlyph", (1, 0, 0, 1, 0, 0))
        self.assertEqual(len(referenceGlyph.components), 1)
        self.assertEqual(len(referenceGlyph), 0)
        referenceGlyph.decomposeAllComponents()
        self.assertEqual(len(referenceGlyph.components), 0)
        self.assertEqual(len(referenceGlyph), 1)
        self.assertEqual(referenceGlyph[0].identifier, "contour1")
        self.assertEqual(referenceGlyph[0][0].identifier, "point1")
        pointPen.addComponent("baseGlyph", (1, 0, 0, 1, 100, 100))
        self.assertEqual(len(referenceGlyph.components), 1)
        self.assertEqual(len(referenceGlyph), 1)
        component = referenceGlyph.components[0]
        referenceGlyph.decomposeComponent(component)
        self.assertEqual(len(referenceGlyph.components), 0)
        self.assertEqual(len(referenceGlyph), 2)
        self.assertEqual(referenceGlyph[0].identifier, "contour1")
        self.assertEqual(referenceGlyph[0][0].identifier, "point1")
        # The second decomposed contour must have (fresh) identifiers;
        # merely accessing them must not raise.
        referenceGlyph[1].identifier
        referenceGlyph[1][0].identifier

    def test_decomposeComponent_nested_components(self):
        """Nested components decompose recursively, composing offsets."""
        font = Font()
        font.newGlyph("baseGlyph")
        baseGlyph = font["baseGlyph"]
        pointPen = baseGlyph.getPointPen()
        pointPen.beginPath(identifier="contour1")
        pointPen.addPoint((0, 0), "move", identifier="point1")
        pointPen.addPoint((0, 100), "line")
        pointPen.addPoint((100, 100), "line")
        pointPen.addPoint((100, 0), "line")
        pointPen.addPoint((0, 0), "line")
        pointPen.endPath()
        font.newGlyph("referenceGlyph1")
        referenceGlyph1 = font["referenceGlyph1"]
        pointPen = referenceGlyph1.getPointPen()
        pointPen.addComponent("baseGlyph", (1, 0, 0, 1, 3, 6))
        font.newGlyph("referenceGlyph2")
        referenceGlyph2 = font["referenceGlyph2"]
        pointPen = referenceGlyph2.getPointPen()
        pointPen.addComponent("referenceGlyph1", (1, 0, 0, 1, 10, 20))
        referenceGlyph2.decomposeAllComponents()
        self.assertEqual(len(referenceGlyph2.components), 0)
        # The intermediate glyph itself stays untouched.
        self.assertEqual(len(referenceGlyph1.components), 1)
        self.assertEqual(len(referenceGlyph2), 1)
        # Offsets (3, 6) and (10, 20) compose to (13, 26).
        self.assertEqual(referenceGlyph2.bounds, (13, 26, 113, 126))

    def test_anchors(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(len(glyph.anchors), 2)

    def test_appendAnchor(self):
        glyph = Glyph()
        glyph.dirty = False
        anchor = Anchor()
        glyph.appendAnchor(anchor)
        self.assertEqual(len(glyph.anchors), 1)
        self.assertTrue(glyph.dirty)
        self.assertEqual(anchor.getParent(), glyph)

    def test_removeAnchor(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        anchor = glyph.anchors[0]
        glyph.removeAnchor(anchor)
        self.assertFalse(anchor in glyph.anchors)
        self.assertIsNone(anchor.getParent())

    def test_anchorIndex(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        anchor = glyph.anchors[0]
        self.assertEqual(glyph.anchorIndex(anchor), 0)
        anchor = glyph.anchors[1]
        self.assertEqual(glyph.anchorIndex(anchor), 1)

    def test_clearAnchors(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.clearAnchors()
        self.assertEqual(len(glyph.anchors), 0)

    def test_duplicatedAnchors(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        anchor = glyph.anchors[0]
        # Appending an anchor that is already present must be rejected.
        with self.assertRaises(AssertionError):
            glyph.appendAnchor(anchor)

    def test_appendGuideline(self):
        glyph = Glyph()
        glyph.dirty = False
        guideline = Guideline()
        glyph.appendGuideline(guideline)
        self.assertEqual(len(glyph.guidelines), 1)
        self.assertTrue(glyph.dirty)
        self.assertEqual(guideline.getParent(), glyph)

    def test_removeGuideline(self):
        font = Font(getTestFontPath())
        glyph = font.layers["Layer 1"]["A"]
        guideline = glyph.guidelines[0]
        glyph.removeGuideline(guideline)
        self.assertFalse(guideline in glyph.guidelines)
        self.assertIsNone(guideline.getParent())

    def test_clearGuidelines(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.clearGuidelines()
        self.assertEqual(len(glyph.guidelines), 0)

    def test_duplicatedGuideline(self):
        font = Font(getTestFontPath())
        glyph = font.layers["Layer 1"]["A"]
        guideline = glyph.guidelines[0]
        # Appending a guideline that is already present must be rejected.
        with self.assertRaises(AssertionError):
            glyph.appendGuideline(guideline)

    def test_len(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(len(glyph), 2)

    def test_iter(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual([len(contour) for contour in glyph], [4, 4])

    def test_copyDataFromGlyph(self):
        """copyDataFromGlyph copies everything except the glyph name."""
        source = Glyph()
        source.name = "a"
        source.width = 1
        source.height = 2
        source.unicodes = [3, 4]
        source.note = "test image"
        source.image = dict(fileName="test image", xScale=1, xyScale=1,
                            yxScale=1, yScale=1, xOffset=0, yOffset=0,
                            color=None)
        source.anchors = [dict(x=100, y=200, name="test anchor")]
        source.guidelines = [dict(x=10, y=20, name="test guideline")]
        source.lib = {"foo": "bar"}
        pen = source.getPointPen()
        pen.beginPath()
        pen.addPoint((100, 200), segmentType="line")
        pen.addPoint((300, 400), segmentType="line")
        pen.endPath()
        component = Component()
        component.base = "b"
        source.appendComponent(component)
        dest = Glyph()
        dest.copyDataFromGlyph(source)
        # The name is deliberately NOT copied.
        self.assertNotEqual(source.name, dest.name)
        self.assertEqual(source.width, dest.width)
        self.assertEqual(source.height, dest.height)
        self.assertEqual(source.unicodes, dest.unicodes)
        self.assertEqual(source.note, dest.note)
        self.assertEqual(source.image.items(), dest.image.items())
        self.assertEqual([g.items() for g in source.guidelines],
                         [g.items() for g in dest.guidelines])
        self.assertEqual([g.items() for g in source.anchors],
                         [g.items() for g in dest.anchors])
        self.assertEqual(len(source), len(dest))
        self.assertEqual(len(source.components), len(dest.components))
        self.assertEqual(self._contourData(source), self._contourData(dest))
        self.assertEqual(source.components[0].baseGlyph,
                         dest.components[0].baseGlyph)

    def test_clear(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        contour = glyph[0]
        anchor = glyph.anchors[0]
        glyph.clear()
        self.assertEqual(len(glyph), 0)
        self.assertEqual(len(glyph.anchors), 0)
        glyph = font["C"]
        component = glyph.components[0]
        glyph.clear()
        self.assertEqual(len(glyph.components), 0)
        glyph = font.layers["Layer 1"]["A"]
        guideline = glyph.guidelines[0]
        glyph.clear()
        self.assertEqual(len(glyph.guidelines), 0)
        # Cleared children are fully detached from the glyph.
        self.assertEqual((contour.getParent(), component.getParent(),
                          anchor.getParent(), guideline.getParent()),
                         (None, None, None, None))
        self.assertEqual((contour.dispatcher, component.dispatcher,
                          anchor.dispatcher, guideline.dispatcher),
                         (None, None, None, None))

    def test_move(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        xMin, yMin, xMax, yMax = glyph.bounds
        glyph.move((100, 50))
        self.assertEqual((xMin + 100, yMin + 50, xMax + 100, yMax + 50),
                         glyph.bounds)
        # Moving a glyph with components moves them too.
        glyph = font["C"]
        xMin, yMin, xMax, yMax = glyph.bounds
        glyph.move((100, 50))
        self.assertEqual((xMin + 100, yMin + 50, xMax + 100, yMax + 50),
                         glyph.bounds)

    def test_pointInside(self):
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertTrue(glyph.pointInside((100, 100)))
        self.assertFalse(glyph.pointInside((350, 350)))
        self.assertFalse(glyph.pointInside((-100, -100)))

    def test_area(self):
        font = Font()
        baseGlyph = font.newGlyph("baseGlyph")
        pointPen = baseGlyph.getPointPen()
        pointPen.beginPath()
        pointPen.addPoint((0, 0), "move")
        pointPen.addPoint((0, 100), "line")
        pointPen.addPoint((100, 100), "line")
        pointPen.addPoint((100, 0), "line")
        pointPen.addPoint((0, 0), "line")
        pointPen.endPath()
        self.assertEqual(baseGlyph.area, 10000)
        # The area of a pure-component glyph comes from its base glyph.
        componentGlyph = font.newGlyph("componentGlyph")
        pointPen = componentGlyph.getPointPen()
        pointPen.addComponent("baseGlyph", [1, 0, 0, 1, 0, 0])
        self.assertEqual(componentGlyph.area, 10000)


if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "1f68da7ccbcaa0817d5016b384af141d",
"timestamp": "",
"source": "github",
"line_count": 647,
"max_line_length": 78,
"avg_line_length": 36.80216383307573,
"alnum_prop": 0.6069883667212633,
"repo_name": "moyogo/defcon",
"id": "784a386ddd16da7bdb1f6e512408782bd01686db",
"size": "23811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/defcon/test/objects/test_glyph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "629945"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Enforce that a project's name is unique per user."""

    dependencies = [
        ('tasks', '0001_initial'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='project',
            # Same constraint as before, written as a set literal.
            unique_together={('name', 'user')},
        ),
    ]
| {
"content_hash": "287a1210dbd86d3bd254fdbb391aa48e",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 52,
"avg_line_length": 19.764705882352942,
"alnum_prop": 0.5803571428571429,
"repo_name": "polarkac/TaskTracker",
"id": "1b2110c9c1bb528e0f26a44beec8b2eef95135ee",
"size": "408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/migrations/0002_auto_20160510_2237.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "472"
},
{
"name": "HTML",
"bytes": "9260"
},
{
"name": "Python",
"bytes": "34344"
}
],
"symlink_target": ""
} |
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os  # NOTE(review): appears unused here, but kept — autest files often rely on it elsewhere.

Test.Summary = '''
Test a basic remap of a http connection
'''
# need Curl
Test.SkipUnless(
    Condition.HasProgram("curl", "Curl need to be installed on system for this test to work")
)
Test.ContinueOnFail = True

# Define default ATS, an origin server, and a test DNS resolver.
ts = Test.MakeATSProcess("ts")
server = Test.MakeOriginServer("server")
dns = Test.MakeDNServer("dns")
Test.testName = ""

# Canned origin transaction: any GET for www.example.com yields a 200.
request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
# expected response from the origin server
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
# add response to the server dictionary
server.addResponse("sessionfile.log", request_header, response_header)

# Route ATS DNS lookups to the local mock resolver; enable debug tags and
# the referer filter (exercised by the map_with_referer rule below).
ts.Disk.records_config.update({
    'proxy.config.diags.debug.enabled': 1,
    'proxy.config.diags.debug.tags': 'http.*|dns',
    'proxy.config.http.referer_filter': 1,
    'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port),
    'proxy.config.dns.resolv_conf': 'NULL'
})

# Remap rules under test: plain map, explicit-port map, redirect,
# map_with_referer, and a map whose target requires a DNS lookup.
ts.Disk.remap_config.AddLine(
    'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts.Disk.remap_config.AddLine(
    'map http://www.example.com:8080 http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts.Disk.remap_config.AddLine(
    # Fixed: this rule has no '{0}' placeholder, so the former
    # .format(server.Variables.Port) call was a misleading no-op.
    'redirect http://test3.com http://httpbin.org'
)
ts.Disk.remap_config.AddLine(
    'map_with_referer http://test4.com http://127.0.0.1:{0} http://httpbin.org (.*[.])?persia[.]com'.format(server.Variables.Port)
)
ts.Disk.remap_config.AddLine(
    'map http://testDNS.com http://audrey.hepburn.com:{0}'.format(server.Variables.Port)
)
# The mock resolver answers for the map target above.
dns.addRecords(records={"audrey.hepburn.com.": ["127.0.0.1"]})

# call localhost straight
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/" --verbose'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
# time delay as proxy.config.http.wait_for_cache could be broken
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(dns)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.Streams.stderr = "gold/remap-hitATS-404.gold"
tr.StillRunningAfter = server

# www.example.com host
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.example.com" -H "Proxy-Connection: keep-alive" --verbose'.format(
    ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stderr = "gold/remap-200.gold"

# www.example.com:80 host
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.example.com:80/" -H "Proxy-Connection: keep-alive" --verbose'.format(
    ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stderr = "gold/remap-200.gold"

# www.example.com:8080 host
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.example.com:8080" -H "Proxy-Connection: keep-alive" --verbose'.format(
    ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stderr = "gold/remap-200.gold"

# no rule for this
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.test.com/" -H "Proxy-Connection: keep-alive" --verbose'.format(
    ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stderr = "gold/remap-404.gold"

# redirect result
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://test3.com" --verbose'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stderr = "gold/remap-redirect.gold"

# referer hit
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://test4.com" --header "Referer: persia.com" --verbose'.format(
    ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stderr = "gold/remap-referer-hit.gold"

# referer miss
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://test4.com" --header "Referer: monkey.com" --verbose'.format(
    ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stderr = "gold/remap-referer-miss.gold"

# referer hit
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://test4.com" --header "Referer: www.persia.com" --verbose'.format(
    ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stderr = "gold/remap-referer-hit.gold"

# DNS test
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://testDNS.com" --verbose'.format(
    ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stderr = "gold/remap-DNS-200.gold"
| {
"content_hash": "294770ca633ce613551c1415577aa1e0",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 143,
"avg_line_length": 41.45985401459854,
"alnum_prop": 0.7246478873239437,
"repo_name": "reveller/trafficserver",
"id": "1be7211ad766deea2e6fcf75a9dae2b8d09d6d58",
"size": "5680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/gold_tests/remap/remap_http.test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13053"
},
{
"name": "C",
"bytes": "3417648"
},
{
"name": "C++",
"bytes": "11241509"
},
{
"name": "CSS",
"bytes": "8089"
},
{
"name": "HTML",
"bytes": "238770"
},
{
"name": "Java",
"bytes": "9881"
},
{
"name": "JavaScript",
"bytes": "1609"
},
{
"name": "Lex",
"bytes": "4029"
},
{
"name": "Lua",
"bytes": "380105"
},
{
"name": "M4",
"bytes": "273002"
},
{
"name": "Makefile",
"bytes": "200049"
},
{
"name": "Objective-C",
"bytes": "11203"
},
{
"name": "Perl",
"bytes": "67341"
},
{
"name": "Perl 6",
"bytes": "1329"
},
{
"name": "Python",
"bytes": "428815"
},
{
"name": "Roff",
"bytes": "2339"
},
{
"name": "Shell",
"bytes": "87510"
},
{
"name": "Vim script",
"bytes": "192"
},
{
"name": "Yacc",
"bytes": "3251"
}
],
"symlink_target": ""
} |
import inspect
from functools import wraps
from time import perf_counter
# Global switches: DO_TIMING enables the instrumentation; DISPLAY_LESS_PROGRESS
# suppresses the per-method (``self``) progress lines.
DO_TIMING = False
DISPLAY_LESS_PROGRESS = False

# Accumulated wall-clock seconds per function/method name, plus a running
# counter of timed top-level (non-method) calls used in the progress output.
timer_dict = {}
counter = 0


def time(f):
    """Decorator that measures and reports the wall-clock time of ``f``.

    When the module-level ``DO_TIMING`` flag is False, the wrapped function is
    invoked directly with no timing overhead.  When True, every call is timed
    with ``time.perf_counter``: a per-call progress line is printed, the
    duration is accumulated in ``timer_dict`` keyed by the function (or
    ``Class.method``) name, and a final summary table is printed once a
    function named ``Evaluator.evaluate`` returns.

    NOTE(review): the decorator deliberately shares its name with the stdlib
    ``time`` module; callers are expected to import it explicitly.
    """

    @wraps(f)
    def wrap(*args, **kw):
        if not DO_TIMING:
            # Timing disabled (e.g. parallel mode): call through unchanged.
            return f(*args, **kw)

        ts = perf_counter()
        result = f(*args, **kw)
        tt = perf_counter() - ts

        # Derive a display name.  Guard against callables with no positional
        # parameters, which previously raised IndexError on arg_names[0].
        arg_names = inspect.getfullargspec(f)[0]
        is_method = bool(arg_names) and arg_names[0] == "self"
        if is_method and DISPLAY_LESS_PROGRESS:
            # Matches the original behaviour: methods are skipped *before*
            # their time is recorded when less progress is requested.
            return result
        if is_method:
            method_name = type(args[0]).__name__ + "." + f.__name__
        else:
            method_name = f.__name__

        # Record accumulative time in each function for analysis.
        if method_name in timer_dict:
            timer_dict[method_name] += tt
        else:
            timer_dict[method_name] = tt

        if method_name == "Evaluator.evaluate":
            # Evaluation finished: display the accumulated timing summary.
            print("")
            print("Timing analysis:")
            for key, value in timer_dict.items():
                print("%-70s %2.4f sec" % (key, value))
        else:
            # Collect the values of selected arguments of interest for the
            # progress line.  Only positional args are inspected; the bounds
            # check avoids IndexError when such an argument is passed by
            # keyword instead.
            arg_titles = ["tracker", "seq", "cls"]
            arg_vals = []
            for i, a in enumerate(arg_names):
                if a in arg_titles and i < len(args):
                    arg_vals.append(args[i])
            arg_text = "(" + ", ".join(arg_vals) + ")"
            # Display methods and functions with different indentation.
            if is_method:
                print("%-74s %2.4f sec" % (" " * 4 + method_name + arg_text, tt))
            elif arg_names and arg_names[0] == "test":
                pass
            else:
                global counter
                counter += 1
                print("%i %-70s %2.4f sec" % (counter, method_name + arg_text, tt))
        return result

    return wrap
| {
"content_hash": "1c564ae8f19b5502673a00b4adceaed8",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 124,
"avg_line_length": 35.27272727272727,
"alnum_prop": 0.4789518900343643,
"repo_name": "SysCV/tet",
"id": "daf147ab73b5f6636a56dd396c0ceec800a27c1a",
"size": "2328",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "teta/teta/_timing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "379313"
},
{
"name": "Shell",
"bytes": "1714"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.