Each record below is a Python source file followed by its metadata row:

| repo_name | path | language | license | size |
|---|---|---|---|---|
#!/usr/bin/env python
"""Usage: compile_to_bitcode.py <source> [options]
"""
import sys
import subprocess
import os
def change_suffix(src, suffix):
out, _ = os.path.splitext(src)
out += "." + suffix
return out
def compile_benchmark(src, args, output=None, fix_inline=False,
fix_volatile=False, lart_path=None, suppress_output=False, arch=32):
if not output:
output = change_suffix(src, "ll")
if fix_inline:
args += ["-fno-inline"]
args += ["-m{}".format(arch), "-emit-llvm", "-fgnu89-inline"]
suff = ""
if suppress_output:
suff = " 2> /dev/null"
if fix_volatile:
lart_path = os.path.join(lart_path, "lart")
args_no_opt = [x for x in args if not x.startswith("-O")]
cmd = "clang-3.5 -c {0} -o {1} {2}".format(" ".join(args_no_opt), output, src) + suff
if not suppress_output:
print(cmd)
if os.system(cmd) != 0:
print("ERROR")
print("First phase compilation failed")
return ""
cmd = lart_path + " {0} {1} main-volatilize".format(output, output) + suff
if not suppress_output:
print(cmd)
if os.system(cmd) != 0:
print("ERROR")
print("Transformation failed")
return ""
cmd = "clang-3.5 -S {0} -o {1} {2}".format(" ".join(args), output, output) + suff
if not suppress_output:
print(cmd)
if os.system(cmd) != 0:
print("ERROR")
print("Second phase compilation failed")
return ""
else:
print("Running without LART")
cmd = "clang-3.5 -S {0} -o {1} {2}".format(" ".join(args), output, src) + suff
if not suppress_output:
print(cmd)
if os.system(cmd) != 0:
print("ERROR")
print("Compilation failed")
return ""
return output
if __name__ == "__main__":
if len(sys.argv) == 1:
print(__doc__)
sys.exit(1)
it = iter(sys.argv)
next(it)
src = next(it)
args = list(it)
compile_benchmark(src, args, fix_inline=True, fix_volatile=True, lart_path="bin")
| yaqwsx/SymDIVINE | scripts/compile_to_bitcode.py | Python | mit | 2,169 |
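As a quick illustration of the helper above, a hedged invocation sketch; the source path and compiler flags are hypothetical examples, and `lart_path="bin"` mirrors the script's own `__main__` block:

```python
# Illustrative only: compile a C benchmark to LLVM bitcode through the
# three-phase clang-3.5 / LART pipeline. "benchmark.c" and the flags
# below are hypothetical.
from compile_to_bitcode import compile_benchmark

output = compile_benchmark(
    "benchmark.c",
    ["-O0", "-Iinclude"],
    fix_inline=True,        # appends -fno-inline
    fix_volatile=True,      # routes the bitcode through bin/lart
    lart_path="bin",
    suppress_output=True,   # sends clang/lart stderr to /dev/null
)
print(output or "compilation failed")  # compile_benchmark returns "" on failure
```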
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For PickledScheduler.
"""
import datetime
import StringIO
from nova.openstack.common import jsonutils
from nova.scheduler import scheduler_options
from nova import test
class FakeSchedulerOptions(scheduler_options.SchedulerOptions):
def __init__(self, last_checked, now, file_old, file_now, data, filedata):
super(FakeSchedulerOptions, self).__init__()
# Change internals ...
self.last_modified = file_old
self.last_checked = last_checked
self.data = data
# For overrides ...
self._time_now = now
self._file_now = file_now
self._file_data = filedata
self.file_was_loaded = False
def _get_file_timestamp(self, filename):
return self._file_now
def _get_file_handle(self, filename):
self.file_was_loaded = True
return StringIO.StringIO(self._file_data)
def _get_time_now(self):
return self._time_now
class SchedulerOptionsTestCase(test.NoDBTestCase):
def test_get_configuration_first_time_no_flag(self):
last_checked = None
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = None
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
data = dict(a=1, b=2, c=3)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
{}, jdata)
self.assertEqual({}, fake.get_configuration())
self.assertFalse(fake.file_was_loaded)
def test_get_configuration_first_time_empty_file(self):
last_checked = None
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = None
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
jdata = ""
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
{}, jdata)
self.assertEqual({}, fake.get_configuration('foo.json'))
self.assertTrue(fake.file_was_loaded)
def test_get_configuration_first_time_happy_day(self):
last_checked = None
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = None
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
data = dict(a=1, b=2, c=3)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
{}, jdata)
self.assertEqual(data, fake.get_configuration('foo.json'))
self.assertTrue(fake.file_was_loaded)
def test_get_configuration_second_time_no_change(self):
last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
data = dict(a=1, b=2, c=3)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
data, jdata)
self.assertEqual(data, fake.get_configuration('foo.json'))
self.assertFalse(fake.file_was_loaded)
def test_get_configuration_second_time_too_fast(self):
last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
now = datetime.datetime(2011, 1, 1, 1, 1, 2)
file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
old_data = dict(a=1, b=2, c=3)
data = dict(a=11, b=12, c=13)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
old_data, jdata)
self.assertEqual(old_data, fake.get_configuration('foo.json'))
self.assertFalse(fake.file_was_loaded)
def test_get_configuration_second_time_change(self):
last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
old_data = dict(a=1, b=2, c=3)
data = dict(a=11, b=12, c=13)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
old_data, jdata)
self.assertEqual(data, fake.get_configuration('foo.json'))
self.assertTrue(fake.file_was_loaded)
| Brocade-OpenSource/OpenStack-DNRM-Nova | nova/tests/scheduler/test_scheduler_options.py | Python | apache-2.0 | 5,241 |
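The tests above pin down the cache-refresh contract of `SchedulerOptions.get_configuration`. A minimal sketch of the decision logic they imply; the five-minute recheck interval is an assumption inferred from the "too fast" case, and the real nova implementation may differ in detail:

```python
import datetime

RECHECK_INTERVAL = datetime.timedelta(minutes=5)  # assumed value

def get_configuration(self, filename=None):
    """Sketch of the reload behaviour the tests above encode."""
    if filename is None:
        return self.data  # no options file configured: never load
    now = self._get_time_now()
    if self.last_checked and now - self.last_checked < RECHECK_INTERVAL:
        return self.data  # checked too recently (the "too fast" case)
    self.last_checked = now
    stamp = self._get_file_timestamp(filename)
    if self.last_modified and stamp <= self.last_modified:
        return self.data  # file unchanged since the last load
    self.last_modified = stamp
    raw = self._get_file_handle(filename).read()
    self.data = jsonutils.loads(raw) if raw else {}
    return self.data
```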
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.datastore_v1.proto import (
datastore_pb2 as google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2,
)
class DatastoreStub(object):
"""Each RPC normalizes the partition IDs of the keys in its input entities,
and always returns entities with keys with normalized partition IDs.
This applies to all keys and entities, including those in values, except keys
with both an empty path and an empty or unset partition ID. Normalization of
input keys sets the project ID (if not already set) to the project ID from
the request.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Lookup = channel.unary_unary(
"/google.datastore.v1.Datastore/Lookup",
request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.LookupRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.LookupResponse.FromString,
)
self.RunQuery = channel.unary_unary(
"/google.datastore.v1.Datastore/RunQuery",
request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RunQueryRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RunQueryResponse.FromString,
)
self.BeginTransaction = channel.unary_unary(
"/google.datastore.v1.Datastore/BeginTransaction",
request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.BeginTransactionRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.BeginTransactionResponse.FromString,
)
self.Commit = channel.unary_unary(
"/google.datastore.v1.Datastore/Commit",
request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.CommitRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.CommitResponse.FromString,
)
self.Rollback = channel.unary_unary(
"/google.datastore.v1.Datastore/Rollback",
request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RollbackRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RollbackResponse.FromString,
)
self.AllocateIds = channel.unary_unary(
"/google.datastore.v1.Datastore/AllocateIds",
request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.AllocateIdsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.AllocateIdsResponse.FromString,
)
self.ReserveIds = channel.unary_unary(
"/google.datastore.v1.Datastore/ReserveIds",
request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.ReserveIdsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.ReserveIdsResponse.FromString,
)
class DatastoreServicer(object):
"""Each RPC normalizes the partition IDs of the keys in its input entities,
and always returns entities with keys with normalized partition IDs.
This applies to all keys and entities, including those in values, except keys
with both an empty path and an empty or unset partition ID. Normalization of
input keys sets the project ID (if not already set) to the project ID from
the request.
"""
def Lookup(self, request, context):
"""Looks up entities by key.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def RunQuery(self, request, context):
"""Queries for entities.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def BeginTransaction(self, request, context):
"""Begins a new transaction.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def Commit(self, request, context):
"""Commits a transaction, optionally creating, deleting or modifying some
entities.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def Rollback(self, request, context):
"""Rolls back a transaction.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def AllocateIds(self, request, context):
"""Allocates IDs for the given keys, which is useful for referencing an entity
before it is inserted.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ReserveIds(self, request, context):
"""Prevents the supplied keys' IDs from being auto-allocated by Cloud
Datastore.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_DatastoreServicer_to_server(servicer, server):
rpc_method_handlers = {
"Lookup": grpc.unary_unary_rpc_method_handler(
servicer.Lookup,
request_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.LookupRequest.FromString,
response_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.LookupResponse.SerializeToString,
),
"RunQuery": grpc.unary_unary_rpc_method_handler(
servicer.RunQuery,
request_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RunQueryRequest.FromString,
response_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RunQueryResponse.SerializeToString,
),
"BeginTransaction": grpc.unary_unary_rpc_method_handler(
servicer.BeginTransaction,
request_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.BeginTransactionRequest.FromString,
response_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.BeginTransactionResponse.SerializeToString,
),
"Commit": grpc.unary_unary_rpc_method_handler(
servicer.Commit,
request_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.CommitRequest.FromString,
response_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.CommitResponse.SerializeToString,
),
"Rollback": grpc.unary_unary_rpc_method_handler(
servicer.Rollback,
request_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RollbackRequest.FromString,
response_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RollbackResponse.SerializeToString,
),
"AllocateIds": grpc.unary_unary_rpc_method_handler(
servicer.AllocateIds,
request_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.AllocateIdsRequest.FromString,
response_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.AllocateIdsResponse.SerializeToString,
),
"ReserveIds": grpc.unary_unary_rpc_method_handler(
servicer.ReserveIds,
request_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.ReserveIdsRequest.FromString,
response_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.ReserveIdsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.datastore.v1.Datastore", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
| tseaver/google-cloud-python | datastore/google/cloud/datastore_v1/proto/datastore_pb2_grpc.py | Python | apache-2.0 | 8,633 |
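For orientation, a hedged usage sketch of the generated stub. A real Cloud Datastore connection needs an authenticated secure channel, so the insecure local endpoint here assumes the Datastore emulator:

```python
import grpc
from google.cloud.datastore_v1.proto import datastore_pb2, datastore_pb2_grpc

# "localhost:8081" assumes a locally running Datastore emulator;
# production use requires a secure, authenticated channel instead.
channel = grpc.insecure_channel("localhost:8081")
stub = datastore_pb2_grpc.DatastoreStub(channel)

request = datastore_pb2.LookupRequest(project_id="my-project")
response = stub.Lookup(request)  # the unary-unary callable wired up above
print(response.found, response.missing)
```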
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from uuid import uuid4
from sqlalchemy.dialects.postgresql import INET, UUID
from indico.core.db import db
from indico.core.db.sqlalchemy import UTCDateTime
from indico.util.date_time import now_utc
from indico.util.string import return_ascii
class APIKey(db.Model):
"""API keys for users"""
__tablename__ = 'api_keys'
__table_args__ = (db.Index(None, 'user_id', unique=True, postgresql_where=db.text('is_active')),
{'schema': 'users'})
#: api key id
id = db.Column(
db.Integer,
primary_key=True
)
#: unique api key for a user
token = db.Column(
UUID,
nullable=False,
unique=True,
default=lambda: unicode(uuid4())
)
#: secret key used for signed requests
secret = db.Column(
UUID,
nullable=False,
default=lambda: unicode(uuid4())
)
#: ID of the user associated with the key
user_id = db.Column(
db.Integer,
db.ForeignKey('users.users.id'),
nullable=False,
index=True,
)
#: if the key is the currently active key for the user
is_active = db.Column(
db.Boolean,
nullable=False,
default=True
)
#: if the key has been blocked by an admin
is_blocked = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: if persistent signatures are allowed
is_persistent_allowed = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: the time when the key has been created
created_dt = db.Column(
UTCDateTime,
nullable=False,
default=now_utc
)
#: the last time when the key has been used
last_used_dt = db.Column(
UTCDateTime,
nullable=True
)
#: the last ip address from which the key has been used
last_used_ip = db.Column(
INET,
nullable=True
)
#: the last URI this key was used with
last_used_uri = db.Column(
db.String,
nullable=True
)
#: if the last use was from an authenticated request
last_used_auth = db.Column(
db.Boolean,
nullable=True
)
#: the number of times the key has been used
use_count = db.Column(
db.Integer,
nullable=False,
default=0
)
#: the user associated with this API key
user = db.relationship(
'User',
lazy=False
)
@return_ascii
def __repr__(self):
return '<APIKey({}, {}, {})>'.format(self.token, self.user_id, self.last_used_dt or 'never')
def register_used(self, ip, uri, authenticated):
"""Updates the last used information"""
self.last_used_dt = now_utc()
self.last_used_ip = ip
self.last_used_uri = uri
self.last_used_auth = authenticated
self.use_count = APIKey.use_count + 1
| nop33/indico | indico/modules/api/models/keys.py | Python | gpl-3.0 | 3,652 |
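A hedged sketch of how the model above is used, assuming an application context with a bound `db.session` and an existing `user` row. Note that `register_used` assigns the column expression `APIKey.use_count + 1`, so the increment runs atomically in SQL:

```python
# Illustrative only: issue a key for a user and record one use.
key = APIKey(user=user)  # token and secret default to fresh UUIDs
db.session.add(key)
db.session.flush()

key.register_used(ip='192.0.2.1', uri='/api/events', authenticated=True)
db.session.commit()  # the use_count increment executes in the database
```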
from random import choice
from string import ascii_letters, digits
import six
def generate_random_string(length=20):
return ''.join((choice(ascii_letters + digits)
for _ in six.moves.xrange(length)))
def strip_leading_spaces(source):
r"""
This is to help you write multiline strings as test inputs in your
tests without screwing up your code's indentation. Eg::
'''
1
00:01:28.797 --> 00:01:30.297 X:240 Y:480
Hello world
'''
will be converted to::
'\n1\n00:01:28.797 --> 00:01:30.297 X:240 Y:480\nHello world\n'
"""
return '\n'.join((line.lstrip() for line in source.split('\n')))
def bytes_to_string(_bytes):
for byte in _bytes:
assert len(byte) == 1, six.text_type(_bytes)
return u''.join(_bytes)
| transifex/openformats | openformats/tests/utils/strings.py | Python | gpl-3.0 | 833 |
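A few self-checking examples for the helpers above:

```python
src = '''
    1
    00:01:28.797 --> 00:01:30.297 X:240 Y:480
    Hello world
'''
assert strip_leading_spaces(src) == (
    '\n1\n00:01:28.797 --> 00:01:30.297 X:240 Y:480\nHello world\n'
)
assert len(generate_random_string(8)) == 8
assert bytes_to_string([u'a', u'b', u'c']) == u'abc'
```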
"""Views for the pages app."""
from django.views.generic.detail import DetailView
from .models import Page
class PageDetail(DetailView):
"""Page detail view."""
model = Page
def get_template_names(self):
names = super(PageDetail, self).get_template_names()
names.append('arpegio-pages/page_detail.html')
return names
| arpegio-dj/arpegio | arpegio/pages/views.py | Python | bsd-3-clause | 357 |
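A hedged sketch of wiring the view above into a URLconf (the pattern and name are assumptions, not part of the original module); `DetailView` will then try the default `pages/page_detail.html` before the appended `arpegio-pages/page_detail.html` fallback:

```python
from django.urls import path
from .views import PageDetail

urlpatterns = [
    # The captured pk identifies the Page instance to render.
    path('pages/<int:pk>/', PageDetail.as_view(), name='page-detail'),
]
```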
"{foo[bar]}".format(foo=dict(bar=42))
<warning descr="Too few arguments for format string">"{foo[bar]}"</warning>.format(foo=dict(foo=42))
| jwren/intellij-community | python/testData/inspections/PyStringFormatInspection/NewStyleMappingKeyWithSubscriptionDictCall.py | Python | apache-2.0 | 138 |
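The fixture above feeds PyCharm's string-format inspection a new-style format string with a subscripted mapping key; at runtime the flagged call fails for the same underlying reason:

```python
"{foo[bar]}".format(foo=dict(bar=42))  # OK: renders '42'
"{foo[bar]}".format(foo=dict(foo=42))  # raises KeyError: 'bar'
```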
# (c) Copyright [2015] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.urlresolvers import NoReverseMatch # noqa
from django.core.urlresolvers import reverse
from django.utils import html
from django.utils.translation import ugettext_lazy as _
from django.utils import safestring
from horizon import forms
from horizon import tables
import horizon_hpe_storage.api.keystone_api as keystone
import horizon_hpe_storage.api.barbican_api as barbican
class LicenseLink(tables.LinkAction):
name = "licenses"
verbose_name = _("View License Information")
classes = ("btn-log",)
def get_link_url(self, datum):
link_url = "storage_arrays/" + \
datum['name'] + "::" + datum['test_name'] + "/system_detail"
# tab_query_string = tabs.LicenseTab(
# tabs.BackendDetailTabs).get_query_string()
# return "?".join([base_url, tab_query_string])
return link_url
class OpenstackFeaturesLink(tables.LinkAction):
name = "openstack_features"
verbose_name = _("View License Information")
classes = ("btn-log",)
def get_link_url(self, datum):
link_url = "storage_arrays/" + \
datum['name'] + "::" + datum['test_name'] + "/license_detail"
# tab_query_string = tabs.LicenseTab(
# tabs.BackendDetailTabs).get_query_string()
# return "?".join([base_url, tab_query_string])
return link_url
class RunDiscoveryAction(tables.LinkAction):
name = "run_discovery"
verbose_name = _("Discover Storage Arrays")
url = "horizon:admin:hpe_storage:storage_arrays:discover_arrays"
classes = ("ajax-modal",)
keystone_api = keystone.KeystoneAPI()
barbican_api = barbican.BarbicanAPI()
def allowed(self, request, node=None):
self.keystone_api.do_setup(request)
self.barbican_api.do_setup(self.keystone_api.get_session())
return self.barbican_api.nodes_exist(
barbican.CINDER_NODE_TYPE)
def get_pool_name(pool_name):
try:
url = reverse("horizon:admin:hpe_storage:storage_arrays:" +
"pool_detail", args=(pool_name,)) + "pool_details"
pool = '<a href="%s">%s</a>' % (url, html.escape(pool_name))
except NoReverseMatch:
pool = html.escape(pool_name)
return pool
class PoolsColumn(tables.Column):
# Customized column class.
def get_raw_data(self, backend_system):
link = _('%(pool_name)s')
pool_name_start = backend_system['host_name'] + "@"
pools = []
for cinder_host in backend_system['cinder_hosts']:
pool_name = get_pool_name(pool_name_start + cinder_host)
vals = {"pool_name": pool_name}
pools.append(link % vals)
return safestring.mark_safe("<br>".join(pools))
class StorageArraysTable(tables.DataTable):
system_name = tables.Column(
'name',
verbose_name=_('Array Name'),
form_field=forms.CharField(max_length=64))
system_ip = tables.Column(
'ip_address',
verbose_name=_('IP Address'),
form_field=forms.CharField(max_length=64))
model = tables.Column(
'model',
verbose_name=_('Model'),
form_field=forms.CharField(max_length=64))
serial_number = tables.Column(
'serial_number',
verbose_name=_('Serial Number'),
form_field=forms.CharField(max_length=64))
os_version = tables.Column(
'os_version',
verbose_name=_('OS Version'),
form_field=forms.CharField(max_length=64))
wsapi_version = tables.Column(
'wsapi_version',
verbose_name=_('WSAPI Version'),
form_field=forms.CharField(max_length=64))
pools = PoolsColumn(
"pools",
verbose_name=_("Cinder Hosts (Pools)"),
wrap_list=True)
def get_object_id(self, storage_array):
return storage_array['name'] + "::" + storage_array['test_name']
class Meta(object):
name = "storage_arrays"
verbose_name = _("Discovered by Diagnostic Tests")
# hidden_title = False
table_actions = (RunDiscoveryAction,)
row_actions = (LicenseLink,)
| hpe-storage/horizon-hpe-storage-ui | horizon_hpe_storage/storage_panel/storage_arrays/tables.py | Python | apache-2.0 | 4,728 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "final_age.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| gauravsitlani/programming | final_age/python/manage.py | Python | gpl-3.0 | 807 |
from django.contrib import admin
from blog.models import Article, Comment
# Register your models here.
admin.site.register(Article)
admin.site.register(Comment)
| MikeLing/personalblog | blog/admin.py | Python | mit | 161 |
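An equivalent, slightly richer registration using `ModelAdmin` subclasses; the `list_display` field names are hypothetical and would need to match the actual models:

```python
from django.contrib import admin
from blog.models import Article, Comment

@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
    list_display = ('title', 'created')  # hypothetical field names

@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    pass
```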
from __future__ import print_function
__author__ = 'Robert Smallshire'
import sys
class Node(object):
def __init__(self, indent=None, lines=None, parent=None):
if indent is not None:
self.indent = indent
else:
self.indent = 0
if lines is not None:
self.lines = lines
else:
self.lines = []
self._parent = parent
self.children = []
parent = property(lambda self: self._parent)
def add_child(self, child):
assert(child.parent is self)
self.children.append(child)
def __repr__(self):
return "Node(" + repr(self.indent) + ", " + repr(self.lines)\
+ ", children=" + repr(self.children) + ")"
def render_rst(self, *args, **kwargs):
result = []
prefix = ' ' * self.indent
result.extend(prefix + line for line in self.lines)
for child in self.children:
result.extend(child.render_rst())
return result
class Arg(Node):
def __init__(self, indent, name):
super(Arg, self).__init__(indent)
self.name = name
self.type = None
def __repr__(self):
return "Arg(" + repr(self.name) + ", " + repr(self.type) + ", children=" + repr(self.children) + ")"
def render_rst(self, *args, **kwargs):
result = []
indent = ' ' * self.indent
# Render the param description
description = []
for child in self.children:
child_lines = child.render_rst()
description.extend(child_lines)
name = self.name.replace('*', r'\*')
first_description = description[0].lstrip() if len(description) else ''
if not first_description:
print("Missing argument description for {name}".format(name=self.name), file=sys.stderr)
result.append("{indent}:param {name}: {first_description}".format(
indent=indent, name=name,
first_description=first_description))
result.extend(description[1:])
# If a type was specified render the type
if self.type is not None:
result.append("{indent}:type {name}: {type}".format(indent=indent,
name=self.name, type=self.type))
result.append('')
ensure_terminal_blank(result)
return result
class Attribute(Node):
def __init__(self, indent, name):
super(Attribute, self).__init__(indent)
self.name = name
self.type = None
def __repr__(self):
return "Attribute(" + repr(self.name) + ", " + repr(self.type)\
+ ", children=" + repr(self.children) + ")"
def render_rst(self, *args, **kwargs):
result = []
indent = ' ' * self.indent
# Render the attribute description
description = []
for child in self.children:
child_lines = child.render_rst()
description.extend(child_lines)
if self.type is not None:
if len(description) > 0:
description[0] = "({t}) {desc}".format(
t=self.type, desc=description[0].lstrip())
else:
description = ["({t})".format(t=self.type)]
# The description lines should be indented by three characters to line
# up with the directive.
reindented_description = [" " + line.lstrip() for line in description]
result.append(".. py:attribute:: {name}".format(name=self.name))
if len(reindented_description) > 0:
result.append('')
result.extend(reindented_description)
ensure_terminal_blank(result)
return result
class Raises(Node):
def __init__(self, indent=None):
super(Raises, self).__init__(indent=indent)
def __repr__(self):
return "Raises(" + repr(self.indent) + ", children=" + repr(self.children) + ")"
def render_rst(self, *args, **kwargs):
result = []
indent = ' ' * self.indent
result.append(indent + ':raises:')
for child in self.children:
result.extend(child.render_rst(only_child=len(self.children) == 1))
ensure_terminal_blank(result)
return result
class Except(Node):
def __init__(self, indent, type):
super(Except, self).__init__(indent=indent)
#self.child_indent = child_indent
self.type = type
def __repr__(self):
return "Except(" + repr(self.type) + ", children="\
+ repr(self.children) + ")"
def render_rst(self, only_child=False, *args, **kwargs):
result = []
indent = ' ' * self.indent
# Render the param description
description = []
for child in self.children:
child_lines = child.render_rst()
description.extend(child_lines)
bullet = '* ' if not only_child else ''
first_description = description[0].lstrip() if len(description) else ''
if not first_description:
# TODO: Emit a warning about a missing argument description
pass
result.append("{indent}{bullet}{type} - {first_description}".format(
indent=indent,
bullet=bullet, type=self.type,
first_description=first_description))
result.extend(description[1:])
ensure_terminal_blank(result)
return result
class Returns(Node):
def __init__(self, indent):
super(Returns, self).__init__(indent=indent)
self.title = 'Returns'
self.line = ''
def __repr__(self):
return "Returns(" + str(self.indent) + ", children=" + str(self.children) + ")"
def render_rst(self, *args, **kwargs):
result = []
indent = ' ' * self.indent
# Render the return description
description = [self.line] if self.line else []
for child in self.children:
child_lines = child.render_rst()
description.extend(child_lines)
self.render_title(description, indent, result)
result.extend(description[1:])
ensure_terminal_blank(result)
return result
def render_title(self, description, indent, result):
first_description = description[0].lstrip() if len(description) else ''
result.append(
"{indent}:returns: {first_description}".format(indent=indent,
first_description=first_description))
class Yields(Node):
def __init__(self, indent):
super(Yields, self).__init__(indent=indent)
self.title = 'Yields'
self.line = ''
def __repr__(self):
return "Yields(" + str(self.indent) + ", children=" + str(self.children) + ")"
def render_rst(self, *args, **kwargs):
result = []
indent = ' ' * self.indent
# Render the return description
description = [self.line] if self.line else []
for child in self.children:
child_lines = child.render_rst()
description.extend(child_lines)
self.render_title(description, indent, result)
result.extend(description[1:])
ensure_terminal_blank(result)
return result
def render_title(self, description, indent, result):
first_description = description[0].lstrip() if len(description) else ''
result.append(
"{indent}:returns: {first_description}".format(indent=indent,
first_description=first_description))
class Warning(Node):
def __init__(self, indent):
super(Warning, self).__init__(indent=indent)
self.line = '' # TODO: Can't we use self.lines in the superclass for this?
def __repr__(self):
return "Warning(" + repr(self.indent) + ", children=" + str(self.children) + ")"
def render_rst(self, *args, **kwargs):
# TODO: Factor out the commonality between this and Note below
result = []
indent = ' ' * self.indent
# Render the param description
description = [indent + ' ' + self.line] if self.line else []
for child in self.children:
child_lines = child.render_rst()
description.extend(child_lines)
result.append(indent + ".. warning::")
result.append(indent + '')
result.extend(description)
ensure_terminal_blank(result)
return result
class Note(Node):
def __init__(self, indent):
super(Note, self).__init__(indent=indent)
self.line = '' # TODO: Can't we use self.lines in the superclass for this?
def __repr__(self):
return "Note(" + repr(self.indent) + ", children=" + str(self.children) + ")"
def render_rst(self, *args, **kwargs):
# TODO: Factor out the commonality between this and Warning above
result = []
indent = ' ' * self.indent
# Render the param description
description = [indent + ' ' + self.line] if self.line else []
for child in self.children:
child_lines = child.render_rst()
description.extend(child_lines)
result.append(indent + ".. note::")
result.append(indent + '')
result.extend(description)
ensure_terminal_blank(result)
return result
class Usage(Node):
def __init__(self, indent):
super(Usage, self).__init__(indent=indent)
self.line = '' # TODO: Can't we use self.lines in the superclass for this?
self.lang = 'python'
def __repr__(self):
return "Usage(" + repr(self.indent) + ")"
def render_rst(self, *args, **kwargs):
result = []
indent = ' ' * self.indent
# Render the param description
description = []
for child in self.children:
child_lines = child.render_rst()
description.extend(child_lines)
if len(description) > 0:
minimum_code_indent = min(len(codeline) - len(codeline.lstrip()) for codeline in description if not codeline.isspace())
codelines = [codeline[minimum_code_indent:] for codeline in description]
result.append(indent + ".. rubric:: Usage:")
result.append('')
result.append(indent + '.. code-block:: {lang}'.format(lang=self.lang))
result.append('')
for codeline in codelines:
result.append(indent + ' ' + codeline)
else:
print("No code in Usage block. Skipping!", file=sys.stderr)
ensure_terminal_blank(result)
return result
def ensure_terminal_blank(result):
'''If the description didn't end with a blank line add one here.'''
if len(result) > 0:
if len(result[-1].strip()) != 0:
result.append('')
| rob-smallshire/cartouche | cartouche/nodes.py | Python | bsd-3-clause | 11,215 |
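A small round trip through the node classes above: an `Arg` with one description child renders to Sphinx `:param:`/`:type:` fields:

```python
arg = Arg(indent=4, name='timeout')
arg.type = 'float'
desc = Node(indent=8, lines=['Seconds to wait before giving up.'], parent=arg)
arg.add_child(desc)  # add_child asserts the parent link is already set
print('\n'.join(arg.render_rst()))
#     :param timeout: Seconds to wait before giving up.
#     :type timeout: float
```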
from rest_framework import serializers
from crowdsourcing import constants
from crowdsourcing.serializers.dynamic import DynamicFieldsModelSerializer
from crowdsourcing.models import Qualification, QualificationItem, WorkerAccessControlEntry, \
RequesterAccessControlGroup
from crowdsourcing.tasks import update_worker_cache
class QualificationSerializer(DynamicFieldsModelSerializer):
class Meta:
model = Qualification
fields = ('id', 'name', 'description')
def create(self, owner, *args, **kwargs):
return Qualification.objects.create(owner=owner, **self.validated_data)
class QualificationItemSerializer(DynamicFieldsModelSerializer):
class Meta:
model = QualificationItem
fields = ('id', 'expression', 'qualification')
def create(self, *args, **kwargs):
return QualificationItem.objects.create(**self.validated_data)
def update(self, *args, **kwargs):
self.instance.expression = self.validated_data.get('expression', self.instance.expression)
self.instance.save()
return self.instance
class RequesterACGSerializer(DynamicFieldsModelSerializer):
class Meta:
model = RequesterAccessControlGroup
fields = ('id', 'requester', 'is_global', 'type', 'name')
read_only_fields = ('requester',)
def create(self, requester, *args, **kwargs):
return RequesterAccessControlGroup.objects.create(requester=requester, **self.validated_data)
def create_with_entries(self, requester, entries, *args, **kwargs):
group = RequesterAccessControlGroup.objects.create(requester=requester, **self.validated_data)
for entry in entries:
d = {
'group': group.id,
'worker': entry
}
entry_serializer = WorkerACESerializer(data=d)
if entry_serializer.is_valid():
entry_serializer.create()
else:
raise ValueError('Invalid user ids')
update_worker_cache(entries, constants.ACTION_GROUPADD, value=group.id)
return group
class WorkerACESerializer(DynamicFieldsModelSerializer):
worker_alias = serializers.SerializerMethodField()
class Meta:
model = WorkerAccessControlEntry
fields = ('id', 'worker', 'worker_alias', 'group', 'created_at')
def create(self, *args, **kwargs):
return WorkerAccessControlEntry.objects.create(**self.validated_data)
@staticmethod
def get_worker_alias(obj):
return obj.worker.username
| shirishgoyal/crowdsource-platform | crowdsourcing/serializers/qualification.py | Python | mit | 2,547 |
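A hedged sketch of the create-with-owner pattern the serializers above use, where the owning user is passed explicitly to `create()` rather than read from `validated_data` (`request_user` is an assumed `User` instance):

```python
serializer = QualificationSerializer(
    data={'name': 'Fluent English', 'description': 'Passed language screen'})
if serializer.is_valid():
    qualification = serializer.create(owner=request_user)
else:
    raise ValueError(serializer.errors)
```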
u'''
Created on Sep 13, 2011
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
'''
import os, io, sys, json
from collections import defaultdict
from arelle import XbrlConst
from arelle.ModelObject import ModelObject
from arelle.ModelValue import QName
from arelle.ModelFormulaObject import Aspect
from arelle.ModelRenderingObject import (ModelEuTable, ModelTable, ModelBreakdown,
ModelEuAxisCoord, ModelDefinitionNode, ModelClosedDefinitionNode, ModelRuleDefinitionNode,
ModelRelationshipDefinitionNode, ModelSelectionDefinitionNode, ModelFilterDefinitionNode,
ModelConceptRelationshipDefinitionNode, ModelDimensionRelationshipDefinitionNode,
ModelCompositionDefinitionNode, ModelTupleDefinitionNode, StructuralNode,
ROLLUP_NOT_ANALYZED, CHILDREN_BUT_NO_ROLLUP, CHILD_ROLLUP_FIRST, CHILD_ROLLUP_LAST,
OPEN_ASPECT_ENTRY_SURROGATE)
from arelle.PrototypeInstanceObject import FactPrototype
RENDER_UNITS_PER_CHAR = 16 # nominal screen units per char for wrapLength computation and adjustment
class ResolutionException(Exception):
def __init__(self, code, message, **kwargs):
self.kwargs = kwargs
self.code = code
self.message = message
self.args = ( self.__repr__(), )
def __repr__(self):
return _(u'[{0}] exception {1}').format(self.code, self.message % self.kwargs)
def resolveAxesStructure(view, viewTblELR):
if isinstance(viewTblELR, (ModelEuTable, ModelTable)):
# called with a modelTable instead of an ELR
# find an ELR for this table object
table = viewTblELR
for rel in view.modelXbrl.relationshipSet((XbrlConst.tableBreakdown, XbrlConst.tableBreakdownMMDD, XbrlConst.tableBreakdown201305, XbrlConst.tableBreakdown201301, XbrlConst.tableAxis2011)).fromModelObject(table):
# find relationships in table's linkrole
view.axisSubtreeRelSet = view.modelXbrl.relationshipSet((XbrlConst.tableBreakdownTree, XbrlConst.tableBreakdownTreeMMDD, XbrlConst.tableBreakdownTree201305, XbrlConst.tableDefinitionNodeSubtree, XbrlConst.tableDefinitionNodeSubtreeMMDD, XbrlConst.tableDefinitionNodeSubtree201305, XbrlConst.tableDefinitionNodeSubtree201301, XbrlConst.tableAxisSubtree2011), rel.linkrole)
return resolveTableAxesStructure(view, table,
view.modelXbrl.relationshipSet((XbrlConst.tableBreakdown, XbrlConst.tableBreakdownMMDD, XbrlConst.tableBreakdown201305, XbrlConst.tableBreakdown201301, XbrlConst.tableAxis2011), rel.linkrole))
# no relationships from table found
return (None, None, None, None)
# called with an ELR or list of ELRs
tblAxisRelSet = view.modelXbrl.relationshipSet(XbrlConst.euTableAxis, viewTblELR)
if len(tblAxisRelSet.modelRelationships) > 0:
view.axisSubtreeRelSet = view.modelXbrl.relationshipSet(XbrlConst.euAxisMember, viewTblELR)
else: # try 2011 roles
tblAxisRelSet = view.modelXbrl.relationshipSet((XbrlConst.tableBreakdown, XbrlConst.tableBreakdownMMDD, XbrlConst.tableBreakdown201305, XbrlConst.tableBreakdown201301, XbrlConst.tableAxis2011), viewTblELR)
view.axisSubtreeRelSet = view.modelXbrl.relationshipSet((XbrlConst.tableBreakdownTree, XbrlConst.tableBreakdownTreeMMDD, XbrlConst.tableBreakdownTree201305, XbrlConst.tableDefinitionNodeSubtree, XbrlConst.tableDefinitionNodeSubtreeMMDD, XbrlConst.tableDefinitionNodeSubtree201305, XbrlConst.tableDefinitionNodeSubtree201301, XbrlConst.tableAxisSubtree2011), viewTblELR)
if tblAxisRelSet is None or len(tblAxisRelSet.modelRelationships) == 0:
view.modelXbrl.modelManager.addToLog(_(u"no table relationships for {0}").format(viewTblELR))
return (None, None, None, None)
# table name
modelRoleTypes = view.modelXbrl.roleTypes.get(viewTblELR)
if modelRoleTypes is not None and len(modelRoleTypes) > 0:
view.roledefinition = modelRoleTypes[0].definition
if view.roledefinition is None or view.roledefinition == u"":
view.roledefinition = os.path.basename(viewTblELR)
try:
for table in tblAxisRelSet.rootConcepts:
return resolveTableAxesStructure(view, table, tblAxisRelSet)
except ResolutionException, ex:
view.modelXbrl.error(ex.code, ex.message, exc_info=True, **ex.kwargs);
return (None, None, None, None)
def resolveTableAxesStructure(view, table, tblAxisRelSet):
view.dataCols = 0
view.dataRows = 0
view.colHdrNonStdRoles = []
view.colHdrDocRow = False
view.colHdrCodeRow = False
view.colHdrRows = 0
view.rowHdrNonStdRoles = []
view.rowHdrCols = 0
view.rowHdrColWidth = [0,]
view.rowNonAbstractHdrSpanMin = [0,]
view.rowHdrDocCol = False
view.rowHdrCodeCol = False
view.zAxisRows = 0
view.aspectModel = table.aspectModel
view.zmostOrdCntx = None
view.modelTable = table
view.topRollup = {u"x": ROLLUP_NOT_ANALYZED, u"y": ROLLUP_NOT_ANALYZED}
view.aspectEntryObjectId = 0
view.modelTable = table
view.rendrCntx = table.renderingXPathContext
xTopStructuralNode = yTopStructuralNode = zTopStructuralNode = None
# must be cartesian product of top level relationships
tblAxisRels = tblAxisRelSet.fromModelObject(table)
facts = table.filteredFacts(view.rendrCntx, view.modelXbrl.factsInInstance) # apply table filters
view.breakdownNodes = defaultdict(list) # breakdown nodes
for tblAxisRel in tblAxisRels:
definitionNode = tblAxisRel.toModelObject
addBreakdownNode(view, tblAxisRel.axisDisposition, definitionNode)
# do z's first to set variables needed by x and y axes expressions
for disposition in (u"z", u"x", u"y"):
for i, tblAxisRel in enumerate(tblAxisRels):
definitionNode = tblAxisRel.toModelObject
if (tblAxisRel.axisDisposition == disposition and
isinstance(definitionNode, (ModelEuAxisCoord, ModelBreakdown, ModelDefinitionNode))):
if disposition == u"x" and xTopStructuralNode is None:
xTopStructuralNode = StructuralNode(None, definitionNode, definitionNode, view.zmostOrdCntx, tableNode=table, rendrCntx=view.rendrCntx)
xTopStructuralNode.hasOpenNode = False
if isinstance(definitionNode,(ModelBreakdown, ModelClosedDefinitionNode)) and definitionNode.parentChildOrder is not None:
#addBreakdownNode(view, disposition, definitionNode)
view.xTopRollup = CHILD_ROLLUP_LAST if definitionNode.parentChildOrder == u"children-first" else CHILD_ROLLUP_FIRST
expandDefinition(view, xTopStructuralNode, definitionNode, definitionNode, 1, disposition, facts, i, tblAxisRels)
view.dataCols = xTopStructuralNode.leafNodeCount
break
elif disposition == u"y" and yTopStructuralNode is None:
yTopStructuralNode = StructuralNode(None, definitionNode, definitionNode, view.zmostOrdCntx, tableNode=table, rendrCntx=view.rendrCntx)
yTopStructuralNode.hasOpenNode = False
if isinstance(definitionNode,(ModelBreakdown, ModelClosedDefinitionNode)) and definitionNode.parentChildOrder is not None:
#addBreakdownNode(view, disposition, definitionNode)
view.yAxisChildrenFirst.set(definitionNode.parentChildOrder == u"children-first")
view.yTopRollup = CHILD_ROLLUP_LAST if definitionNode.parentChildOrder == u"children-first" else CHILD_ROLLUP_FIRST
expandDefinition(view, yTopStructuralNode, definitionNode, definitionNode, 1, disposition, facts, i, tblAxisRels)
view.dataRows = yTopStructuralNode.leafNodeCount
break
elif disposition == u"z" and zTopStructuralNode is None:
zTopStructuralNode = StructuralNode(None, definitionNode, definitionNode, tableNode=table, rendrCntx=view.rendrCntx)
zTopStructuralNode._choiceStructuralNodes = []
zTopStructuralNode.hasOpenNode = False
#addBreakdownNode(view, disposition, definitionNode)
expandDefinition(view, zTopStructuralNode, definitionNode, definitionNode, 1, disposition, facts, i, tblAxisRels)
break
u'''
def jsonDefaultEncoder(obj):
if isinstance(obj, StructuralNode):
return {'1StructNode': str(obj),
'2Depth': obj.structuralDepth,
'2Group': obj.breakdownNode(view.tblELR).genLabel(),
'3Label': obj.header() or obj.xlinkLabel,
'4ChildNodes': obj.childStructuralNodes}
raise TypeError("Type {} is not supported for json output".format(type(obj).__name__))
with io.open(r"c:\temp\test.json", 'wt') as fh:
json.dump({"x":xTopStructuralNode, "y":yTopStructuralNode, "z":zTopStructuralNode},
fh,
sort_keys=True,
ensure_ascii=False,
indent=2,
default=jsonDefaultEncoder)
'''
view.colHdrTopRow = view.zAxisRows + 1 # need rest if combobox used (2 if view.zAxisRows else 1)
for i in xrange(view.rowHdrCols):
if view.rowNonAbstractHdrSpanMin[i]:
lastRowMinWidth = view.rowNonAbstractHdrSpanMin[i] - sum(view.rowHdrColWidth[i] for j in xrange(i, view.rowHdrCols - 1))
if lastRowMinWidth > view.rowHdrColWidth[view.rowHdrCols - 1]:
view.rowHdrColWidth[view.rowHdrCols - 1] = lastRowMinWidth
#view.rowHdrColWidth = (60,60,60,60,60,60,60,60,60,60,60,60,60,60)
# use as wraplength for all row hdr name columns 200 + fixed indent and abstract mins (not incl last name col)
view.rowHdrWrapLength = 200 + sum(view.rowHdrColWidth[:view.rowHdrCols + 1])
view.dataFirstRow = view.colHdrTopRow + view.colHdrRows + len(view.colHdrNonStdRoles)
view.dataFirstCol = 1 + view.rowHdrCols + len(view.rowHdrNonStdRoles)
#view.dataFirstRow = view.colHdrTopRow + view.colHdrRows + view.colHdrDocRow + view.colHdrCodeRow
#view.dataFirstCol = 1 + view.rowHdrCols + view.rowHdrDocCol + view.rowHdrCodeCol
#for i in range(view.dataFirstRow + view.dataRows):
# view.gridView.rowconfigure(i)
#for i in range(view.dataFirstCol + view.dataCols):
# view.gridView.columnconfigure(i)
# organize hdrNonStdRoles so code (if any) is after documentation (if any)
for hdrNonStdRoles in (view.colHdrNonStdRoles, view.rowHdrNonStdRoles):
iCodeRole = -1
for i, hdrNonStdRole in enumerate(hdrNonStdRoles):
if u'code' in os.path.basename(hdrNonStdRole).lower():
iCodeRole = i
break
if iCodeRole >= 0 and len(hdrNonStdRoles) > 1 and iCodeRole < len(hdrNonStdRoles) - 1:
del hdrNonStdRoles[iCodeRole]
hdrNonStdRoles.append(hdrNonStdRole)
if view.topRollup[u"x"]:
view.xAxisChildrenFirst.set(view.topRollup[u"x"] == CHILD_ROLLUP_LAST)
if view.topRollup[u"y"]:
view.yAxisChildrenFirst.set(view.topRollup[u"y"] == CHILD_ROLLUP_LAST)
return (tblAxisRelSet, xTopStructuralNode, yTopStructuralNode, zTopStructuralNode)
def sortkey(obj):
if isinstance(obj, ModelObject):
return obj.objectIndex
return obj
def addBreakdownNode(view, disposition, node):
if isinstance(node, ModelBreakdown):
axisBreakdowns = view.breakdownNodes[disposition]
if node not in axisBreakdowns:
axisBreakdowns.append(node)
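# expandDefinition (below) recursively materializes StructuralNodes for a
# breakdown's definition-node subtree along one axis disposition ("x", "y"
# or "z"): closed subtree relationships become nested child nodes (with
# roll-ups), relationship/selection/filter/tuple nodes are resolved into
# matching concepts, selections or fact partitions, and each expanded node
# is crossed with the remaining axes via cartesianProductExpander.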
def expandDefinition(view, structuralNode, breakdownNode, definitionNode, depth, axisDisposition, facts, i=None, tblAxisRels=None, processOpenDefinitionNode=True):
subtreeRelationships = view.axisSubtreeRelSet.fromModelObject(definitionNode)
def checkLabelWidth(structuralNode, checkBoundFact=False):
if axisDisposition == u"y":
# messages can't be evaluated, just use the text portion of format string
label = structuralNode.header(lang=view.lang,
returnGenLabel=not checkBoundFact,
returnMsgFormatString=not checkBoundFact)
if label:
# need to get a more exact word length in screen units
widestWordLen = max(len(w) * RENDER_UNITS_PER_CHAR for w in label.split())
# abstract only pertains to subtrees of closed nodes, but not cartesian products or open nodes
while structuralNode.depth >= len(view.rowHdrColWidth):
view.rowHdrColWidth.append(0)
if definitionNode.isAbstract or not subtreeRelationships: # isinstance(definitionNode, ModelOpenDefinitionNode):
if widestWordLen > view.rowHdrColWidth[structuralNode.depth]:
view.rowHdrColWidth[structuralNode.depth] = widestWordLen
else:
if widestWordLen > view.rowNonAbstractHdrSpanMin[structuralNode.depth]:
view.rowNonAbstractHdrSpanMin[structuralNode.depth] = widestWordLen
if structuralNode and isinstance(definitionNode, (ModelBreakdown, ModelEuAxisCoord, ModelDefinitionNode)):
try:
#cartesianProductNestedArgs = (view, depth, axisDisposition, facts, tblAxisRels, i)
ordCardinality, ordDepth = definitionNode.cardinalityAndDepth(structuralNode)
if (not definitionNode.isAbstract and
isinstance(definitionNode, ModelClosedDefinitionNode) and
ordCardinality == 0):
view.modelXbrl.error(u"xbrlte:closedDefinitionNodeZeroCardinality",
_(u"Closed definition node %(xlinkLabel)s does not contribute at least one structural node"),
modelObject=(view.modelTable,definitionNode), xlinkLabel=definitionNode.xlinkLabel, axis=definitionNode.localName)
nestedDepth = depth + ordDepth
# HF test
cartesianProductNestedArgs = [view, nestedDepth, axisDisposition, facts, tblAxisRels, i]
if axisDisposition == u"z":
if depth == 1: # choices (combo boxes) don't add to z row count
view.zAxisRows += 1
elif axisDisposition == u"x":
if ordDepth:
if nestedDepth - 1 > view.colHdrRows: view.colHdrRows = nestedDepth - 1
u'''
if not view.colHdrDocRow:
if definitionNode.header(role="http://www.xbrl.org/2008/role/documentation",
lang=view.lang):
view.colHdrDocRow = True
if not view.colHdrCodeRow:
if definitionNode.header(role="http://www.eurofiling.info/role/2010/coordinate-code"):
view.colHdrCodeRow = True
'''
hdrNonStdRoles = view.colHdrNonStdRoles
elif axisDisposition == u"y":
if ordDepth:
#if not definitionNode.isAbstract:
# view.dataRows += ordCardinality
if nestedDepth - 1 > view.rowHdrCols:
view.rowHdrCols = nestedDepth - 1
for j in xrange(1 + ordDepth):
view.rowHdrColWidth.append(RENDER_UNITS_PER_CHAR) # min width for 'tail' of nonAbstract coordinate
view.rowNonAbstractHdrSpanMin.append(0)
checkLabelWidth(structuralNode, checkBoundFact=False)
u'''
if not view.rowHdrDocCol:
if definitionNode.header(role="http://www.xbrl.org/2008/role/documentation",
lang=view.lang):
view.rowHdrDocCol = True
if not view.rowHdrCodeCol:
if definitionNode.header(role="http://www.eurofiling.info/role/2010/coordinate-code"):
view.rowHdrCodeCol = True
'''
hdrNonStdRoles = view.rowHdrNonStdRoles
if axisDisposition in (u"x", u"y"):
hdrNonStdPosition = -1 # where a match last occurred
for rel in view.modelXbrl.relationshipSet(XbrlConst.elementLabel).fromModelObject(definitionNode):
if rel.toModelObject is not None and rel.toModelObject.role != XbrlConst.genStandardLabel:
labelLang = rel.toModelObject.xmlLang
labelRole = rel.toModelObject.role
if (labelLang == view.lang or labelLang.startswith(view.lang) or view.lang.startswith(labelLang)
or (u"code" in labelRole)):
labelRole = rel.toModelObject.role
if labelRole in hdrNonStdRoles:
hdrNonStdPosition = hdrNonStdRoles.index(labelRole)
else:
hdrNonStdRoles.insert(hdrNonStdPosition + 1, labelRole)
isCartesianProductExpanded = False
if not isinstance(definitionNode, ModelFilterDefinitionNode):
isCartesianProductExpanded = True
# note: reduced set of facts should always be passed to subsequent open nodes
for axisSubtreeRel in subtreeRelationships:
childDefinitionNode = axisSubtreeRel.toModelObject
if childDefinitionNode.isRollUp:
structuralNode.rollUpStructuralNode = StructuralNode(structuralNode, breakdownNode, childDefinitionNode, )
if not structuralNode.childStructuralNodes: # first sub ordinate is the roll up
structuralNode.subtreeRollUp = CHILD_ROLLUP_FIRST
else:
structuralNode.subtreeRollUp = CHILD_ROLLUP_LAST
if not view.topRollup.get(axisDisposition):
view.topRollup[axisDisposition] = structuralNode.subtreeRollUp
else:
if (isinstance(definitionNode, (ModelBreakdown, ModelCompositionDefinitionNode)) and
isinstance(childDefinitionNode, ModelRelationshipDefinitionNode)): # append list products to compositionAxes subObjCntxs
childStructuralNode = structuralNode
else:
childStructuralNode = StructuralNode(structuralNode, breakdownNode, childDefinitionNode) # others are nested structuralNode
if axisDisposition != u"z":
structuralNode.childStructuralNodes.append(childStructuralNode)
if axisDisposition != u"z":
expandDefinition(view, childStructuralNode, breakdownNode, childDefinitionNode, depth+ordDepth, axisDisposition, facts, i, tblAxisRels) #recurse
if not (isinstance(childStructuralNode.definitionNode, ModelFilterDefinitionNode)
and any([node.isEntryPrototype(default=False) for node in childStructuralNode.childStructuralNodes])) :
# To be computed only if the structural node is not an open node
cartesianProductExpander(childStructuralNode, *cartesianProductNestedArgs)
else:
childStructuralNode.indent = depth - 1
if structuralNode.choiceStructuralNodes is not None:
structuralNode.choiceStructuralNodes.append(childStructuralNode)
expandDefinition(view, childStructuralNode, breakdownNode, childDefinitionNode, depth + 1, axisDisposition, facts) #recurse
# required when switching from abstract to roll up to determine abstractness
#if not structuralNode.subtreeRollUp and structuralNode.childStructuralNodes and definitionNode.tag.endswith("Node"):
# structuralNode.subtreeRollUp = CHILDREN_BUT_NO_ROLLUP
#if not hasattr(structuralNode, "indent"): # probably also for multiple open axes
if processOpenDefinitionNode:
if isinstance(definitionNode, ModelRelationshipDefinitionNode):
structuralNode.isLabeled = False
selfStructuralNodes = {} if definitionNode.axis.endswith(u'-or-self') else None
for rel in definitionNode.relationships(structuralNode):
if not isinstance(rel, list):
relChildStructuralNode = addRelationship(breakdownNode, definitionNode, rel, structuralNode, cartesianProductNestedArgs, selfStructuralNodes)
else:
addRelationships(breakdownNode, definitionNode, rel, relChildStructuralNode, cartesianProductNestedArgs)
if axisDisposition == u"z":
# if definitionNode is first structural node child remove it
if structuralNode.choiceStructuralNodes and structuralNode.choiceStructuralNodes[0].definitionNode == definitionNode:
del structuralNode.choiceStructuralNodes[0]
# flatten hierarchy of nested structural nodes into choice nodes (for single listbox)
def flattenChildNodesToChoices(childStructuralNodes, indent):
while childStructuralNodes:
choiceStructuralNode = childStructuralNodes.pop(0)
choiceStructuralNode.indent = indent
structuralNode.choiceStructuralNodes.append(choiceStructuralNode)
flattenChildNodesToChoices(choiceStructuralNode.childStructuralNodes, indent + 1)
if structuralNode.childStructuralNodes:
flattenChildNodesToChoices(structuralNode.childStructuralNodes, 0)
# set up by definitionNode.relationships
if isinstance(definitionNode, ModelConceptRelationshipDefinitionNode):
if (definitionNode._sourceQname != XbrlConst.qnXfiRoot and
definitionNode._sourceQname not in view.modelXbrl.qnameConcepts):
view.modelXbrl.error(u"xbrlte:invalidConceptRelationshipSource",
_(u"Concept relationship rule node %(xlinkLabel)s source %(source)s does not refer to an existing concept."),
modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel, source=definitionNode._sourceQname)
elif isinstance(definitionNode, ModelDimensionRelationshipDefinitionNode):
dim = view.modelXbrl.qnameConcepts.get(definitionNode._dimensionQname)
if dim is None or not dim.isExplicitDimension:
view.modelXbrl.error(u"xbrlte:invalidExplicitDimensionQName",
_(u"Dimension relationship rule node %(xlinkLabel)s dimension %(dimension)s does not refer to an existing explicit dimension."),
modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel, dimension=definitionNode._dimensionQname)
domMbr = view.modelXbrl.qnameConcepts.get(definitionNode._sourceQname)
if domMbr is None or not domMbr.isDomainMember:
view.modelXbrl.error(u"xbrlte:invalidDimensionRelationshipSource",
_(u"Dimension relationship rule node %(xlinkLabel)s source %(source)s does not refer to an existing domain member."),
modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel, source=definitionNode._sourceQname)
if (definitionNode._axis in (u"child", u"child-or-self", u"parent", u"parent-or-self", u"sibling", u"sibling-or-self") and
(not isinstance(definitionNode._generations, _NUM_TYPES) or definitionNode._generations > 1)):
view.modelXbrl.error(u"xbrlte:relationshipNodeTooManyGenerations ",
_(u"Relationship rule node %(xlinkLabel)s formulaAxis %(axis)s implies a single generation tree walk but generations %(generations)s is greater than one."),
modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel, axis=definitionNode._axis, generations=definitionNode._generations)
elif isinstance(definitionNode, ModelSelectionDefinitionNode):
structuralNode.setHasOpenNode()
structuralNode.isLabeled = False
isCartesianProductExpanded = True
varQn = definitionNode.variableQname
if varQn:
selections = sorted(structuralNode.evaluate(definitionNode, definitionNode.evaluate) or [],
key=lambda obj:sortkey(obj))
if isinstance(selections, (list,set,tuple)) and len(selections) > 1:
for selection in selections: # nested choices from selection list
childStructuralNode = StructuralNode(structuralNode, breakdownNode, definitionNode, contextItemFact=selection)
childStructuralNode.variables[varQn] = selection
childStructuralNode.indent = 0
if axisDisposition == u"z":
structuralNode.choiceStructuralNodes.append(childStructuralNode)
childStructuralNode.zSelection = True
else:
structuralNode.childStructuralNodes.append(childStructuralNode)
expandDefinition(view, childStructuralNode, breakdownNode, definitionNode, depth, axisDisposition, facts, processOpenDefinitionNode=False) #recurse
cartesianProductExpander(childStructuralNode, *cartesianProductNestedArgs)
else:
structuralNode.variables[varQn] = selections
elif isinstance(definitionNode, ModelFilterDefinitionNode):
structuralNode.setHasOpenNode()
structuralNode.isLabeled = False
isCartesianProductExpanded = True
structuralNode.abstract = True # spanning ordinate acts as a subtitle
filteredFactsPartitions = structuralNode.evaluate(definitionNode,
definitionNode.filteredFactsPartitions,
evalArgs=(facts,))
if structuralNode._rendrCntx.formulaOptions.traceVariableFilterWinnowing:
view.modelXbrl.info(u"table:trace",
_(u"Filter node %(xlinkLabel)s facts partitions: %(factsPartitions)s"),
modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel,
factsPartitions=unicode(filteredFactsPartitions))
# only for fact entry (true if no parent open nodes or all are on entry prototype row)
if axisDisposition != u"z":
childList = structuralNode.childStructuralNodes
if structuralNode.isEntryPrototype(default=True):
for i in xrange(getattr(view, u"openBreakdownLines",
# for file output, 1 entry row if no facts
0 if filteredFactsPartitions else 1)):
view.aspectEntryObjectId += 1
filteredFactsPartitions.append([FactPrototype(view, {u"aspectEntryObjectId": OPEN_ASPECT_ENTRY_SURROGATE + unicode(view.aspectEntryObjectId)})])
if structuralNode.isEntryPrototype(default=False):
break # only one node per cartesian product under outermost nested open entry row
else:
childList = structuralNode.choiceStructuralNodes
for factsPartition in filteredFactsPartitions:
childStructuralNode = StructuralNode(structuralNode, breakdownNode, definitionNode, contextItemFact=factsPartition[0])
childStructuralNode.indent = 0
childStructuralNode.depth -= 1 # for label width; parent is merged/invisible
childList.append(childStructuralNode)
checkLabelWidth(childStructuralNode, checkBoundFact=True)
#expandDefinition(view, childStructuralNode, breakdownNode, definitionNode, depth, axisDisposition, factsPartition, processOpenDefinitionNode=False) #recurse
cartesianProductNestedArgs[3] = factsPartition
# note: reduced set of facts should always be passed to subsequent open nodes
if subtreeRelationships:
for axisSubtreeRel in subtreeRelationships:
child2DefinitionNode = axisSubtreeRel.toModelObject
child2StructuralNode = StructuralNode(childStructuralNode, breakdownNode, child2DefinitionNode) # others are nested structuralNode
childStructuralNode.childStructuralNodes.append(child2StructuralNode)
expandDefinition(view, child2StructuralNode, breakdownNode, child2DefinitionNode, depth+ordDepth, axisDisposition, factsPartition) #recurse
cartesianProductExpander(child2StructuralNode, *cartesianProductNestedArgs)
else:
cartesianProductExpander(childStructuralNode, *cartesianProductNestedArgs)
# sort by header (which is likely to be typed dim value, for example)
childList.sort(key=lambda childStructuralNode:
childStructuralNode.header(lang=view.lang,
returnGenLabel=False,
returnMsgFormatString=False)
or u'') # exception on trying to sort if header returns None
# TBD if there is no abstract 'sub header' for these subOrdCntxs, move them in place of parent structuralNode
elif isinstance(definitionNode, ModelTupleDefinitionNode):
structuralNode.abstract = True # spanning ordinate acts as a subtitle
matchingTupleFacts = structuralNode.evaluate(definitionNode,
definitionNode.filteredFacts,
evalArgs=(facts,))
for tupleFact in matchingTupleFacts:
childStructuralNode = StructuralNode(structuralNode, breakdownNode, definitionNode, contextItemFact=tupleFact)
childStructuralNode.indent = 0
structuralNode.childStructuralNodes.append(childStructuralNode)
expandDefinition(view, childStructuralNode, breakdownNode, definitionNode, depth, axisDisposition, [tupleFact]) #recurse
# sort by header (which is likely to be typed dim value, for example)
if (structuralNode.childStructuralNodes and
any(sOC.header(lang=view.lang) for sOC in structuralNode.childStructuralNodes)):
structuralNode.childStructuralNodes.sort(key=lambda childStructuralNode: childStructuralNode.header(lang=view.lang) or u'')
elif isinstance(definitionNode, ModelRuleDefinitionNode):
for constraintSet in definitionNode.constraintSets.values():
for aspect in constraintSet.aspectsCovered():
if not constraintSet.aspectValueDependsOnVars(aspect):
if aspect == Aspect.CONCEPT:
conceptQname = definitionNode.aspectValue(view.rendrCntx, Aspect.CONCEPT)
concept = view.modelXbrl.qnameConcepts.get(conceptQname)
if concept is None or not concept.isItem or concept.isDimensionItem or concept.isHypercubeItem:
view.modelXbrl.error(u"xbrlte:invalidQNameAspectValue",
_(u"Rule node %(xlinkLabel)s specifies concept %(concept)s does not refer to an existing primary item concept."),
modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel, concept=conceptQname)
elif isinstance(aspect, QName):
dim = view.modelXbrl.qnameConcepts.get(aspect)
memQname = definitionNode.aspectValue(view.rendrCntx, aspect)
mem = view.modelXbrl.qnameConcepts.get(memQname)
if dim is None or not dim.isDimensionItem:
view.modelXbrl.error(u"xbrlte:invalidQNameAspectValue",
_(u"Rule node %(xlinkLabel)s specifies dimension %(concept)s does not refer to an existing dimension concept."),
modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel, concept=aspect)
if isinstance(memQname, QName) and (mem is None or not mem.isDomainMember):
view.modelXbrl.error(u"xbrlte:invalidQNameAspectValue",
_(u"Rule node %(xlinkLabel)s specifies domain member %(concept)s does not refer to an existing domain member concept."),
modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel, concept=memQname)
if axisDisposition == u"z":
if structuralNode.choiceStructuralNodes:
choiceNodeIndex = view.zOrdinateChoices.get(definitionNode, 0)
if isinstance(choiceNodeIndex, dict): # aspect entry for open node
structuralNode.aspects = choiceNodeIndex
structuralNode.choiceNodeIndex = -1
elif choiceNodeIndex < len(structuralNode.choiceStructuralNodes):
structuralNode.choiceNodeIndex = choiceNodeIndex
else:
structuralNode.choiceNodeIndex = 0
view.zmostOrdCntx = structuralNode
if not isCartesianProductExpanded or (axisDisposition == u"z" and structuralNode.choiceStructuralNodes is not None):
cartesianProductExpander(structuralNode, *cartesianProductNestedArgs)
if not structuralNode.childStructuralNodes: # childless root ordinate, make a child to iterate in producing table
subOrdContext = StructuralNode(structuralNode, breakdownNode, definitionNode)
except ResolutionException, ex:
if sys.version[0] >= u'3':
#import traceback
#traceback.print_tb(ex.__traceback__)
raise ex, None, ex.__traceback__ # provide original traceback information
else:
raise ex
except Exception, ex:
e = ResolutionException(u"arelle:resolutionException",
_(u"Exception in resolution of definition node %(node)s: %(error)s"),
modelObject=definitionNode, node=definitionNode.qname, error=unicode(ex)
)
if sys.version[0] >= u'3':
raise e, None, ex.__traceback__ # provide original traceback information
else:
raise e
def cartesianProductExpander(childStructuralNode, view, depth, axisDisposition, facts, tblAxisRels, i):
if i is not None: # recurse table relationships for cartesian product
for j, tblRel in enumerate(tblAxisRels[i+1:]):
tblObj = tblRel.toModelObject
if isinstance(tblObj, (ModelEuAxisCoord, ModelDefinitionNode)) and axisDisposition == tblRel.axisDisposition:
#addBreakdownNode(view, axisDisposition, tblObj)
#if tblObj.cardinalityAndDepth(childStructuralNode)[1] or axisDisposition == "z":
if axisDisposition == u"z":
subOrdTblCntx = StructuralNode(childStructuralNode, tblObj, tblObj)
                subOrdTblCntx._choiceStructuralNodes = [] # this is a breakdown node
                subOrdTblCntx.indent = 0 # separate breakdown is not indented
depth = 0 # cartesian next z is also depth 0
childStructuralNode.childStructuralNodes.append(subOrdTblCntx)
else: # non-ordinate composition
subOrdTblCntx = childStructuralNode
# predefined axes need facts sub-filtered
if isinstance(childStructuralNode.definitionNode, ModelClosedDefinitionNode):
matchingFacts = childStructuralNode.evaluate(childStructuralNode.definitionNode,
childStructuralNode.definitionNode.filteredFacts,
evalArgs=(facts,))
else:
matchingFacts = facts
# returns whether there were no structural node results
subOrdTblCntx.abstract = True # can't be abstract across breakdown
expandDefinition(view, subOrdTblCntx, tblObj, tblObj,
depth, # depth + (0 if axisDisposition == 'z' else 1),
axisDisposition, matchingFacts, j + i + 1, tblAxisRels) #cartesian product
break
def addRelationship(breakdownNode, relDefinitionNode, rel, structuralNode, cartesianProductNestedArgs, selfStructuralNodes=None):
variableQname = relDefinitionNode.variableQname
conceptQname = relDefinitionNode.conceptQname
coveredAspect = relDefinitionNode.coveredAspect(structuralNode)
if not coveredAspect:
return None
if selfStructuralNodes is not None:
fromConceptQname = rel.fromModelObject.qname
# is there an ordinate for this root object?
if fromConceptQname in selfStructuralNodes:
childStructuralNode = selfStructuralNodes[fromConceptQname]
else:
childStructuralNode = StructuralNode(structuralNode, breakdownNode, relDefinitionNode)
structuralNode.childStructuralNodes.append(childStructuralNode)
selfStructuralNodes[fromConceptQname] = childStructuralNode
if variableQname:
childStructuralNode.variables[variableQname] = []
if conceptQname:
childStructuralNode.variables[conceptQname] = fromConceptQname
childStructuralNode.aspects[coveredAspect] = fromConceptQname
relChildStructuralNode = StructuralNode(childStructuralNode, breakdownNode, relDefinitionNode)
childStructuralNode.childStructuralNodes.append(relChildStructuralNode)
else:
relChildStructuralNode = StructuralNode(structuralNode, breakdownNode, relDefinitionNode)
structuralNode.childStructuralNodes.append(relChildStructuralNode)
preferredLabel = rel.preferredLabel
if preferredLabel == XbrlConst.periodStartLabel:
relChildStructuralNode.tagSelector = u"table.periodStart"
    elif preferredLabel == XbrlConst.periodEndLabel:
relChildStructuralNode.tagSelector = u"table.periodEnd"
if variableQname:
relChildStructuralNode.variables[variableQname] = rel
toConceptQname = rel.toModelObject.qname
if conceptQname:
relChildStructuralNode.variables[conceptQname] = toConceptQname
relChildStructuralNode.aspects[coveredAspect] = toConceptQname
cartesianProductExpander(relChildStructuralNode, *cartesianProductNestedArgs)
return relChildStructuralNode
def addRelationships(breakdownNode, relDefinitionNode, rels, structuralNode, cartesianProductNestedArgs):
childStructuralNode = None # holder for nested relationships
for rel in rels:
if not isinstance(rel, list):
# first entry can be parent of nested list relationships
childStructuralNode = addRelationship(breakdownNode, relDefinitionNode, rel, structuralNode, cartesianProductNestedArgs)
elif childStructuralNode is None:
childStructuralNode = StructuralNode(structuralNode, breakdownNode, relDefinitionNode)
structuralNode.childStructuralNodes.append(childStructuralNode)
addRelationships(breakdownNode, relDefinitionNode, rel, childStructuralNode, cartesianProductNestedArgs)
else:
addRelationships(breakdownNode, relDefinitionNode, rel, childStructuralNode, cartesianProductNestedArgs)
|
sternshus/arelle2.7
|
svr-2.7/arelle/RenderingResolver.py
|
Python
|
apache-2.0
| 43,274
|
#!/usr/bin/env python
# coding=utf-8
"""321. Swapping Counters
https://projecteuler.net/problem=321
A horizontal row comprising 2n + 1 squares has n red counters placed at
one end and n blue counters at the other end, separated by a single
empty square in the centre. For example, when n = 3 the row is R R R _ B B B.
A counter can move from one square to the next (slide) or can jump over
another counter (hop) as long as the square next to that counter is
unoccupied.
Let M(n) represent the minimum number of moves/actions to completely reverse
the positions of the coloured counters; that is, move all the red counters to
the right and all the blue counters to the left.
It can be verified M(3) = 15, which also happens to be a triangle number.
If we create a sequence based on the values of n for which M(n) is a triangle
number then the first five terms would be:
1, 3, 10, 22, and 63, and their sum would be 99.
Find the sum of the first forty terms of this sequence.
"""
|
openqt/algorithms
|
projecteuler/pe321-swapping-counters.py
|
Python
|
gpl-3.0
| 972
|
from unittest import TestCase
from rfxcom.protocol.temphumiditybaro import TempHumidityBaro
from rfxcom.exceptions import (InvalidPacketLength, UnknownPacketSubtype,
UnknownPacketType)
class TempHumidityBaroTestCase(TestCase):
def setUp(self):
self.data = bytearray(b'\x0D\x54\x01\x11\x70\x02\x00\x25'
b'\x30\x01\x03\xF5\x02\x89')
self.parser = TempHumidityBaro()
def test_parse_bytes(self):
self.assertTrue(self.parser.validate_packet(self.data))
self.assertTrue(self.parser.can_handle(self.data))
result = self.parser.load(self.data)
self.assertEquals(result, {
'packet_length': 13,
'packet_type': 84,
'packet_type_name':
'Temperature and humidity and barometric sensors',
'sequence_number': 17,
'packet_subtype': 1,
'packet_subtype_name': 'BTHR918',
'temperature': 3.7,
'id': '0x7002',
'signal_level': 8,
'humidity': 48,
'humidity_status': 'Comfort',
'barometry': 1013,
'forecast': 2,
'forecast_status': 'Partly cloudy',
'battery_level': 9
})
self.assertEquals(str(self.parser), "<TempHumidityBaro ID:0x7002>")
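    # Note: the disabled test below feeds a packet of type 0x52 (temperature
    # and humidity), which this TempHumidityBaro parser (type 0x54) would
    # reject; it appears to be kept here for reference only.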
# def test_parse_bytes2(self):
# self.data = bytearray(b'\x0A\x52\x02\x02\xAE\x01\x00\x63'
# b'\x62\x03\x59')
# self.assertTrue(self.parser.validate_packet(self.data))
# self.assertTrue(self.parser.can_handle(self.data))
# result = self.parser.load(self.data)
# self.assertEquals(result, {
# 'packet_length': 10,
# 'packet_type': 82,
# 'packet_type_name': 'Temperature and humidity sensors',
# 'sequence_number': 2,
# 'packet_subtype': 2,
# 'packet_subtype_name': 'THGR810, THGN801, THGN800',
# 'temperature': 9.9,
# 'id': '0xAE01',
# 'channel': 1,
# 'signal_level': 5,
# 'humidity': 98,
# 'humidity_status': 'Wet',
# 'battery_level': 9
# })
# self.assertEquals(str(self.parser), "<TempHumidityBaro ID:0xAE01>")
def test_parse_bytes_negative_temp(self):
self.data = bytearray(b'\x0D\x54\x01\x11\x70\x02\x80\x25'
b'\x30\x01\x03\xF5\x02\x89')
self.assertTrue(self.parser.validate_packet(self.data))
self.assertTrue(self.parser.can_handle(self.data))
result = self.parser.load(self.data)
self.assertEquals(result['temperature'], -3.7)
def test_validate_bytes_short(self):
data = self.data[:1]
with self.assertRaises(InvalidPacketLength):
self.parser.validate_packet(data)
    def test_validate_unknown_packet_type(self):
self.data[1] = 0xFF
self.assertFalse(self.parser.can_handle(self.data))
with self.assertRaises(UnknownPacketType):
self.parser.validate_packet(self.data)
def test_validate_unknown_sub_type(self):
self.data[2] = 0xEE
self.assertFalse(self.parser.can_handle(self.data))
with self.assertRaises(UnknownPacketSubtype):
self.parser.validate_packet(self.data)
def test_log_name(self):
self.assertEquals(self.parser.log.name,
'rfxcom.protocol.TempHumidityBaro')
|
skimpax/python-rfxcom
|
tests/protocol/test_temphumiditybaro.py
|
Python
|
bsd-3-clause
| 3,512
|
#!/usr/bin/env python
# Copyright (C) 2006-2021 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
# TODO probably this code is completely outdated for Essentia 2.0, storing this code just in case
import sys
import essentia, essentia.streaming, essentia.standard
# When creating your own composite algorithm, your class must inherit from
# essentia.streaming.CompositeBase.
class ExtractorMfcc(essentia.streaming.CompositeBase):
# Specify the parameters of your algorithm and their default values as inputs to the __init__
# method. You can then use these parameters to configure the inner algorithms.
#
# Note: If you desire to translate your python composite algorithm to c++ code, you may not
# perform any operations other than those described below in the __init__ method. This is because
# the translator does not support translating anything other than creating algorithms, declaring
# inputs and outputs, and connecting inner algorithms. You do not need to abide by this
# restriction if you never intend to translate your composite algorithm to c++ code.
#
# To make this point clearer: it might have been convenient to only accept a frameSize
# parameter, and internally, set a hopSize variable to half the frameSize. So within our
# __init__ method, we would have a statement like "hopSize = frameSize/2". However, because this
# statement is not directly one of the operations described above, it will not be translated and
# an error will be raised instead.
def __init__(self, frameSize=2048, hopSize=1024, windowType='blackmanharris62'):
# Don't forget to call the base class's __init__ method!
super(ExtractorMfcc, self).__init__()
# Create and configure each inner algorithm
fc = essentia.streaming.FrameCutter(frameSize=frameSize,
hopSize=hopSize,
silentFrames='noise')
wnd = essentia.streaming.Windowing(type=windowType)
spec = essentia.streaming.Spectrum()
mfcc = essentia.streaming.MFCC()
# Declare the inputs of your composite algorithm in the self.inputs dictionary. The keys of
# this dictionary should be the name you give your input, and the values are the inputs of
# inner algorithms
self.inputs['audio'] = fc.signal
# Make connections between the inner algorithms
fc.frame >> wnd.frame >> spec.frame
spec.spectrum >> mfcc.spectrum
# If an output is not needed, it still must be connected--connect it to None
mfcc.bands >> None
# Declare your outputs in the same way as the inputs. Output names are allowed to be the
# same as input names, and is encouraged, if it makes sense for your composite algorithm.
# If the names match, you can do things like chaining connections as we did for the
# Windowing algorithm above.
self.outputs['mfcc'] = mfcc.mfcc
if __name__ == '__main__':
# Make sure the command was well-formed.
if len(sys.argv) < 3:
print 'Usage: extractor_mfcc.py <input audio filename> <output yaml filename>'
sys.exit(1)
# Loaders must be specified outside your composite algorithm.
loader = essentia.streaming.MonoLoader(filename=sys.argv[1])
# We are using the default values of our parameters so we don't specify any keyword arguments.
mfccex = ExtractorMfcc()
p = essentia.Pool()
# When connecting to/from your composite algorithm, use the names you declared in the
# self.inputs and self.outputs dictionaries, respectively.
loader.audio >> mfccex.audio
mfccex.mfcc >> (p, 'mfcc')
essentia.run(loader)
    # CompositeBase algorithms can be translated into c++ code and dot graphs
# can also be generated:
essentia.translate(ExtractorMfcc, # algorithm to be translated
'myExtractorMfcc', # output name for the c++ and dot generated files
dot_graph=True) # whether dot file should be generated
essentia.standard.YamlOutput(filename=sys.argv[2])(p)
|
MTG/essentia
|
src/examples/python/outdated/extractor_mfcc.py
|
Python
|
agpl-3.0
| 4,847
|
from django.contrib import admin
from .models import PizzaPlace
admin.site.register(PizzaPlace)
|
nicole-a-tesla/meetup.pizza
|
pizzaplace/admin.py
|
Python
|
mit
| 99
|
##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferScene
import IECoreScene
## To deprecate the uTangent and vTangent we hide them and feed in the new plugs.
def postCreate( node, menu ) :
node["uTangent"].setInput( node['tangent'] )
node["vTangent"].setInput( node['biTangent'] )
Gaffer.Metadata.registerValue( node['uTangent'], 'plugValueWidget:type', '' )
Gaffer.Metadata.registerValue( node['vTangent'], 'plugValueWidget:type', '' )
Gaffer.Metadata.registerValue( node['tangent'], 'layout:activator', 'alwaysActive' )
Gaffer.Metadata.registerValue( node['biTangent'], 'layout:activator', 'alwaysActive' )
Gaffer.Metadata.registerNode(
GafferScene.MeshTangents,
"description",
"""
Adds surface tangent primitive variables to the mesh based on either UV or topology information.
""",
"layout:activator:uvActivator", lambda parent : parent["mode"].getValue() == int(GafferScene.MeshTangents.Mode.UV),
"layout:activator:uvDeactivator", lambda parent : parent["mode"].getValue() != int(GafferScene.MeshTangents.Mode.UV),
"layout:activator:leftHandedActivator", lambda parent : parent["orthogonal"].getValue() == True,
"layout:activator:alwaysActive", lambda x : True,
plugs = {
"mode" : [
"description",
"""
The style of how to calculate the Tangents.
        (UV) calculates the tangents based on the gradient of the corresponding UVs
(FirstEdge) defines the vector to the first neighbor as tangent and the bitangent orthogonal to tangent and normal
(TwoEdges) defines the vector between the first two neighbors as tangent and the bitangent orthogonal to tangent and normal
(PrimitiveCentroid) points the tangent towards the primitive centroid and the bitangent orthogonal to tangent and normal
""",
"preset:UV", GafferScene.MeshTangents.Mode.UV,
"preset:FirstEdge", GafferScene.MeshTangents.Mode.FirstEdge,
"preset:TwoEdges", GafferScene.MeshTangents.Mode.TwoEdges,
"preset:PrimitiveCentroid", GafferScene.MeshTangents.Mode.PrimitiveCentroid,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"orthogonal" : [
"description",
"""
Adjusts vTangent to be orthogonal to the uTangent.
""",
],
"leftHanded" : [
"description",
"""
        Make the local coordinate frame left handed.
""",
"layout:activator", "leftHandedActivator",
],
"position" : [
"description",
"""
Name of the primitive variable which contains the position data used to calculate tangents & binormals.
        For example, 'Pref' would compute tangents using the reference positions (if defined).
""",
"layout:section", "Settings.Input",
],
"normal" : [
"description",
"""
Name of the primitive variable which contains the normals used to calculate tangents & binormals.
""",
"layout:section", "Settings.Input",
"layout:activator", "uvDeactivator",
],
"uvSet" : [
"description",
"""
Name of the UV set primitive variable used to calculate uTangent & vTangent.
""",
"layout:section", "Settings.Input",
"layout:activator", "uvActivator",
],
"uTangent" : [
"description",
"""
Name of the primitive variable which will contain the uTangent data.
""",
"layout:section", "Settings.Output",
"layout:activator", "uvActivator",
],
"vTangent" : [
"description",
"""
Name of the primitive variable which will contain the vTangent data.
""",
"layout:section", "Settings.Output",
"layout:activator", "uvActivator",
],
"tangent" : [
"description",
"""
Name of the primitive variable which will contain the tangent data.
""",
"layout:section", "Settings.Output",
"layout:activator", "uvDeactivator",
],
"biTangent" : [
"description",
"""
Name of the primitive variable which will contain the biTangent data.
""",
"layout:section", "Settings.Output",
"layout:activator", "uvDeactivator",
]
}
)
|
lucienfostier/gaffer
|
python/GafferSceneUI/MeshTangentsUI.py
|
Python
|
bsd-3-clause
| 5,682
|
import warnings
from pypeerassets.kutil import Kutil
from pypeerassets.protocol import Deck
from pypeerassets import pavoteproto
from hashlib import sha256
from binascii import unhexlify
from pypeerassets import transactions
from pypeerassets.pautils import read_tx_opreturn, find_tx_sender, get_block_info
from .networks import query, networks
def deck_vote_tag(deck):
    '''Derive the deterministic address used to tag vote inits for this deck.'''
deck_vote_tag_privkey = sha256(unhexlify(deck.asset_id) + b"vote_init").hexdigest()
deck_vote_tag_address = Kutil(network=deck.network, privkey=deck_vote_tag_privkey)
return deck_vote_tag_address.address
class Vote:
def __init__(self, version: int, description: str, count_mode: str,
start_block: int, end_block: int, deck: Deck,
choices=[], vote_metainfo="", vote_id=None, sender=None):
'''initialize vote object'''
self.version = version
self.description = description
self.choices = choices
self.count_mode = count_mode
self.start_block = start_block # at which block does vote start
self.end_block = end_block # at which block does vote end
self.vote_id = vote_id # vote_init txid
self.vote_metainfo = vote_metainfo # any extra info describing the vote
self.sender = sender
self.deck = deck
@property
def to_protobuf(self):
'''encode vote into protobuf'''
vote = pavoteproto.Vote()
vote.version = self.version
vote.description = self.description
vote.count_mode = vote.MODE.Value(self.count_mode)
vote.start_block = self.start_block
vote.end_block = self.end_block
vote.choices.extend(self.choices)
if not isinstance(self.vote_metainfo, bytes):
vote.vote_metainfo = self.vote_metainfo.encode()
else:
vote.vote_metainfo = self.vote_metainfo
proto = vote.SerializeToString()
if len(proto) > 80:
warnings.warn('\nMetainfo size exceeds maximum of 80 bytes allowed by OP_RETURN.')
return proto
@property
def to_dict(self):
'''vote info as dict'''
return {
"version": self.version,
"description": self.description,
"count_mode": self.count_mode,
"start_block": self.start_block,
"end_block": self.end_block,
"choices": self.choices,
"vote_metainfo": self.vote_metainfo
}
@property
def vote_choice_address(self):
        '''calculate the addresses on which the vote is cast.'''
addresses = []
vote_init_txid = unhexlify(self.vote_id)
for choice in self.choices:
vote_cast_privkey = sha256(vote_init_txid + bytes(
list(self.choices).index(choice))
).hexdigest()
addresses.append(Kutil(network=self.deck.network,
privkey=vote_cast_privkey).address)
return addresses
def parse_vote_info(protobuf: bytes) -> dict:
'''decode vote init tx op_return protobuf message and validate it.'''
vote = pavoteproto.Vote()
vote.ParseFromString(protobuf)
assert vote.version > 0, {"error": "Vote info incomplete, version can't be 0."}
    assert vote.start_block < vote.end_block, {"error": "vote can't end before it starts."}
return {
"version": vote.version,
"description": vote.description,
"count_mode": vote.MODE.Name(vote.count_mode),
"choices": vote.choices,
"start_block": vote.start_block,
"end_block": vote.end_block,
"vote_metainfo": vote.vote_metainfo
}
def vote_init(vote: Vote, inputs: list, change_address: str) -> bytes:
'''initialize vote transaction, must be signed by the deck_issuer privkey'''
network_params = query(vote.deck.network)
deck_vote_tag_address = deck_vote_tag(vote.deck)
tx_fee = network_params.min_tx_fee # settle for min tx fee for now
for utxo in inputs['utxos']:
utxo['txid'] = unhexlify(utxo['txid'])
utxo['scriptSig'] = unhexlify(utxo['scriptSig'])
outputs = [
{"redeem": 0.01, "outputScript": transactions.monosig_script(deck_vote_tag_address)},
{"redeem": 0, "outputScript": transactions.op_return_script(vote.to_protobuf)},
{"redeem": float(inputs['total']) - float(tx_fee) - float(0.01),
"outputScript": transactions.monosig_script(change_address)
}]
return transactions.make_raw_transaction(vote.deck.network, inputs['utxos'], outputs)
def find_vote_inits(provider, deck):
'''find vote_inits on this deck'''
    vote_inits = provider.listtransactions(deck_vote_tag(deck))
    for txid in vote_inits:
try:
raw_vote = provider.getrawtransaction(txid)
vote = parse_vote_info(read_tx_opreturn(raw_vote))
vote["vote_id"] = txid
vote["sender"] = find_tx_sender(provider, raw_vote)
vote["deck"] = deck
yield Vote(**vote)
except AssertionError:
pass
def vote_cast(vote: Vote, choice_index: int, inputs: list,
change_address: str) -> bytes:
'''vote cast transaction'''
network_params = query(vote.deck.network)
vote_cast_addr = vote.vote_choice_address[choice_index]
tx_fee = network_params.min_tx_fee # settle for min tx fee for now
for utxo in inputs['utxos']:
utxo['txid'] = unhexlify(utxo['txid'])
utxo['scriptSig'] = unhexlify(utxo['scriptSig'])
outputs = [
{"redeem": 0.01, "outputScript": transactions.monosig_script(vote_cast_addr)},
{"redeem": float(inputs['total']) - float(tx_fee) - float(0.01),
"outputScript": transactions.monosig_script(change_address)
}]
return transactions.make_raw_transaction(vote.deck.network, inputs['utxos'], outputs)
class VoteCast:
    '''vote cast object, internal representation of the vote_cast transaction'''
def __init__(self, vote: Vote, sender: str, blocknum: int,
confirmations: int, timestamp: int):
self.vote = vote
self.sender = sender
self.blocknum = blocknum
self.confirmations = confirmations
self.timestamp = timestamp
@property
def is_valid(self):
'''check if VoteCast is valid'''
if not (self.blocknum >= self.vote.start_block and
self.blocknum <= self.vote.end_block):
return False
if not self.confirmations >= 6:
return False
return True
def find_vote_casts(provider, vote: Vote, choice_index: int):
'''find and verify vote_casts on this vote_choice_address'''
vote_casts = provider.listtransactions(vote.vote_choice_address[choice_index])
for tx in vote_casts:
raw_tx = provider.getrawtransaction(tx, 1)
sender = find_tx_sender(provider, raw_tx)
confirmations = raw_tx["confirmations"]
blocknum = get_block_info(provider, raw_tx["blockhash"])["height"]
yield VoteCast(vote, sender, blocknum, confirmations, raw_tx["blocktime"])
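
# A minimal usage sketch of this module (not part of the original API),
# assuming `provider` is a configured RPC provider and `deck` is a Deck
# instance for the deck being voted on: tally valid casts per choice.
#
#     for vote in find_vote_inits(provider, deck):
#         for index, choice in enumerate(vote.choices):
#             valid_casts = [vc for vc in find_vote_casts(provider, vote, index)
#                            if vc.is_valid]
#             print(vote.description, choice, len(valid_casts))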
|
backpacker69/pypeerassets
|
pypeerassets/voting.py
|
Python
|
bsd-3-clause
| 7,188
|
from setuptools import setup
setup(
name = 'weblayer-pastedemo',
version = '0.1',
install_requires=[
'weblayer',
'PasteScript',
'WSGIUtils'
],
scripts = ['demo.py'],
entry_points = {
'paste.app_factory': [
'main=demo:app_factory',
]
}
)
|
thruflo/weblayer
|
src/weblayer/examples/deploy/paste/setup.py
|
Python
|
unlicense
| 318
|
import numpy as np
def softmax(x):
"""Compute the softmax function for each row of the input x.
It is crucial that this function is optimized for speed because
it will be used frequently in later code. You might find numpy
functions np.exp, np.sum, np.reshape, np.max, and numpy
broadcasting useful for this task.
Numpy broadcasting documentation:
http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
You should also make sure that your code works for a single
N-dimensional vector (treat the vector as a single row) and
for M x N matrices. This may be useful for testing later. Also,
make sure that the dimensions of the output match the input.
You must implement the optimization in problem 1(a) of the
written assignment!
Arguments:
x -- A N dimensional vector or M x N dimensional numpy matrix.
Return:
x -- You are allowed to modify x in-place
"""
orig_shape = x.shape
if len(x.shape) > 1:
# Matrix
# Subtract the max number in the row to make the largest number 0.
# It doesn't change the answer and prevents it from exploding.
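        # Equivalently: softmax(x)_i = exp(x_i - c) / sum_j(exp(x_j - c)) for any constant c.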
x -= np.max(x, axis=1).reshape(-1,1)
x = np.exp(x) / np.sum(np.exp(x), axis=1).reshape(-1,1)
else:
# Vector
x -= np.max(x)
x = np.exp(x) / np.sum(np.exp(x))
assert x.shape == orig_shape
return x
def test_softmax_basic():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print("Running basic tests...")
test1 = softmax(np.array([1,2]))
print(test1)
ans1 = np.array([0.26894142, 0.73105858])
assert np.allclose(test1, ans1, rtol=1e-05, atol=1e-06)
test2 = softmax(np.array([[1001,1002],[3,4]]))
print(test2)
ans2 = np.array([
[0.26894142, 0.73105858],
[0.26894142, 0.73105858]])
assert np.allclose(test2, ans2, rtol=1e-05, atol=1e-06)
test3 = softmax(np.array([[-1001,-1002]]))
print(test3)
ans3 = np.array([0.73105858, 0.26894142])
assert np.allclose(test3, ans3, rtol=1e-05, atol=1e-06)
print("You should be able to verify these results by hand!\n")
def test_softmax():
"""
Use this space to test your softmax implementation by running:
python q1_softmax.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print("Running your tests...")
test4 = softmax(np.array([[0,0,0],[3,4,1000]]))
print(test4)
if __name__ == "__main__":
test_softmax_basic()
test_softmax()
|
IAAAIAAIA/CS231n
|
thomas/assignment1/q1_softmax.py
|
Python
|
mit
| 2,587
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Mentorship'
db.create_table('tracker_mentorship', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('jobseeker', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobseeker', to=orm['tracker.UserProfile'])),
('mentor', self.gf('django.db.models.fields.related.ForeignKey')(related_name='mentor', to=orm['tracker.UserProfile'])),
('coach', self.gf('django.db.models.fields.related.ForeignKey')(related_name='coach', to=orm['tracker.UserProfile'])),
('startDate', self.gf('django.db.models.fields.DateField')()),
))
db.send_create_signal('tracker', ['Mentorship'])
def backwards(self, orm):
# Deleting model 'Mentorship'
db.delete_table('tracker_mentorship')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'tracker.apply': {
'Meta': {'ordering': "['-when', 'time']", 'object_name': 'Apply'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Position']", 'unique': 'True'}),
'time': ('django.db.models.fields.TimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.UserProfile']"}),
'when': ('django.db.models.fields.DateField', [], {})
},
'tracker.company': {
'Meta': {'ordering': "['name']", 'object_name': 'Company'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'division': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state_province': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.UserProfile']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'zipCode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'tracker.conversation': {
'Meta': {'ordering': "['-when', 'time']", 'object_name': 'Conversation'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Person']", 'unique': 'True'}),
'time': ('django.db.models.fields.TimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.UserProfile']"}),
'via': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'when': ('django.db.models.fields.DateField', [], {})
},
'tracker.gratitude': {
'Meta': {'ordering': "['-when', 'time']", 'object_name': 'Gratitude'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Person']", 'unique': 'True'}),
'time': ('django.db.models.fields.TimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.UserProfile']"}),
'when': ('django.db.models.fields.DateField', [], {})
},
'tracker.interview': {
'Meta': {'ordering': "['-when', 'time']", 'object_name': 'Interview'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Position']", 'unique': 'True'}),
'time': ('django.db.models.fields.TimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.UserProfile']"}),
'when': ('django.db.models.fields.DateField', [], {}),
'withWhom': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Person']"})
},
'tracker.lunch': {
'Meta': {'ordering': "['-when', 'time']", 'object_name': 'Lunch'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.TimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.UserProfile']"}),
'venue': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'when': ('django.db.models.fields.DateField', [], {}),
'withWhom': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Person']"})
},
'tracker.mentorship': {
'Meta': {'object_name': 'Mentorship'},
'coach': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'coach'", 'to': "orm['tracker.UserProfile']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jobseeker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobseeker'", 'to': "orm['tracker.UserProfile']"}),
'mentor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mentor'", 'to': "orm['tracker.UserProfile']"}),
'startDate': ('django.db.models.fields.DateField', [], {})
},
'tracker.networking': {
'Meta': {'ordering': "['-when', 'time']", 'object_name': 'Networking'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.TimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.UserProfile']"}),
'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Company']", 'unique': 'True'}),
'when': ('django.db.models.fields.DateField', [], {})
},
'tracker.onlinepresence': {
'Meta': {'ordering': "['name']", 'object_name': 'OnlinePresence'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.UserProfile']"})
},
'tracker.par': {
'Meta': {'ordering': "['question']", 'object_name': 'PAR'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'par_response': ('django.db.models.fields.TextField', [], {}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.UserProfile']"})
},
'tracker.person': {
'Meta': {'object_name': 'Person'},
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Company']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.UserProfile']"})
},
'tracker.pitch': {
'Meta': {'object_name': 'Pitch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'thePitch': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.UserProfile']"})
},
'tracker.position': {
'Meta': {'ordering': "['title']", 'object_name': 'Position'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Company']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.UserProfile']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'tracker.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['tracker']
|
kern3020/opportunity
|
opportunity/tracker/migrations/0002_auto__add_mentorship.py
|
Python
|
mit
| 13,807
|
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
class VideoCamera(object):
def __init__(self):
# Using OpenCV to capture from device 0. If you have trouble capturing
# from a webcam, comment the line below out and use a video file
# instead.
#self.video = cv2.VideoCapture(0)
self.camera = PiCamera()
#self.rawCapture = PiRGBArray(self.camera)
self.camera.resolution = (640, 480)
self.camera.framerate = 32
self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
# allow the camera to warmup
time.sleep(0.1)
# If you decide to use video.mp4, you must have this file in the folder
# as the main.py.
# self.video = cv2.VideoCapture('video.mp4')
def __del__(self):
#self.video.release()
pass
def get_frame(self):
# grab an image from the camera
self.camera.capture(self.rawCapture, format="bgr")
image = self.rawCapture.array
self.rawCapture.truncate(0)
# We are using Motion JPEG, but OpenCV defaults to capture raw images,
# so we must encode it into JPEG in order to correctly display the
# video stream.
ret, jpeg = cv2.imencode('.jpg', image)
return jpeg.tostring()
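
# A minimal usage sketch, following the Flask video-streaming pattern this
# example is based on; the '/video_feed' route and the module-level `app`
# object are assumptions, not part of the original class.
if __name__ == '__main__':
    from flask import Flask, Response

    app = Flask(__name__)

    def gen(camera):
        # Yield an MJPEG multipart stream, one JPEG frame per part.
        while True:
            frame = camera.get_frame()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

    @app.route('/video_feed')
    def video_feed():
        return Response(gen(VideoCamera()),
                        mimetype='multipart/x-mixed-replace; boundary=frame')

    app.run(host='0.0.0.0')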
|
tobykurien/pi-tracking-telescope
|
app/playground/video_streaming_with_flask_example-master/camera.py
|
Python
|
mit
| 1,387
|
from numberRecognition import app
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
JaeGyu/PythonEx_1
|
flaskEx/numberRecognition/app_start.py
|
Python
|
mit
| 86
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .brightcove import BrightcoveLegacyIE
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
class TlcDeIE(InfoExtractor):
IE_NAME = 'tlc.de'
_VALID_URL = r'https?://(?:www\.)?tlc\.de/(?:[^/]+/)*videos/(?P<title>[^/?#]+)?(?:.*#(?P<id>\d+))?'
_TEST = {
'url': 'http://www.tlc.de/sendungen/breaking-amish/videos/#3235167922001',
'info_dict': {
'id': '3235167922001',
'ext': 'mp4',
'title': 'Breaking Amish: Die Welt da draußen',
'description': (
'Vier Amische und eine Mennonitin wagen in New York'
' den Sprung in ein komplett anderes Leben. Begleitet sie auf'
' ihrem spannenden Weg.'),
'timestamp': 1396598084,
'upload_date': '20140404',
'uploader_id': '1659832546',
},
}
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1659832546/default_default/index.html?videoId=%s'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
brightcove_id = mobj.group('id')
if not brightcove_id:
title = mobj.group('title')
webpage = self._download_webpage(url, title)
brightcove_legacy_url = BrightcoveLegacyIE._extract_brightcove_url(webpage)
brightcove_id = compat_parse_qs(compat_urlparse.urlparse(brightcove_legacy_url).query)['@videoPlayer'][0]
return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id)
|
malept/youtube-dl
|
youtube_dl/extractor/tlc.py
|
Python
|
unlicense
| 1,644
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('applicants', '0010_auto_20151126_0525'),
]
operations = [
migrations.AddField(
model_name='applicant',
name='number_of_missed_calls',
field=models.IntegerField(default=0),
),
]
|
shailr/vms
|
applicants/migrations/0011_applicant_number_of_missed_calls.py
|
Python
|
gpl-2.0
| 421
|
import re
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from crispy_forms import layout as crispy
from crispy_forms.helper import FormHelper
from memoized import memoized
from corehq.apps.hqwebapp import crispy as hqcrispy
from corehq.apps.hqwebapp.crispy import FieldWithHelpBubble, FormActions
from corehq.apps.users.models import CommCareUser
class EmailForm(forms.Form):
email_subject = forms.CharField(max_length=100)
email_body_html = forms.CharField()
email_body_text = forms.CharField()
real_email = forms.BooleanField(required=False)
class ReprocessMessagingCaseUpdatesForm(forms.Form):
case_ids = forms.CharField(widget=forms.Textarea)
def clean_case_ids(self):
value = self.cleaned_data.get('case_ids', '')
value = value.split()
if not value:
raise ValidationError(_("This field is required."))
return set(value)
def __init__(self, *args, **kwargs):
super(ReprocessMessagingCaseUpdatesForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = "form-horizontal"
self.helper.form_id = 'reprocess-messaging-updates'
self.helper.label_class = 'col-sm-3 col-md-2'
self.helper.field_class = 'col-sm-9 col-md-8'
self.helper.layout = crispy.Layout(
FieldWithHelpBubble(
'case_ids',
help_bubble_text=_("Enter a space-separated list of case ids to reprocess. "
"Reminder rules will be rerun for the case, and the case's phone "
"number entries will be synced."),
),
FormActions(
crispy.Submit(
'submit',
'Submit'
)
)
)
class SuperuserManagementForm(forms.Form):
csv_email_list = forms.CharField(
label="Comma seperated email addresses",
widget=forms.Textarea()
)
privileges = forms.MultipleChoiceField(
choices=[
('is_superuser', 'Mark as superuser'),
],
widget=forms.CheckboxSelectMultiple(),
required=False,
)
def clean(self):
from email.utils import parseaddr
from django.contrib.auth.models import User
csv_email_list = self.cleaned_data.get('csv_email_list', '')
csv_email_list = csv_email_list.split(',')
csv_email_list = [parseaddr(em)[1] for em in csv_email_list]
if len(csv_email_list) > 10:
raise forms.ValidationError(
"This command is intended to grant superuser access to few users at a time. "
"If you trying to update permissions for large number of users consider doing it via Django Admin"
)
users = []
for username in csv_email_list:
if settings.IS_DIMAGI_ENVIRONMENT and "@dimagi.com" not in username:
raise forms.ValidationError("Email address '{}' is not a dimagi email address".format(username))
try:
users.append(User.objects.get(username=username))
except User.DoesNotExist:
raise forms.ValidationError(
"User with email address '{}' does not exist on "
"this site, please have the user registered first".format(username))
self.cleaned_data['users'] = users
return self.cleaned_data
def __init__(self, can_toggle_is_staff, *args, **kwargs):
super(SuperuserManagementForm, self).__init__(*args, **kwargs)
if can_toggle_is_staff:
self.fields['privileges'].choices.append(
('is_staff', 'Mark as developer')
)
self.helper = FormHelper()
self.helper.form_class = "form-horizontal"
self.helper.label_class = 'col-sm-3 col-md-2'
self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6'
self.helper.layout = crispy.Layout(
'csv_email_list',
'privileges',
FormActions(
crispy.Submit(
'superuser_management',
'Update privileges'
)
)
)
class DisableTwoFactorForm(forms.Form):
VERIFICATION = (
('in_person', 'In Person'),
('voice', 'By Voice'),
('video', 'By Video'),
('via_someone_else', 'Via another Dimagi Employee'),
)
username = forms.EmailField(label=_("Confirm the username"))
verification_mode = forms.ChoiceField(
choices=VERIFICATION, required=True, label="How was the request verified?"
)
via_who = forms.EmailField(
label=_("Verified by"),
required=False,
help_text="If you verified the request via someone else please enter their email address."
)
disable_for_days = forms.IntegerField(
label=_("Days to allow access"),
min_value=0,
max_value=30,
help_text=_(
"Number of days the user can access CommCare HQ before needing to re-enable two-factor auth."
"This is useful if someone has lost their phone and can't immediately re-setup two-factor auth.")
)
def __init__(self, initial, **kwargs):
self.username = initial.pop('username')
super(DisableTwoFactorForm, self).__init__(initial=initial, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'POST'
self.helper.form_class = 'form-horizontal'
self.helper.form_action = '#'
self.helper.label_class = 'col-sm-3 col-md-2'
self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6'
self.helper.layout = crispy.Layout(
crispy.Fieldset(
_("Basic Information"),
crispy.Field('username'),
crispy.Field('verification_mode'),
crispy.Field('via_who'),
crispy.Field('disable_for_days'),
),
hqcrispy.FormActions(
crispy.Submit(
"disable",
_("Disable"),
css_class="btn btn-danger",
),
css_class='modal-footer',
),
)
def clean_username(self):
username = self.cleaned_data['username']
if username != self.username:
raise forms.ValidationError("Username doesn't match expected.")
return username
def clean(self):
verification_mode = self.cleaned_data['verification_mode']
if verification_mode == 'via_someone_else' and not self.cleaned_data['via_who']:
raise forms.ValidationError({
"via_who": "Please enter the email address of the person who verified the request."
})
return self.cleaned_data
class DisableUserForm(forms.Form):
reason = forms.CharField(
label=_("Reason"),
help_text=_("Please give a reason for this action.")
)
reset_password = forms.BooleanField(
label=_("Reset account password"),
required=False,
help_text=_("Resetting the user's password will force them to follow the 'Forgot Password' workflow."
" Use this if it is suspected that the password has been compromised.")
)
def __init__(self, initial, **kwargs):
self.user = initial.pop('user')
super(DisableUserForm, self).__init__(initial=initial, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'POST'
self.helper.form_class = 'form-horizontal'
self.helper.form_action = '#'
self.helper.label_class = 'col-sm-3 col-md-2'
self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6'
action = _("Disable") if self.user.is_active else _("Enable")
css_class = 'btn-danger' if self.user.is_active else 'btn-primary'
self.helper.layout = crispy.Layout(
crispy.Field('reason'),
crispy.Field('reset_password'),
hqcrispy.FormActions(
crispy.Submit(
"submit",
action,
css_class="btn %s" % css_class,
),
css_class='modal-footer',
),
)
|
dimagi/commcare-hq
|
corehq/apps/hqadmin/forms.py
|
Python
|
bsd-3-clause
| 8,362
|
from django.contrib.auth.models import User
from django.test import TestCase
from django_dynamic_fixture import get
from readthedocs.builds.constants import EXTERNAL, LATEST
from readthedocs.builds.models import Version
from readthedocs.projects.constants import PRIVATE, PUBLIC
from readthedocs.projects.models import Project
class TestVersionQuerySetBase(TestCase):
def setUp(self):
self.user = get(User)
self.another_user = get(User)
self.project = get(
Project,
privacy_level=PUBLIC,
external_builds_privacy_level=PUBLIC,
users=[self.user],
main_language_project=None,
versions=[],
)
self.version_latest = self.project.versions.get(slug=LATEST)
self.version = get(
Version,
privacy_level=PUBLIC,
project=self.project,
active=True,
)
self.version_private = get(
Version,
privacy_level=PRIVATE,
project=self.project,
active=True,
)
self.another_project = get(
Project,
privacy_level=PUBLIC,
external_builds_privacy_level=PUBLIC,
users=[self.another_user],
main_language_project=None,
versions=[],
)
self.another_version_latest = self.another_project.versions.get(slug=LATEST)
self.another_version = get(
Version,
privacy_level=PUBLIC,
project=self.another_project,
active=True,
)
self.another_version_private = get(
Version,
privacy_level=PRIVATE,
project=self.another_project,
active=True,
)
self.shared_project = get(
Project,
privacy_level=PUBLIC,
external_builds_privacy_level=PUBLIC,
users=[self.user, self.another_user],
main_language_project=None,
versions=[],
)
self.shared_version_latest = self.shared_project.versions.get(slug=LATEST)
self.shared_version = get(
Version,
privacy_level=PUBLIC,
project=self.shared_project,
active=True,
)
self.shared_version_private = get(
Version,
privacy_level=PRIVATE,
project=self.shared_project,
active=True,
)
self.user_versions = {
self.version,
self.version_latest,
self.version_private,
self.shared_version,
self.shared_version_latest,
self.shared_version_private,
}
self.another_user_versions = {
self.another_version_latest,
self.another_version,
self.another_version_private,
self.shared_version,
self.shared_version_latest,
self.shared_version_private,
}
class VersionQuerySetTests(TestVersionQuerySetBase):
def test_public(self):
query = Version.objects.public()
versions = {
self.version_latest,
self.version,
self.another_version,
self.another_version_latest,
self.shared_version,
self.shared_version_latest,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_public_user(self):
query = Version.objects.public(user=self.user)
versions = (
self.user_versions |
{self.another_version_latest, self.another_version}
)
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_public_project(self):
query = Version.objects.public(user=self.user, project=self.project)
versions = {
self.version,
self.version_latest,
self.version_private,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_api(self):
query = Version.objects.api()
versions = {
self.version_latest,
self.version,
self.another_version,
self.another_version_latest,
self.shared_version,
self.shared_version_latest,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
class TestVersionQuerySetWithManagerBase(TestVersionQuerySetBase):
def setUp(self):
super().setUp()
self.external_version_public = get(
Version,
project=self.project,
active=True,
type=EXTERNAL,
privacy_level=PUBLIC,
)
self.external_version_private = get(
Version,
project=self.project,
active=True,
type=EXTERNAL,
privacy_level=PRIVATE,
)
self.another_external_version_public = get(
Version,
project=self.another_project,
active=True,
type=EXTERNAL,
privacy_level=PUBLIC,
)
self.another_external_version_private = get(
Version,
project=self.another_project,
active=True,
type=EXTERNAL,
privacy_level=PRIVATE,
)
self.shared_external_version_public = get(
Version,
project=self.shared_project,
active=True,
type=EXTERNAL,
privacy_level=PUBLIC
)
self.shared_external_version_private = get(
Version,
project=self.shared_project,
active=True,
type=EXTERNAL,
privacy_level=PRIVATE
)
class VersionQuerySetWithInternalManagerTest(TestVersionQuerySetWithManagerBase):
"""
Queries using Internal Manager should only include Internal Versions.
It will exclude EXTERNAL type Versions from the queries
and only include BRANCH, TAG, UNKNOWN type Versions.
"""
def test_all(self):
query = Version.internal.all()
versions = {
self.version_latest,
self.version,
self.version_private,
self.another_version_latest,
self.another_version,
self.another_version_private,
self.shared_version_latest,
self.shared_version,
self.shared_version_private,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_public(self):
query = Version.internal.public()
versions = {
self.version_latest,
self.version,
self.another_version,
self.another_version_latest,
self.shared_version,
self.shared_version_latest,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_public_user(self):
query = Version.internal.public(user=self.user)
versions = (
self.user_versions |
{self.another_version_latest, self.another_version}
)
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_public_project(self):
query = Version.internal.public(user=self.user, project=self.project)
versions = {
self.version,
self.version_latest,
self.version_private,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_api(self):
query = Version.internal.api()
versions = {
self.version_latest,
self.version,
self.another_version,
self.another_version_latest,
self.shared_version,
self.shared_version_latest,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
class VersionQuerySetWithExternalManagerTest(TestVersionQuerySetWithManagerBase):
"""
Queries using External Manager should only include External Versions.
    It will only include pull/merge request Versions in the queries.
"""
def test_all(self):
query = Version.external.all()
versions = {
self.external_version_public,
self.external_version_private,
self.another_external_version_public,
self.another_external_version_private,
self.shared_external_version_public,
self.shared_external_version_private,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_public_with_private_external_versions(self):
self.project.external_builds_privacy_level = PRIVATE
self.project.save()
self.another_project.external_builds_privacy_level = PRIVATE
self.another_project.save()
self.shared_project.external_builds_privacy_level = PRIVATE
self.shared_project.save()
query = Version.external.public()
versions = set()
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_public_with_some_private_external_versions(self):
self.another_project.external_builds_privacy_level = PRIVATE
self.another_project.save()
self.shared_project.external_builds_privacy_level = PRIVATE
self.shared_project.save()
query = Version.external.public()
versions = {
self.external_version_public,
self.external_version_private,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_public_with_public_external_versions(self):
query = Version.external.public()
versions = {
self.external_version_public,
self.external_version_private,
self.shared_external_version_public,
self.shared_external_version_private,
self.another_external_version_public,
self.another_external_version_private,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_public_user(self):
self.project.external_builds_privacy_level = PRIVATE
self.project.save()
self.another_project.external_builds_privacy_level = PRIVATE
self.another_project.save()
query = Version.external.public(user=self.user)
versions = {
self.external_version_public,
self.external_version_private,
self.shared_external_version_public,
self.shared_external_version_private,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_public_project(self):
query = Version.external.public(user=self.user, project=self.project)
versions = {
self.external_version_public,
self.external_version_private,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_api(self):
self.project.external_builds_privacy_level = PRIVATE
self.project.save()
self.another_project.external_builds_privacy_level = PRIVATE
self.another_project.save()
query = Version.external.api()
versions = {
self.shared_external_version_public,
self.shared_external_version_private,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
|
rtfd/readthedocs.org
|
readthedocs/rtd_tests/tests/test_version_querysets.py
|
Python
|
mit
| 11,875
|
# Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
# Copyright (c) 2010 Research In Motion Limited. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.config import committers, urls
class CommitterValidator(object):
def __init__(self, host):
self.host = host
def _committers_py_path(self):
# extension can sometimes be .pyc, we always want .py
committers_path = self.host.filesystem.path_to_module(committers.__name__)
(path, extension) = self.host.filesystem.splitext(committers_path)
path = self.host.filesystem.relpath(path, self.host.scm().checkout_root)
return ".".join([path, "py"])
def _flag_permission_rejection_message(self, setter_email, flag_name):
# This could be queried from the tool.
queue_name = "commit-queue"
committers_list = self._committers_py_path()
message = "%s does not have %s permissions according to %s." % (
setter_email,
flag_name,
urls.view_source_url(committers_list))
message += "\n\n- If you do not have %s rights please read %s for instructions on how to use bugzilla flags." % (
flag_name, urls.contribution_guidelines)
message += "\n\n- If you have %s rights please correct the error in %s by adding yourself to the file (no review needed). " % (
flag_name, committers_list)
message += "The %s restarts itself every 2 hours. After restart the %s will correctly respect your %s rights." % (
queue_name, queue_name, flag_name)
return message
def _validate_setter_email(self, patch, result_key, rejection_function):
committer = getattr(patch, result_key)()
# If the flag is set, and we don't recognize the setter, reject the flag!
setter_email = patch._attachment_dictionary.get("%s_email" % result_key)
if setter_email and not committer:
rejection_function(patch.id(), self._flag_permission_rejection_message(setter_email, result_key))
return False
return True
def _reject_patch_if_flags_are_invalid(self, patch):
return (self._validate_setter_email(patch, "reviewer", self.reject_patch_from_review_queue)
and self._validate_setter_email(patch, "committer", self.reject_patch_from_commit_queue))
def patches_after_rejecting_invalid_commiters_and_reviewers(self, patches):
return [patch for patch in patches if self._reject_patch_if_flags_are_invalid(patch)]
def reject_patch_from_commit_queue(self,
attachment_id,
additional_comment_text=None):
comment_text = "Rejecting attachment %s from commit-queue." % attachment_id
self.host.bugs.set_flag_on_attachment(attachment_id,
"commit-queue",
"-",
comment_text,
additional_comment_text)
def reject_patch_from_review_queue(self,
attachment_id,
additional_comment_text=None):
comment_text = "Rejecting attachment %s from review queue." % attachment_id
self.host.bugs.set_flag_on_attachment(attachment_id,
'review',
'-',
comment_text,
additional_comment_text)
|
leighpauls/k2cro4
|
third_party/WebKit/Tools/Scripts/webkitpy/common/config/committervalidator.py
|
Python
|
bsd-3-clause
| 5,209
|
"""
Filename: gth_solve.py
Author: Daisuke Oyama
Routine to compute the stationary distribution of an irreducible Markov
chain by the Grassmann-Taksar-Heyman (GTH) algorithm.
"""
import numpy as np
from .external import numba_installed, jit
if not numba_installed:
    try:
        xrange
    except NameError:  # python3
        xrange = range
def gth_solve(A):
r"""
This routine computes the stationary distribution of an irreducible
Markov transition matrix (stochastic matrix) or transition rate
matrix (generator matrix) `A`.
More generally, given a Metzler matrix (square matrix whose
off-diagonal entries are all nonnegative) `A`, this routine solves
for a nonzero solution `x` to `x (A - D) = 0`, where `D` is the
diagonal matrix for which the rows of `A - D` sum to zero (i.e.,
:math:`D_{ii} = \sum_j A_{ij}` for all :math:`i`). One (and only
one, up to normalization) nonzero solution exists corresponding to
    each recurrent class of `A`, and in particular, if `A` is
irreducible, there is a unique solution; when there are more than
one solution, the routine returns the solution that contains in its
support the first index `i` such that no path connects `i` to any
index larger than `i`. The solution is normalized so that its 1-norm
equals one. This routine implements the Grassmann-Taksar-Heyman
(GTH) algorithm [1]_, a numerically stable variant of Gaussian
elimination, where only the off-diagonal entries of `A` are used as
the input data. For a nice exposition of the algorithm, see Stewart
[2]_, Chapter 10.
Parameters
----------
A : array_like(float, ndim=2)
Stochastic matrix or generator matrix. Must be of shape n x n.
Returns
-------
x : numpy.ndarray(float, ndim=1)
Stationary distribution of `A`.
References
----------
.. [1] W. K. Grassmann, M. I. Taksar and D. P. Heyman, "Regenerative
Analysis and Steady State Distributions for Markov Chains,"
Operations Research (1985), 1107-1116.
.. [2] W. J. Stewart, Probability, Markov Chains, Queues, and
Simulation, Princeton University Press, 2009.
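    Examples
    --------
    A minimal illustrative sketch (values assumed): for the 2 x 2
    stochastic matrix below, solving x P = x by hand gives the
    stationary distribution (0.25, 0.75), which this routine
    should reproduce.
    >>> P = [[0.4, 0.6],
    ...      [0.2, 0.8]]
    >>> gth_solve(P)  # doctest: +SKIP
    array([ 0.25,  0.75])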
"""
A1 = np.array(A, dtype=float, copy=True, order='C')
# `order='C'` is for use with Numba <= 0.18.2
# See issue github.com/numba/numba/issues/1103
if len(A1.shape) != 2 or A1.shape[0] != A1.shape[1]:
raise ValueError('matrix must be square')
n = A1.shape[0]
x = np.zeros(n)
if numba_installed:
_gth_solve_jit(A1, x)
return x
# if not numba_installed
# === Reduction === #
for k in xrange(n-1):
scale = np.sum(A1[k, k+1:n])
if scale <= 0:
# There is one (and only one) recurrent class contained in
# {0, ..., k};
# compute the solution associated with that recurrent class.
n = k+1
break
A1[k+1:n, k] /= scale
A1[k+1:n, k+1:n] += np.dot(A1[k+1:n, k:k+1], A1[k:k+1, k+1:n])
# === Backward substitution === #
x[n-1] = 1
for k in xrange(n-2, -1, -1):
x[k] = np.dot(x[k+1:n], A1[k+1:n, k])
# === Normalization === #
x /= np.sum(x)
return x
if numba_installed:
@jit(nopython=True)
def _gth_solve_jit(A, out):
"""
        JIT compiled version of the main routine of gth_solve.
Parameters
----------
A : numpy.ndarray(float, ndim=2)
Stochastic matrix or generator matrix. Must be of shape n x n.
Data will be overwritten.
out : numpy.ndarray(float, ndim=1)
Output array in which to place the stationary distribution of A.
"""
n = A.shape[0]
# === Reduction === #
for k in range(n-1):
scale = np.sum(A[k, k+1:n])
if scale <= 0:
# There is one (and only one) recurrent class contained in
# {0, ..., k};
# compute the solution associated with that recurrent class.
n = k+1
break
for i in range(k+1, n):
A[i, k] /= scale
for j in range(k+1, n):
A[i, j] += A[i, k] * A[k, j]
# === Backward substitution === #
out[n-1] = 1
for k in range(n-2, -1, -1):
for i in range(k+1, n):
out[k] += out[i] * A[i, k]
# === Normalization === #
norm = np.sum(out)
for k in range(n):
out[k] /= norm
|
mgahsan/QuantEcon.py
|
quantecon/gth_solve.py
|
Python
|
bsd-3-clause
| 4,553
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'TaskLog'
db.delete_table('pypi_tasklog')
# Deleting model 'PackageModified'
db.delete_table('pypi_packagemodified')
def backwards(self, orm):
# Adding model 'TaskLog'
db.create_table('pypi_tasklog', (
('status', self.gf('model_utils.fields.StatusField')(default='pending', max_length=100, no_check_for_status=True)),
('exception', self.gf('django.db.models.fields.TextField')(blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=300)),
('task_id', self.gf('uuidfield.fields.UUIDField')(max_length=32, unique=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime(2012, 2, 20, 0, 27, 7, 520036))),
('kwargs', self.gf('django.db.models.fields.TextField')()),
('args', self.gf('django.db.models.fields.TextField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime(2012, 2, 20, 0, 27, 7, 520138))),
))
db.send_create_signal('pypi', ['TaskLog'])
# Adding model 'PackageModified'
db.create_table('pypi_packagemodified', (
('last_modified', self.gf('django.db.models.fields.CharField')(max_length=150)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime(2012, 2, 20, 0, 27, 7, 521976))),
('url', self.gf('django.db.models.fields.TextField')(unique=True)),
('release_file', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['packages.ReleaseFile'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime(2012, 2, 20, 0, 27, 7, 522074))),
('md5', self.gf('django.db.models.fields.CharField')(max_length=32)),
))
db.send_create_signal('pypi', ['PackageModified'])
models = {
'packages.package': {
'Meta': {'object_name': 'Package'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime(2012, 2, 20, 0, 41, 57, 989261)'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'downloads_synced_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 20, 0, 41, 57, 989593)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime(2012, 2, 20, 0, 41, 57, 989365)'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '150'})
},
'packages.release': {
'Meta': {'unique_together': "(('package', 'version'),)", 'object_name': 'Release'},
'author': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'author_email': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'classifiers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'releases'", 'blank': 'True', 'to': "orm['packages.TroveClassifier']"}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime(2012, 2, 20, 0, 41, 57, 990149)', 'db_index': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'download_uri': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'license': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'maintainer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'maintainer_email': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime(2012, 2, 20, 0, 41, 57, 990244)'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'releases'", 'to': "orm['packages.Package']"}),
'platform': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'raw_data': ('crate.fields.json.JSONField', [], {'null': 'True', 'blank': 'True'}),
'requires_python': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'packages.troveclassifier': {
'Meta': {'object_name': 'TroveClassifier'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'trove': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '350'})
},
'pypi.changelog': {
'Meta': {'ordering': "['-timestamp']", 'object_name': 'ChangeLog'},
'action': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime(2012, 2, 20, 0, 41, 57, 988589)'}),
'handled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime(2012, 2, 20, 0, 41, 57, 988690)'}),
'package': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'})
},
'pypi.downloadchange': {
'Meta': {'object_name': 'DownloadChange'},
'change': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime(2012, 2, 20, 0, 41, 57, 991858)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime(2012, 2, 20, 0, 41, 57, 991967)'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['packages.Release']"})
},
'pypi.log': {
'Meta': {'ordering': "['-created']", 'object_name': 'Log'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime(2012, 2, 20, 0, 41, 57, 992325)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime(2012, 2, 20, 0, 41, 57, 992418)'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'pypi.pypimirrorpage': {
'Meta': {'unique_together': "(('package', 'type'),)", 'object_name': 'PyPIMirrorPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['packages.Package']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
}
}
complete_apps = ['pypi']
|
crate-archive/crate-site
|
crateweb/apps/pypi/migrations/0008_auto__del_tasklog__del_packagemodified.py
|
Python
|
bsd-2-clause
| 8,580
|
import datetime
import os
import shutil
import unicodecsv as csv
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from mptindicators.scorecard.models import Country, Section, Indicator, IndicatorScore
from zipfile import ZipFile
TMP_DIR = settings.TMP_DIR
DATA_DIR = os.path.join(settings.BASE_DIR, 'data')
def write_countries(path):
path = os.path.join(path, 'countries.csv')
with open(path, 'w') as outfile:
writer = csv.writer(outfile)
writer.writerow(
('code', 'name',
'aggregate_score', 'in_law_score', 'in_practice_score',
'findings', 'electoral_system'))
for c in Country.objects.all().order_by('code'):
row = (c.code, c.name,
c.aggregate_score, c.in_law_score, c.in_practice_score,
c.findings, c.electoral_summary)
writer.writerow(row)
def write_sections(path):
path = os.path.join(path, 'sections.csv')
with open(path, 'w') as outfile:
writer = csv.writer(outfile)
writer.writerow(
('section_number', 'section_name',
'subsection_number', 'subsection_name'))
for s in Section.objects.all():
writer.writerow((s.number, s.name, '', ''))
for ss in s.subsections.all():
writer.writerow((s.number, s.name, ss.number, ss.name))
def write_indicators(path):
path = os.path.join(path, 'indicators.csv')
with open(path, 'w') as outfile:
writer = csv.writer(outfile)
writer.writerow(
('number', 'name', 'section', 'subsection',
'type', 'criteria', 'comment'))
for i in Indicator.objects.all().select_related():
row = (i.number, i.name,
               i.subsection.section.number, i.subsection.number,
i.type, i.criteria, i.comment)
writer.writerow(row)
def write_scores(path):
path = os.path.join(path, 'scores.csv')
with open(path, 'w') as outfile:
writer = csv.writer(outfile)
writer.writerow(
('country', 'indicator', 'score', 'comment', 'sources'))
for i in IndicatorScore.objects.all().select_related():
row = (i.country_id, i.indicator_id,
i.score, i.comment, i.sources)
writer.writerow(row)
class Command(BaseCommand):
help = 'Create data archive and publish to S3'
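    # Illustrative invocation (standard Django management command):
    #   python manage.py publishdata
    # writes the CSVs and spreadsheet into TMP_DIR/<date>/ and zips them
    # into TMP_DIR/mpt_data.zip.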
def handle(self, *args, **options):
# get path stuff set up
if not os.path.exists(TMP_DIR):
os.mkdir(TMP_DIR)
now = datetime.date.today()
archive_path = os.path.join(TMP_DIR, now.isoformat())
if not os.path.exists(archive_path):
os.mkdir(archive_path)
# write data
write_countries(archive_path)
write_sections(archive_path)
write_indicators(archive_path)
write_scores(archive_path)
# copy Excel spreadsheet
src_path = os.path.join(DATA_DIR, 'mpt-indicators.xls')
dst_path = os.path.join(archive_path, 'mpt-indicators.xls')
shutil.copyfile(src_path, dst_path)
# zip it
zip_path = os.path.join(TMP_DIR, 'mpt_data.zip')
with ZipFile(zip_path, 'w') as zf:
for filename in os.listdir(archive_path):
zf.write(os.path.join(archive_path, filename), filename)
# cleanup
shutil.rmtree(archive_path)
|
sunlightlabs/mptindicators
|
mptindicators/scorecard/management/commands/publishdata.py
|
Python
|
bsd-3-clause
| 3,481
|
#!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Build tool setup for *BSD.
This module is a SCons tool which should be included in the topmost *BSD
environment.
It is used as follows:
env = base_env.Clone(tools = ['component_setup'])
bsd_env = base_env.Clone(tools = ['target_platform_bsd'])
"""
def ComponentPlatformSetup(env, builder_name):
"""Hook to allow platform to modify environment inside a component builder.
Args:
env: Environment to modify
builder_name: Name of the builder
"""
if env.get('ENABLE_EXCEPTIONS'):
env.FilterOut(CCFLAGS=['-fno-exceptions'])
env.Append(CCFLAGS=['-fexceptions'])
#------------------------------------------------------------------------------
def generate(env):
# NOTE: SCons requires the use of this name, which fails gpylint.
"""SCons entry point for this tool."""
# Preserve some variables that get blown away by the tools.
saved = dict()
for k in ['CFLAGS', 'CCFLAGS', 'CXXFLAGS', 'LINKFLAGS', 'LIBS']:
saved[k] = env.get(k, [])
env[k] = []
# Use g++
env.Tool('g++')
env.Tool('gcc')
env.Tool('gnulink')
env.Tool('ar')
env.Tool('as')
# Set target platform bits
env.SetBits('bsd', 'posix')
env.Replace(
TARGET_PLATFORM='BSD',
COMPONENT_PLATFORM_SETUP=ComponentPlatformSetup,
CCFLAG_INCLUDE='-include', # Command line option to include a header
# Code coverage related.
COVERAGE_CCFLAGS=['-ftest-coverage', '-fprofile-arcs'],
COVERAGE_LIBS='gcov',
COVERAGE_STOP_CMD=[
'$COVERAGE_MCOV --directory "$TARGET_ROOT" --output "$TARGET"',
('$COVERAGE_GENHTML --output-directory $COVERAGE_HTML_DIR '
'$COVERAGE_OUTPUT_FILE'),
],
)
env.Append(
HOST_PLATFORMS=['BSD'],
CPPDEFINES=['OS_BSD=OS_BSD'],
# Settings for debug
CCFLAGS_DEBUG=[
'-O0', # turn off optimizations
'-g', # turn on debugging info
],
# Settings for optimized
CCFLAGS_OPTIMIZED=['-O2'],
# Settings for component_builders
COMPONENT_LIBRARY_LINK_SUFFIXES=['.so', '.a'],
COMPONENT_LIBRARY_DEBUG_SUFFIXES=[],
)
# Restore saved flags.
env.Append(**saved)
|
asacamano/keyczar
|
cpp/src/tools/swtoolkit/site_scons/site_tools/target_platform_bsd.py
|
Python
|
apache-2.0
| 3,718
|
#!/usr/bin/python2
import os
import re
import sys
import argparse
import matplotlib.pyplot as plt
from pylab import *
"""
opens output files from ./output and creates graphs from them in ./images
to create the output files in ./output, run:
$ ./run_alot.sh
"""
def makediffs(X, Y, hX, hY):
diffs = []
for i in xrange(len(X)):
x = X[i]
y = Y[i]
delta = abs(y - hY[x])
diffs.append(delta)
return diffs
def calculateMAE(diffs):
N = len(diffs)
return sum(diffs) / float(N)
class DictWrapper(dict):
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError(name)
class Organizer(dict):
    files = []  # list of DictWrapper entries, one per parsed stats file
def getall(self, bc=None, trc=None, alg=None, csize=None, gsize=None):
ret = []
for dw in self.files:
use = True
if bc and dw.bc != bc:
use = False
if alg and dw.alg != alg:
use = False
if trc and dw.trc != trc:
use = False
if csize and dw.csize != csize:
use = False
if gsize and dw.gsize != gsize:
use = False
if use:
ret.append(dw)
return ret
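# Illustrative usage of Organizer (assumed values): o.getall(alg="lru",
# trc="trace1") returns every parsed run of that algorithm on that trace;
# omitted filters match anything.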
def main(BASESIZE, outputdir):
files = os.listdir(outputdir)
algs = {}
traces = {}
o = Organizer()
# DOING: change this to open all the files in a specific directory and plot the histograms together
for fn in files:
lines = open("%s/%s" % (outputdir, fn), "r").readlines()
# filename: algorithm_trace_buckets_cachesize_ghostlistsize.(histogram|stats)
fm = re.match("(?P<alg>\w+)_(?P<trc>\w+)_(?P<bc>\d+)_(?P<csize>\d+)_(?P<gsize>\d+)\.(?P<type>\w+)", fn)
d1 = DictWrapper(fm.groupdict())
if d1.type == "stats":
m1 = re.match("Hits=(?P<hits>\d+), Misses=(?P<misses>\d+), Requests=(?P<requests>\d+)", lines[0])
d2 = DictWrapper(m1.groupdict())
d1.update(d2)
algs[d1.alg] = 1
traces[d1.trc] = 1
histogramlines = open("%s/%s" % (outputdir, fn.replace("stats", "histogram")), "r").readlines()
d1.histogram = [(int(x[0]), float(x[1])) for x in map(lambda x:x.split(" "), histogramlines)]
o.files.append(d1)
elif d1.type == "histogram":
pass
else:
assert 0
print len(o.files)
#BASESIZE = 1500
for alg in algs.keys():
for trc in traces.keys():
print "Generating plot for %s,%s" % (alg, trc)
plt.clf()
fig, ax = plt.subplots()
ores = o.getall(alg=alg, trc=trc)
ores.sort(key=lambda x: int(x.csize))
X = [int(x.csize) for x in ores]
Y = [float(x.hits) / float(x.requests) for x in ores]
histogram = filter(lambda x:int(x.csize) == BASESIZE, ores)[0].histogram
hX = [z[0] for z in histogram]
hY = [z[1] for z in histogram]
diffs = makediffs(X, Y, hX, hY)
#print diffs
print "maxdiff = %.1f%%, mindiff = %.1%%f, MAE=%.1f%%\n" % (100 * max(diffs), 100 * min(diffs), 100 * calculateMAE(diffs))
#print alg, [(x.csize, x.hits) for x in ores]
plt.plot(X, Y, 'ro')
plt.plot(hX, hY)
fpref = "images/%s_%s_%d" % (alg, trc, BASESIZE)
ax.set_ylabel('Cumulative hit rate')
ax.set_xlabel('Cache size (# items)')
ax.set_title('Algorithm: %s, trace: %s and base from %d items ' % (alg, trc, BASESIZE))
plt.savefig('%s.png' % fpref, format='png')
plt.savefig('%s.eps' % fpref, format='eps', dpi=1000)
"""
averages = {}
stdevs = {}
alg_order = [("lru", "regular"), ("lru+rounder","rounder"), ("lru+avl","avl"), ("lru+mattson","mattson")]
sizes = [5000]
N = len(algs.keys())
M = len(sizes)
for alg in algs.keys():
for sz in sizes:
ores = o.getall(alg=alg)
arr = [float(x.t) for x in ores]
avg = sum(arr) / float(len(arr))
stdev = np.std(arr)
print alg, avg, stdev
averages[alg,sz] = avg
stdevs[alg,sz] = stdev
ind = np.arange(M) # the x locations for the groups
width = 0.14 # the width of the bars
offset = 0.10
print o.files
fig, ax = plt.subplots()
rects = []
i = 0
colors = ['y', 'g', 'b', 'c', 'm', 'p']
for n, alg in alg_order:
rMeans = tuple( averages[alg,size] for size in sizes )
rStd = tuple( stdevs[alg,size] for size in sizes )
rBar = ax.bar(ind + offset + width*(i+1), rMeans, width, color=colors[i], yerr=rStd)
rects.append((rBar, "%s" % n))
i += 1
#womenMeans = tuple( averages[mem, 'rounder', 4] for mem in sorted(mems.keys()))
#womenStd = (3, 5, 2, 3, 3)
#rects2 = ax.bar(ind+width, womenMeans, width, color='y', yerr=womenStd)
#rects2 = ax.bar(ind+width, womenMeans, width, color='y')
# add some
ax.set_ylabel('Running time [s]')
ax.set_xlabel('Cache size')
ax.set_title('Statistics algorithms')
ax.set_xticks(ind+offset + width *(M / 2.0))
#ax.set_xticklabels( tuple(map(str, sorted(mems.keys()))) )
ax.legend( tuple(rects[i][0] for i in xrange(N)), tuple(rects[i][1] for i in xrange(N)) )
#ax.legend( (rects1[0], rects2[0]), ('Unmodified', 'Rounder B=4') )
"""
""" # TODO: fixme!!
memskeys = sorted(mems.keys())
def autolabel(_rects, mc):
# attach some text labels
i = 0
for _rect in _rects:
ovhead = overheads[memskeys[i], bc]
height = _rect.get_height()
ax.text(_rect.get_x()+_rect.get_width()/2., 1.05*height, '%.1f%%' % ovhead,
ha='center', va='bottom', fontsize = 4)
i += 1
#autolabel(rects1)
i = 1
for bc in sorted(bcs.keys()):
autolabel(rects[i][0], bc)
i += 1
"""
#plt.savefig('overhead.png', format='png')
#plt.savefig('overhead.eps', format='eps', dpi=1000)
#plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--outputdir", help="The output directory to use", type=str, default="", required=True)
parser.add_argument("-b", "--basesize", help="The basesize to use", type=int, default=700, required=True)
args = parser.parse_args()
outputdir = args.outputdir
BASESIZE = args.basesize
main(BASESIZE, outputdir)
|
trauzti/mimir
|
pymimir/make_images.py
|
Python
|
isc
| 6,617
|
from twisted.web import resource
import os
from hatena import Log, ServerLog, Silent, NotFound
from DB import Database
from Hatenatools import TMB
#The movie folder:
class PyResource(resource.Resource):
isLeaf = False
def __init__(self):
resource.Resource.__init__(self)
self.CreatorID = CreatorIDResource()
def getChild(self, name, request):
if Database.CreatorExists(name):
return self.CreatorID
elif name == "":
return self
else:
return NotFound
def render(self, request):
request.setResponseCode(403)
return "403 - Denied access"
#The creator ID folder:
class CreatorIDResource(resource.Resource):
isLeaf = False
def __init__(self):
resource.Resource.__init__(self)
self.CreatorIDFile = CreatorIDFileResource()
def getChild(self, name, request):
CreatorID = request.path.split("/")[-2]
filename = ".".join(name.split(".")[:-1])
if Database.FlipnoteExists(CreatorID, filename):#html, ppm and info
return self.CreatorIDFile
elif name == "":
return self
else:
return NotFound
def render(self, request):
request.setResponseCode(403)
return "403 - Denied access"
#Any public file inside creator ID folder:
class CreatorIDFileResource(resource.Resource):
isLeaf = True
def __init__(self):
resource.Resource.__init__(self)
def render(self, request):
creator, file = request.path.split("/")[-2:]
filetype = file.split(".")[-1].lower()
		if filetype == "ppm":
#log it:
path = "/".join(request.path.split("/")[3:])
Log(request, path)
#add a view:
Database.AddView(creator, file[:-4])
#read ppm file:
data = Database.GetFlipnotePPM(creator, file[:-4])
#send file to the client:
request.responseHeaders.setRawHeaders('content-type', ['text/plain'])
return data
elif filetype == "info":
path = "/".join(request.path.split("/")[3:])
Log(request, path, True)
request.responseHeaders.setRawHeaders('content-type', ['text/plain'])
return "0\n0\n"#undocumented what it means
elif filetype == "htm":
#maybe cache the details page of Database.Newest?
if "mode" in request.args:
if request.args["mode"][0] == "commentshalfsize":
pass
return self.GenerateDetailsPage(creator, ".".join(file.split(".")[:-1])).encode("UTF-8")
elif filetype == "star":
path = "/".join(request.path.split("/")[3:])
headers = request.getAllHeaders()
#bad formatting
if "x-hatena-star-count" not in headers:
ServerLog.write("%s got 403 when requesting %s without a X-Hatena-Star-Count header" % (request.getClientIP(), path), Silent)
request.setResponseCode(400)
return "400 - Denied access\nRequest lacks a X-Hatena-Star-Count http header"
#add the stars:
amount = int(headers["x-hatena-star-count"])
if not Database.AddStar(creator, file[:-5], amount):
#error
ServerLog.write("%s got 500 when requesting %s" % (request.getClientIP(), path), Silent)
request.setResponseCode(500)
return "500 - Internal server error\nAdding the stars seem to have failed."
#report success
ServerLog.write("%s added %i stars to %s/%s.ppm" % (request.getClientIP(), amount, creator, file[:-5]), Silent)
return "Success"
elif filetype == "dl":
path = "/".join(request.path.split("/")[3:])
Log(request, path, True)
			#this is POSTed to once the flipnote has been saved to memory.
Database.AddDownload(creator, file[:-3])
return "Noted ;)"
else:
path = "/".join(request.path.split("/")[3:])
ServerLog.write("%s got 403 when requesting %s" % (request.getClientIP(), path), Silent)
request.setResponseCode(403)
return "403 - Denied access"
#details page
def GenerateDetailsPage(self, CreatorID, filename):#filename without ext
flipnote = Database.GetFlipnote(CreatorID, filename)#flipnote = [filename, views, stars, green stars, red stars, blue stars, purple stars, Channel], all probably strings
if not flipnote:
return "This flipnote doesn't exist!"
tmb = TMB().Read(Database.GetFlipnoteTMB(CreatorID, filename))
if not tmb:
return "This flipnote is corrupt!"
#Is it a spinoff?
Spinnoff = ""
		if tmb.OriginalAuthorID != tmb.EditorAuthorID or tmb.OriginalFilename != tmb.CurrentFilename:
if Database.FlipnoteExists(tmb.OriginalAuthorID, tmb.OriginalFilename[:-4]):
Spinnoff = SpinoffTemplate1.replace("%%CreatorID%%", tmb.OriginalAuthorID).replace("%%Filename%%", tmb.OriginalFilename[:-4])
		elif tmb.OriginalAuthorID != tmb.EditorAuthorID:
Spinnoff = SpinoffTemplate2
#make each entry:
Entries = []
#Creator username:
name = "Creator"
#content = "<a href=\"http://flipnote.hatena.com/ds/ds/v2-xx/%s/profile.htm?t=260&pm=80\">%s</a>" % (CreatorID, tmb.EditorAuthorName)
content = '<a href="http://flipnote.hatena.com/ds/v2-xx/%s/profile.htm?t=260&pm=80\">%s</a>' % (CreatorID, tmb.Username)
Entries.append(PageEntryTemplate.replace("%%Name%%", name).replace("%%Content%%", content))
#Stars:
name = "Stars"
content = u'<a href="http://flipnote.hatena.com/ds/v2-xx/movie/%s/%s.htm?mode=stardetail"><span class="star0c">\u2605</span> <span class="star0">%s</span></a>' % (CreatorID, filename, flipnote[2])#yellow stars
#todo: add other stars
Entries.append(PageEntryTemplate.replace("%%Name%%", name).replace("%%Content%%", content))
#Views:
name = "Views"
content = str(flipnote[1])
Entries.append(PageEntryTemplate.replace("%%Name%%", name).replace("%%Content%%", content))
#Channel:
if flipnote[7]:#todo: make channels work at all
name = "Channel"
			content = '<a href="http://flipnote.hatena.com/ds/v2-xx/ch/%s.uls">%s</a>' % (flipnote[7], flipnote[7])
Entries.append(PageEntryTemplate.replace("%%Name%%", name).replace("%%Content%%", content))
#Comments:
Comments = "0"
	#todo: add original author info too
#add the entries to page:
return DetailsPageTemplate.replace("%%CreatorID%%", CreatorID).replace("%%Filename%%", filename).replace("%%CommentCount%%", Comments).replace("%%Spinoff%%", Spinnoff).replace("%%PageEntries%%", PageEntrySeparator.join(Entries))
#templates:
DetailsPageTemplate = """<html>
<head>
<title>Flipnote by %%Username%%</title>
<meta name="bgm" content="1">
<meta name="upperlink" content="http://flipnote.hatena.com/ds/v2-xx/movie/%%CreatorID%%/%%Filename%%.ppm">
<meta name="starbutton" content="http://flipnote.hatena.com/ds/v2-xx/movie/%%CreatorID%%/%%Filename%%.star">
<meta name="savebutton" content="http://flipnote.hatena.com/ds/v2-xx/movie/%%CreatorID%%/%%Filename%%.ppm">
<meta name="playcontrolbutton" content="">
<link rel="stylesheet" href="http://flipnote.hatena.com/css/ds/basic.css">
</head>
<body>
<table width="240" border="0" cellspacing="0" cellpadding="0" class="tab">
<tr>
<td class="border" width="5" align="center">
<div class="border"></div>
</td>
<td class="border" width="70" align="center">
<div class="border"></div>
</td>
<td class="border" width="95" align="center">
<div class="border"></div>
</td>
</tr>
<tr> <!-- top row, description and comments -->
<td class="space"> </td>
<td class="tabon" align="center"> <!-- selected -->
<div class="on" align="center">Meta</div> <!-- selected -->
</td>
<td class="taboff" align="center"> <!-- not selected -->
<a class="taboff" href="http://flipnote.hatena.com/ds/v2-eu/movie/%%CreatorID%%/%%Filename%%.htm?mode=commentshalfsize">%%CommentCount%% Comments</a>
</td>
</tr>
</table>
<div class="pad5b"></div>%%Spinoff%%
<table width="226" border="0" cellspacing="0" cellpadding="0" class="detail">%%PageEntries%%
</table>
</body>
</html>"""
SpinoffTemplate1 = """
<div class="notice2" align="center">
This Flipnote is a spin-off.<br>
<a href="http://flipnote.hatena.com/ds/v2-eu/movie/%%CreatorID%%/%%Filename%%.htm">Original Flipnote</a>
</div>"""
SpinoffTemplate2 = """
<div class="notice2" align="center">
This Flipnote is a spin-off.
</div>"""
PageEntryTemplate = """
<tr>
<th width="90">
<div class="item-term" align="left">%%Name%%</div>
</th>
<td width="136">
<div class="item-value" align="right">
%%Content%%
</div>
</td>
</tr>"""
PageEntrySeparator="""
<tr> </tr>
<tr>
<td colspan="2">
<div class="hr"></div>
</td>
</tr>"""
|
dylmye/hatena-server
|
public/ds/v2-xx/movie.py
|
Python
|
isc
| 8,285
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Portions Copyright Buildbot Team Members
# Portions Copyright 2007 Frederic Leroy <fredo@starox.org>
# hook extension to send change notifications to buildbot when a changeset is
# brought into the repository from elsewhere.
#
# See the Buildbot manual for configuration instructions.
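#
# A minimal illustrative .hg/hgrc (values are examples only):
#
#   [hooks]
#   changegroup.buildbot = python:buildbot.changes.hgbuildbot.hook
#
#   [hgbuildbot]
#   master = buildmaster.example.org:9989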
import os
from mercurial.node import bin, hex, nullid #@UnresolvedImport
# mercurial's on-demand-importing hacks interfere with the:
#from zope.interface import Interface
# that Twisted needs to do, so disable it.
try:
from mercurial import demandimport
demandimport.disable()
except ImportError:
pass
# In Mercurial post-1.7, some strings might be stored as a
# encoding.localstr class. encoding.fromlocal will translate
# those back to UTF-8 strings.
try:
from mercurial.encoding import fromlocal
_hush_pyflakes = [fromlocal]
del _hush_pyflakes
except ImportError:
def fromlocal(s):
return s
def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
# read config parameters
baseurl = ui.config('hgbuildbot', 'baseurl',
ui.config('web', 'baseurl', ''))
masters = ui.configlist('hgbuildbot', 'master')
if masters:
branchtype = ui.config('hgbuildbot', 'branchtype', 'inrepo')
branch = ui.config('hgbuildbot', 'branch')
fork = ui.configbool('hgbuildbot', 'fork', False)
# notify also has this setting
stripcount = int(ui.config('notify','strip') or ui.config('hgbuildbot','strip',3))
category = ui.config('hgbuildbot', 'category', None)
project = ui.config('hgbuildbot', 'project', '')
auth = ui.config('hgbuildbot', 'auth', None)
else:
ui.write("* You must add a [hgbuildbot] section to .hg/hgrc in "
"order to use buildbot hook\n")
return
if hooktype != "changegroup":
ui.status("hgbuildbot: hooktype %s not supported.\n" % hooktype)
return
if fork:
child_pid = os.fork()
if child_pid == 0:
#child
pass
else:
#parent
ui.status("Notifying buildbot...\n")
return
# only import inside the fork if forked
from buildbot.clients import sendchange
from twisted.internet import defer, reactor
if branch is None:
if branchtype == 'dirname':
branch = os.path.basename(repo.root)
if not auth:
auth = 'change:changepw'
auth = auth.split(':', 1)
# process changesets
def _send(res, s, c):
if not fork:
ui.status("rev %s sent\n" % c['revision'])
return s.send(c['branch'], c['revision'], c['comments'],
c['files'], c['username'], category=category,
repository=repository, project=project, vc='hg',
properties=c['properties'])
try: # first try Mercurial 1.1+ api
start = repo[node].rev()
end = len(repo)
except TypeError: # else fall back to old api
start = repo.changelog.rev(bin(node))
end = repo.changelog.count()
repository = strip(repo.root, stripcount)
repository = baseurl + repository
for master in masters:
s = sendchange.Sender(master, auth=auth)
d = defer.Deferred()
reactor.callLater(0, d.callback, None)
for rev in xrange(start, end):
# send changeset
node = repo.changelog.node(rev)
manifest, user, (time, timezone), files, desc, extra = repo.changelog.read(node)
parents = filter(lambda p: not p == nullid, repo.changelog.parents(node))
if branchtype == 'inrepo':
branch = extra['branch']
is_merge = len(parents) > 1
# merges don't always contain files, but at least one file is required by buildbot
if is_merge and not files:
files = ["merge"]
properties = {'is_merge': is_merge}
if branch:
branch = fromlocal(branch)
change = {
'master': master,
'username': fromlocal(user),
'revision': hex(node),
'comments': fromlocal(desc),
'files': files,
'branch': branch,
'properties':properties
}
d.addCallback(_send, s, change)
def _printSuccess(res):
ui.status(s.getSuccessString(res) + '\n')
def _printFailure(why):
ui.warn(s.getFailureString(why) + '\n')
d.addCallbacks(_printSuccess, _printFailure)
d.addBoth(lambda _ : reactor.stop())
reactor.run()
if fork:
os._exit(os.EX_OK)
else:
return
# taken from the mercurial notify extension
def strip(path, count):
    '''Strip the first `count` slash-separated components from the path'''
# First normalize it
path = '/'.join(path.split(os.sep))
# and strip it part after part
while count > 0:
c = path.find('/')
if c == -1:
break
path = path[c + 1:]
count -= 1
return path
|
denny820909/builder
|
lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/changes/hgbuildbot.py
|
Python
|
mit
| 5,750
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
BatchPanel.py
---------------------
Date : November 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import range
__author__ = 'Alexander Bruy'
__date__ = 'November 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import json
from qgis.PyQt import uic
from qgis.PyQt.QtWidgets import QTableWidgetItem, QComboBox, QHeaderView, QFileDialog, QMessageBox
from qgis.core import QgsApplication
from qgis.gui import QgsMessageBar
from processing.gui.BatchOutputSelectionPanel import BatchOutputSelectionPanel
from processing.core.parameters import ParameterFile # NOQA
from processing.core.parameters import ParameterRaster # NOQA
from processing.core.parameters import ParameterTable # NOQA
from processing.core.parameters import ParameterVector # NOQA
from processing.core.parameters import ParameterExtent # NOQA
from processing.core.parameters import ParameterCrs # NOQA
from processing.core.parameters import ParameterPoint # NOQA
from processing.core.parameters import ParameterSelection # NOQA
from processing.core.parameters import ParameterFixedTable # NOQA
from processing.core.parameters import ParameterMultipleInput # NOQA
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'widgetBatchPanel.ui'))
class BatchPanel(BASE, WIDGET):
PARAMETERS = "PARAMETERS"
OUTPUTS = "OUTPUTS"
def __init__(self, parent, alg):
super(BatchPanel, self).__init__(None)
self.setupUi(self)
self.wrappers = []
self.btnAdvanced.hide()
# Set icons
self.btnAdd.setIcon(QgsApplication.getThemeIcon('/symbologyAdd.svg'))
self.btnRemove.setIcon(QgsApplication.getThemeIcon('/symbologyRemove.svg'))
self.btnOpen.setIcon(QgsApplication.getThemeIcon('/mActionFileOpen.svg'))
self.btnSave.setIcon(QgsApplication.getThemeIcon('/mActionFileSave.svg'))
self.btnAdvanced.setIcon(QgsApplication.getThemeIcon("/processingAlgorithm.svg"))
self.alg = alg
self.parent = parent
self.btnAdd.clicked.connect(self.addRow)
self.btnRemove.clicked.connect(self.removeRows)
self.btnOpen.clicked.connect(self.load)
self.btnSave.clicked.connect(self.save)
self.btnAdvanced.toggled.connect(self.toggleAdvancedMode)
self.tblParameters.horizontalHeader().sectionDoubleClicked.connect(
self.fillParameterValues)
self.initWidgets()
def layerRegistryChanged(self):
pass
def initWidgets(self):
        # If there are advanced parameters, show the corresponding button
for param in self.alg.parameters:
if param.isAdvanced:
self.btnAdvanced.show()
break
# Determine column count
nOutputs = self.alg.getVisibleOutputsCount() + 1
if nOutputs == 1:
nOutputs = 0
self.tblParameters.setColumnCount(
self.alg.getVisibleParametersCount() + nOutputs)
# Table headers
column = 0
for param in self.alg.parameters:
self.tblParameters.setHorizontalHeaderItem(
column, QTableWidgetItem(param.description))
if param.isAdvanced:
self.tblParameters.setColumnHidden(column, True)
column += 1
for out in self.alg.outputs:
if not out.hidden:
self.tblParameters.setHorizontalHeaderItem(
column, QTableWidgetItem(out.description))
column += 1
# Last column for indicating if output will be added to canvas
if self.alg.getVisibleOutputsCount():
self.tblParameters.setHorizontalHeaderItem(
column, QTableWidgetItem(self.tr('Load in QGIS')))
# Add three empty rows by default
for i in range(3):
self.addRow()
self.tblParameters.horizontalHeader().setSectionResizeMode(QHeaderView.Interactive)
self.tblParameters.horizontalHeader().setDefaultSectionSize(250)
self.tblParameters.horizontalHeader().setMinimumSectionSize(150)
self.tblParameters.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.tblParameters.verticalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.tblParameters.horizontalHeader().setStretchLastSection(True)
def load(self):
filename, selected_filter = QFileDialog.getOpenFileName(self,
self.tr('Open batch'), None,
self.tr('JSON files (*.json)'))
if filename:
with open(filename) as f:
values = json.load(f)
else:
# If the user clicked on the cancel button.
return
self.tblParameters.setRowCount(0)
try:
for row, alg in enumerate(values):
self.addRow()
params = alg[self.PARAMETERS]
outputs = alg[self.OUTPUTS]
column = 0
for param in self.alg.parameters:
if param.hidden:
continue
if param.name in params:
value = params[param.name].strip('"')
wrapper = self.wrappers[row][column]
wrapper.setValue(value)
column += 1
for out in self.alg.outputs:
if out.hidden:
continue
if out.name in outputs:
value = outputs[out.name].strip('"')
widget = self.tblParameters.cellWidget(row, column)
widget.setValue(value)
column += 1
except TypeError:
QMessageBox.critical(
self,
self.tr('Error'),
self.tr('An error occurred while reading your file.'))
def save(self):
toSave = []
for row in range(self.tblParameters.rowCount()):
algParams = {}
algOutputs = {}
col = 0
alg = self.alg.getCopy()
for param in alg.parameters:
if param.hidden:
continue
wrapper = self.wrappers[row][col]
if not self.setParamValue(param, wrapper, alg):
self.parent.bar.pushMessage("", self.tr('Wrong or missing parameter value: {0} (row {1})').format(
param.description, row + 1),
level=QgsMessageBar.WARNING, duration=5)
return
algParams[param.name] = param.getValueAsCommandLineParameter()
col += 1
for out in alg.outputs:
if out.hidden:
continue
widget = self.tblParameters.cellWidget(row, col)
text = widget.getValue()
if text.strip() != '':
algOutputs[out.name] = text.strip()
col += 1
else:
self.parent.bar.pushMessage("", self.tr('Wrong or missing output value: {0} (row {1})').format(
out.description, row + 1),
level=QgsMessageBar.WARNING, duration=5)
return
toSave.append({self.PARAMETERS: algParams, self.OUTPUTS: algOutputs})
filename, __ = QFileDialog.getSaveFileName(self,
self.tr('Save batch'),
None,
self.tr('JSON files (*.json)'))
if filename:
if not filename.endswith('.json'):
filename += '.json'
with open(filename, 'w') as f:
json.dump(toSave, f)
def setParamValue(self, param, wrapper, alg=None):
return param.setValue(wrapper.value())
def setCellWrapper(self, row, column, wrapper):
self.wrappers[row][column] = wrapper
self.tblParameters.setCellWidget(row, column, wrapper.widget)
def addRow(self):
self.wrappers.append([None] * self.tblParameters.columnCount())
self.tblParameters.setRowCount(self.tblParameters.rowCount() + 1)
wrappers = {}
row = self.tblParameters.rowCount() - 1
column = 0
for param in self.alg.parameters:
if param.hidden:
continue
wrapper = param.wrapper(self.parent, row, column)
wrappers[param.name] = wrapper
self.setCellWrapper(row, column, wrapper)
column += 1
for out in self.alg.outputs:
if out.hidden:
continue
self.tblParameters.setCellWidget(
row, column, BatchOutputSelectionPanel(
out, self.alg, row, column, self))
column += 1
if self.alg.getVisibleOutputsCount():
item = QComboBox()
item.addItem(self.tr('Yes'))
item.addItem(self.tr('No'))
item.setCurrentIndex(0)
self.tblParameters.setCellWidget(row, column, item)
for wrapper in list(wrappers.values()):
wrapper.postInitialize(list(wrappers.values()))
def removeRows(self):
if self.tblParameters.rowCount() > 2:
self.wrappers.pop()
self.tblParameters.setRowCount(self.tblParameters.rowCount() - 1)
def fillParameterValues(self, column):
wrapper = self.wrappers[0][column]
for row in range(1, self.tblParameters.rowCount()):
self.wrappers[row][column].setValue(wrapper.value())
def toggleAdvancedMode(self, checked):
for column, param in enumerate(self.alg.parameters):
if param.isAdvanced:
self.tblParameters.setColumnHidden(column, not checked)
|
gioman/QGIS
|
python/plugins/processing/gui/BatchPanel.py
|
Python
|
gpl-2.0
| 11,053
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libdmx(AutotoolsPackage):
"""libdmx - X Window System DMX (Distributed Multihead X) extension
library."""
homepage = "http://cgit.freedesktop.org/xorg/lib/libdmx"
url = "https://www.x.org/archive/individual/lib/libdmx-1.1.3.tar.gz"
version('1.1.3', 'eed755e7cdb161e05f70e955f2b0ef4d')
depends_on('libx11')
depends_on('libxext')
depends_on('xextproto', type='build')
depends_on('dmxproto@2.2.99.1:', type='build')
depends_on('pkg-config@0.9.0:', type='build')
depends_on('util-macros', type='build')
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/libdmx/package.py
|
Python
|
lgpl-2.1
| 1,814
|
import scrapy
import json
from deals.settings import HTML_STORAGE
from deals.lib.file_storage import list_dir
class AldiSpider(scrapy.Spider):
name = 'Aldi'
base_domain = 'aldi.ie'
allowed_domains = ['aldi.ie']
# TODO: pull from mongo
start_urls = [
"https://www.aldi.ie/api/productsearch/category/super-6"
]
replay = False
def __init__(self, *args, **kwargs):
super(AldiSpider, self).__init__(*args, **kwargs)
replay = kwargs.get('replay')
date = kwargs.get("date")
if replay and date:
self.logger.info("Replaying old scrap {}".format(date))
file_path = "{}/{}/{}".format(
HTML_STORAGE.get('PATH'),
self.name,
date
)
self.start_urls = [
"file://{}/{}".format(file_path, f)
for f in list_dir(file_path)
]
self.replay = replay
def parse(self, response):
return json.loads(response.body)
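# A hedged usage sketch (the date value is illustrative): Scrapy forwards
# "-a key=value" pairs to the spider's __init__, so a previously saved scrape
# could be replayed with:
#   scrapy crawl Aldi -a replay=1 -a date=2017-01-01
# where the date must name an existing directory under HTML_STORAGE['PATH'].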
|
astrok100/Weekly-Deals-Scrapy
|
deals/spiders/aldi.py
|
Python
|
mpl-2.0
| 1,026
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._hd_insight_management_client import HDInsightManagementClient
from ._version import VERSION
__version__ = VERSION
__all__ = ['HDInsightManagementClient']
try:
from ._patch import patch_sdk # type: ignore
patch_sdk()
except ImportError:
pass
|
Azure/azure-sdk-for-python
|
sdk/hdinsight/azure-mgmt-hdinsight/azure/mgmt/hdinsight/__init__.py
|
Python
|
mit
| 731
|
# This import fixes sys.path issues
from .parentpath import *
import os
import re
import unittest
import logging
import StringIO
from ogutils.loggers import default
from ogutils.system import streams
LOCAL_LOG_DIR = os.path.join(os.path.dirname(__file__), 'logs')
class FlaskLoggerTest(unittest.TestCase):
def clear_logs(self):
for fname in os.listdir(LOCAL_LOG_DIR):
file_path = os.path.join(LOCAL_LOG_DIR, fname)
if os.path.isfile(file_path):
os.unlink(file_path)
def read_console_log(self):
with open(os.path.join(LOCAL_LOG_DIR, 'console.log'), 'r') as console:
return ''.join(console.readlines())
def setUp(self):
if not os.path.exists(LOCAL_LOG_DIR):
os.makedirs(LOCAL_LOG_DIR)
self.clear_logs()
        self.log_matcher = re.compile(r'\[\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d\] Log Me!\n')
self.logger = default.build_default_logger(
'default_logger',
log_level=logging.INFO,
log_dir=LOCAL_LOG_DIR)
def tearDown(self):
self.logger.handlers = []
self.clear_logs()
def test_logger_default_level(self):
self.logger.debug('Skip me')
        self.assertEqual(self.read_console_log(), '')
def test_logger_stdout(self):
stdout = StringIO.StringIO()
with streams.StdRedirector(stdout=stdout):
self.assertEqual(len(re.findall(self.log_matcher, self.read_console_log())), 0)
self.logger.info('Log Me!')
self.assertEqual(len(re.findall(self.log_matcher, self.read_console_log())), 1)
self.assertEqual(len(re.findall(self.log_matcher, stdout.getvalue())), 1)
self.logger.info('Log Me!')
self.assertEqual(len(re.findall(self.log_matcher, self.read_console_log())), 2)
self.assertEqual(len(re.findall(self.log_matcher, stdout.getvalue())), 2)
def test_logger_stderr(self):
stderr = StringIO.StringIO()
with streams.StdRedirector(stderr=stderr):
self.assertEqual(len(re.findall(self.log_matcher, self.read_console_log())), 0)
self.logger.error('Log Me!')
self.assertEqual(len(re.findall(self.log_matcher, self.read_console_log())), 1)
self.assertEqual(len(re.findall(self.log_matcher, stderr.getvalue())), 1)
self.logger.error('Log Me!')
self.assertEqual(len(re.findall(self.log_matcher, self.read_console_log())), 2)
self.assertEqual(len(re.findall(self.log_matcher, stderr.getvalue())), 2)
if __name__ == "__main__":
unittest.main()
|
OpenGov/og-python-utils
|
tests/loggers_default_test.py
|
Python
|
mit
| 2,700
|
#
# The Python Imaging Library.
# $Id$
#
# image enhancement classes
#
# For a background, see "Image Processing By Interpolation and
# Extrapolation", Paul Haeberli and Douglas Voorhies. Available
# at http://www.sgi.com/grafica/interp/index.html
#
# History:
# 1996-03-23 fl Created
# 2009-06-16 fl Fixed mean calculation
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image, ImageFilter, ImageStat
class _Enhance:
def enhance(self, factor):
"""
Returns an enhanced image.
:param factor: A floating point value controlling the enhancement.
Factor 1.0 always returns a copy of the original image,
lower factors mean less color (brightness, contrast,
etc), and higher values more. There are no restrictions
on this value.
:rtype: :py:class:`~PIL.Image.Image`
"""
return Image.blend(self.degenerate, self.image, factor)
class Color(_Enhance):
"""Adjust image color balance.
This class can be used to adjust the colour balance of an image, in
a manner similar to the controls on a colour TV set. An enhancement
factor of 0.0 gives a black and white image. A factor of 1.0 gives
the original image.
"""
def __init__(self, image):
self.image = image
self.degenerate = image.convert("L").convert(image.mode)
class Contrast(_Enhance):
"""Adjust image contrast.
This class can be used to control the contrast of an image, similar
to the contrast control on a TV set. An enhancement factor of 0.0
gives a solid grey image. A factor of 1.0 gives the original image.
"""
def __init__(self, image):
self.image = image
mean = int(ImageStat.Stat(image.convert("L")).mean[0] + 0.5)
self.degenerate = Image.new("L", image.size, mean).convert(image.mode)
class Brightness(_Enhance):
"""Adjust image brightness.
    This class can be used to control the brightness of an image. An
enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the
original image.
"""
def __init__(self, image):
self.image = image
self.degenerate = Image.new(image.mode, image.size, 0)
class Sharpness(_Enhance):
"""Adjust image sharpness.
This class can be used to adjust the sharpness of an image. An
enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives the
original image, and a factor of 2.0 gives a sharpened image.
"""
def __init__(self, image):
self.image = image
self.degenerate = image.filter(ImageFilter.SMOOTH)
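# A minimal usage sketch (not part of the original module; assumes PIL/Pillow
# is importable). Builds a small in-memory image so the demo needs no files:
if __name__ == "__main__":
    demo = Image.new("RGB", (8, 8), (128, 64, 32))
    brighter = Brightness(demo).enhance(1.5)   # factor > 1.0 brightens
    greyish = Color(demo).enhance(0.0)         # factor 0.0 removes colour
    print(brighter.getpixel((0, 0)), greyish.getpixel((0, 0)))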
|
Amechi101/concepteur-market-app
|
venv/lib/python2.7/site-packages/PIL/ImageEnhance.py
|
Python
|
mit
| 2,760
|
#!/usr/bin/env python3
import sys
tpl, ver = sys.argv[1:]
print(open(tpl, "rt").read().format(version=ver[1:].replace("-", "_")))
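# A hedged usage sketch (the template file name is hypothetical): renders a
# template, turning a git-style tag like "v1.2-3" into the version "1.2_3":
#   ./template.py meta.yaml.template v1.2-3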
|
terhorst/psmcpp
|
conda/template.py
|
Python
|
gpl-3.0
| 130
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tinctest
from tinctest.lib import local_path
from tinctest import TINCTestCase
from mpp.lib.PSQL import PSQL
from mpp.gpdb.tests.storage.lib.dbstate import DbStateClass
from gppylib.db import dbconn
class Verification(TINCTestCase):
dbname = 'mpp18816_db'
@classmethod
def setUpClass(cls):
tinctest.logger.info('Running Verification...')
def run_SQLQuery(self, exec_sql, dbname = 'template1'):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, exec_sql)
results = curs.fetchall()
return results
def test_checklog(self):
''' Select from gp_toolkit log message to see if the concurrent test run resulted in PANIC messages'''
log_sql = "select logseverity, logstate, substring(logmessage from 0 for 60) from gp_toolkit.__gp_log_master_ext where logmessage \
like '%Unrecognized DTX transaction context%' or logmessage like '%proclock table corrupted%' or logseverity = 'PANIC' ;"
result = self.run_SQLQuery(log_sql, dbname = Verification.dbname)
for (logsev, logstate, logmsg) in result:
if (logsev.strip() == 'PANIC' or 'Unrecognized DTX transaction context' in logmsg or 'proclock table corrupted' in logmsg ):
raise Exception('Master log shows PANIC or other error messages: Please check the master_log')
tinctest.logger.info('No PANIC messages found in logs')
def test_gpcheckcat(self):
dbstate = DbStateClass('run_validation')
dbstate.check_catalog(alldb = False, dbname = Verification.dbname)
|
rvs/gpdb
|
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/filerep/mpp18816/verify/verify.py
|
Python
|
apache-2.0
| 2,327
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new creative sets.
To determine which creative sets exist, run get_all_creative_sets.py.
Tags: CreativeSetService.createCreativeSet
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
MASTER_CREATIVE_ID = 'INSERT_MASTER_CREATIVE_ID_HERE'
COMPANION_CREATIVE_ID = 'INSERT_COMPANION_CREATIVE_ID_HERE'
def main(client, master_creative_id, companion_creative_id):
# Initialize appropriate service.
creative_set_service = client.GetService('CreativeSetService',
version='v201306')
# Create creative set objects.
creative_set = {'name': 'Creative set #%s' % Utils.GetUniqueName(),
'masterCreativeId': master_creative_id,
'companionCreativeIds': [companion_creative_id]}
# Add creative sets.
creative_set = creative_set_service.CreateCreativeSet(creative_set)[0]
# Display results.
if creative_set:
print (('Creative set with ID \'%s\', master creative ID \'%s\', and '
'companion creative IDs {%s} was created.')
% (creative_set['id'], creative_set['masterCreativeId'],
','.join(creative_set['companionCreativeIds'])))
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, MASTER_CREATIVE_ID, COMPANION_CREATIVE_ID)
|
lociii/googleads-python-lib
|
examples/adspygoogle/dfp/v201306/creative_set_service/create_creative_set.py
|
Python
|
apache-2.0
| 2,334
|
# Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
class Solution:
def connect(self, root: 'Node') -> 'Node':
curr = root
while curr:
p = curr
while p:
if p.left:
p.left.next = p.right
if p.next:
p.right.next = p.next.left
p = p.next
curr = curr.left
return root
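# A small self-check sketch (not part of the original solution): build a
# perfect binary tree of depth 2 and verify the next pointers are threaded
# level by level.
if __name__ == '__main__':
    n = {v: Node(v) for v in range(1, 8)}
    n[1].left, n[1].right = n[2], n[3]
    n[2].left, n[2].right = n[4], n[5]
    n[3].left, n[3].right = n[6], n[7]
    Solution().connect(n[1])
    assert n[2].next is n[3]
    assert n[4].next is n[5] and n[5].next is n[6] and n[6].next is n[7]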
|
jiadaizhao/LeetCode
|
0101-0200/0116-Populating Next Right Pointers in Each Node/0116-Populating Next Right Pointers in Each Node.py
|
Python
|
mit
| 612
|
#
# Part of p5: A Python package based on Processing
# Copyright (C) 2017-2019 Abhik Pal
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import builtins
from collections import namedtuple
from enum import IntEnum
Position = namedtuple('Position', ['x', 'y'])
handler_names = ['key_pressed', 'key_released', 'key_typed',
'mouse_clicked', 'mouse_double_clicked',
'mouse_dragged', 'mouse_moved',
'mouse_pressed', 'mouse_released', 'mouse_wheel', ]
class VispyButton(IntEnum):
LEFT = 1
RIGHT = 2
MIDDLE = 3
class MouseButton:
"""An abstraction over a set of mouse buttons.
:param buttons: list of mouse buttons pressed at the same time.
:type buttons: str list
"""
def __init__(self, buttons):
button_names = {
VispyButton.LEFT: 'LEFT',
VispyButton.RIGHT: 'RIGHT',
VispyButton.MIDDLE: 'MIDDLE',
}
self._buttons = buttons
self._button_names = [button_names[bt] for bt in self._buttons]
@property
def buttons(self):
        return self._button_names
def __eq__(self, other):
button_map = {
'CENTER': VispyButton.MIDDLE,
'MIDDLE': VispyButton.MIDDLE,
'LEFT': VispyButton.LEFT,
'RIGHT': VispyButton.RIGHT,
}
if isinstance(other, str):
return button_map.get(other.upper(), -1) in self._buttons
return self._buttons == other._buttons
    def __ne__(self, other):
return not (self == other)
def __repr__(self):
fstr = ', '.join(self.buttons)
return "MouseButton({})".format(fstr)
__str__ = __repr__
class Key:
"""A higher level abstraction over a single key.
:param name: The name of the key; ENTER, BACKSPACE, etc.
:type name: str
:param text: The text associated with the given key. This
corresponds to the symbol that will be "typed" by the given
key.
:type name: str
"""
def __init__(self, name, text=''):
self.name = name.upper()
self.text = text
def __eq__(self, other):
if isinstance(other, str):
return other == self.name or other == self.text
return self.name == other.name and self.text == other.text
    def __ne__(self, other):
return not (self == other)
def __str__(self):
if self.text.isalnum():
return self.text
else:
return self.name
def __repr__(self):
return "Key({})".format(self.name)
class Event:
"""A generic sketch event.
    :param modifiers: The set of modifiers held down at the time of the
event.
:type modifiers: str list
:param pressed: If the key/button is held down when the event
occurs.
:type pressed: bool
"""
def __init__(self, raw_event, active=False):
self._modifiers = list(map(lambda k: k.name, raw_event.modifiers))
self._active = active
self._raw = raw_event
@property
def modifiers(self):
return self._modifiers
@property
def pressed(self):
return self._active
def is_shift_down(self):
"""Was shift held down during the event?
:returns: True if the shift-key was held down.
:rtype: bool
"""
return 'Shift' in self._modifiers
def is_ctrl_down(self):
"""Was ctrl (command on Mac) held down during the event?
:returns: True if the ctrl-key was held down.
:rtype: bool
"""
return 'Control' in self._modifiers
def is_alt_down(self):
"""Was alt held down during the event?
:returns: True if the alt-key was held down.
:rtype: bool
"""
return 'Alt' in self._modifiers
def is_meta_down(self):
"""Was the meta key (windows/option key) held down?
:returns: True if the meta-key was held down.
:rtype: bool
"""
return 'Meta' in self._modifiers
def _update_builtins(self):
pass
class KeyEvent(Event):
"""Encapsulates information about a key event.
:param key: The key associated with this event.
:type key: str
:param pressed: Specifies whether the key is held down or not.
:type pressed: bool
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._raw.key is not None:
self.key = Key(self._raw.key.name, self._raw.text)
else:
self.key = Key('UNKNOWN')
def _update_builtins(self):
builtins.key_is_pressed = self.pressed
builtins.key = self.key if self.pressed else None
class MouseEvent(Event):
"""A class that encapsulates information about a mouse event.
:param x: The x-position of the mouse in the window at the time of
the event.
:type x: int
:param y: The y-position of the mouse in the window at the time of
the event.
:type y: int
:param position: Position of the mouse in the window at the time
of the event.
:type position: (int, int)
:param change: the change in the x and y directions (defaults to
(0, 0))
:type change: (int, int)
:param scroll: the scroll amount in the x and y directions
(defaults to (0, 0)).
:type scroll: (int, int)
    :param count: amount by which the mouse wheel was scrolled.
:type count: int
:param button: Button information at the time of the event.
:type button: MouseButton
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
x, y = self._raw.pos
x = max(min(builtins.width, x), 0)
y = max(min(builtins.height, builtins.height - y), 0)
dx, dy = self._raw.delta
if (self._raw.press_event is not None) and (
self._raw.last_event is not None):
px, py = self._raw.press_event.pos
cx, cy = self._raw.last_event.pos
self.change = Position(cx - px, cy - py)
else:
self.change = Position(0, 0)
self.x = max(min(builtins.width, x), 0)
self.y = max(min(builtins.height, builtins.height - y), 0)
self.position = Position(x, y)
self.scroll = Position(int(dx), int(dy))
self.count = self.scroll.y
self.button = MouseButton(self._raw.buttons)
def _update_builtins(self):
builtins.pmouse_x = builtins.mouse_x
builtins.pmouse_y = builtins.mouse_y
builtins.mouse_x = self.x
builtins.mouse_y = self.y
builtins.mouse_is_pressed = self._active
        builtins.mouse_is_dragging = (self.change != (0, 0))
builtins.mouse_button = self.button if self.pressed else None
def __repr__(self):
press = 'pressed' if self.pressed else 'not-pressed'
return "MouseEvent({} at {})".format(press, self.position)
__str__ = __repr__
|
p5py/p5
|
p5/sketch/events.py
|
Python
|
gpl-3.0
| 7,538
|
import pyaf.Bench.web_traffic.Forecaster as fo
PROJECTS = ['en.wikipedia.org']
data_dir = 'data/web-traffic-time-series-forecasting'
lForecaster = fo.cProjectForecaster()
lForecaster.mDataDirectory = data_dir
lForecaster.mBackendName = 'pyaf_hierarchical_top_down'
lForecaster.mKeysFileName = 'key_1.csv.zip'
last_date = '2016-12-31'
horizon = 60
lForecaster.forecast(PROJECTS, last_date , horizon)
|
antoinecarme/pyaf
|
tests/bench/web_traffic_jobs/en.wikipedia.org/test_web_traffic_en.wikipedia.org_pyaf_hierarchical_top_down.py
|
Python
|
bsd-3-clause
| 447
|
##
## Licensed to the Apache Software Foundation (ASF) under one
## or more contributor license agreements. See the NOTICE file
## distributed with this work for additional information
## regarding copyright ownership. The ASF licenses this file
## to you under the Apache License, Version 2.0 (the
## "License"); you may not use this file except in compliance
## with the License. You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing,
## software distributed under the License is distributed on an
## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
## KIND, either express or implied. See the License for the
## specific language governing permissions and limitations
## under the License
##
"""
Qpid Dispatch Router management schema and config file parsing.
"""
import json
from pkgutil import get_data
from . import schema
from ..compat import JSON_LOAD_KWARGS
class QdSchema(schema.Schema):
"""
Qpid Dispatch Router management schema.
"""
CONFIGURATION_ENTITY = u"configurationEntity"
OPERATIONAL_ENTITY = u"operationalEntity"
def __init__(self):
"""Load schema."""
qd_schema = get_data('qpid_dispatch.management', 'qdrouter.json')
try:
super(QdSchema, self).__init__(**json.loads(qd_schema, **JSON_LOAD_KWARGS))
        except Exception as e:
raise ValueError("Invalid schema qdrouter.json: %s" % e)
self.configuration_entity = self.entity_type(self.CONFIGURATION_ENTITY)
self.operational_entity = self.entity_type(self.OPERATIONAL_ENTITY)
def validate_add(self, attributes, entities):
"""
Check that listeners and connectors can only have role=inter-router if the router has
mode=interior.
"""
entities = list(entities) # Iterate twice
super(QdSchema, self).validate_add(attributes, entities)
entities.append(attributes)
inter_router = not_interior = None
for e in entities:
short_type = self.short_name(e['type'])
if short_type == "router" and e['mode'] != "interior":
not_interior = e['mode']
if short_type in ["listener", "connector"] and e['role'] == "inter-router":
inter_router = e
if not_interior and inter_router:
raise schema.ValidationError(
"role='inter-router' only allowed with router mode='interior' for %s." % inter_router)
def is_configuration(self, entity_type):
return entity_type and self.configuration_entity in entity_type.all_bases
def is_operational(self, entity_type):
return entity_type and self.operational_entity in entity_type.all_bases
|
lulf/qpid-dispatch
|
python/qpid_dispatch_internal/management/qdrouter.py
|
Python
|
apache-2.0
| 2,794
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from simplespamblocker.models import Option
class OptionAdmin(admin.ModelAdmin):
list_display = ('id', 'site', 'is_block_author', 'is_block_content',
'is_block_email', 'is_block_url',
'is_block_remote_addr', 'is_block_http_referer',
'is_block_http_user_agent', 'created_at', 'updated_at')
search_fields = ('block_author', 'block_content', 'block_email',
'block_url', 'block_remote_addr', 'block_http_referer',
'block_http_user_agent')
def is_block_author(self, obj):
return bool(obj.block_author)
def is_block_content(self, obj):
return bool(obj.block_content)
def is_block_email(self, obj):
return bool(obj.block_email)
def is_block_url(self, obj):
return bool(obj.block_url)
def is_block_http_referer(self, obj):
return bool(obj.block_http_referer)
def is_block_http_user_agent(self, obj):
return bool(obj.block_http_user_agent)
def is_block_remote_addr(self, obj):
return bool(obj.block_remote_addr)
is_block_author.short_description = _('block author')
is_block_content.short_description = _('block content')
is_block_email.short_description = _('block email')
is_block_url.short_description = _('block url')
is_block_http_referer.short_description = _('block http referer')
    is_block_http_user_agent.short_description = _('block http user agent')
is_block_remote_addr.short_description = _('block IP')
is_block_author.boolean = True
is_block_content.boolean = True
is_block_email.boolean = True
is_block_url.boolean = True
is_block_http_referer.boolean = True
is_block_http_user_agent.boolean = True
is_block_remote_addr.boolean = True
admin.site.register(Option, OptionAdmin)
|
moqada/django-simple-spam-blocker
|
simplespamblocker/admin.py
|
Python
|
bsd-3-clause
| 1,957
|
#!/usr/bin/env python3
import urllib.request as req
import re
import os
from pathlib import Path
def main():
url = input().strip()
path = Path(input().strip())
opener = req.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
req.install_opener(opener)
page = req.urlopen(url).read().decode('utf-8')
links = re.findall(r'"([^"]+\.torrent)"', page)
for i, url in enumerate(links):
file_name = "torrent_" + str(i) + ".torrent"
req.urlretrieve(url, path / file_name)
print(url)
if __name__ == '__main__':
main()
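# An example session (inputs are hypothetical): the script reads a page URL
# and a destination directory from stdin, then downloads every ".torrent"
# link found on the page:
#   $ python3 torrent_crawler.py
#   https://example.com/releases
#   /tmp/torrents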
|
garciparedes/python-examples
|
environment/scripting/torrent_crawler.py
|
Python
|
mpl-2.0
| 587
|
# -*- coding: utf-8 -*-
# Simple Bot (SimpBot)
# Copyright 2016-2017, Ismael Lugo (kwargs)
#
# Simpcoins! inspired by buttcoins
#---------------------------------
# configuration module
import random
from string import ascii_letters, digits, punctuation
def unsort(list):
"""Return a copy of unsorted list"""
new_list = []
for chance in range(len(list)):
char = random.choice(list)
list.remove(char)
new_list.append(char)
return new_list
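# A minimal illustration (output order is random, values are examples):
#   unsort(['a', 'b', 'c'])  ->  e.g. ['c', 'a', 'b']
# note that the input list is emptied as a side effect of remove().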
entropy = 5
anti_flood = (20, 60) # (lines, time in seconds)
full_block = ascii_letters + digits
slct_block = list(full_block + (punctuation * entropy))
slct_block = ''.join(unsort(unsort(unsort(unsort(unsort(slct_block))))))
min_len = 10 # minimum of chars in line
p_increase = 400 # Percentage to increase
init_level = 100 # First level
default_coins = 0 # coins awarded by default
default_ecoins = 1 # Default earned coins
default_chances = 0 # Default possibilities for obtaining a part of the block
default_bp_size = 10 # Default maximum number of items that can be saved
hash_function = 'md5'
bot_account = 'Simp Bank'
dateformat = '[%X] %x'
max_entries = 5
columns_name = ['id column', 'sender column', 'receiver column',
'amount column', 'column date']
table_format = {
#'vertical_char': ' ',
#'junction_char': ' ',
#'horizontal_char': ' ',
'border': False,
'print_empty': False,
'header_style': 'upper'}
|
IsmaelRLG/simpbot
|
extra/simpcoins/config.py
|
Python
|
mit
| 1,465
|
import csv
import json
import tempfile
from fs.memoryfs import MemoryFS
import numpy as np
fsys = MemoryFS()
fsys.makedir('raw_data')
fsys.makedir('result_data')
fsys.makedir('result_data/dataset_200_step/')
pure_path = 'result_data/dataset_200_step/'
fsys.create(pure_path + 'a.tfrecord')
fsys.create(pure_path + 'b.tfrecord')
fsys.create(pure_path + 'log.log')
fsys.tree()
fsys.removetree(pure_path)
fsys.tree()
|
chenmich/learn_rnn_tensorflow
|
learn_fs.py
|
Python
|
apache-2.0
| 415
|
def join(a, *b):
return a + "/" + "/".join(b)
def normpath(x):
return x
|
github/codeql
|
python/ql/test/query-tests/Security/lib/os/path.py
|
Python
|
mit
| 81
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils."""
from absl.testing import absltest
from acme.jax import utils
import chex
import jax
import jax.numpy as jnp
import numpy as np
chex.set_n_cpu_devices(4)
class JaxUtilsTest(absltest.TestCase):
def test_batch_concat(self):
batch_size = 32
inputs = [
jnp.zeros(shape=(batch_size, 2)),
{
'foo': jnp.zeros(shape=(batch_size, 5, 3))
},
[jnp.zeros(shape=(batch_size, 1))],
jnp.zeros(shape=(batch_size,)),
]
output_shape = utils.batch_concat(inputs).shape
expected_shape = [batch_size, 2 + 5 * 3 + 1 + 1]
self.assertSequenceEqual(output_shape, expected_shape)
def test_mapreduce(self):
@utils.mapreduce
def f(y, x):
return jnp.square(x + y)
z = f(jnp.ones(shape=(32,)), jnp.ones(shape=(32,)))
z = jax.device_get(z)
self.assertEqual(z, 4)
def test_get_from_first_device(self):
sharded = {
'a':
jax.device_put_sharded(
list(jnp.arange(16).reshape([jax.local_device_count(), 4])),
jax.local_devices()),
'b':
jax.device_put_sharded(
list(jnp.arange(8).reshape([jax.local_device_count(), 2])),
jax.local_devices(),
),
}
want = {
'a': jnp.arange(4),
'b': jnp.arange(2),
}
# Get zeroth device content as DeviceArray.
device_arrays = utils.get_from_first_device(sharded, as_numpy=False)
jax.tree_map(
lambda x: self.assertIsInstance(x, jax.xla.DeviceArray),
device_arrays)
jax.tree_map(np.testing.assert_array_equal, want, device_arrays)
# Get the zeroth device content as numpy arrays.
numpy_arrays = utils.get_from_first_device(sharded, as_numpy=True)
jax.tree_map(lambda x: self.assertIsInstance(x, np.ndarray), numpy_arrays)
jax.tree_map(np.testing.assert_array_equal, want, numpy_arrays)
def test_get_from_first_device_fails_if_sda_not_provided(self):
with self.assertRaises(ValueError):
utils.get_from_first_device({'a': np.arange(jax.local_device_count())})
if __name__ == '__main__':
absltest.main()
|
deepmind/acme
|
acme/jax/utils_test.py
|
Python
|
apache-2.0
| 2,776
|
import random
import sys
def Cartas():
return 4*["A",2,3,4,5,6,7,8,9,10,"J","Q","K"]
def Usuario(usuario,baraja):
usuario.append( baraja.pop(0))
usuario.append( baraja.pop(0))
print("Tu mano es:")
print (usuario)
return usuario
def Maquina(maquina, baraja):
maquina.append(baraja.pop(0))
maquina.append(baraja.pop(0))
return maquina
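# JQK walks the hand once (the appended 0 acts as a sentinel), replacing
# J/Q/K with 10 and an ace with 11 (a second ace counts as 1).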
def JQK (mano,numero):
mano.append(numero)
if mano[0]!=0:
if mano[0]== "J" or mano[0]=="Q" or mano[0]== "K":
mano.pop(0)
return JQK(mano, 10)
elif mano[0]== "A":
if(mano[0]=="A" and mano[1]=="A"):
mano.pop(0)
JQK(mano,1)
mano.pop(0)
return JQK(mano, 11)
else:
return JQK(mano, mano.pop(0))
else:
mano.pop(0)
return mano
def comprobarA (mano,numero):
mano.append(numero)
if(mano[0]!=0):
if mano[0]==11 or mano[0]=="A":
mano.pop(0)
return comprobarA(mano,1)
else:
return comprobarA(mano,mano.pop(0))
else:
mano.pop(0)
return mano
def RobarCarta(suma,baraja):
if sumaCartas(suma,0)<=21:
print ("\nLa carta repartida es: ")
print baraja[0]
suma.append(baraja.pop(0))
print ("La nueva mano es: ")
print suma
JQK(suma,0)
def sumaCartas(lista,numero):
lista.append(numero)
if lista[0]!=0 :
return lista[0]+sumaCartas(lista,lista.pop(0))
else:
lista.pop(0)
return 0
def compararSumas(maquina,mano,Quedarse,baraja):
print "La suma de tu mano:"
print (sumaCartas(mano,0))
if(sumaCartas(mano,0)<=21 and sumaCartas(maquina,0)<=21):
		if(Quedarse!=True and input("\n1. Hit\n2. Stand\n\nEnter an option:")==1 ):
RobarCarta(mano,baraja)
compararSumas(maquina,mano,False,baraja)
elif (sumaCartas(mano,0)>sumaCartas(maquina,0) and sumaCartas(maquina,0)<21):
RobarCarta(maquina,baraja)
compararSumas(maquina,mano,True,baraja)
elif (sumaCartas(mano,0)<sumaCartas(maquina,0) or sumaCartas(mano,0)==sumaCartas(maquina,0)):
print("La mano de la maquina es: ")
print (maquina)
print "La suma de la mano del pc es:"
print (sumaCartas(maquina,0))
print("Perdiste D:")
else:
if (sumaCartas(maquina,0)<21):
if (sumaCartas(comprobarA(mano,0),0)<21):
compararSumas(maquina,mano,False,baraja)
else:
print("La mano de la maquina es: ")
print (maquina)
print "La suma de la mano del pc es:"
print (sumaCartas(maquina,0))
print("Perdiste D:")
else:
if(sumaCartas(comprobarA(maquina,0),0)<21):
compararSumas(maquina,mano,True,baraja)
else:
print("La mano de la maquina es: ")
print (maquina)
print "La suma de la mano del pc es:"
print (sumaCartas(maquina,0))
print ("GANASTE :D")
def main(baraja):
random.shuffle(baraja)
compararSumas(JQK(Maquina([],baraja),0),JQK(Usuario([],baraja),0),False,baraja)
sys.exit(0)
main(Cartas())
|
afsolanoc95/programapython
|
recursividad21.py
|
Python
|
gpl-3.0
| 3,450
|
import os
import webapp2
from actions import cronActions
from views import views
import secrets
SECS_PER_WEEK = 60 * 60 * 24 * 7
# Enable ctypes -> Jinja2 tracebacks
PRODUCTION_MODE = not os.environ.get(
'SERVER_SOFTWARE', 'Development').startswith('Development')
ROOT_DIRECTORY = os.path.dirname(__file__)
if not PRODUCTION_MODE:
from google.appengine.tools.devappserver2.python import sandbox
sandbox._WHITE_LIST_C_MODULES += ['_ctypes', 'gestalt']
TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'src')
else:
TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'dist')
curr_path = os.path.abspath(os.path.dirname(__file__))
config = {
'webapp2_extras.sessions': {
'secret_key': secrets.COOKIE_KEY,
'session_max_age': SECS_PER_WEEK,
'cookie_args': {'max_age': SECS_PER_WEEK},
'cookie_name': 'echo_sense_session'
},
'webapp2_extras.jinja2': {
'template_path': TEMPLATE_DIRECTORY
}
}
app = webapp2.WSGIApplication(
[
# Cron jobs (see cron.yaml)
webapp2.Route('/cron/monthly', handler=cronActions.Monthly),
webapp2.Route(r'/<:.*>', handler=views.ActionPotentialApp, name="ActionPotentialApp"),
], debug=True, config=config)
|
onejgordon/action-potential
|
actionpotential.py
|
Python
|
mit
| 1,267
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Tests common to all coder implementations."""
import logging
import unittest
try:
from pyflink.fn_execution import coder_impl_fast
from pyflink.fn_execution.beam import beam_coder_impl_slow as coder_impl
have_cython = True
except ImportError:
have_cython = False
@unittest.skipUnless(have_cython, "Uncompiled Cython Coder")
class CodersTest(unittest.TestCase):
def check_cython_coder(self, python_field_coders, cython_field_coders, data):
from apache_beam.coders.coder_impl import create_InputStream, create_OutputStream
from pyflink.fn_execution.beam.beam_stream import BeamInputStream, BeamOutputStream
py_flatten_row_coder = coder_impl.FlattenRowCoderImpl(python_field_coders)
internal = py_flatten_row_coder.encode(data)
beam_input_stream = create_InputStream(internal)
input_stream = BeamInputStream(beam_input_stream, beam_input_stream.size())
beam_output_stream = create_OutputStream()
cy_flatten_row_coder = coder_impl_fast.FlattenRowCoderImpl(cython_field_coders)
value = cy_flatten_row_coder.decode_from_stream(input_stream)
output_stream = BeamOutputStream(beam_output_stream)
cy_flatten_row_coder.encode_to_stream(value, output_stream)
output_stream.flush()
generator_result = py_flatten_row_coder.decode_from_stream(create_InputStream(
beam_output_stream.get()), False)
result = []
for item in generator_result:
result.append(item)
try:
self.assertEqual(result, data)
except AssertionError:
self.assertEqual(len(result), len(data))
self.assertEqual(len(result[0]), len(data[0]))
for i in range(len(data[0])):
if isinstance(data[0][i], float):
from pyflink.table.tests.test_udf import float_equal
assert float_equal(data[0][i], result[0][i], 1e-6)
else:
self.assertEqual(data[0][i], result[0][i])
# decide whether two floats are equal
@staticmethod
def float_equal(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
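    # Illustrative values (not part of the original test-suite):
    #   float_equal(0.1 + 0.2, 0.3)          -> True  (within default rel_tol)
    #   float_equal(1.0, 1.1, rel_tol=0.01)  -> False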
def test_cython_bigint_coder(self):
data = [1, 100, -100, -1000]
python_field_coders = [coder_impl.BigIntCoderImpl() for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.BigIntCoderImpl() for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
def test_cython_tinyint_coder(self):
data = [1, 10, 127, -128]
python_field_coders = [coder_impl.TinyIntCoderImpl() for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.TinyIntCoderImpl() for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
def test_cython_boolean_coder(self):
data = [True, False]
python_field_coders = [coder_impl.BooleanCoderImpl() for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.BooleanCoderImpl() for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
def test_cython_smallint_coder(self):
data = [32767, -32768, 0]
python_field_coders = [coder_impl.SmallIntCoderImpl() for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.SmallIntCoderImpl() for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
def test_cython_int_coder(self):
data = [-2147483648, 2147483647]
python_field_coders = [coder_impl.IntCoderImpl() for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.IntCoderImpl() for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
def test_cython_float_coder(self):
data = [1.02, 1.32]
python_field_coders = [coder_impl.FloatCoderImpl() for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.FloatCoderImpl() for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
def test_cython_double_coder(self):
data = [-12.02, 1.98932]
python_field_coders = [coder_impl.DoubleCoderImpl() for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.DoubleCoderImpl() for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
def test_cython_binary_coder(self):
data = [b'pyflink', b'x\x00\x00\x00']
python_field_coders = [coder_impl.BinaryCoderImpl() for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.BinaryCoderImpl() for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
def test_cython_char_coder(self):
data = ['flink', '🐿']
python_field_coders = [coder_impl.CharCoderImpl() for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.CharCoderImpl() for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
def test_cython_date_coder(self):
import datetime
data = [datetime.date(2019, 9, 10)]
python_field_coders = [coder_impl.DateCoderImpl() for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.DateCoderImpl() for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
def test_cython_time_coder(self):
import datetime
data = [datetime.time(hour=11, minute=11, second=11, microsecond=123000)]
python_field_coders = [coder_impl.TimeCoderImpl() for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.TimeCoderImpl() for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
def test_cython_decimal_coder(self):
import decimal
data = [decimal.Decimal('0.00001'), decimal.Decimal('1.23E-8')]
python_field_coders = [coder_impl.DecimalCoderImpl(38, 18) for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.DecimalCoderImpl(38, 18) for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
decimal.getcontext().prec = 2
data = [decimal.Decimal('1.001')]
python_field_coders = [coder_impl.DecimalCoderImpl(4, 3) for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.DecimalCoderImpl(4, 3) for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
self.assertEqual(decimal.getcontext().prec, 2)
def test_cython_timestamp_coder(self):
import datetime
data = [datetime.datetime(2019, 9, 10, 18, 30, 20, 123000)]
python_field_coders = [coder_impl.TimestampCoderImpl(3) for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.TimestampCoderImpl(3) for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
data = [datetime.datetime(2019, 9, 10, 18, 30, 20, 123456)]
python_field_coders = [coder_impl.TimestampCoderImpl(6) for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.TimestampCoderImpl(6) for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
def test_cython_local_zoned_timestamp_coder(self):
import datetime
import pytz
timezone = pytz.timezone("Asia/Shanghai")
data = [timezone.localize(datetime.datetime(2019, 9, 10, 18, 30, 20, 123000))]
python_field_coders = [coder_impl.LocalZonedTimestampCoderImpl(3, timezone)
for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.LocalZonedTimestampCoderImpl(3, timezone)
for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
data = [timezone.localize(datetime.datetime(2019, 9, 10, 18, 30, 20, 123456))]
python_field_coders = [coder_impl.LocalZonedTimestampCoderImpl(6, timezone)
for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.LocalZonedTimestampCoderImpl(6, timezone)
for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
def test_cython_array_coder(self):
data = [[1, 2, 3, None]]
python_field_coders = [coder_impl.ArrayCoderImpl(coder_impl.BigIntCoderImpl())
for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.ArrayCoderImpl(coder_impl_fast.BigIntCoderImpl())
for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
def test_cython_map_coder(self):
data = [{'flink': 1, 'pyflink': 2, 'coder': None}]
python_field_coders = [coder_impl.MapCoderImpl(coder_impl.CharCoderImpl(),
coder_impl.BigIntCoderImpl())
for _ in range(len(data))]
cython_field_coders = [coder_impl_fast.MapCoderImpl(coder_impl_fast.CharCoderImpl(),
coder_impl_fast.BigIntCoderImpl())
for _ in range(len(data))]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
def test_cython_row_coder(self):
from pyflink.table import Row
field_count = 2
data = [Row(*[None if i % 2 == 0 else i for i in range(field_count)])]
python_field_coders = [coder_impl.RowCoderImpl([coder_impl.BigIntCoderImpl()
for _ in range(field_count)])]
cython_field_coders = [coder_impl_fast.RowCoderImpl([coder_impl_fast.BigIntCoderImpl()
for _ in range(field_count)])]
self.check_cython_coder(python_field_coders, cython_field_coders, [data])
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
tzulitai/flink
|
flink-python/pyflink/fn_execution/tests/test_fast_coders.py
|
Python
|
apache-2.0
| 11,430
|
# -*- coding: utf-8 -*-
#
# Kate/Pâté plugins to work with C++ code formatting
# Copyright 2010-2013 by Alex Turbov <i.zaufi@gmail.com>
#
#
# This software is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
'''Plugins to work with C++ code formatting'''
from PyKDE4.kdecore import i18nc
from PyKDE4.ktexteditor import KTextEditor
from PyKDE4.kdeui import KXMLGUIClient
import kate
import kate.ui
import kate.view
from libkatepate.decorators import *
from libkatepate import selection
def getLeftNeighbour(lineStr, column):
if column:
return lineStr[column - 1]
return None
def getRightNeighbour(lineStr, column):
if (column + 1) < len(lineStr):
return lineStr[column + 1]
return None
def looksLikeTemplateAngelBracket(lineStr, column):
    ''' Check if a symbol at given position looks like a template angle bracket
'''
assert(lineStr[column] in '<>')
#kate.kDebug("?LLTAB: ch='" + lineStr[column] + "'")
ln = getLeftNeighbour(lineStr, column)
#kate.kDebug("?LLTAB: ln='" + str(ln) + "'")
rn = getRightNeighbour(lineStr, column)
#kate.kDebug("?LLTAB: rn='" + str(rn) + "'")
# Detect possible template
if lineStr[column] == '<': # --[current char is '<']-------
if ln == '<' or rn == '<': # "<<" in any place on a line...
return False # ... can't be a template!
if ln == ' ' and rn == '=': # " <="
return False # operator<=()
if lineStr[0:column].strip().startswith('template'):# template declaration at the start of line
return True # ... possible smth like `template < typename ...'
if ln == ' ' and rn == ' ': # " < "
return False # operator<()
return True
if lineStr[column] == '>': # --[current char is '>']-------
if lineStr.strip().startswith('>'): # line starts w/ one or more '>'
return True # ... can be end of formatted `typedef <...\n> type;' for example
if ln == ' ' and rn == ' ': # " > "
return False # operator>()
if ln == ' ' and rn == '=': # ">="
return False # operator>=()
if ln == '-':
return False # operator->()
return True
pass
#
# TODO Probably decorators may help to simplify this code ???
#
def getRangeTopology(breakChars):
    '''Get the innermost range opened w/ '(' or '<' and closed w/ ')' or '>'
        @return tuple w/ current range, list of nested ranges
            and list of positions of break characters
        @note Assume the cursor is positioned within that range already.
'''
document = kate.activeDocument()
view = kate.activeView()
pos = view.cursorPosition()
stack = list()
nestedRanges = list()
breakPositions = list()
firstIteration = True
found = False
# Iterate from the current line towards a document start
for cl in range(pos.line(), -1, -1):
lineStr = str(document.line(cl))
if not firstIteration: # skip first iteration
pos.setColumn(len(lineStr)) # set current column to the end of current line
else:
firstIteration = False # do nothing on first iteration
# Iterate from the current column to a line start
for cc in range(pos.column() - 1, -1, -1):
#kate.kDebug("c: current position" + str(cl) + "," + str(cc) + ",ch='" + lineStr[cc] + "'")
# Check open/close brackets
if lineStr[cc] == ')': # found closing char: append its position to the stack
stack.append((cl, cc, False))
#kate.kDebug("o( Add position: " + str(stack[-1]))
continue
if lineStr[cc] == '(': # found open char...
if len(stack): # if stack isn't empty (i.e. there are some closing chars met)
#kate.kDebug("o( Pop position: " + str(stack[-1]))
nrl, nrc, isT = stack.pop() # remove last position from the stack
if not isT:
nestedRanges.append( # and append a nested range
KTextEditor.Range(cl, cc, nrl, nrc)
)
else:
raise LookupError(
i18nc(
'@info'
, 'Misbalanced brackets: at <numid>%1</numid>,<numid>%2</numid> and <numid>%3</numid>,<numid>%4</numid>'
, cl + 1, cc + 1, nrl + 1, nrc + 1
)
)
else: # otherwise,
openPos = (cl, cc + 1, False) # remember range start (exclude an open char)
#kate.kDebug("o( Found position: " + str(openPos))
found = True
break
continue
            # Check for template angle brackets
if lineStr[cc] == '>':
if looksLikeTemplateAngelBracket(lineStr, cc):
stack.append((cl, cc, True))
#kate.kDebug("o< Add position: " + str(stack[-1]))
#else:
#kate.kDebug("o< Doesn't looks like template: " + str(cl) + "," + str(cc))
continue
if lineStr[cc] == '<':
if not looksLikeTemplateAngelBracket(lineStr, cc):
#kate.kDebug("o< Doesn't looks like template: " + str(cl) + "," + str(cc + 1))
pass
elif len(stack): # if stack isn't empty (i.e. there are some closing chars met)
#kate.kDebug("o< Pop position: " + str(stack[-1]))
nrl, nrc, isT = stack.pop() # remove last position from the stack
if isT:
nestedRanges.append( # and append a nested range
KTextEditor.Range(cl, cc, nrl, nrc)
)
else:
raise LookupError(
i18nc(
'@info'
, 'Misbalanced brackets: at <numid>%1</numid>,<numid>%2</numid> and <numid>%3</numid>,<numid>%4</numid>'
, cl + 1, cc + 1, nrl + 1, nrc + 1
)
)
else:
openPos = (cl, cc + 1, True) # remember range start (exclude an open char)
#kate.kDebug("o< Found position: " + str(openPos))
found = True
break
continue
if lineStr[cc] in breakChars and len(stack) == 0:
breakPositions.append(KTextEditor.Cursor(cl, cc))
# Did we found smth on the current line?
if found:
break # Yep! Break the outer loop
if not found:
return (KTextEditor.Range(), list(), list()) # Return empty ranges if nothing found
assert(len(stack) == 0) # stack expected to be empty!
    breakPositions.reverse() # reverse the breakers list, since the positions were found in reverse order
# Iterate from the current position towards the end of a document
pos = view.cursorPosition() # get current cursor position again
firstIteration = True
found = False
for cl in range(pos.line(), document.lines()):
lineStr = str(document.line(cl))
if not firstIteration: # skip first iteration
pos.setColumn(0) # set current column to the start of current line
else:
firstIteration = False # do nothing on first iteration
for cc in range(pos.column(), len(lineStr)):
#kate.kDebug("c: current position" + str(cl) + "," + str(cc) + ",ch='" + lineStr[cc] + "'")
# Check open/close brackets
if lineStr[cc] == '(':
stack.append((cl, cc, False))
#kate.kDebug("c) Add position: " + str(stack[-1]))
continue
if lineStr[cc] == ')':
if len(stack):
#kate.kDebug("c) Pop position: " + str(stack[-1]))
nrl, nrc, isT = stack.pop() # remove a last position from the stack
if not isT:
nestedRanges.append( # and append a nested range
KTextEditor.Range(nrl, nrc, cl, cc)
)
else:
raise LookupError(
i18nc(
'@info'
, 'Misbalanced brackets: at <numid>%1</numid>,<numid>%2</numid> and <numid>%3</numid>,<numid>%4</numid>'
, nrl + 1, nrc + 1, cl + 1, cc + 1
)
)
else:
closePos = (cl, cc, False) # remember the range end
#kate.kDebug("c) Found position: " + str(closePos))
found = True
break
continue
            # Check for template angle brackets
if lineStr[cc] == '<':
if looksLikeTemplateAngelBracket(lineStr, cc):
stack.append((cl, cc, True))
#kate.kDebug("c> Add position: " + str(stack[-1]))
#else:
#kate.kDebug("c> Doesn't looks like template: " + str(cl) + "," + str(cc))
continue
if lineStr[cc] == '>':
if not looksLikeTemplateAngelBracket(lineStr, cc):
#kate.kDebug("c> Doesn't looks like template: " + str(cl) + "," + str(cc))
pass
elif len(stack): # if stack isn't empty (i.e. there are some closing chars met)
#kate.kDebug("c> Pop position: " + str(stack[-1]))
nrl, nrc, isT = stack.pop() # remove last position from the stack
if isT:
nestedRanges.append( # and append a nested range
KTextEditor.Range(cl, cc, nrl, nrc)
)
else:
raise LookupError(
i18nc(
'@info'
, 'Misbalanced brackets: at <numid>%1</numid>,<numid>%2</numid> and <numid>%3</numid>,<numid>%4</numid>'
, nrl + 1, nrc + 1, cl + 1, cc + 1
)
)
else:
closePos = (cl, cc, True) # remember the range end
kate.kDebug("c> Found position: " + str(closePos))
found = True
break
continue
if lineStr[cc] in breakChars and len(stack) == 0:
breakPositions.append(KTextEditor.Cursor(cl, cc))
# Did we found smth on the current line?
if found:
break # Yep! Break the outer loop
if not found:
return (KTextEditor.Range(), list(), list()) # Return empty ranges if nothing found
assert(len(stack) == 0) # stack expected to be empty!
if openPos[2] != closePos[2]:
raise LookupError(
i18nc(
'@info'
, 'Misbalanced brackets: at <numid>%1</numid>,<numid>%2</numid> and <numid>%3</numid>,<numid>%4</numid>'
, openPos[0] + 1, openPos[1] + 1, closePos[0] + 1, closePos[1] + 1
)
)
return (KTextEditor.Range(openPos[0], openPos[1], closePos[0], closePos[1]), nestedRanges, breakPositions)
def boostFormatText(textRange, indent, breakPositions):
document = kate.activeDocument()
originalText = document.text(textRange)
#kate.kDebug("Original text:\n'" + originalText + "'")
    # Slice the text within the given range into pieces to be realigned
ranges = list()
prevPos = textRange.start()
breakCh = None
    indentStr = ' ' * (indent + 2)
breakPositions.append(textRange.end())
for b in breakPositions:
#kate.kDebug("* prev pos: " + str(prevPos.line()) + ", " + str(prevPos.column()))
#kate.kDebug("* current pos: " + str(b.line()) + ", " + str(b.column()))
chunk = (document.text(KTextEditor.Range(prevPos, b))).strip()
#kate.kDebug("* current chunk:\n'" + chunk + "'")
t = ('\n ').join(chunk.splitlines())
#kate.kDebug("* current line:\n'" + t + "'")
if breakCh:
outText += indentStr + breakCh + ' ' + t + '\n'
else:
outText = '\n' + indentStr + ' ' + t + '\n'
breakCh = document.character(b)
prevPos = KTextEditor.Cursor(b.line(), b.column() + 1)
outText += indentStr
#kate.kDebug("Out text:\n'" + outText + "'")
if outText != originalText:
document.startEditing()
document.replaceText(textRange, outText)
document.endEditing()
@kate.action
@check_constraints
@selection_mode(selection.NORMAL)
def boostFormat():
'''Format function's/template's parameters list (or `for`'s) in a boost-like style
    I.e. the 2nd and subsequent parameters get a leading comma/semicolon,
    and the closing ')' or '>' goes on a separate line.
    THIS STYLE IS REALLY BETTER WHEN YOU HAVE A LONG PARAMETER LIST!
'''
document = kate.activeDocument()
view = kate.activeView()
try:
r, nestedRanges, breakPositions = getRangeTopology(',')
except LookupError as error:
kate.ui.popup(
i18nc('@title:window', 'Alert')
, i18nc(
'@info:tooltip'
, 'Failed to parse C++ expression:<nl/><message>%1</message>', error
)
, 'dialog-information'
)
return
if r.isEmpty(): # Is range empty?
kate.ui.popup(
i18nc('@title:window', 'Alert')
, i18nc(
'@info:tooltip'
, 'Failed to parse C++ expression:<nl/><message>%1</message>'
, i18nc('@info:tooltip', "Did not find anything to format")
)
, 'dialog-information'
)
        return # Nothing interesting was found...
# Rescan the range w/ ';' as breaker added if current range is a `for` statement
if document.line(r.start().line())[0:r.start().column() - 1].rstrip().endswith('for'):
try:
r, nestedRanges, breakPositions = getRangeTopology(',;')
except LookupError as error:
kate.ui.popup(
i18nc('@title:window', 'Alert')
, i18nc(
'@info:tooltip'
, 'Failed to parse C++ expression:<nl/><message>%1</message>', error
)
, 'dialog-information'
)
return
    # Going to format the text within the selected range
lineStr = document.line(r.start().line())
lineStrStripped = lineStr.lstrip()
indent = len(lineStr) - len(lineStrStripped)
if lineStrStripped.startswith(', '):
indent += 2
text = boostFormatText(r, indent, breakPositions)
def boostUnformatText(textRange, breakPositions):
document = kate.activeDocument()
originalText = document.text(textRange)
#kate.kDebug("Original text:\n'" + originalText + "'")
# Join text within a selected range
prevPos = textRange.start()
outText = ''.join([line.strip() for line in originalText.splitlines()])
#kate.kDebug("Out text:\n'" + outText + "'")
if outText != originalText:
document.startEditing()
document.replaceText(textRange, outText)
document.endEditing()
@kate.action
@check_constraints
@selection_mode(selection.NORMAL)
def boostUnformat():
'''Merge everything between '(' and ')' into a single line'''
document = kate.activeDocument()
view = kate.activeView()
try:
r, nestedRanges, breakPositions = getRangeTopology(',')
except LookupError as error:
kate.ui.popup(
i18nc('@title:window', 'Alert')
, i18nc(
'@info:tooltip'
, 'Failed to parse C++ expression:<nl/><message>%1</message>', error
)
, 'dialog-information'
)
return
if r.isEmpty(): # Is range empty?
kate.ui.popup(
i18nc('@title:window', 'Alert')
, i18nc(
'@info:tooltip'
, 'Failed to parse C++ expression:<nl/><message>%1</message>'
, i18nc('@info:tooltip', "Did not find anything to format")
)
, 'dialog-information'
)
        return # Nothing interesting was found...
# Rescan the range w/ ';' as breaker added if current range is a `for` statement
if document.line(r.start().line())[0:r.start().column() - 1].rstrip().endswith('for'):
try:
r, nestedRanges, breakPositions = getRangeTopology(',;')
except LookupError as error:
kate.ui.popup(
i18nc('@title:window', 'Alert')
, i18nc(
'@info:tooltip'
, 'Failed to parse C++ expression:<nl/><message>%1</message>', error
)
, 'dialog-information'
)
return
    # Going to unformat the text within the selected range
text = boostUnformatText(r, breakPositions)
@kate.view.selectionChanged
def toggleSelectionSensitiveActions(view):
clnt = kate.getXmlGuiClient()
if not view.selection():
clnt.stateChanged('has_no_selection')
else:
clnt.stateChanged('has_no_selection', KXMLGUIClient.StateReverse)
|
hlamer/kate
|
addons/kate/pate/src/plugins/format.py
|
Python
|
lgpl-2.1
| 19,591
|
from awxkit.api.mixins import HasCreate, HasInstanceGroups, HasNotifications, DSAdapter
from awxkit.utils import random_title, suppress, PseudoNamespace
from awxkit.api.resources import resources
import awxkit.exceptions as exc
from . import base
from . import page
class Organization(HasCreate, HasInstanceGroups, HasNotifications, base.Base):
NATURAL_KEY = ('name',)
def add_admin(self, user):
if isinstance(user, page.Page):
user = user.json
with suppress(exc.NoContent):
self.related.admins.post(user)
def add_user(self, user):
if isinstance(user, page.Page):
user = user.json
with suppress(exc.NoContent):
self.related.users.post(user)
def payload(self, **kwargs):
payload = PseudoNamespace(name=kwargs.get('name') or 'Organization - {}'.format(random_title()),
description=kwargs.get('description') or random_title(10))
return payload
def create_payload(self, name='', description='', **kwargs):
payload = self.payload(name=name, description=description, **kwargs)
payload.ds = DSAdapter(self.__class__.__name__, self._dependency_store)
return payload
def create(self, name='', description='', **kwargs):
payload = self.create_payload(name=name, description=description, **kwargs)
return self.update_identity(Organizations(self.connection).post(payload))
page.register_page([resources.organization,
(resources.organizations, 'post')], Organization)
class Organizations(page.PageList, Organization):
pass
page.register_page([resources.organizations,
resources.user_organizations,
resources.project_organizations], Organizations)
|
GoogleCloudPlatform/sap-deployment-automation
|
third_party/github.com/ansible/awx/awxkit/awxkit/api/pages/organizations.py
|
Python
|
apache-2.0
| 1,807
|
# -*- coding: utf-8 -*-
"""
test_gluster_bridge
----------------------------------
Tests for `gluster_bridge` module.
"""
from tendrl.gluster_bridge.tests import base
class TestGluster_bridge(base.TestCase):
def test_something(self):
assert True
|
shtripat/gluster_bridge
|
tendrl/gluster_bridge/tests/test_gluster_bridge.py
|
Python
|
lgpl-2.1
| 265
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2013 ccheng <ccheng@cchengs-MacBook-Pro.local>
#
# Distributed under terms of the MIT license.
"""
"""
import urllib
from one_server import create_app, mongo
import one_server
import json
from bson.json_util import dumps
app = create_app('one_server.settings.DevConfig', env='dev')
test_app = app.test_client()
context = app.test_request_context('/')
context.push()
# Clear all data.
mongo.db.user.remove()
mongo.db.comment.remove()
mongo.db.ride.remove()
mongo.db.passenger.remove()
one_server.init_db()
token = str(mongo.db.user.find_one({'nickname': 'asdf'})['_id'])
# This user is used to test functions that need two users.
token2 = str(mongo.db.user.find_one({'nickname': 'fdsa'})['_id'])
def make_url_end(url, params):
if not params.get('token'):
params['token'] = token
if params:
return '%s?%s' % (url, urllib.urlencode(params))
else:
return url
def parse_json(raw):
    try:
        return json.loads(raw)
    except ValueError:
        raise Exception("%s is not a valid json string" % raw)
def to_dict(c):
"""Convert mongodb cursor object to python dict object"""
return json.loads(dumps(c))
class TestBase(object):
def post(self, end, params):
if not params.get('token'):
params['token'] = token
rv = test_app.post(end, data=params)
return parse_json(rv.data), rv.status_code
def get(self, end, params={}):
rv = test_app.get(make_url_end(end, params))
return parse_json(rv.data), rv.status_code
def get_result(self, end, params={}):
rv = test_app.get(make_url_end(end, params))
return parse_json(rv.data).get('result'), rv.status_code
def get_token_user(self):
data, status_code = self.get('specific_user', {"id": token})
assert status_code == 200
return data['result']
def insert_ride_item(uid=token):
lat = 39.983424 + float(1)/(10**4)
lng = 116.322987 + float(1)/(10**4)
params = {
'title': 't',
'start_off_time': '1922-02-01 21:22',
'wait_time': '1922-02-01 21:22',
'start_lat': lat,
'start_lng': lng,
'dest_lat': lat,
'dest_lng': lng,
'price': 2,
'people': 2,
'car_type': 1,
'comment': 'asdf',
'debug': 1,
'token': uid,
}
rv = test_app.post('rides', data=params)
assert rv.status_code == 200
def insert_passenger_item(uid=token):
lat = 39.983424 + float(1)/(10**4)
lng = 116.322987 + float(1)/(10**4)
params = {
'title': u'阿斯蒂芬',
'start_off_time': '1922-02-01 21:22',
'wait_time': '1',
'start_lat': lat,
'start_lng': lng,
'dest_lat': lat,
'dest_lng': lng,
'price': 2,
'people': 2,
'comment': u'撒旦法士大夫',
'debug': 1,
'token': uid,
}
rv = test_app.post('passengers', data=params)
assert rv.status_code == 200
|
ufo22940268/one-server
|
tests/base.py
|
Python
|
bsd-2-clause
| 3,141
|
import os
from setuptools import setup
import sys
if sys.version_info < (2, 6):
raise Exception('Wiggelen requires Python 2.6 or higher.')
install_requires = []
# Python 2.6 does not include the argparse module.
try:
import argparse
except ImportError:
install_requires.append('argparse')
# Python 2.6 does not include OrderedDict.
try:
from collections import OrderedDict
except ImportError:
install_requires.append('ordereddict')
try:
with open('README.rst') as readme:
long_description = readme.read()
except IOError:
long_description = 'See https://pypi.python.org/pypi/wiggelen'
# This is quite the hack, but we don't want to import our package from here
# since that's recipe for disaster (it might have some uninstalled
# dependencies, or we might import another already installed version).
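# Illustrative example: a line like "__version_info__ = (1, 0, 3)" ends up as
# distmeta['__version_info__'] == '1.0.3'.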
distmeta = {}
for line in open(os.path.join('wiggelen', '__init__.py')):
try:
field, value = (x.strip() for x in line.split('='))
except ValueError:
continue
if field == '__version_info__':
value = value.strip('[]()')
value = '.'.join(x.strip(' \'"') for x in value.split(','))
else:
value = value.strip('\'"')
distmeta[field] = value
setup(
name='wiggelen',
version=distmeta['__version_info__'],
description='Working with wiggle tracks in Python',
long_description=long_description,
author=distmeta['__author__'],
author_email=distmeta['__contact__'],
url=distmeta['__homepage__'],
license='MIT License',
platforms=['any'],
packages=['wiggelen'],
install_requires=install_requires,
entry_points = {
'console_scripts': ['wiggelen = wiggelen.commands:main']
},
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
],
keywords='bioinformatics'
)
|
martijnvermaat/wiggelen
|
setup.py
|
Python
|
mit
| 2,054
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as file:
long_description = file.read()
setup(name='passgen',
version='1.1.1',
description='Random password generator',
long_description=long_description,
url='https://github.com/soslan/passgen',
author='Soslan Khubulov',
author_email='soslanx@gmail.com',
license='MIT',
package_dir={'': 'src'},
entry_points={
'console_scripts': [
'passgen = passgen:main',
],
},
py_modules=['passgen'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Topic :: Utilities',
],
keywords='password random',
)
|
soslan/passgen
|
setup.py
|
Python
|
mit
| 1,122
|
def break_numbers(stuff):
    """Split the comma-separated input into a list of number strings."""
    numbers = stuff.split(',')
    return numbers
def sort_numbers(numbers):
    """Sort the number strings in ascending numeric order."""
    numbers.sort(key=int)
    return numbers
def add_numbers(sorted_numbers):
"""This functionality will add numbers for us"""
a1 = sorted_numbers.pop()
a2 = sorted_numbers.pop()
a3 = sorted_numbers.pop()
a4 = sorted_numbers.pop()
a5 = sorted_numbers.pop()
total = int(a1) + int(a2) + int(a3) + int(a4) + int(a5)
return total
def mean(total):
    # Python 2 integer division truncates, which matches the %d format below.
    mean = total / 5
    return mean
stuff = raw_input("Enter 5 numbers separated by commas: ")
a1 = break_numbers(stuff)
print "Numbers have been split with , as delimiter: %s" % a1
a2 = sort_numbers(a1)
print "Numbers are sorted : %s" % a2
a3 = add_numbers(a2)
print "Adding all the numbers we get: %d" % a3
a4 = mean(a3)
print "Mean of Numbers is: %d " % a4
|
mrniranjan/python-scripts
|
reboot/math43.py
|
Python
|
gpl-2.0
| 911
|
from pygame_font import *
|
gmittal/aar-nlp-research-2016
|
src/pygame-pygame-6625feb3fc7f/symbian/lib/font.py
|
Python
|
mit
| 28
|
from flask import render_template
from flask.ext.login import current_user
def sponsors():
return render_template('sponsors.html', active_page='sponsors')
|
PhoenixRacing/PhoenixRacingWebApp-noregrets
|
application/controllers/sponsors.py
|
Python
|
bsd-3-clause
| 157
|
#!/usr/bin/python
import yaml
import re
import os
import subprocess
import ConfigParser
import time
import hashlib
import sys
'''This file will be looked up under OPKG_DIR/conf'''
OPKG_CONF_FILE='/etc/opkg/conf/opkg.env'
META_FILE_PREVIOUS='Previous.meta'
META_FILE_LATEST='Latest.meta'
EXTRA_PARAM_DELIM=','
EXTRA_PARAM_KEY_VAL_SEP='='
''' Classes '''
'''Tracks local env configuration'''
class EnvConfig():
def __init__(self):
self.config_file=OPKG_CONF_FILE
self.conf=None
def setConfigFile(self,config_file):
self.config_file=config_file
def loadConfigFile(self):
self.conf = ConfigParser.ConfigParser()
if os.path.isfile(self.config_file) and os.access(self.config_file, os.R_OK):
self.conf.read(self.config_file)
def updateConfigItem(self,section,key,val):
try:
self.conf.set(section,key,val)
        except ConfigParser.Error:
print "Warning: Cannot locate config item "+key+" in "+self.config_file
return False
return True
def getConfigItem(self,section,item):
return self.conf.get(section,item)
'''Class to read opkg manifest '''
class Manifest():
def __init__(self, manifest_path):
self.manifest_file=manifest_path
self.manifest_dict=None
self.rel_num=None
with open(self.manifest_file, 'r') as stream:
try:
                self.manifest_dict=yaml.safe_load(stream)
except yaml.YAMLError as exc:
print "Error: Problem loading manifest file "+self.manifest_file
print(exc)
return
if 'rel_num' not in self.manifest_dict:
print "rel_num not found in "+self.manifest_file
self.manifest_dict=None
return
self.rel_num=self.manifest_dict['rel_num']
def getConfig(self):
return self.manifest_dict
    '''The action lines in the manifest may be plain strings or single-entry dicts;
    normalizing them into a flat list of strings is easier to deal with down the road.
    '''
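    # Illustrative example: a manifest section such as
    #   files:
    #     - bin/run.sh: build/run.sh
    #     - docs/README
    # is normalized to ['bin/run.sh:build/run.sh', 'docs/README'].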
def getSectionItems(self, section):
if section not in self.manifest_dict: return None
lines = list()
for item in self.manifest_dict[section]:
if type(item) is str:
lines.append(item)
elif type(item) is dict:
for key in item:
val = item[key]
lines.append(key + ':' + val)
break
else:
print "Error: Unknown format in "+self.manifest_file+", section: "+section
return None
return lines
'''Class for core Open Pkg'''
class Pkg():
def __init__(self,name):
if re.search("\W", name) is not None:
print "Error: Illegal character in package name (" + name + ")"
return
self.name=name
self.rel_num=None
self.rel_ts=None
self.is_release=False
self.manifest_file = name + '.yml'
self.tarball_name = name + '.tgz'
self.md5=None #md5 of package being installed.
self.manifest=None
self.build_root = os.getcwd()
self.manifest_path=self.build_root+'/'+self.manifest_file
'''stage_dir - where files to create a tarball are staged and
files from a tarball are extracted for deployment.
By default, the stage_dir is set for action create pkg.
'''
self.stage_dir = self.build_root + '/.pkg/' + name
self.deploy_dir=self.stage_dir+'/.deploy'
self.env_conf=None
self.install_meta=None #meta data of existing installation
self.install_md5=None #md5 of currently installed version
@staticmethod
def parseName(pkg_label):
        '''pkg can be specified in the following ways:
        - /path/to/mypkg.tgz          -- tarball available locally
        - /path/to/mypkg-rel_num.tgz  -- tarball available locally
        - mypkg
        - mypkg-rel_num
        '''
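        # e.g. parseName('/tmp/mypkg-1.2.tgz') returns
        # ('mypkg', 'mypkg-1.2', 'mypkg-1.2.tgz') (illustrative example).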
pkg_name_rel_num = os.path.basename(pkg_label)
pkg_name_rel_num = pkg_name_rel_num.replace('.tgz', '')
tarball_name = pkg_name_rel_num + '.tgz'
pkg_name = re.split('-', pkg_name_rel_num)[0]
return pkg_name,pkg_name_rel_num,tarball_name
@staticmethod
def parseTarballName(tarball_name):
rel_num, rel_ts = 'dev', None
'''The dev version will not have any rel_num or rel_ts
The parsing is based on the assumption that the tarball names can have only 2 formats:
name.tgz - dev
name-rel_num-rel_ts.tgz - release
'''
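        # e.g. 'mypkg-1.2-1700000000.tgz' yields ('1.2', '1700000000'),
        # while 'mypkg.tgz' yields ('dev', None) (illustrative example).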
m = re.search('.+?-(.+?)-(.+).tgz', tarball_name)
if m:
rel_num = m.group(1)
rel_ts = m.group(2)
return rel_num,rel_ts
def setManifest(self,f):
self.manifest_path=f
def loadManifest(self):
self.manifest = Manifest(self.manifest_path)
def setRelNum(self,rel_num):
self.rel_num=rel_num
def setRelTs(self,rel_ts):
self.rel_ts=rel_ts
'''Meta file has this syntax: pkg_name,rel_num,rel_ts,pkg_md5,deploy_ts'''
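    # e.g. "mypkg,1.2,1700000000,d41d8cd98f00b204e9800998ecf8427e,1700000100" (illustrative)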
def loadMeta(self):
self.install_meta=dict()
meta_dir=self.env_conf['basic']['opkg_dir'] + '/meta/' + self.name
meta_path = meta_dir + "/" + META_FILE_LATEST
self.install_meta['latest_install']=self.loadMetaFile(meta_path)
if not self.install_meta['latest_install']:
print "Info: No active installation of "+self.name+" found at "+self.env_conf['basic']['opkg_dir']
meta_path = meta_dir + "/" + META_FILE_PREVIOUS
self.install_meta['previous_install'] = self.loadMetaFile(meta_path)
if not self.install_meta['previous_install']:
print "Info: No previous installation of "+self.name+" found."
if self.install_meta['latest_install']:
self.install_md5 = self.install_meta['latest_install']['pkg_md5']
def getMeta(self):
return self.install_meta
'''Load .meta files that keep track of deployments and verifies the data in those.
The meta data on package deployment is a single line with attrs delimited by , in the following order:
pkg_name,pkg_rel_num,pkg_ts,pkg_md5,deploy_ts
'''
def loadMetaFile(self,file_path):
if not os.path.isfile(file_path): return None
        content = loadFile(file_path)
        install_info = content.strip().split(',')
if len(install_info) < 5: return None
meta=dict()
meta['pkg_name']=install_info[0]
meta['pkg_rel_num'] = install_info[1]
meta['pkg_ts'] = install_info[2]
meta['pkg_md5'] = install_info[3]
meta['deploy_ts'] = install_info[4]
return meta
'''Reset install meta files upon successful installation of a package.'''
def registerInstall(self,deploy_inst):
meta_dir=deploy_inst.opkg_dir + "/meta/" + self.name
meta_file_previous = meta_dir + "/" + META_FILE_PREVIOUS
meta_file_latest = meta_dir + "/" + META_FILE_LATEST
runCmd("mkdir -p "+meta_dir)
if os.path.exists(meta_file_latest):
if not execOSCommand("mv -f " + meta_file_latest + " " + meta_file_previous):
print "Problem moving " + meta_file_latest + " as " + meta_file_previous
return False
        '''Meta file has this syntax: pkg_name,rel_num,rel_ts,pkg_md5,deploy_ts'''
rel_num=''
if self.rel_num: rel_num=self.rel_num
rel_ts=0
if self.rel_ts: rel_ts=self.rel_ts
strx = self.name+','+rel_num+','+str(rel_ts)+','+self.pkg_md5+','+deploy_inst.deploy_ts
cmd = "echo " + strx + ">" + meta_file_latest
if not execOSCommand(cmd):
print "Error: Couldn't record the package installation."
return False
self.loadMeta()
return True
def setRelease(self,is_release=True):
self.is_release=is_release
def setEnvConfig(self,env_conf):
self.env_conf=env_conf
def create(self):
self.loadManifest() #the default manifest points to that in build dir
runCmd("mkdir -p " + self.stage_dir)
os.chdir(self.stage_dir)
runCmd("rm -rf *")
'''Copy manifest to the deploy folder in archive'''
runCmd('mkdir -p ' + self.deploy_dir)
if not execOSCommand('cp ' + self.manifest_path + ' ' + self.deploy_dir + '/'):
print "Error: Problem copying package manifest."
return False
'''Stage files content for archiving'''
content_lines=self.manifest.getSectionItems('files')
if content_lines:
for content_line in content_lines:
tgt,src=re.split(':',content_line)
if not self.stageContent(self.build_root+'/'+src,tgt):
print "Error: Cannot copy content at "+src+" for archiving."
return False
'''Make tarball and clean up the staging area'''
if self.is_release:
            rel_num=self.manifest.rel_num  # Manifest stores rel_num as an attribute; it is not subscriptable
self.tarball_name = self.name + '-' + rel_num + '.tgz'
os.chdir(self.stage_dir)
rc = runCmd("tar czf " + self.tarball_name + ' * .deploy')
if rc != 0:
print "Error: Couldn't create package " + self.tarball_name
return False
os.chdir(self.build_root)
rc = runCmd('mv ' + self.stage_dir + '/' + self.tarball_name + ' ./')
if rc == 0:
runCmd("rm -rf " + self.stage_dir)
print "Package " + self.tarball_name + " has been created."
else:
print "Error: Package " + self.tarball_name + " couldn't be created."
return False
def stageContent(self,src,tgt):
os.chdir(self.stage_dir)
if os.path.isdir(src):
'''skip build folder silently, but individual files can still be added.'''
if runCmd("mkdir -p " + tgt) != 0: return False
if runCmd("cp -r " + src + '/. ' + tgt + '/') != 0: return False
else:
tgt_dir = os.path.dirname(tgt)
if tgt_dir != '':
if runCmd("mkdir -p " + tgt_dir) != 0: return False
if runCmd("cp " + src + ' ' + tgt) != 0: return False
return True
'''Execute the deploy playbook for a package specified in the manifest'''
def install(self,tarball_path,deploy_inst):
deploy_inst.logHistory("Installing package "+self.name+" using "+tarball_path)
''' Track the md5 of package being installed '''
self.pkg_md5=getFileMD5(tarball_path)
'''Extract the tarball in stage_dir, to prepare for deploy playbook to execute steps'''
stage_dir=deploy_inst.opkg_dir+'/pkgs/'+self.name+'/'+deploy_inst.deploy_ts
if not execOSCommand('mkdir -p ' + stage_dir):
return
os.chdir(stage_dir)
if not execOSCommand('tar xzf ' + tarball_path):
print "Error: Problem extracting " + tarball_path + " in " + stage_dir
return False
'''Setup install location for the new deployment of pkg'''
deploy_dir = deploy_inst.deploy_root + '/' + self.name
deploy_inst.extra_vars['OPKG_DEPLOY_DIR'] = deploy_dir
if not execOSCommand('mkdir -p ' + deploy_dir):
return False
        '''Resolve the manifest, plus the files listed under 'templates' and 'replaces',
        with the actual values defined for this specific deployment.'''
os.chdir('.deploy')
manifest_path=os.getcwd()+'/'+self.manifest_file
tmpl_inst=Tmpl(manifest_path)
if not tmpl_inst.resolveVars(deploy_inst.getVars()):
print "Error: Problem resolving "+self.manifest_file
return False
pkg_manifest=Manifest(manifest_path)
'''Run pre-deploy steps.
These are run immediately after the tarball is extracted in stage_dir
'''
steps = pkg_manifest.getSectionItems('pre_deploy')
if steps:
for step in steps:
if not execOSCommand(step):
print "Error: Problem executing the following step in pre_deploy phase: "+step
return False
'''copy targets entries to install_root'''
targets=pkg_manifest.getSectionItems('targets')
if targets:
for target in targets:
tgt,src=re.split(':',target)
source_path,target_path = src,tgt
if not re.match("^\/", src): source_path = stage_dir + "/" + src
if not re.match("^\/", tgt): target_path = deploy_dir + "/" + tgt
if not createTargetPath(source_path, target_path):
print "Error: Base dir of " + target_path + " cannot be created."
return False
cmd='cp '+source_path+' '+target_path #if a file
if os.path.isdir(source_path): cmd='cp -r '+source_path+'/* '+target_path+'/'
if not execOSCommand(cmd):
print "Error: Problem copying from " + stage_dir + ". command: " + cmd
return False
'''Generate deployed template files with actual values, variables are marked as {{ var }} '''
templates = pkg_manifest.getSectionItems('templates')
if templates:
for tmpl in templates:
tmpl_path=tmpl
if not re.match("^\/", tmpl): tmpl_path = deploy_dir + "/" + tmpl
tmpl_inst=Tmpl(tmpl_path)
if not tmpl_inst.resolveVars(deploy_inst.getVars()):
print "Error: Couldn't install resolved files for those marked as templates, with real values."
return False
'''Replaces tokens in files flagged for that, tokens are unmarked like PORT=80 etc'''
if 'replaces' in pkg_manifest.getConfig():
for replaces_file in pkg_manifest.getConfig()['replaces']:
'''Each entry for replacement in the replaces_file is a dict as replacement entries are delimited with :'''
replaces_list=list()
for token_dict in pkg_manifest.getConfig()['replaces'][replaces_file]:
for key in token_dict:
replaces_list.append(key+Tmpl.TMPL_KEY_VAL_DELIM+token_dict[key])
break
replaces_path=replaces_file
if not re.match("^\/", replaces_file): replaces_path = deploy_dir + "/" + replaces_file
tmpl_inst=Tmpl(replaces_path)
if not tmpl_inst.replaceTokens(replaces_list):
print "Error: Couldn't install resolved files for those marked with having tokens in the 'replaces' section, with real values."
return False
'''Symlinks'''
symlinks = pkg_manifest.getSectionItems('symlinks')
if symlinks:
for symlink in symlinks:
tgt_path,src_path=re.split(':',symlink)
if not re.match("^\/", tgt_path): tgt_path = deploy_dir + "/" + tgt_path
if not re.match("^\/", src_path): src_path = deploy_dir + "/" + src_path
cmd = "ln -sfn " + src_path + " " + tgt_path
if not execOSCommand(cmd):
print "Error: Problem creating symlink " + cmd
return False
'''Permissions
The list items will be returned in the format, dir:owner:group mod; eg: 'apps:root:root 0444'
Parse each line accordingly.
'''
perms = pkg_manifest.getSectionItems('permissions')
if perms:
for perm in perms:
fpath, perm_opt = perm.split(':',1)
chown_opt,chmod_opt = perm_opt.split(' ')
if not re.match("^\/", fpath): fpath = deploy_dir + "/" + fpath
cmd="chown -R "+chown_opt+" "+fpath+';chmod -R '+chmod_opt+' '+fpath
if not execOSCommand(cmd):
print "Error: Problem setting permissions on " + fpath+'. Command: '+cmd
return False
'''Post-deploy steps'''
steps = pkg_manifest.getSectionItems('post_deploy')
if steps:
for step in steps:
if not execOSCommand(step):
print "Error: Problem executing the following step in post_deploy phase: " + step
return False
''' Register the installation '''
self.registerInstall(deploy_inst)
'''delete the stage_dir upon successful installation of the package'''
os.chdir("/tmp") # a workaround to avoid system warning when curr dir stage_dir is deleted.
if not execOSCommand('rm -r ' + stage_dir):
print "Warning: Couldn't delete " + stage_dir
print "Info: Package "+self.name+" has been installed at "+deploy_dir
return True
def isInstalled(self,tarball_path):
if not self.getMeta()['latest_install']:
return False
md5_local = self.install_meta['latest_install']['pkg_md5']
return (getFileMD5(tarball_path) == md5_local)
'''Class to process the main opkg actions'''
class opkg():
ACTIONS=['create','ls','rls','get','put','deploy','clean','start','stop','restart','rollback']
'''action specific required configs'''
ACTION_CONFIGS={
'deploy': ['install_root'],
'ls':['install_root'],
'put':['repo_type','repo_path'],
'get': ['repo_type', 'repo_path']
}
OPKG_LABEL='opkg'
OPKG_VERSION='0.1.0'
def __init__(self,params):
self.arg_dict=dict()
self.arg_dict['opkg_cmd']=params[0]
self.action=None #This will be available in the env as OPKG_ACTION
self.extra_vars=dict()
self.conf_file=None
self.configs=dict()
self.pkgs=None
self.opkg_dir=None
if len(params) < 2:
self.printHelp()
Exit(0)
'''action is positional'''
self.action=params[1]
'''The args can be in these formats: argx,--opt_x,--opt_y=opt_val'''
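        # e.g. ['opkg', 'deploy', '--pkg=app1,app2', '--force'] yields
        # {'opkg_cmd': 'opkg', 'deploy': '', 'pkg': 'app1,app2', 'force': ''} (illustrative)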
for argx in params[1:]:
if re.match("^--", argx) is not None:
m = re.split('--', argx)
n = re.match("^(.+?)=(.+)", m[1])
if n is not None:
self.arg_dict[n.group(1)] = n.group(2)
else:
self.arg_dict[m[1]] = ''
else:
self.arg_dict[argx] = ''
'''Set extra-vars dict'''
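        # e.g. --extra-vars=ENV=prod,REGION=us yields {'ENV': 'prod', 'REGION': 'us'} (illustrative)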
if 'extra-vars' in self.arg_dict:
extra_vars = re.split(EXTRA_PARAM_DELIM,self.arg_dict['extra-vars'])
for extra_var in extra_vars:
k, v = re.split(EXTRA_PARAM_KEY_VAL_SEP,extra_var)
self.extra_vars[k] = v
if self.arg_dict.has_key('help'):
self.printHelp()
Exit(0)
elif self.arg_dict.has_key('version'):
self.printVersion()
Exit(0)
        '''Check if a config file is specified; if it exists, load and initialize configs from it.
        Note: config items are grouped under sections in the config file, but the
        command line has no way to qualify an item with a section, so item names
        should be unique across sections.
        '''
opkg_conf_file=OPKG_CONF_FILE
if 'opkg_dir' in self.arg_dict: opkg_conf_file=self.arg_dict['opkg_dir']+'/conf/opkg.env'
self.conf_file=opkg_conf_file
self.loadConfigFile()
self.opkg_dir=self.configs['basic']['opkg_dir']
'''Override config items specified in config file with those from command-line'''
for section in self.configs:
for item in self.configs[section]:
if item in self.arg_dict: self.configs[section][item]=self.arg_dict[item]
'''Parse out common options such as pkg'''
if 'pkg' in self.arg_dict:
self.pkgs=re.split(',',self.arg_dict['pkg'])
return
'''Loads configs from opkg.env as a dictionary'''
def loadConfigFile(self):
Config = ConfigParser.ConfigParser()
Config.read(self.conf_file)
sections=Config.sections()
for section in sections:
self.configs[section]=dict()
for item in Config.options(section):
self.configs[section][item]=Config.get(section,item)
return
def printVersion(self):
print opkg.OPKG_LABEL + " v" + opkg.OPKG_VERSION
return True
def printHelp(self):
self.printVersion()
script = os.path.basename(self.arg_dict['opkg_cmd'])
print "Usages:"
print script + " --version"
print script + " --help"
print script + " ls [--pkg=pkg1,pkg2,...]"
print script + " rls [--pkg=pkg1,pkg2,...]"
print script + " create --pkg=pkg1,pkg2,... [--release]"
print script + " put --file=/tarball/with/full/path"
print script + " get --pkg=pkg1,pkg2,... [--release=REL_NUM|dev] [--path=/download/path]"
print script + " deploy --pkg=pkg1,pkg2[-REL_NUM|dev],... [--install_root=/path/to/install]"
print script + " start|stop|restart|reload --pkg=pkg1,pkg2,... [--install_root=/path/to/install]"
print script + " rollback --pkg=pkg1,pkg2,... [--install_root=/path/to/install]"
print script + " clean [--pkg=pkg1,pkg2,...] [--count=COUNT] [--install_root=/path/to/install]"
return True
'''Execute the action'''
def main(self):
if self.action=='create':
self.extra_vars['ACTION'] = 'create'
for pkg in self.pkgs:
pkg_inst=Pkg(pkg)
pkg_inst.create()
elif self.action=='ls':
self.extra_vars['ACTION'] = 'ls'
for pkg in self.pkgs:
pkg_name, pkg_name_rel_num, tarball_name = Pkg.parseName(pkg)
pkg_inst=Pkg(pkg_name)
pkg_inst.setEnvConfig(self.configs)
pkg_inst.loadMeta()
pkg_meta=pkg_inst.getMeta()
if not pkg_meta: continue
print pkg_name+'-'+pkg_meta['latest_install']['pkg_rel_num']
elif self.action=='deploy':
self.extra_vars['ACTION'] = 'deploy'
deploy_inst=Deploy(self.configs,self.arg_dict,self.extra_vars)
for pkg in self.pkgs:
pkg_name,pkg_name_rel_num,tarball_name=Pkg.parseName(pkg)
is_local=False
if re.match('^.+?\.tgz',pkg): is_local=True
download_dir=self.opkg_dir+'/pkgs/'+pkg_name
execOSCommand("mkdir -p "+download_dir)
if is_local:
if not execOSCommand("cp "+pkg+" "+download_dir+"/"):
print "Error: Cannot copy tarball "+tarball_name+" to staging location "+download_dir
Exit(1)
else:
                    '''A non-local pkg (mypkg or mypkg-rel_num) would have to be downloaded
                    from a repo, which this path does not implement.
                    '''
                    print "Error: tarball cannot be downloaded for the specified package: "+pkg
Exit(1)
'''Start installation of the package once the tarball is copied to staging location.'''
deploy_inst.installPackage(pkg_name,tarball_name)
else:
print "Unsupported action: "+self.action
'''Class for deployment specific methods'''
class Deploy():
def __init__(self,env_conf,deploy_options,extra_vars=None):
self.deploy_ts=str(int(time.time()))
self.env_conf=env_conf
self.install_root=self.env_conf['basic']['install_root']
self.deploy_root=self.install_root+'/installs/'+self.deploy_ts
self.opkg_dir=self.env_conf['basic']['opkg_dir']
self.download_root=self.opkg_dir + '/pkgs'
self.history_dir=self.opkg_dir + '/history'
self.extra_vars=extra_vars
self.deploy_force=False
if 'force' in deploy_options: self.deploy_force=True
if not self.extra_vars: self.extra_vars=dict()
self.extra_vars['OPKG_NAME'] = None
self.extra_vars['OPKG_REL_NUM'] = None
self.extra_vars['OPKG_TS'] = None
self.extra_vars['OPKG_ACTION'] = None
if not execOSCommand('mkdir -p ' + self.download_root): return
if not execOSCommand('mkdir -p ' + self.history_dir): return
'''Returns the extra-vars specified from commandline and the OPKG_ vars'''
def getVars(self):
return self.extra_vars
def logHistory(self,log_entry):
history_log=self.deploy_ts+": "+log_entry
history_file=self.env_conf['basic']['deploy_history_file']
with open(self.history_dir + '/' + history_file, "a") as hf: hf.write(history_log+"\n")
return True
'''The tarball is downloaded/copied to download_dir'''
def installPackage(self,pkg_name,tarball_name):
rel_num,rel_ts=Pkg.parseTarballName(tarball_name)
self.extra_vars['OPKG_NAME'] = pkg_name
self.extra_vars['OPKG_REL_NUM'] = rel_num
self.extra_vars['OPKG_TS'] = rel_ts
pkg=Pkg(pkg_name)
pkg.setRelNum(rel_num)
pkg.setRelTs(rel_ts)
pkg.setEnvConfig(self.env_conf)
pkg.loadMeta()
tarball_path=self.download_root+'/'+pkg_name+'/'+tarball_name
if not pkg.isInstalled(tarball_path) or self.deploy_force:
pkg.install(tarball_path,self)
else:
print "Info: This revision of package "+pkg_name+" is already installed at "+self.install_root+'/installs/'+pkg.getMeta()['latest_install']['deploy_ts']+'/'+pkg_name
print "Info: Use --force option to override."
return True
'''Utility classes '''
'''Utility class to do template related tasks'''
class Tmpl():
TMPL_KEY_VAL_DELIM=':'
def __init__(self,tmpl_path):
self.tmpl_path=tmpl_path
self.is_dir=False
if not os.path.exists(tmpl_path):
print "Error: " + tmpl_path + " doesn't exist."
return
if os.path.isdir(tmpl_path): self.is_dir = True
    '''Recreates files under tmpl_path with values from vars_dict.
    Template variables are matched using the pattern {{ var }}.
    tmpl_path can be a single file or a directory; in the latter case all
    files in the directory are processed recursively.
    '''
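    # Illustrative example: a template line "port={{ PORT }}" resolved with
    # vars_dict {'PORT': '8080'} becomes "port=8080".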
def resolveVars(self,vars_dict,backup=False):
if self.is_dir:
files=os.listdir(self.tmpl_path)
for f in files:
ftmpl=Tmpl(self.tmpl_path+'/'+f)
ftmpl.resolveVars(vars_dict,backup)
else:
if not self.resolveVarsFile(self.tmpl_path,vars_dict,backup):
print "Error: Failed to resolve template "+self.tmpl_path
return False
return True
    '''Recreates files under tmpl_path with values from tokens_list.
    Each entry in tokens_list is a search/replace pair SEARCH-STR:REPLACE-STR;
    the file is updated by replacing every SEARCH-STR with REPLACE-STR.
    tmpl_path can be a single file or a directory; in the latter case all
    files in the directory are processed recursively.
    '''
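    # Illustrative example: the token 'PORT=80:PORT=8080' replaces every
    # occurrence of 'PORT=80' in the file with 'PORT=8080'.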
def replaceTokens (self,tokens_list,backup=False):
if self.is_dir:
files = os.listdir(self.tmpl_path)
for f in files:
fpath = self.tmpl_path + '/' + f
ftmpl = Tmpl(fpath)
ftmpl.replaceTokens(tokens_list,backup)
else:
if not self.replaceTokensFile(self.tmpl_path,tokens_list, backup):
print "Error: Failed to resolve template " + self.tmpl_path
return False
return True
    def resolveVarsFile(self,file_path, vars_dict, backup=False):
        # Keep the file content in 'content' so the built-in str() stays usable below.
        content = loadFile(file_path)
        for var in vars_dict:
            if not vars_dict[var]: continue
            content = re.sub('{{ ' + var + ' }}', vars_dict[var], content)
        if backup:
            if not execOSCommand("mv " + file_path + " " + file_path + '.' + str(int(time.time()))):
                print "Error: Couldn't backup " + file_path
                return False
        try:
            with open(file_path, "w") as f:
                f.write(content)
        except EnvironmentError:
            print "Error: Cannot save updated " + file_path
            return False
        return True
    def replaceTokensFile(self,file_path, tokens_list, backup=False):
        # As above, avoid shadowing the built-in str(), which the backup branch calls.
        content = loadFile(file_path)
        for token in tokens_list:
            pattern, replace = re.split(Tmpl.TMPL_KEY_VAL_DELIM,token)
            content = re.sub(pattern, replace, content)
        if backup:
            if not execOSCommand("mv " + file_path + " " + file_path + '.' + str(int(time.time()))):
                print "Error: Couldn't backup " + file_path
                return False
        try:
            with open(file_path, "w") as f:
                f.write(content)
        except EnvironmentError:
            print "Error: Cannot save updated " + file_path
            return False
        return True
''' Utility Functions '''
'''returns the status code after executing cmd in the shell'''
def runCmd(cmd):
return subprocess.call(cmd,shell=True)
'''process return status from a command execution '''
def execOSCommand(cmd):
rc=runCmd(cmd)
if rc!=0:
print "Error executing "+cmd
return False
return True
'''Returns output of a command run in the shell'''
def getCmdOutput(cmd):
output=subprocess.check_output(cmd, shell=True)
return output.strip()
'''Execute a command locally (on Mac or Linux), returning its output or None on failure'''
def execCmdLocal(cmd):
    out=None
    try:
        out=subprocess.check_output(cmd,shell=True)
    except Exception as e:
        # The original referenced an undefined global 'verbose'; report the error directly.
        print(e)
    return out
'''returns the file content as a string.'''
def loadFile(file_path):
    with open(file_path) as f:
        return f.read()
'''Create a target location depending on what is to be copied from source'''
def createTargetPath(source_path,target_path):
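    # e.g. copying a file to '/opt/app/conf/app.conf' ensures '/opt/app/conf'
    # exists; copying a directory ensures the target path itself exists (illustrative).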
    d=target_path
    if os.path.isfile(source_path): d=os.path.dirname(target_path)
    base_dir=d
if not os.path.exists(base_dir):
cmd="mkdir -p "+base_dir
if not execOSCommand(cmd):
print "Error: Couldn't create "+base_dir
return False
return True
def Exit(rc):
sys.exit(rc)
def getFileMD5(file_path):
    with open(file_path, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()
''' main '''
opkg_cmd=opkg(sys.argv)
opkg_cmd.main()
|
kurianinc/opkg
|
src/openpkg.py
|
Python
|
mit
| 30,669
|
import sys
sus = []
def guess():
    # The original scratch never implemented query construction;
    # return an empty query string as a placeholder.
    return ""
def solve(N, B, F):
    sus.append((0, N, B))
    brokens = []
    while F:
        if len(brokens) == B:
            break
        print(guess())
        sys.stdout.flush()
        res = input()
        while sus:
            r = sus.pop()
        F -= 1
    print(" ".join(map(str, brokens)))
    sys.stdout.flush()
    res = input()
    print(res)
    return
for case in range(1, int(input())+1):
N, B, F = map(int, input().strip().split(" "))
solve(N, B, F)
|
zuun77/givemegoogletshirts
|
codejam/2019/q4.py
|
Python
|
apache-2.0
| 484
|
# -*- coding: utf-8 -*-
"""
Tests for self assessment handlers in Open Assessment XBlock.
"""
import copy
import json
import datetime
import mock
import pytz
from openassessment.assessment.api import self as self_api
from openassessment.workflow import api as workflow_api
from openassessment.xblock.data_conversion import create_rubric_dict
from .base import XBlockHandlerTestCase, scenario
class TestSelfAssessment(XBlockHandlerTestCase):
"""
Tests for the self-assessment XBlock handler.
"""
maxDiff = None
SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'
ASSESSMENT = {
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'criterion_feedback': {},
'overall_feedback': ""
}
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_handler(self, xblock):
student_item = xblock.get_student_item_dict()
# Create a submission for the student
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Submit a self-assessment
resp = self.request(xblock, 'self_assess', json.dumps(self.ASSESSMENT), response_format='json')
self.assertTrue(resp['success'])
# Expect that a self-assessment was created
assessment = self_api.get_assessment(submission["uuid"])
self.assertEqual(assessment['submission_uuid'], submission['uuid'])
self.assertEqual(assessment['points_earned'], 5)
self.assertEqual(assessment['points_possible'], 6)
self.assertEqual(assessment['scorer_id'], 'Bob')
self.assertEqual(assessment['score_type'], 'SE')
self.assertEqual(assessment['feedback'], u'')
parts = sorted(assessment['parts'])
self.assertEqual(len(parts), 2)
self.assertEqual(parts[0]['option']['criterion']['name'], u'Form')
self.assertEqual(parts[0]['option']['name'], 'Fair')
self.assertEqual(parts[1]['option']['criterion']['name'], u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮')
self.assertEqual(parts[1]['option']['name'], u'ﻉซƈﻉɭɭﻉกՇ')
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_no_submission(self, xblock):
# Submit a self-assessment without first creating a submission
resp = self.request(xblock, 'self_assess', json.dumps(self.ASSESSMENT), response_format='json')
self.assertFalse(resp['success'])
self.assertGreater(len(resp['msg']), 0)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_updates_workflow(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
submission = xblock.create_submission(student_item, self.SUBMISSION)
with mock.patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api:
# Submit a self-assessment
resp = self.request(xblock, 'self_assess', json.dumps(self.ASSESSMENT), response_format='json')
# Verify that the workflow is updated when we submit a self-assessment
self.assertTrue(resp['success'])
expected_reqs = {
"peer": { "must_grade": 5, "must_be_graded_by": 3 }
}
mock_api.update_from_assessments.assert_called_once_with(submission['uuid'], expected_reqs)
@scenario('data/feedback_only_criterion_self.xml', user_id='Bob')
def test_self_assess_feedback_only_criterion(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Submit a self assessment for a rubric with a feedback-only criterion
assessment_dict = {
'options_selected': {u'vocabulary': u'good'},
'criterion_feedback': {
u'vocabulary': 'Awesome job!',
u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞': 'fairly illegible.'
},
'overall_feedback': u''
}
resp = self.request(xblock, 'self_assess', json.dumps(assessment_dict), response_format='json')
self.assertTrue(resp['success'])
assessment = self_api.get_assessment(submission["uuid"])
# Check the assessment for the criterion that has options
self.assertEqual(assessment['parts'][0]['criterion']['name'], 'vocabulary')
self.assertEqual(assessment['parts'][0]['option']['name'], 'good')
self.assertEqual(assessment['parts'][0]['option']['points'], 1)
# Check the feedback-only criterion score/feedback
self.assertEqual(assessment['parts'][1]['criterion']['name'], u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞')
self.assertIs(assessment['parts'][1]['option'], None)
self.assertEqual(assessment['parts'][1]['feedback'], u'fairly illegible.')
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_workflow_error(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
xblock.create_submission(student_item, self.SUBMISSION)
with mock.patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api:
# Simulate a workflow error
mock_api.update_from_assessments.side_effect = workflow_api.AssessmentWorkflowInternalError
# Submit a self-assessment
resp = self.request(xblock, 'self_assess', json.dumps(self.ASSESSMENT), response_format='json')
            # Verify that we get an error response
self.assertFalse(resp['success'])
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_handler_missing_keys(self, xblock):
# Missing options_selected
assessment = copy.deepcopy(self.ASSESSMENT)
del assessment['options_selected']
resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
self.assertFalse(resp['success'])
self.assertIn('options_selected', resp['msg'])
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_api_error(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
xblock.create_submission(student_item, self.SUBMISSION)
# Submit a self-assessment
# Simulate an error and expect a failure response
with mock.patch('openassessment.xblock.self_assessment_mixin.self_api') as mock_api:
mock_api.SelfAssessmentRequestError = self_api.SelfAssessmentRequestError
mock_api.create_assessment.side_effect = self_api.SelfAssessmentRequestError
resp = self.request(xblock, 'self_assess', json.dumps(self.ASSESSMENT), response_format='json')
self.assertFalse(resp['success'])
class TestSelfAssessmentRender(XBlockHandlerTestCase):
"""
Test rendering of the self-assessment step.
The basic strategy is to verify that we're providing the right
template and context for each possible state,
plus an integration test to verify that the context
is being rendered correctly.
"""
@scenario('data/self_assessment_unavailable.xml', user_id='Bob')
def test_unavailable(self, xblock):
# Start date is in the future for this scenario
self._assert_path_and_context(
xblock,
'openassessmentblock/self/oa_self_unavailable.html',
{'self_start': datetime.datetime(5999, 1, 1).replace(tzinfo=pytz.utc), 'allow_latex': False}
)
@scenario('data/self_assessment_closed.xml', user_id='Bob')
def test_closed(self, xblock):
# Due date is in the past for this scenario
self._assert_path_and_context(
xblock,
'openassessmentblock/self/oa_self_closed.html',
{'self_due': datetime.datetime(2000, 1, 1).replace(tzinfo=pytz.utc), 'allow_latex': False}
)
@scenario('data/self_assessment_open.xml', user_id='Bob')
def test_open_no_submission(self, xblock):
# Without making a submission, this step should be unavailable
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_unavailable.html', {'allow_latex': False}
)
@scenario('data/self_assessment_open.xml', user_id='James Brown')
def test_open_in_peer_step(self, xblock):
# Make a submission, so we're in the peer-assessment step
xblock.create_submission(
xblock.get_student_item_dict(), u"ⱣȺꝑȺ đøn'ŧ ŧȺꝁɇ nø mɇss."
)
# Should still not be able to access self-assessment
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_unavailable.html', {'allow_latex': False}
)
@scenario('data/self_assessment_open.xml', user_id='James Brown')
def test_open_in_waiting_for_peer_step(self, xblock):
# In the peer-->self configuration, if we're done with the
# self step, but not with the peer step (because we're waiting
# to be assessed), then the self step should display as completed.
xblock.create_submission(
xblock.get_student_item_dict(), u"𝓟𝓪𝓼𝓼 𝓽𝓱𝓮 𝓹𝓮𝓪𝓼"
)
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_complete.html', {'allow_latex': False},
workflow_status='waiting',
status_details={
'self': {'complete': True},
'peer': {'complete': False}
}
)
@scenario('data/self_then_peer.xml', user_id="The Bee Gees")
def test_self_then_peer(self, xblock):
xblock.create_submission(
xblock.get_student_item_dict(), u"Stayin' alive!"
)
# In the self --> peer configuration, self can be complete
# if our status is "peer"
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_complete.html', {'allow_latex': False},
workflow_status="peer",
status_details={
'self': {'complete': True},
'peer': {'complete': False}
}
)
@scenario('data/self_assessment_open.xml', user_id='James Brown')
def test_open_done_status(self, xblock):
# Simulate the workflow status being "done"
xblock.create_submission(
xblock.get_student_item_dict(), u"Ⱥɨn'ŧ ɨŧ fᵾnꝁɏ"
)
self._assert_path_and_context(
            xblock, 'openassessmentblock/self/oa_self_complete.html', {'allow_latex': False},
workflow_status='done'
)
@scenario('data/self_assessment_open.xml', user_id='James Brown')
def test_open_self_assessing(self, xblock):
# Simulate the workflow being in the self assessment step
submission = xblock.create_submission(
xblock.get_student_item_dict(), u"Đøɨn' ɨŧ ŧø đɇȺŧħ"
)
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_assessment.html',
{
'rubric_criteria': xblock.rubric_criteria,
'estimated_time': '20 minutes',
'self_submission': submission,
'allow_file_upload': False,
'self_file_url': '',
'allow_latex': False,
},
workflow_status='self',
submission_uuid=submission['uuid']
)
@scenario('data/self_assessment_open.xml', user_id='Bob')
def test_open_completed_self_assessment(self, xblock):
# Simulate the workflow being in the self assessment step
# and we've created a self-assessment
submission = xblock.create_submission(
xblock.get_student_item_dict(), u"Đøɨn' ɨŧ ŧø đɇȺŧħ"
)
self_api.create_assessment(
submission['uuid'],
xblock.get_student_item_dict()['student_id'],
{u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
{}, "Good job!",
create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
)
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_complete.html', {'allow_latex': False},
workflow_status='self',
submission_uuid=submission['uuid']
)
@scenario('data/self_assessment_closed.xml', user_id='Bob')
def test_started_and_past_due(self, xblock):
# Simulate the workflow being in the self assessment step
# Since we're past the due date, the step should appear closed.
submission = xblock.create_submission(
xblock.get_student_item_dict(), u"Đøɨn' ɨŧ ŧø đɇȺŧħ"
)
self._assert_path_and_context(
xblock,
'openassessmentblock/self/oa_self_closed.html',
{'self_due': datetime.datetime(2000, 1, 1).replace(tzinfo=pytz.utc), 'allow_latex': False},
workflow_status='self',
submission_uuid=submission['uuid']
)
@scenario('data/self_assessment_closed.xml', user_id='Bob')
def test_completed_and_past_due(self, xblock):
# Simulate having completed self assessment
# Even though the problem is closed, we should still see
# that we completed the step.
submission = xblock.create_submission(
xblock.get_student_item_dict(), u"Đøɨn' ɨŧ ŧø đɇȺŧħ"
)
self_api.create_assessment(
submission['uuid'],
xblock.get_student_item_dict()['student_id'],
{u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
{}, "Good job!",
create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
)
# This case probably isn't possible, because presumably when we create
# the self-assessment, the workflow status will be "waiting" or "done".
# We're checking it anyway to be overly defensive: if the user has made a self-assessment,
        # we ALWAYS show complete, even if the workflow tells us we still have status 'self'.
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_complete.html',
{'self_due': datetime.datetime(2000, 1, 1).replace(tzinfo=pytz.utc), 'allow_latex': False},
workflow_status='self',
submission_uuid=submission['uuid']
)
@scenario('data/self_assessment_open.xml', user_id='Bob')
def test_integration(self, xblock):
# Simulate the workflow being in the self assessment step
# and we've created a self-assessment
submission = xblock.create_submission(
xblock.get_student_item_dict(), u"Test submission"
)
xblock.get_workflow_info = mock.Mock(return_value={
'status': 'self', 'submission_uuid': submission['uuid']
})
resp = self.request(xblock, 'render_self_assessment', json.dumps({}))
self.assertIn(u'in progress', resp.decode('utf-8').lower())
self.assertIn(u'Test submission', resp.decode('utf-8'))
@scenario('data/self_assessment_open.xml', user_id='Bob')
def test_retrieve_api_error(self, xblock):
# Simulate the workflow being in the self assessment step
xblock.get_workflow_info = mock.Mock(return_value={'status': 'self'})
# Simulate an error from the submission API
with mock.patch('openassessment.xblock.self_assessment_mixin.self_api') as mock_self:
mock_self.get_assessment.side_effect = self_api.SelfAssessmentRequestError
resp = self.request(xblock, 'render_self_assessment', json.dumps({}))
self.assertIn(u'error', resp.decode('utf-8').lower())
def _assert_path_and_context(
self, xblock, expected_path, expected_context,
workflow_status=None, status_details=None,
submission_uuid=None
):
"""
Render the self assessment step and verify:
1) that the correct template and context were used
2) that the rendering occurred without an error
Args:
xblock (OpenAssessmentBlock): The XBlock under test.
expected_path (str): The expected template path.
expected_context (dict): The expected template context.
Keyword Arguments:
            workflow_status (str): If provided, simulate this status from the workflow API.
            status_details (dict): If provided, simulate these status details from the workflow API.
            submission_uuid (str): If provided, simulate this submission UUID for the current workflow.
"""
if workflow_status is not None:
# Assume a peer-->self flow by default
if status_details is None:
status_details = {
'peer': {'complete': workflow_status == 'done'},
'self': {'complete': workflow_status in ['waiting', 'done']}
}
xblock.get_workflow_info = mock.Mock(return_value={
'status': workflow_status,
'status_details': status_details,
'submission_uuid': submission_uuid
})
path, context = xblock.self_path_and_context()
self.assertEqual(path, expected_path)
self.assertItemsEqual(context, expected_context)
# Verify that we render without error
resp = self.request(xblock, 'render_self_assessment', json.dumps({}))
self.assertGreater(len(resp), 0)
|
EDUlib/edx-ora2
|
openassessment/xblock/test/test_self.py
|
Python
|
agpl-3.0
| 17,776
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import codecs
import os
import shutil
import tempfile
from django.conf import settings
from django.core.management import call_command
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from django.utils import six
from django.utils.encoding import force_text
from .settings import TEST_SETTINGS
class BaseStaticFilesTestCase(object):
"""
Test case with a couple utility assertions.
"""
def assertFileContains(self, filepath, text):
self.assertIn(
text,
self._get_file(force_text(filepath)),
"'%s' not in '%s'" % (text, filepath),
)
def assertFileNotFound(self, filepath):
self.assertRaises(IOError, self._get_file, filepath)
def render_template(self, template, **kwargs):
if isinstance(template, six.string_types):
template = Template(template)
return template.render(Context(kwargs)).strip()
def static_template_snippet(self, path, asvar=False):
if asvar:
return "{%% load static from staticfiles %%}{%% static '%s' as var %%}{{ var }}" % path
return "{%% load static from staticfiles %%}{%% static '%s' %%}" % path
def assertStaticRenders(self, path, result, asvar=False, **kwargs):
template = self.static_template_snippet(path, asvar)
self.assertEqual(self.render_template(template, **kwargs), result)
def assertStaticRaises(self, exc, path, result, asvar=False, **kwargs):
self.assertRaises(exc, self.assertStaticRenders, path, result, **kwargs)
@override_settings(**TEST_SETTINGS)
class StaticFilesTestCase(BaseStaticFilesTestCase, SimpleTestCase):
pass
class BaseCollectionTestCase(BaseStaticFilesTestCase):
"""
Tests shared by all file finding features (collectstatic,
findstatic, and static serve view).
This relies on the asserts defined in BaseStaticFilesTestCase, but
is separated because some test cases need those asserts without
all these tests.
"""
def setUp(self):
super(BaseCollectionTestCase, self).setUp()
temp_dir = tempfile.mkdtemp()
# Override the STATIC_ROOT for all tests from setUp to tearDown
# rather than as a context manager
self.patched_settings = self.settings(STATIC_ROOT=temp_dir)
self.patched_settings.enable()
self.run_collectstatic()
# Same comment as in runtests.teardown.
self.addCleanup(shutil.rmtree, six.text_type(temp_dir))
def tearDown(self):
self.patched_settings.disable()
super(BaseCollectionTestCase, self).tearDown()
def run_collectstatic(self, **kwargs):
call_command('collectstatic', interactive=False, verbosity=0,
ignore_patterns=['*.ignoreme'], **kwargs)
def _get_file(self, filepath):
assert filepath, 'filepath is empty.'
filepath = os.path.join(settings.STATIC_ROOT, filepath)
with codecs.open(filepath, "r", "utf-8") as f:
return f.read()
class CollectionTestCase(BaseCollectionTestCase, StaticFilesTestCase):
pass
class TestDefaults(object):
"""
A few standard test cases.
"""
def test_staticfiles_dirs(self):
"""
Can find a file in a STATICFILES_DIRS directory.
"""
self.assertFileContains('test.txt', 'Can we find')
self.assertFileContains(os.path.join('prefix', 'test.txt'), 'Prefix')
def test_staticfiles_dirs_subdir(self):
"""
Can find a file in a subdirectory of a STATICFILES_DIRS
directory.
"""
self.assertFileContains('subdir/test.txt', 'Can we find')
def test_staticfiles_dirs_priority(self):
"""
File in STATICFILES_DIRS has priority over file in app.
"""
self.assertFileContains('test/file.txt', 'STATICFILES_DIRS')
def test_app_files(self):
"""
Can find a file in an app static/ directory.
"""
self.assertFileContains('test/file1.txt', 'file1 in the app dir')
def test_nonascii_filenames(self):
"""
Can find a file with non-ASCII character in an app static/ directory.
"""
self.assertFileContains('test/⊗.txt', '⊗ in the app dir')
def test_camelcase_filenames(self):
"""
Can find a file with capital letters.
"""
self.assertFileContains('test/camelCase.txt', 'camelCase')
|
moreati/django
|
tests/staticfiles_tests/cases.py
|
Python
|
bsd-3-clause
| 4,520
|
import unittest
from yyproto.header import Header
from yyproto.packer import Packer
from yyproto.unpacker import Unpacker
class TestHeader(unittest.TestCase):
def test_pack(self):
buf = bytearray(256)
packer = Packer(buf)
header = Header()
header.length = 10
header.id = 42
header.code = 200
packer.pack_message(header)
self.assertEqual(10, packer.offset)
def test_unpack(self):
buf = bytearray(256)
packer = Packer(buf)
header = Header()
header.length = 10
header.id = 42
header.code = 200
packer.pack_message(header)
self.assertEqual(10, packer.offset)
unpacker = Unpacker(bytes(buf[:packer.offset]))
hdr = Header()
hdr.unpack(unpacker)
self.assertEqual(header.length, hdr.length)
self.assertEqual(header.id, hdr.id)
self.assertEqual(header.code, hdr.code)
if __name__ == '__main__':
unittest.main()
|
decimalbell/yyproto
|
python/yyproto/tests/test_header.py
|
Python
|
bsd-3-clause
| 1,002
|
# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_config import cfg
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import vim_util as vutil
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.vmwareapi import fake
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import imagecache
CONF = cfg.CONF
class ImageCacheManagerTestCase(test.NoDBTestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(ImageCacheManagerTestCase, self).setUp()
self._session = mock.Mock(name='session')
self._imagecache = imagecache.ImageCacheManager(self._session,
'fake-base-folder')
self._time = datetime.datetime(2012, 11, 22, 12, 00, 00)
self._file_name = 'ts-2012-11-22-12-00-00'
fake.reset()
def tearDown(self):
super(ImageCacheManagerTestCase, self).tearDown()
fake.reset()
def test_timestamp_cleanup(self):
def fake_get_timestamp(ds_browser, ds_path):
self.assertEqual('fake-ds-browser', ds_browser)
self.assertEqual('[fake-ds] fake-path', str(ds_path))
if not self.exists:
return
ts = '%s%s' % (imagecache.TIMESTAMP_PREFIX,
self._time.strftime(imagecache.TIMESTAMP_FORMAT))
return ts
with test.nested(
mock.patch.object(self._imagecache, '_get_timestamp',
fake_get_timestamp),
mock.patch.object(ds_util, 'file_delete')
) as (_get_timestamp, _file_delete):
self.exists = False
self._imagecache.timestamp_cleanup(
'fake-dc-ref', 'fake-ds-browser',
ds_obj.DatastorePath('fake-ds', 'fake-path'))
self.assertEqual(0, _file_delete.call_count)
self.exists = True
self._imagecache.timestamp_cleanup(
'fake-dc-ref', 'fake-ds-browser',
ds_obj.DatastorePath('fake-ds', 'fake-path'))
expected_ds_path = ds_obj.DatastorePath(
'fake-ds', 'fake-path', self._file_name)
_file_delete.assert_called_once_with(self._session,
expected_ds_path, 'fake-dc-ref')
def test_get_timestamp(self):
def fake_get_sub_folders(session, ds_browser, ds_path):
self.assertEqual('fake-ds-browser', ds_browser)
self.assertEqual('[fake-ds] fake-path', str(ds_path))
if self.exists:
files = set()
files.add(self._file_name)
return files
with mock.patch.object(ds_util, 'get_sub_folders',
fake_get_sub_folders):
self.exists = True
ts = self._imagecache._get_timestamp(
'fake-ds-browser',
ds_obj.DatastorePath('fake-ds', 'fake-path'))
self.assertEqual(self._file_name, ts)
self.exists = False
ts = self._imagecache._get_timestamp(
'fake-ds-browser',
ds_obj.DatastorePath('fake-ds', 'fake-path'))
self.assertIsNone(ts)
def test_get_timestamp_filename(self):
self.useFixture(utils_fixture.TimeFixture(self._time))
fn = self._imagecache._get_timestamp_filename()
self.assertEqual(self._file_name, fn)
def test_get_datetime_from_filename(self):
t = self._imagecache._get_datetime_from_filename(self._file_name)
self.assertEqual(self._time, t)
def test_get_ds_browser(self):
cache = self._imagecache._ds_browser
ds_browser = mock.Mock()
moref = fake.ManagedObjectReference(value='datastore-100')
self.assertIsNone(cache.get(moref.value))
mock_get_method = mock.Mock(return_value=ds_browser)
with mock.patch.object(vutil, 'get_object_property', mock_get_method):
ret = self._imagecache._get_ds_browser(moref)
mock_get_method.assert_called_once_with(mock.ANY, moref, 'browser')
self.assertIs(ds_browser, ret)
self.assertIs(ds_browser, cache.get(moref.value))
def test_list_datastore_images(self):
def fake_get_object_property(vim, mobj, property_name):
return 'fake-ds-browser'
def fake_get_sub_folders(session, ds_browser, ds_path):
files = set()
files.add('image-ref-uuid')
return files
with test.nested(
mock.patch.object(vutil, 'get_object_property',
fake_get_object_property),
mock.patch.object(ds_util, 'get_sub_folders',
fake_get_sub_folders)
) as (_get_dynamic, _get_sub_folders):
fake_ds_ref = fake.ManagedObjectReference(value='fake-ds-ref')
datastore = ds_obj.Datastore(name='ds', ref=fake_ds_ref)
ds_path = datastore.build_path('base_folder')
images = self._imagecache._list_datastore_images(
ds_path, datastore)
originals = set()
originals.add('image-ref-uuid')
self.assertEqual({'originals': originals,
'unexplained_images': []},
images)
@mock.patch.object(imagecache.ImageCacheManager, 'timestamp_folder_get')
@mock.patch.object(imagecache.ImageCacheManager, 'timestamp_cleanup')
@mock.patch.object(imagecache.ImageCacheManager, '_get_ds_browser')
def test_enlist_image(self,
mock_get_ds_browser,
mock_timestamp_cleanup,
mock_timestamp_folder_get):
image_id = "fake_image_id"
dc_ref = "fake_dc_ref"
fake_ds_ref = mock.Mock()
ds = ds_obj.Datastore(
ref=fake_ds_ref, name='fake_ds',
capacity=1,
freespace=1)
ds_browser = mock.Mock()
mock_get_ds_browser.return_value = ds_browser
timestamp_folder_path = mock.Mock()
mock_timestamp_folder_get.return_value = timestamp_folder_path
self._imagecache.enlist_image(image_id, ds, dc_ref)
cache_root_folder = ds.build_path("fake-base-folder")
mock_get_ds_browser.assert_called_once_with(
ds.ref)
mock_timestamp_folder_get.assert_called_once_with(
cache_root_folder, "fake_image_id")
mock_timestamp_cleanup.assert_called_once_with(
dc_ref, ds_browser, timestamp_folder_path)
def test_age_cached_images(self):
def fake_get_ds_browser(ds_ref):
return 'fake-ds-browser'
def fake_get_timestamp(ds_browser, ds_path):
self._get_timestamp_called += 1
path = str(ds_path)
if path == '[fake-ds] fake-path/fake-image-1':
# No time stamp exists
return
if path == '[fake-ds] fake-path/fake-image-2':
# Timestamp that will be valid => no deletion
return 'ts-2012-11-22-10-00-00'
if path == '[fake-ds] fake-path/fake-image-3':
# Timestamp that will be invalid => deletion
return 'ts-2012-11-20-12-00-00'
self.fail()
def fake_mkdir(session, ts_path, dc_ref):
self.assertEqual(
'[fake-ds] fake-path/fake-image-1/ts-2012-11-22-12-00-00',
str(ts_path))
def fake_file_delete(session, ds_path, dc_ref):
self.assertEqual('[fake-ds] fake-path/fake-image-3', str(ds_path))
def fake_timestamp_cleanup(dc_ref, ds_browser, ds_path):
self.assertEqual('[fake-ds] fake-path/fake-image-4', str(ds_path))
with test.nested(
mock.patch.object(self._imagecache, '_get_ds_browser',
fake_get_ds_browser),
mock.patch.object(self._imagecache, '_get_timestamp',
fake_get_timestamp),
mock.patch.object(ds_util, 'mkdir',
fake_mkdir),
mock.patch.object(ds_util, 'file_delete',
fake_file_delete),
mock.patch.object(self._imagecache, 'timestamp_cleanup',
fake_timestamp_cleanup),
) as (_get_ds_browser, _get_timestamp, _mkdir, _file_delete,
_timestamp_cleanup):
self.useFixture(utils_fixture.TimeFixture(self._time))
datastore = ds_obj.Datastore(name='ds', ref='fake-ds-ref')
dc_info = ds_util.DcInfo(ref='dc_ref', name='name',
vmFolder='vmFolder')
self._get_timestamp_called = 0
self._imagecache.originals = set(['fake-image-1', 'fake-image-2',
'fake-image-3', 'fake-image-4'])
self._imagecache.used_images = set(['fake-image-4'])
self._imagecache._age_cached_images(
'fake-context', datastore, dc_info,
ds_obj.DatastorePath('fake-ds', 'fake-path'))
self.assertEqual(3, self._get_timestamp_called)
@mock.patch.object(objects.block_device.BlockDeviceMappingList,
'bdms_by_instance_uuid', return_value={})
def test_update(self, mock_bdms_by_inst):
def fake_list_datastore_images(ds_path, datastore):
return {'unexplained_images': [],
'originals': self.images}
def fake_age_cached_images(context, datastore,
dc_info, ds_path):
self.assertEqual('[ds] fake-base-folder', str(ds_path))
self.assertEqual(self.images,
self._imagecache.used_images)
self.assertEqual(self.images,
self._imagecache.originals)
with test.nested(
mock.patch.object(self._imagecache, '_list_datastore_images',
fake_list_datastore_images),
mock.patch.object(self._imagecache,
'_age_cached_images',
fake_age_cached_images)
) as (_list_base, _age_and_verify):
instances = [{'image_ref': '1',
'host': CONF.host,
'name': 'inst-1',
'uuid': uuidsentinel.foo,
'vm_state': '',
'task_state': ''},
{'image_ref': '2',
'host': CONF.host,
'name': 'inst-2',
'uuid': uuidsentinel.bar,
'vm_state': '',
'task_state': ''}]
all_instances = [fake_instance.fake_instance_obj(None, **instance)
for instance in instances]
self.images = set(['1', '2'])
datastore = ds_obj.Datastore(name='ds', ref='fake-ds-ref')
dc_info = ds_util.DcInfo(ref='dc_ref', name='name',
vmFolder='vmFolder')
datastores_info = [(datastore, dc_info)]
self._imagecache.update('context', all_instances, datastores_info)
|
rahulunair/nova
|
nova/tests/unit/virt/vmwareapi/test_imagecache.py
|
Python
|
apache-2.0
| 12,118
|
from Model import Model
class FMModel(Model):
################################
######### Constructors #########
def __init__(self,configModel,utils,config,strTrial):
super(FMModel,self).__init__(configModel,utils,strTrial)
self.dims = configModel[3][0]
self.logCV = utils.MODEL_LOG_PATH + self.tag + \
'_CV' + '_t' + strTrial
self.logTest = utils.MODEL_LOG_PATH + self.tag + \
'_test' + '_t' + strTrial
self.libFMBinary = utils.LIBFM_BINARY
self.strItr = config.FM_STR_ITER
self.globalBias = utils.FM_GLOBAL_BIAS
self.oneWay = utils.FM_ONE_WAY_INTERACTION
self.initStd = config.FM_INIT_STD
self.cleanData = utils.ORIGINAL_DATA_CLEAN_PATH
self.movieTag = utils.MOVIE_TAG_PATH
self.historyTag = utils.PROCESSED_HISTORY
self.social = utils.PROCESSED_SOCIAL
self.sharedTag = utils.PROCESSED_MOVIE_TAGS
self.meanMovieRating = utils.EFFECTS_MOVIE_PATH
##################################
########### Setup Data ###########
def setup(self):
### Take boot to feat ###
print("Setting Up Features")
self.setupFeatures()
### Take feat to bin to run ###
print("Converting Data")
self.dataConvert()
def libFMFormat(self, targetCol):
#-----------------------------
# Takes in raw data from original
# Then formats it into sparse .libfm matrix
#-----------------------------
import os
os.system('perl Models/libFM/triple_format_to_libfm.pl -in ' +
self.featTrain + ',' +
self.featCV + ',' +
self.featTest +
' -target '+str(targetCol)+' -separator \"\\t\"')
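    # For reference, each line of the resulting .libfm file is a sparse row:
    #   <target> <col>:<value> <col>:<value> ...
    # e.g. (illustrative values, not from the real data) a rating of 5 with
    # one-hot user column 3 and movie column 412 becomes: "5 3:1 412:1"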
def dataConvert(self):
import os
#-----------------------------
# Takes in the .libfm sparse matrix
# Then converts it to the binary form
#-----------------------------
os.sys.stdout.write('{0}\r'.format('-- Converting (1/6) ' + self.tag + ' --') )
os.system('./Models/libFM/convert --ifile ' +
self.featTrain +
'.libfm ' + '--ofilex ' +
self.tmpTrain +
'.x --ofiley ' +
self.runTrain + '.y' +
'> /dev/null')
os.sys.stdout.write('{0}\r'.format('-- Converting (2/6) ' + self.tag + ' --') )
os.system('./Models/libFM/convert --ifile ' +
self.featCV +
'.libfm ' + '--ofilex ' +
self.tmpCV +
'.x --ofiley ' +
self.runCV + '.y' +
'> /dev/null')
os.sys.stdout.write('{0}\r'.format('-- Converting (3/6) ' + self.tag + ' --') )
os.system('./Models/libFM/convert --ifile ' +
self.featTest +
'.libfm ' + '--ofilex ' +
self.tmpTest +
'.x --ofiley ' +
                  self.runTest + '.y' +
'> /dev/null')
os.sys.stdout.write('{0}\r'.format('-- Converting (4/6) ' + self.tag + ' --') )
os.system('./Models/libFM/transpose --ifile ' +
self.tmpTrain +
'.x --ofile ' +
self.runTrain + '.xt' +
'> /dev/null')
os.sys.stdout.write('{0}\r'.format('-- Converting (5/6) ' + self.tag + ' --') )
os.system('./Models/libFM/transpose --ifile ' +
self.tmpCV + '.x --ofile ' +
self.runCV + '.xt' +
'> /dev/null')
os.sys.stdout.write('{0}\r'.format('-- Converting (6/6) ' + self.tag + ' --') )
os.system('./Models/libFM/transpose --ifile ' +
self.tmpTest +
'.x --ofile ' +
self.runTest + '.xt' +
'> /dev/null')
print(self.tag + ' data is converted.')
#########################################
########### Develop Features ############
def setupFeatures(self):
import os
#-----------------------------------------------------------------
# creates the features for LibFM, then turns into sparse matrix
#-----------------------------------------------------------------
# ---- ---- Basic Features ---- ---- #
if self.featureSet == 'Basic':
os.system('cp ' + self.bootTrain + ' ' + self.featTrain)
os.system('cp ' + self.bootCV + ' ' + self.featCV )
os.system('cp ' + self.bootTest + ' ' + self.featTest )
self.libFMFormat(2)
elif self.featureSet == 'NearestNeighbor':
print('...Adding Nearest Neighbor Data')
moviesRatedByUserDict = self.moviesRatedByUserDict()
movieLocationDict = self.userMovieLocationDict(False,True)
self.addNearestNeighbor(self.bootTrain,self.featTrain,moviesRatedByUserDict,movieLocationDict,'train')
self.addNearestNeighbor(self.bootCV,self.featCV,moviesRatedByUserDict,movieLocationDict,'CV')
self.addNearestNeighbor(self.bootTest,self.featTest,moviesRatedByUserDict,movieLocationDict,'test')
### Baidu Dataset Specific Features ###
# ---- ---- Movie Tag Features ---- ---- #
elif self.featureSet == 'BasicMovieTag':
print('...Adding Basic Movie Tag Data')
tagDict = self.movieTagDict()
self.basicMovieTag(self.bootTrain,self.featTrain,tagDict,'train')
self.basicMovieTag(self.bootCV,self.featCV,tagDict,'CV')
self.basicMovieTag(self.bootTest,self.featTest,tagDict,'test')
self.libFMFormat(2)
elif self.featureSet == 'RelatedMovieTagThreshold':
print('...Adding Related Movie Tag Threshold Data')
threshold = 6
movieSharedTagDict, maxTags = self.movieSharedTagDict(threshold)
userLocationDict, movieLocationDict = self.userMovieLocationDict(True,True)
self.relatedMovieTagThreshold(self.bootTrain,self.featTrain, movieSharedTagDict, maxTags, userLocationDict, movieLocationDict,threshold,'train')
self.relatedMovieTagThreshold(self.bootCV,self.featCV, movieSharedTagDict, maxTags, userLocationDict, movieLocationDict,threshold,'CV')
self.relatedMovieTagThreshold(self.bootTest,self.featTest, movieSharedTagDict, maxTags, userLocationDict, movieLocationDict,threshold,'test')
elif self.featureSet == 'RelatedMovieTagThreshold2':
print('...Adding Related Movie Tag Threshold 2 Data')
threshold = 6
movieSharedTagDict, maxTags = self.movieSharedTagDict(threshold)
userLocationDict, movieLocationDict = self.userMovieLocationDict(True,True)
moviesRatedByUserDict = self.moviesRatedByUserDict()
self.relatedMovieTagThreshold2(self.bootTrain,self.featTrain, movieSharedTagDict, maxTags, userLocationDict, movieLocationDict,moviesRatedByUserDict,'train')
self.relatedMovieTagThreshold2(self.bootCV,self.featCV, movieSharedTagDict, maxTags, userLocationDict, movieLocationDict,moviesRatedByUserDict,'CV')
self.relatedMovieTagThreshold2(self.bootTest,self.featTest, movieSharedTagDict, maxTags, userLocationDict, movieLocationDict,moviesRatedByUserDict,'test')
# ---- ---- User History Features ---- ---- #
elif self.featureSet == 'UserHistory':
print('...Adding User History Data')
moviesRatedByUserDict = self.moviesRatedByUserDict()
userHistoryDict = self.userHistoryDict()
movieLocationDict = self.userMovieLocationDict(False,True)
self.userHistory(self.bootTrain,self.featTrain,userHistoryDict,movieLocationDict,moviesRatedByUserDict,'train')
self.userHistory(self.bootCV,self.featCV,userHistoryDict,movieLocationDict,moviesRatedByUserDict,'CV')
self.userHistory(self.bootTest,self.featTest,userHistoryDict,movieLocationDict,moviesRatedByUserDict,'test')
# ---- ---- User Social Features ---- ---- #
elif self.featureSet == 'UserSocial':
print('...Adding User Social Data')
userLocationDict, movieLocationDict = self.userMovieLocationDict(True,True)
userSocialDict = self.userSocialDictReader()
self.userSocial(self.bootTrain,self.featTrain,userLocationDict,movieLocationDict,userSocialDict,'train')
self.userSocial(self.bootCV,self.featCV,userLocationDict,movieLocationDict,userSocialDict,'CV')
self.userSocial(self.bootTest,self.featTest,userLocationDict,movieLocationDict,userSocialDict,'test')
### End Baidu Dataset Specific Features ###
def addNearestNeighbor(self,finPath, foutPath,moviesRatedByUserDict,movieLocationDict,step):
#-----------------------------------------------------------------
# creates sparse matrix where non-user/movie entries given as column:rating/m
# where m is the total number of movies rated by the user
#-----------------------------------------------------------------
lineCount= self.lineCount(finPath)
counter = 0
offset = len(movieLocationDict)
fin = open(finPath, 'r')
fout = open(foutPath + '.libfm', 'w')
for line in fin:
            line = line.rstrip('\n')
columns = line.split('\t')
user = columns[0]
movie = columns[1]
rating = columns[2]
movCol = movieLocationDict[movie]
string=''
m = len(moviesRatedByUserDict[user]) # num of movies rated by user
for mov in moviesRatedByUserDict[user]:
rate = moviesRatedByUserDict[user][mov] # other movie's rating
location = str( int(movieLocationDict[mov]) + offset )
val = str( '{0:.4f}'.format( float(rate)/m ) ) # r/m
string = string + location+':'+val+' '
string=string[:-1] # gets rid of the extra space on the end
            fout.write(rating+' '+movCol+':1 '+string+'\n')
self.printProgress(counter, lineCount,step)
counter +=1
self.printProgressDone(step)
fin.close()
fout.close()
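    # Worked example (illustrative numbers): if a user has rated m = 4 movies
    # and gave one of them a rating of 3, that movie's sparse entry becomes
    # <location>:0.7500, since 3/4 = 0.75.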
def basicMovieTag(self,finPath, foutPath, tagDict,step):
#-----------------------------------------------------------------
# creates new data set with movie tag info by appending tags as columns
# Output data still needs to by formatted for LibFM
#-----------------------------------------------------------------
lineCount= self.lineCount(finPath)
counter=0
dataSet = open(finPath,'r')
dataSetWithTags = open(foutPath,'w')
for line in dataSet:
if line != '\n':
line = line.replace('\n', '')
columns = line.split('\t')
movie = columns[1]
if movie in tagDict:
string =''
for tag in tagDict[movie]:
string=string+tag+'\t'
                string = string[:-1] # gets rid of extra tab on end
dataSetWithTags.write(line+'\t'+string+'\n')
else:
dataSetWithTags.write(line+'\n')
self.printProgress(counter, lineCount,step)
counter +=1
self.printProgressDone(step)
dataSet.close()
dataSetWithTags.close()
def relatedMovieTagThreshold(self,finPath, foutPath, movieSharedTagDict, maxTags, userLocationDict, movieLocationDict, threshold,step):
#-----------------------------------------------------------------
# creates sparse matrix using movie tags with userID, movieID, then columns
# with movies that share at least n tags are given a (n-t)/max value, max
# is most tags shared between any given pair
#-----------------------------------------------------------------
lineCount= self.lineCount(finPath)
counter=0
offset = len(movieLocationDict)
dataSet = open(finPath,'r')
fout = open(foutPath + '.libfm','w')
for line in dataSet:
if line != '\n':
line = line.replace('\n', '')
columns = line.split('\t')
user=columns[0]
movie = columns[1]
rating= columns[2]
movCol = movieLocationDict[movie]
userCol = userLocationDict[user]
if movie in movieSharedTagDict:
string=''
for tup in movieSharedTagDict[movie]:
mov2 = tup[0]
if mov2 in movieLocationDict: # some movies in tag data are not in training set
num = tup[1]
val = str( '{0:.4f}'.format( (num-threshold+1)/maxTags ) ) # value
loc = int(movieLocationDict[mov2])+offset
string=string+str(loc)+':'+val+' '
string=string[:-1]
fout.write(rating+' '+userCol+':1 '+movCol+':1 '+string+'\n')
else:
fout.write(rating+' '+userCol+':1 '+movCol+':1\n')
self.printProgress(counter, lineCount, step)
counter +=1
self.printProgressDone(step)
dataSet.close()
fout.close()
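    # Worked example (illustrative numbers): with threshold = 6 and
    # maxTags = 20, a movie pair sharing num = 8 tags is written with value
    # (8 - 6 + 1) / 20 = 0.1500.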
def relatedMovieTagThreshold2(self,finPath, foutPath, movieSharedTagDict, maxTags, userLocationDict, movieLocationDict,moviesRatedByUserDict,step):
#-----------------------------------------------------------------
# creates sparse matrix using movie tags with userID, movieID, then columns
# with movies that share at least n tags and have been rated by same user are assigned a value of
# (n/maxTags + rating/m), m is total number of movies rated by user; otherwise val is just n/maxTags
#-----------------------------------------------------------------
lineCount= self.lineCount(finPath)
counter=0
offset = len(movieLocationDict)
dataSet = open(finPath,'r')
fout = open(foutPath + '.libfm','w')
for line in dataSet:
if line != '\n':
line = line.replace('\n', '')
columns = line.split('\t')
user=columns[0]
movie = columns[1]
rating= columns[2]
movCol = movieLocationDict[movie]
userCol = userLocationDict[user]
if movie in movieSharedTagDict and movie in movieLocationDict:
string=''
for tup in movieSharedTagDict[movie]:
mov2 = tup[0]
numTags = tup[1]
if mov2 in movieLocationDict:
if mov2 in moviesRatedByUserDict[user]:
r2 = moviesRatedByUserDict[user][mov2] # user rating of mov2
m = len(moviesRatedByUserDict[user])
val = str( '{0:.4f}'.format( (numTags/maxTags)+(float(r2)/m) )) # value
else:
val = str( '{0:.4f}'.format( numTags/maxTags) ) # value
loc = int(movieLocationDict[mov2])+offset
string=string+str(loc)+':'+val+' '
string=string[:-1]
fout.write(rating+' '+userCol+':1 '+movCol+':1 '+string+'\n')
else:
fout.write(rating+' '+userCol+':1 '+movCol+':1\n')
self.printProgress(counter, lineCount, step)
counter +=1
self.printProgressDone(step)
dataSet.close()
fout.close()
def userHistory(self,finPath, foutPath,userHistoryDict,movieLocationDict,moviesRatedByUserDict,step):
#-----------------------------------------------------------------
# creates sparse matrix using user history using movieID,
# then rating/n for each movie in user history and rated, where n is total viewed
# and simply 1/n for each movie in history and unrated by user
#-----------------------------------------------------------------
lineCount= self.lineCount(finPath)
counter=0
offset = len(movieLocationDict)
fin = open(finPath, 'r')
fout = open(foutPath + '.libfm', 'w')
for line in fin:
            line = line.rstrip('\n')
columns = line.split('\t')
user = columns[0]
movie = columns[1]
rating = columns[2]
movCol = movieLocationDict[movie]
string=''
n = max(len(moviesRatedByUserDict[user]), len(userHistoryDict[user]) )
for mov in userHistoryDict[user]:
if mov in moviesRatedByUserDict[user]:
rate = float(moviesRatedByUserDict[user][mov])
else:
rate = 1
location = str( int(movieLocationDict[mov]) + offset )
val = str( '{0:.4f}'.format( rate/n ) )
string = string + location+':'+val+' '
string=string[:-1] # gets rid of the extra space on the end
            fout.write(rating+' '+movCol+':1 '+string+'\n')
self.printProgress(counter, lineCount, step)
counter +=1
self.printProgressDone(step)
fin.close()
fout.close()
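    # Worked example (illustrative numbers): with n = 8 movies in a user's
    # history, a history movie the user rated 4 contributes <location>:0.5000
    # (4/8) and an unrated history movie contributes <location>:0.1250 (1/8).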
def userSocial(self,finPath,foutPath,userLocationDict,movieLocationDict,userSocialDict,step):
#-----------------------------------------------------------------
# creates sparse matrix using user social data
#-----------------------------------------------------------------
lineCount= self.lineCount(finPath)
counter=0
offset = len(movieLocationDict)+len(userLocationDict)
fin = open(finPath,'r')
fout = open(foutPath + '.libfm','w')
for line in fin:
if line != '\n':
line = line.replace('\n', '')
columns = line.split('\t')
user=columns[0]
movie = columns[1]
rating= columns[2]
movCol = movieLocationDict[movie]
userCol = userLocationDict[user]
string = ''
if user in userSocialDict:
m = len(userSocialDict[user]) # num of friends
val = str( '{0:.4f}'.format( 1/m ) )
for friend in userSocialDict[user]:
loc = str( int(userLocationDict[friend])+offset )
string = string + loc +':'+ val +' '
string=string[:-1]
fout.write(rating+' '+userCol+':1 '+movCol+':1 '+string+'\n')
self.printProgress(counter, lineCount, step)
counter +=1
self.printProgressDone(step)
fin.close()
fout.close()
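    # Worked example (illustrative numbers): a user with m = 5 friends writes
    # each friend's column with value 1/5 = 0.2000.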
#######################################
########## Helpful Functions ##########
def lineCount(self,finPath):
# returns a line count on the input file
fin = open(finPath, 'r')
lineCount=0
for line in fin:
lineCount+=1
fin.close()
return lineCount
def printProgress(self, counter, lineCount, step):
# prints to system how much of data has been formatted
printEvery = int(lineCount*0.05)
if printEvery < 1:
printEvery=1
if counter%printEvery==0:
print('{0}\r'.format( str('-- '+str('{0:.2f}'.format(counter/lineCount*100))+ ' percent of data formatted for ' + self.tag + ' Trial: ' + self.trial + ' Step: ' + step )) )
def printProgressDone(self, step):
# prints to system that formatting is completed
print('{0}\r'.format('-- Formatting Complete For ' + self.tag + ' Trial: ' + self.trial + ' Step: ' + step)) # space included on purpose to overwrite previous string
print() # to move to nextline
##################################################
########## Dictionary Reading Functions ##########
def userMovieLocationDict(self, user,movie):
# returns two dicts, one with users as keys and the other with movies as keys. Both hold location in sparse matrix as value
location = 1
if user: # first time through we get user locations
dataSet=open(self.cleanData, 'r')
userLocationDict={}
userSet=set()
for line in dataSet:
columns = line.split('\t')
user = columns[0]
if user not in userSet:
userSet.add(user)
userLocationDict[user]= str(location)
location += 1
dataSet.close()
if movie: # this time get movie locations
dataSet=open(self.cleanData, 'r')
movieLocationDict={}
movieSet=set()
for line in dataSet:
columns = line.split('\t')
movie = columns[1]
if movie not in movieSet:
movieSet.add(movie)
movieLocationDict[movie] = str(location)
location +=1
dataSet.close()
if user and not movie: # means movie is False
return userLocationDict
elif not user and movie: # means user is False
return movieLocationDict
else:
        return userLocationDict, movieLocationDict # otherwise we want both (note: calling with user=False, movie=False would raise NameError)
def movieTagDict(self):
# returns a dict with movies as keys and list of tags as value
movieTags = open(self.movieTag, 'r')
tagDict = {}
for line in movieTags:
if line != '\n':
line = line.replace('\n', '')
columns = line.split('\t')
movie = columns[0]
allTags = columns[1]
tagList = allTags.split(',')
tagDict[movie]=[]
for tag in tagList:
tagDict[movie].append(tag)
return tagDict
def movieSharedTagDict(self, threshold):
# returns a dict with movies as keys and list of (movie, num shared tag) tuples
# where threshold cuts out any movie pairs that do not share at least $threshold many tags
sharedTags = open(self.sharedTag, 'r')
maxTag = 0 # max number of shared tags between all movie pairs
movieSet = set()
tagDict = {}
for line in sharedTags:
if line != '\n':
line = line.replace('\n', '')
columns = line.split('\t')
movie1 = columns[0]
movie2 = columns[1]
numTags = int(columns[2])
if numTags >= threshold:
if numTags > maxTag:
maxTag = numTags
if movie1 not in movieSet:
tagDict[movie1]=[]
movieSet.add(movie1)
tagDict[movie1].append( (movie2, numTags) ) # movie2 is a string ID, numTags is an int
return tagDict, maxTag
def movieTagAndLocationDict(self,startVal):
# returns two dicts, one of tags by movie, the other
# of movieTag locations, where first location is at StartVal
movieTags = open(self.movieTag, 'r')
tagDict = {}
tagLocationDict={}
tagSet=set()
location = startVal
for line in movieTags:
if line != '\n':
line = line.replace('\n', '')
columns = line.split('\t')
movie = columns[0]
allTags = columns[1]
tagList = allTags.split(',')
tagDict[movie]=[]
for tag in tagList:
tagDict[movie].append(tag)
if tag not in tagSet:
tagLocationDict[tag]=str(location)
location += 1
return tagDict, tagLocationDict
def moviesRatedByUserDict(self):
# returns a dict of dicts with user as keys and a dict of movie:rating as values
userSet=set()
moviesRatedByUserDict = {}
dataSet=open(self.cleanData, 'r')
for line in dataSet:
            line = line.rstrip('\n')
columns = line.split('\t')
user = columns[0]
movie = columns[1]
rating = columns[2]
if user not in userSet:
userSet.add(user)
moviesRatedByUserDict[user]={}
moviesRatedByUserDict[user][movie]= rating
dataSet.close()
return moviesRatedByUserDict
def movieTagAsKeyDict(self):
# returns a dict with tags as keys and all movies that share tag as values
movieSet = set()
data=open(self.cleanData, 'r')
for line in data:
columns = line.split('\t')
movie = columns[1]
if movie not in movieSet:
movieSet.add(movie)
data.close()
tagSet = set()
movieTags = open(self.movieTag, 'r')
movieTagAsKeyDict = {}
for line in movieTags:
if line != '\n':
line = line.replace('\n', '')
columns = line.split('\t')
movie = columns[0]
if movie in movieSet: # we only care about movie tags for movies in our data set
allTags = columns[1]
tagList = allTags.split(',')
for tag in tagList:
if tag not in tagSet:
tagSet.add(tag)
movieTagAsKeyDict[tag]=[]
movieTagAsKeyDict[tag].append(movie)
return movieTagAsKeyDict
def meanMovieRatingDict(self):
# returns dictionary with average rating for each movie, max rating
maxRating = 0
data = open(self.meanMovieRating,'r')
meanMovieRatingDict = {}
for line in data:
line = line.replace('\n', '')
columns = line.split('\t')
movie = columns[0]
rating = columns[1]
meanMovieRatingDict[movie]=rating
if float(rating) > maxRating:
maxRating = float(rating)
return meanMovieRatingDict, maxRating
def userHistoryDict(self):
# returns a dict with user as keys and list of history tags (movies) as values
historyData = open(self.historyTag, 'r')
userSet=set()
historyDict = {}
for line in historyData:
if line != '\n':
line = line.replace('\n', '')
columns = line.split('\t')
user = columns[0]
movieString = columns[1]
movieList = movieString.split(',')
if user not in userSet:
userSet.add(user)
historyDict[user]=[]
for movie in movieList:
historyDict[user].append(movie)
historyData.close()
return historyDict
def userSocialDictReader(self):
# returns a dict with user as keys and list of friends as values
data = open(self.social, 'r')
socialDict = {}
for line in data:
if line != '\n':
line = line.replace('\n', '')
columns = line.split('\t')
user = columns[0]
friendString = columns[1]
friendList = friendString.split(',')
socialDict[user]=[]
for friend in friendList:
socialDict[user].append(friend)
data.close()
return socialDict
###############################
############# Run #############
def run(self,sproc,subprocesses):
cvStr = self.libFMBinary + ' -task r -train ' + \
self.runTrain + ' -test ' + \
self.runCV + ' -init_stdev ' + \
self.initStd + ' -dim \'' + \
self.globalBias + ','+ \
self.oneWay + ','+ \
self.dims + '\' -iter ' + \
self.strItr + ' -rlog '+ \
self.logCV + ' -out ' + \
self.predCVTmp
cvArr = cvStr.split()
testStr = self.libFMBinary + ' -task r -train ' + \
self.runTrain + ' -test ' + \
self.runTest + ' -init_stdev ' + \
self.initStd + ' -dim \'' + \
self.globalBias + ','+ \
self.oneWay + ','+ \
self.dims + '\' -iter ' + \
self.strItr + ' -rlog '+ \
self.logTest + ' -out ' + \
self.predTestTmp
testArr = testStr.split()
### CV ###
pCV = sproc.Popen(cvArr,shell=False)
pTest = sproc.Popen(testArr,shell=False)
subprocesses.append(pTest)
subprocesses.append(pCV)
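    # For reference, cvStr assembles a libFM command along these lines
    # (paths and hyperparameters are illustrative, not real configuration):
    #   ./libFM -task r -train train.bin -test cv.bin -init_stdev 0.1 \
    #           -dim '1,1,8' -iter 100 -rlog cv.log -out cv.pred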
|
ChrisRackauckas/TBEEF
|
utils/FMModel.py
|
Python
|
mit
| 29,342
|
from django import forms
class VaryingField(forms.Field):
def __init__(self, crucible, varying_on_field, *args, **kwargs):
self.crucible = crucible
self.varying_on_field = varying_on_field
self.curried_args = args
self.curried_kwargs = kwargs
def determine_field(self, form):
value = form[self.varying_on_field].value()
return self.crucible(form, value, *self.curried_args, **self.curried_kwargs)
def varies_on(what_name, *args, **kwargs):
def inner(fn):
return VaryingField(fn, what_name, *args, **kwargs)
return inner
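# A minimal usage sketch (hypothetical form; the 'kind' field and the field
# classes chosen here are assumptions, not part of this module):
#   class OrderForm(forms.Form):
#       kind = forms.ChoiceField(choices=[('int', 'int'), ('text', 'text')])
#       @varies_on('kind')
#       def amount(form, value):
#           return forms.IntegerField() if value == 'int' else forms.CharField()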
|
chrisdickinson/django-butter
|
butter/fields/varying.py
|
Python
|
bsd-3-clause
| 596
|
import logging
import decode_bcd
import decode_common
import hexdump
def get_table_slots ( req ):
return 20
# 0 ... max-1
# return dict of: score, shortname
def get_table_slot_dict ( req, blockdata, n ):
# first 4b are 'high score' for top of screen
# then 4b per score (1 through 20)
# then 6b per rank for initials (6 char initials)
a = decode_common.get_array ( blockdata, 4 + ( 4 * n ), 4 )
hi = ( decode_bcd.bcd_byte_to_int ( a [ 1 ] ) * 100000 ) + \
( decode_bcd.bcd_byte_to_int ( a [ 2 ] ) * 1000 ) + \
( decode_bcd.bcd_byte_to_int ( a [ 3 ] ) * 10 )
a = decode_common.get_array ( blockdata, 84 + ( 12 * n ), 12 )
initials = decode_char ( a [ 1 ] ) + \
decode_char ( a [ 3 ] ) + \
decode_char ( a [ 5 ] ) + \
decode_char ( a [ 7 ] ) + \
decode_char ( a [ 9 ] ) + \
decode_char ( a [ 11 ] )
d = dict()
d [ 'score' ] = hi
d [ 'shortname' ] = initials
return d
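# Worked example (illustrative bytes): if the three BCD score bytes decode to
# 1, 23 and 45, the score is 1*100000 + 23*1000 + 45*10 = 123450.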
# -------------------------------------
def decode_char ( c ):
    # 0x0a == 'A', 0x0b == 'B', ... 0x23 == 'Z'
    # (0x23 -> decimal 35; 'A' + 35 - 10 gives 'Z', the 26th letter)
    # note: 0x26 == '.'
if c == 0x26:
return '.'
return chr ( ord ( 'A' ) + c - 10 )
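# For reference: decode_char(0x0a) == 'A', decode_char(0x23) == 'Z' and
# decode_char(0x26) == '.' (the special-cased separator).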
|
skeezix/compo4all
|
spaghetti-server/g_ms_fshark.py
|
Python
|
gpl-2.0
| 1,291
|
from django.conf.urls.defaults import *
from order.views import order_pizza, order_bread
urlpatterns = patterns('order.views',
url(r'^$', 'place_order'),
url(r'pizza/(?P<order_id>\d+)$', order_pizza),
url(r'bread/(?P<order_id>\d+)$', order_bread),
)
|
ajpocus/pizzeria
|
order/urls.py
|
Python
|
bsd-3-clause
| 264
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
# File : dirac-create-distribution-tarball
# Author : Adria Casajus
########################################################################
"""
Create tarballs for a given DIRAC release
"""
import sys
import os
import shutil
import tempfile
import subprocess
import shlex
from DIRAC.Core.Utilities.File import mkDir
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities import Distribution, Subprocess
__RCSID__ = "$Id$"
class TarModuleCreator( object ):
VALID_VCS = ( 'svn', 'git', 'hg', 'file' )
class Params( object ):
def __init__( self ):
self.version = False
self.destination = False
self.sourceURL = False
self.name = False
self.vcs = False
self.vcsBranch = False
self.vcsPath = False
self.relNotes = False
self.outRelNotes = False
self.extensionVersion = None
self.extensionSource = None
self.extjspath = None
def isOK( self ):
if not self.version:
return S_ERROR( "No version defined" )
if not self.sourceURL:
return S_ERROR( "No Source URL defined" )
if not self.name:
return S_ERROR( "No name defined" )
if self.vcs and self.vcs not in TarModuleCreator.VALID_VCS:
return S_ERROR( "Invalid VCS %s" % self.vcs )
return S_OK()
def setVersion( self, opVal ):
self.version = opVal
return S_OK()
def setDestination( self, opVal ):
self.destination = os.path.realpath( opVal )
return S_OK()
def setSourceURL( self, opVal ):
self.sourceURL = opVal
return S_OK()
def setName( self, opVal ):
self.name = opVal
return S_OK()
def setVCS( self, opVal ):
self.vcs = opVal.lower()
if self.vcs == 'subversion':
self.vcs = 'svn'
elif self.vcs == 'mercurial':
self.vcs = 'hg'
return S_OK()
def setVCSBranch( self, opVal ):
self.vcsBranch = opVal
return S_OK()
def setVCSPath( self, opVal ):
self.vcsPath = opVal
return S_OK()
def setReleaseNotes( self, opVal ):
self.relNotes = opVal
return S_OK()
def setOutReleaseNotes( self, opVal ):
self.outRelNotes = True
return S_OK()
def setExtensionVersion( self, opVal ):
self.extensionVersion = opVal
return S_OK()
def setExtensionSource(self, opVal):
self.extensionSource = opVal
return S_OK()
def setExtJsPath( self, opVal ):
self.extjspath = opVal
return S_OK()
def __init__( self, params ):
self.params = params
def __checkDestination( self ):
if not self.params.destination:
self.params.destination = tempfile.mkdtemp( 'DIRACTarball' )
gLogger.notice( "Will generate tarball in %s" % self.params.destination )
mkDir(self.params.destination)
return S_OK()
def __discoverVCS( self ):
sourceURL = self.params.sourceURL
if os.path.expanduser( sourceURL ).find( "/" ) == 0:
sourceURL = os.path.expanduser( sourceURL )
self.params.vcs = "file"
return True
if sourceURL.find( ".git" ) == len( sourceURL ) - 4:
self.params.vcs = "git"
return True
for vcs in TarModuleCreator.VALID_VCS:
if sourceURL.find( vcs ) == 0:
self.params.vcs = vcs
return True
return False
def __checkoutSource( self, moduleName = None, sourceURL = None, tagVersion = None ):
"""
    This method will check out a given module from a given repository: svn, hg or git
:param str moduleName: The name of the Module: for example: LHCbWebDIRAC
:param str sourceURL: The code repository: ssh://git@gitlab.cern.ch:7999/lhcb-dirac/LHCbWebDIRAC.git
:param str tagVersion: the tag for example: v4r3p6
"""
if not self.params.vcs:
if not self.__discoverVCS():
return S_ERROR( "Could not autodiscover VCS" )
gLogger.info( "Checking out using %s method" % self.params.vcs )
if self.params.vcs == "file":
return self.__checkoutFromFile( moduleName, sourceURL )
elif self.params.vcs == "svn":
return self.__checkoutFromSVN( moduleName, sourceURL, tagVersion )
elif self.params.vcs == "hg":
return self.__checkoutFromHg( moduleName, sourceURL )
elif self.params.vcs == "git":
return self.__checkoutFromGit( moduleName, sourceURL, tagVersion )
return S_ERROR( "OOPS. Unknown VCS %s!" % self.params.vcs )
def __checkoutFromFile( self, moduleName = None, sourceURL = None ):
"""
    This method checks out a given tag from a file
    Note: we can check out any project from a file
:param str moduleName: The name of the Module
:param str sourceURL: The code repository
"""
if not moduleName:
moduleName = self.params.name
if not sourceURL:
sourceURL = self.params.sourceURL
if sourceURL.find( "file://" ) == 0:
sourceURL = sourceURL[ 7: ]
sourceURL = os.path.realpath( sourceURL )
try:
pyVer = sys.version_info
if pyVer[0] == 2 and pyVer[1] < 6:
shutil.copytree( sourceURL,
os.path.join( self.params.destination, moduleName ),
symlinks = True )
else:
shutil.copytree( sourceURL,
os.path.join( self.params.destination, moduleName ),
symlinks = True,
ignore = shutil.ignore_patterns( '.svn', '.git', '.hg', '*.pyc', '*.pyo' ) )
except Exception as e:
return S_ERROR( "Could not copy data from source URL: %s" % str( e ) )
return S_OK()
def __checkoutFromSVN( self, moduleName = None, sourceURL = None, tagVersion = None ):
"""
    This method checks out a given tag from an SVN repository.
    Note: we can check out any project from an SVN repository
:param str moduleName: The name of the Module
:param str sourceURL: The code repository
:param str tagVersion: the tag for example: v4r3p6
"""
if not moduleName:
moduleName = self.params.name
if not sourceURL:
sourceURL = self.params.sourceURL
if not tagVersion:
tagVersion = self.params.version
cmd = "svn export --trust-server-cert --non-interactive '%s/%s' '%s'" % ( sourceURL, tagVersion,
os.path.join( self.params.destination, moduleName ) )
gLogger.verbose( "Executing: %s" % cmd )
result = Subprocess.systemCall( 900, shlex.split(cmd) )
if not result[ 'OK' ]:
return S_ERROR( "Error while retrieving sources from SVN: %s" % result[ 'Message' ] )
exitStatus, stdData, errData = result[ 'Value' ]
if exitStatus:
return S_ERROR( "Error while retrieving sources from SVN: %s" % "\n".join( [ stdData, errData ] ) )
return S_OK()
def __checkoutFromHg( self, moduleName = None, sourceURL = None ):
"""
    This method checks out a given tag from a hg repository.
    Note: we can check out any project from a hg repository
:param str moduleName: The name of the Module
:param str sourceURL: The code repository
"""
if not moduleName:
moduleName = self.params.name
if not sourceURL:
sourceURL = self.params.sourceURL
if self.params.vcsBranch:
brCmr = "-b %s" % self.params.vcsBranch
else:
brCmr = ""
fDirName = os.path.join( self.params.destination, moduleName )
cmd = "hg clone %s '%s' '%s.tmp1'" % ( brCmr,
sourceURL,
fDirName )
gLogger.verbose( "Executing: %s" % cmd )
if os.system( cmd ):
return S_ERROR( "Error while retrieving sources from hg" )
hgArgs = [ "--cwd '%s.tmp1'" % fDirName ]
if self.params.vcsPath:
hgArgs.append( "--include '%s/*'" % self.params.vcsPath )
hgArgs.append( "'%s.tmp2'" % fDirName )
cmd = "hg archive %s" % " ".join( hgArgs )
gLogger.verbose( "Executing: %s" % cmd )
exportRes = os.system( cmd )
shutil.rmtree( "%s.tmp1" % fDirName )
if exportRes:
return S_ERROR( "Error while exporting from hg" )
#TODO: tmp2/path to dest
source = "%s.tmp2" % fDirName
if self.params.vcsPath:
source = os.path.join( source, self.params.vcsPath )
if not os.path.isdir( source ):
shutil.rmtree( "%s.tmp2" % fDirName )
return S_ERROR( "Path %s does not exist in repo" )
os.rename( source, fDirName )
shutil.rmtree( "%s.tmp2" % fDirName )
return S_OK()
@classmethod
def replaceKeywordsWithGit( cls, dirToDo ):
for fileName in os.listdir( dirToDo ):
objPath = os.path.join( dirToDo, fileName )
if os.path.isdir( objPath ):
TarModuleCreator.replaceKeywordsWithGit( objPath )
elif os.path.isfile( objPath ):
        if fileName.endswith( '.py' ):
with open( objPath, "r" ) as fd:
fileContents = fd.read()
changed = False
for keyWord, cmdArgs in ( ( '$Id$', '--pretty="%h (%ad) %an <%aE>" --date=iso' ),
( '$SHA1$', '--pretty="%H"' ) ):
foundKeyWord = fileContents.find( keyWord )
if foundKeyWord > -1 :
po2 = subprocess.Popen( "git log -n 1 %s '%s' 2>/dev/null" % ( cmdArgs, fileName ),
stdout = subprocess.PIPE, cwd = dirToDo, shell = True )
exitStatus = po2.wait()
if po2.returncode:
continue
toReplace = po2.stdout.read().strip()
toReplace = "".join( i for i in toReplace if ord( i ) < 128 )
fileContents = fileContents.replace( keyWord, toReplace )
changed = True
with open( objPath, "w" ) as fd:
fd.write( fileContents )
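  # For reference (illustrative output): after substitution, a '$Id$' keyword
  # in a .py file becomes something like
  #   abc1234 (2016-05-04 10:00:00 +0200) Jane Doe <jane@cern.ch>
  # and '$SHA1$' becomes the full 40-character commit hash.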
def __checkoutFromGit( self, moduleName = None, sourceURL = None, tagVersion = None ):
"""
    This method checks out a given tag from a git repository.
    Note: we can check out any project from a git repository
:param str moduleName: The name of the Module: for example: LHCbWebDIRAC
:param str sourceURL: The code repository: ssh://git@gitlab.cern.ch:7999/lhcb-dirac/LHCbWebDIRAC.git
:param str tagVersion: the tag for example: v4r3p6
"""
if not moduleName:
moduleName = self.params.name
if not sourceURL:
sourceURL = self.params.sourceURL
if not tagVersion:
tagVersion = self.params.version
if self.params.vcsBranch:
brCmr = "-b %s" % self.params.vcsBranch
else:
brCmr = ""
fDirName = os.path.join( self.params.destination, moduleName )
cmd = "git clone %s '%s' '%s'" % ( brCmr,
sourceURL,
fDirName )
gLogger.verbose( "Executing: %s" % cmd )
if os.system( cmd ):
return S_ERROR( "Error while retrieving sources from git" )
branchName = "DIRACDistribution-%s" % os.getpid()
isTagCmd = "( cd '%s'; git tag -l | grep '%s' )" % ( fDirName, tagVersion )
if os.system( isTagCmd ):
#No tag found, assume branch
branchSource = 'origin/%s' % tagVersion
else:
branchSource = tagVersion
cmd = "( cd '%s'; git checkout -b '%s' '%s' )" % ( fDirName, branchName, branchSource )
gLogger.verbose( "Executing: %s" % cmd )
exportRes = os.system( cmd )
#Add the keyword substitution
gLogger.notice( "Replacing keywords (can take a while)..." )
self.replaceKeywordsWithGit( fDirName )
shutil.rmtree( "%s/.git" % fDirName, ignore_errors=True )
shutil.rmtree( "%s/tests" % fDirName, ignore_errors=True )
shutil.rmtree( "%s/docs" % fDirName, ignore_errors=True )
shutil.rmtree( "%s/tests" % self.params.destination, ignore_errors=True )
shutil.rmtree( "%s/docs" % self.params.destination, ignore_errors=True )
if exportRes:
return S_ERROR( "Error while exporting from git" )
return S_OK()
def __loadReleaseNotesFile( self ):
if not self.params.relNotes:
relNotes = os.path.join( self.params.destination, self.params.name, "release.notes" )
else:
relNotes = self.params.relNotes
if not os.path.isfile( relNotes ):
return S_OK( "" )
try:
with open( relNotes, "r" ) as fd:
releaseContents = fd.readlines()
except Exception as excp:
return S_ERROR( "Could not open %s: %s" % ( relNotes, excp ) )
gLogger.info( "Loaded %s" % relNotes )
relData = []
version = False
feature = False
lastKey = False
for rawLine in releaseContents:
line = rawLine.strip()
if not line:
continue
if line[0] == "[" and line[-1] == "]":
version = line[1:-1].strip()
relData.append( ( version, { 'comment' : [], 'features' : [] } ) )
feature = False
lastKey = False
continue
if line[0] == "*":
feature = line[1:].strip()
relData[-1][1][ 'features' ].append( [ feature, {} ] )
lastKey = False
continue
if not feature:
relData[ -1 ][1][ 'comment' ].append( rawLine )
continue
keyDict = relData[-1][1][ 'features' ][-1][1]
foundKey = False
for key in ( 'BUGFIX', 'BUG', 'FIX', "CHANGE", "NEW", "FEATURE" ):
if line.find( "%s:" % key ) == 0:
line = line[ len( key ) + 2: ].strip()
elif line.find( "%s " % key ) == 0:
line = line[ len( key ) + 1: ].strip()
else:
continue
foundKey = key
break
if foundKey in ( 'BUGFIX', 'BUG', 'FIX' ):
foundKey = 'BUGFIX'
elif foundKey in ( 'NEW', 'FEATURE' ):
foundKey = 'FEATURE'
if foundKey:
if foundKey not in keyDict:
keyDict[ foundKey ] = []
keyDict[ foundKey ].append( line )
lastKey = foundKey
elif lastKey:
keyDict[ lastKey ][-1] += " %s" % line
return S_OK( relData )
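  # The parser above expects release.notes in roughly this shape
  # (contents are illustrative):
  #   [v1r0]
  #   free-form comment lines for the version
  #   *Core
  #   FIX: corrected some behaviour
  #   NEW: added a feature
  #        continuation line appended to the previous entry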
def __generateRSTFile( self, releaseData, rstFileName, pkgVersion, singleVersion ):
rstData = []
parsedPkgVersion = Distribution.parseVersionString( pkgVersion )
for version, verData in releaseData:
if singleVersion and version != pkgVersion:
continue
if Distribution.parseVersionString( version ) > parsedPkgVersion:
continue
versionLine = "Version %s" % version
rstData.append( "" )
rstData.append( "=" * len( versionLine ) )
rstData.append( versionLine )
rstData.append( "=" * len( versionLine ) )
rstData.append( "" )
if verData[ 'comment' ]:
rstData.append( "\n".join( verData[ 'comment' ] ) )
rstData.append( "" )
for feature, featureData in verData[ 'features' ]:
if not featureData:
continue
rstData.append( feature )
rstData.append( "=" * len( feature ) )
rstData.append( "" )
for key in sorted( featureData ):
rstData.append( key.capitalize() )
rstData.append( ":" * ( len( key ) + 5 ) )
rstData.append( "" )
for entry in featureData[ key ]:
rstData.append( " - %s" % entry )
rstData.append( "" )
#Write releasenotes.rst
try:
rstFilePath = os.path.join( self.params.destination, self.params.name, rstFileName )
with open( rstFilePath, "w" ) as fd:
fd.write( "\n".join( rstData ) )
except Exception as excp:
return S_ERROR( "Could not write %s: %s" % ( rstFileName, excp ) )
return S_OK()
def __generateReleaseNotes( self ):
result = self.__loadReleaseNotesFile()
if not result[ 'OK' ]:
return result
releaseData = result[ 'Value' ]
if not releaseData:
gLogger.info( "release.notes not found. Trying to find releasenotes.rst" )
for rstFileName in ( "releasenotes.rst", "releasehistory.rst" ):
result = self.__compileReleaseNotes( rstFileName )
if result[ 'OK' ]:
gLogger.notice( "Compiled %s file!" % rstFileName )
else:
gLogger.warn( result[ 'Message' ] )
return S_OK()
gLogger.info( "Loaded release.notes" )
for rstFileName, singleVersion in ( ( "releasenotes.rst", True ),
( "releasehistory.rst", False ) ):
result = self.__generateRSTFile( releaseData, rstFileName, self.params.version,
singleVersion )
if not result[ 'OK' ]:
gLogger.error( "Could not generate %s: %s" % ( rstFileName, result[ 'Message' ] ) )
continue
result = self.__compileReleaseNotes( rstFileName )
if not result[ 'OK' ]:
gLogger.error( "Could not compile %s: %s" % ( rstFileName, result[ 'Message' ] ) )
continue
gLogger.notice( "Compiled %s file!" % rstFileName )
return S_OK()
def __compileReleaseNotes( self, rstFile ):
relNotesRST = os.path.join( self.params.destination, self.params.name, rstFile )
if not os.path.isfile( relNotesRST ):
if self.params.relNotes:
return S_ERROR( "Defined release notes %s do not exist!" % self.params.relNotes )
return S_ERROR( "No release notes found in %s. Skipping" % relNotesRST )
try:
import docutils.core
except ImportError:
return S_ERROR( "Docutils is not installed. Please install and rerun" )
#Find basename
baseRSTFile = rstFile
for ext in ( '.rst', '.txt' ):
if baseRSTFile[ -len( ext ): ] == ext:
baseRSTFile = baseRSTFile[ :-len( ext ) ]
break
baseNotesPath = os.path.join( self.params.destination, self.params.name, baseRSTFile )
#To HTML
try:
with open( relNotesRST ) as fd:
rstData = fd.read()
except Exception as excp:
return S_ERROR( "Could not read %s: %s" % ( relNotesRST, excp ) )
try:
parts = docutils.core.publish_parts( rstData, writer_name = 'html' )
except Exception as excp:
return S_ERROR( "Cannot generate the html %s: %s" % ( baseNotesPath, str( excp ) ) )
baseList = [ baseNotesPath ]
if self.params.outRelNotes:
gLogger.notice( "Leaving a copy of the release notes outside the tarballs" )
baseList.append( "%s/%s.%s.%s" % ( self.params.destination, baseRSTFile, self.params.name, self.params.version ) )
for baseFileName in baseList:
htmlFileName = baseFileName + ".html"
try:
with open( htmlFileName, "w" ) as fd:
fd.write( parts[ 'whole' ] )
except Exception as excp:
return S_ERROR( "Could not write %s: %s" % ( htmlFileName, repr( excp ).replace( ',)', ')' ) ) )
#To pdf
pdfCmd = "rst2pdf '%s' -o '%s.pdf'" % ( relNotesRST, baseFileName )
gLogger.verbose( "Executing %s" % pdfCmd )
if os.system( pdfCmd ):
gLogger.warn( "Could not generate PDF version of %s" % baseNotesPath )
    #Unlink if not necessary (this cleanup is deliberately disabled via 'if False')
if False and not cliParams.relNotes:
try:
os.unlink( relNotesRST )
except:
pass
return S_OK()
def __generateTarball( self ):
destDir = self.params.destination
tarName = "%s-%s.tar.gz" % ( self.params.name, self.params.version )
tarfilePath = os.path.join( destDir, tarName )
dirToTar = os.path.join( self.params.destination, self.params.name )
if self.params.name in os.listdir( dirToTar ):
dirToTar = os.path.join( dirToTar, self.params.name )
result = Distribution.writeVersionToInit( dirToTar, self.params.version )
if not result[ 'OK' ]:
return result
result = Distribution.createTarball( tarfilePath, dirToTar )
if not result[ 'OK' ]:
return S_ERROR( "Could not generate tarball: %s" % result[ 'Error' ] )
#Remove package dir
shutil.rmtree( dirToTar )
gLogger.info( "Tar file %s created" % tarName )
return S_OK( tarfilePath )
def __compileWebApp( self ):
"""
    This method compiles the DIRAC web framework
"""
dctArgs = []
if self.params.extjspath:
dctArgs.append( "-P '%s'" % self.params.extjspath )
destDir = self.params.destination
dctArgs.append( "-D '%s'" % destDir )
scriptName = os.path.join( "%s/WebAppDIRAC/scripts/" % destDir, "dirac-webapp-compile.py" )
if not os.path.isfile( scriptName ):
return S_ERROR ( "%s file does not exists!" % scriptName )
dctArgs.append( "-n '%s'" % self.params.name )
cmd = "'%s' %s" % ( scriptName, " ".join( dctArgs ) )
gLogger.verbose( "Executing %s" % cmd )
if os.system( cmd ) != 0:
return S_ERROR( "Failed to execute the command" )
return S_OK()
def create( self ):
if not isinstance( self.params, TarModuleCreator.Params ):
return S_ERROR( "Argument is not a TarModuleCreator.Params object " )
result = self.params.isOK()
if not result[ 'OK' ]:
return result
result = self.__checkDestination()
if not result[ 'OK' ]:
return result
result = self.__checkoutSource()
if not result[ 'OK' ]:
return result
shutil.rmtree( "%s/tests" % self.params.destination, ignore_errors=True )
shutil.rmtree( "%s/docs" % self.params.destination, ignore_errors=True )
result = self.__generateReleaseNotes()
if not result[ 'OK' ]:
gLogger.error( "Won't generate release notes: %s" % result[ 'Message' ] )
if 'Web' in self.params.name and self.params.name != 'Web':
# if we have an extension, we have to download, because it will be
# required to compile the code
if self.params.extensionVersion and self.params.extensionSource:
      # if extensionSource is not provided, the default one (self.params.sourceURL) is used
result = self.__checkoutSource( "WebAppDIRAC", self.params.extensionSource, self.params.extensionVersion )
if not result['OK']:
return result
retVal = self.__compileWebApp()
    if not retVal['OK']:  # it can fail if we do not have sencha cmd and the extjs framework installed
gLogger.warn( 'Web is not compiled: %s' % retVal['Message'] )
return self.__generateTarball()
if __name__ == "__main__":
cliParams = TarModuleCreator.Params()
Script.disableCS()
Script.addDefaultOptionValue( "/DIRAC/Setup", "Dummy" )
Script.registerSwitch( "v:", "version=", "version to tar", cliParams.setVersion )
Script.registerSwitch( "u:", "source=", "VCS path to retrieve sources from", cliParams.setSourceURL )
Script.registerSwitch( "D:", "destination=", "Destination where to build the tar files", cliParams.setDestination )
Script.registerSwitch( "n:", "name=", "Tarball name", cliParams.setName )
Script.registerSwitch( "z:", "vcs=", "VCS to use to retrieve the sources (try to find out if not specified)", cliParams.setVCS )
Script.registerSwitch( "b:", "branch=", "VCS branch (if needed)", cliParams.setVCSBranch )
Script.registerSwitch( "p:", "path=", "VCS path (if needed)", cliParams.setVCSPath )
Script.registerSwitch( "K:", "releasenotes=", "Path to the release notes", cliParams.setReleaseNotes )
Script.registerSwitch( "A", "notesoutside", "Leave a copy of the compiled release notes outside the tarball", cliParams.setOutReleaseNotes )
Script.registerSwitch( "e:", "extensionVersion=", "if we have an extension,\
we can provide the base module version (if it is needed): for example: v3r0", cliParams.setExtensionVersion )
Script.registerSwitch( "E:", "extensionSource=", "if we have an extension,\
we must provide code repository url", cliParams.setExtensionSource )
Script.registerSwitch( "P:", "extjspath=", "directory of the extjs library", cliParams.setExtJsPath )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'\nUsage:',
' %s <option> ...\n' % Script.scriptName,
' A source, name and version are required to build the tarball',
' For instance:',
' %s -n DIRAC -v v1r0 -z svn -u http://svnweb.cern.ch/guest/dirac/DIRAC/tags/DIRAC/v1r0' % Script.scriptName ] ) )
Script.parseCommandLine( ignoreErrors = False )
result = cliParams.isOK()
if not result[ 'OK' ]:
gLogger.error( result[ 'Message' ] )
Script.showHelp()
sys.exit( 1 )
tmc = TarModuleCreator( cliParams )
result = tmc.create()
if not result[ 'OK' ]:
gLogger.error( "Could not create the tarball: %s" % result[ 'Message' ] )
sys.exit( 1 )
gLogger.always( "Tarball successfully created at %s" % result[ 'Value' ] )
sys.exit( 0 )
|
arrabito/DIRAC
|
Core/scripts/dirac-create-distribution-tarball.py
|
Python
|
gpl-3.0
| 25,052
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest, record_only, ResourceGroupPreparer
class TestClusterScenarios(ScenarioTest):
@record_only()
@ResourceGroupPreparer(name_prefix='cli_test_monitor_log_analytics_cluster_c', parameter_name='rg1', key='rg1', location='centralus')
def test_monitor_log_analytics_cluster_default(self, rg1):
new_cluster_name = self.create_random_name('clitest-cluster-', 20)
sku_capacity = 1000
self.kwargs.update({
'new_cluster_name': new_cluster_name,
'sku_capacity': sku_capacity
})
self.cmd("monitor log-analytics cluster create -g {rg1} -n {new_cluster_name} --sku-capacity {sku_capacity}",
checks=[])
self.cmd("monitor log-analytics cluster show -g {rg1} -n {new_cluster_name}", checks=[
self.check('provisioningState', 'Succeeded'),
self.check('name', new_cluster_name),
self.check('sku.capacity', sku_capacity)
])
new_sku_capacity = 2000
self.kwargs.update({
'sku_capacity': new_sku_capacity
})
self.cmd("monitor log-analytics cluster update -g {rg1} -n {new_cluster_name} "
"--sku-capacity {sku_capacity}",
checks=[
self.check('sku.capacity', new_sku_capacity)
])
self.cmd("monitor log-analytics cluster show -g {rg1} -n {new_cluster_name}", checks=[
self.check('provisioningState', 'Succeeded'),
self.check('sku.capacity', new_sku_capacity)
])
self.cmd("monitor log-analytics cluster list -g {rg1}", checks=[
self.check('length(@)', 1)
])
self.cmd("monitor log-analytics cluster delete -g {rg1} -n {new_cluster_name} -y", checks=[])
with self.assertRaisesRegex(SystemExit, '3'):
self.cmd('monitor log-analytics cluster show -g {rg1} -n {new_cluster_name}')
@record_only()
def test_monitor_log_analytics_cluster_update_key(self):
new_key_name = 'key2'
new_key_version = 'dc814576e6b34de69a10b186a4723035'
self.kwargs.update({
'rg': 'azure-cli-test-scus',
'key_name': new_key_name,
'key_version': new_key_version,
'key_vault_uri': 'https://yu-vault-1.vault.azure.net/',
'cluster_name': 'yu-test-cluster2'
})
self.cmd("monitor log-analytics cluster update -g {rg} -n {cluster_name} --key-name {key_name} "
"--key-vault-uri {key_vault_uri} --key-version {key_version}",
checks=[])
self.cmd("monitor log-analytics cluster wait -g {rg} -n {cluster_name} --updated", checks=[])
self.cmd("monitor log-analytics cluster show -g {rg} -n {cluster_name}", checks=[
self.check('provisioningState', 'Succeeded'),
self.check('keyVaultProperties.keyName', new_key_name),
self.check('keyVaultProperties.keyVersion', new_key_version)
])
|
yugangw-msft/azure-cli
|
src/azure-cli/azure/cli/command_modules/monitor/tests/latest/test_monitor_log_analytics_cluster.py
|
Python
|
mit
| 3,370
|
import piecash
from piecash_utilities.report import report, execute_report
@report(
title="My simplest report with a book",
name="piecash-simple-report-book",
menu_tip="A simple report that opens a book",
options_default_section="general",
)
def generate_report(
book_url,
):
with piecash.open_book(book_url, readonly=True, open_if_lock=True) as book:
return """<html>
<body>
Hello world from python !<br>
Book : {book_url}<br>
List of accounts : {accounts}
</body>
</html>""".format(
book_url=book_url,
accounts=[acc.fullname for acc in book.accounts],
)
if __name__ == '__main__':
execute_report(generate_report)
|
sdementen/gnucash-utilities
|
report_example/report_simplest_book.py
|
Python
|
mit
| 752
|
from django.contrib import admin
from .models import People
# Register your models here.
admin.site.register(People)
|
hhalmeida/corponovo
|
app/admin.py
|
Python
|
mit
| 117
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class ProvisioningState(Enum):
unknown = "Unknown"
provisioning = "Provisioning"
succeeded = "Succeeded"
failed = "Failed"
class DiagnosticsLevel(Enum):
none = "None"
error = "Error"
all = "All"
class ColumnType(Enum):
boolean = "Boolean"
integer = "Integer"
number = "Number"
string = "String"
class ColumnFormat(Enum):
byte = "Byte"
char = "Char"
complex64 = "Complex64"
complex128 = "Complex128"
date_time = "Date-time"
date_time_offset = "Date-timeOffset"
double = "Double"
duration = "Duration"
float_enum = "Float"
int8 = "Int8"
int16 = "Int16"
int32 = "Int32"
int64 = "Int64"
uint8 = "Uint8"
uint16 = "Uint16"
uint32 = "Uint32"
uint64 = "Uint64"
class AssetType(Enum):
module = "Module"
resource = "Resource"
class InputPortType(Enum):
dataset = "Dataset"
class OutputPortType(Enum):
dataset = "Dataset"
class ParameterType(Enum):
string = "String"
int_enum = "Int"
float_enum = "Float"
enumerated = "Enumerated"
script = "Script"
mode = "Mode"
credential = "Credential"
boolean = "Boolean"
double = "Double"
column_picker = "ColumnPicker"
parameter_range = "ParameterRange"
data_gateway_name = "DataGatewayName"
|
SUSE/azure-sdk-for-python
|
unreleased/azure-mgmt-machinelearning/azure/mgmt/machinelearning/models/azure_ml_web_services_management_client_enums.py
|
Python
|
mit
| 1,816
|
""" waysoffib.py | Thu, Jan 26, 2017 | Roman S. Collins
Sources:
- S1: Dylan Murphy - Python tutoring
http://pastebin.com/89dxMK2L
- S2: 5 ways for fibonacci
https://technobeans.com/2012/04/16/5-ways-of-fibonacci-in-python/
"""
import sys
sys.setrecursionlimit(18000)
# Global declarations for
# generator
a, b = 0, 1
class WAYSOFFIB:
def __init__(self):
self.cata = {}
    def catalogued(self, n):
        if (n == 0) or (n == 1):
            return n
        if n in self.cata:
            return self.cata[n]
        # store the result so later calls can reuse it (memoization)
        self.cata[n] = self.catalogued(n - 1) + self.catalogued(n - 2)
        return self.cata[n]
        # NOTE: earlier attempts to also return self.cata alongside the value
        # failed with "TypeError: can only concatenate tuple (not 'int') to tuple":
        #   return self.cata[n], self.cata
        #   self.cata = self.catalogued(n - 1) + self.catalogued(n - 2)
        #   return self.cata
def not_catalogued(self, n):
if (n == 0) or (n == 1):
return n
        return self.not_catalogued(n - 1) + self.not_catalogued(n - 2)
def looping(self, n):
a, b = 1, 1
for i in range(n - 1):
a, b = b, a + b
return a
def generator(self):
global a, b
while True:
a, b = b, a + b
            # yield pauses the loop here; the globals a and b retain the
            # last and second-to-last values of the sequence between calls
            yield a
def gen(self, n):
generate = self.generator()
for i in range(n - 1):
next(generate)
return b
# Seems very similar to catalogued method?
class Memoize:
def __init__(self, n):
self.n = n
# track
self.memo = {}
    def __call__(self, arg):
        # self.n holds the decorated function; check the cache first and
        # only call the wrapped function on a miss
        if arg not in self.memo:
            self.memo[arg] = self.n(arg)
        return self.memo[arg]
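# A minimal sketch (names illustrative, not from the original assignment)
# of Memoize used as a decorator:
#     @Memoize
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#     fib(30)   # recursive calls go through the wrapper and hit self.memo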
def main():
""" Doctest:
>>> doctest_n = 5
>>> doctest_waysoffib = WAYSOFFIB()
>>> print(doctest_waysoffib.catalogued(doctest_n))
5
>>> print(doctest_waysoffib.not_catalogued(doctest_n))
5
>>> print(doctest_waysoffib.looping(doctest_n))
5
    The memoization method is hard to doctest directly (it is a class
    decorator); it has been tested manually instead:
#>>> doctest_memsoffib = Memoize(doctest_n)
#>>> @Memoize
#... doctest_memsoffib.gen(doctest_n))
#5
"""
"""
n = 5
waysoffib = WAYSOFFIB()
memsoffib = Memoize(n)
print(waysoffib.catalogued(n))
print(waysoffib.not_catalogued(n))
print(waysoffib.looping(n))
print(waysoffib.gen(n))
#TODO: MEMOIZE
@Memoize
def fib(n):
y, z = 1, 1
for _ in range(n - 1):
y, z = z , y + z
return y
print(fib(n))
"""
if __name__ == '__main__':
import doctest
doctest.testmod()
main()
|
RomanSC/algorithms
|
chapter-1/waysoffib.py
|
Python
|
gpl-3.0
| 3,178
|
import urllib2
from bottle import route, run, static_file, debug, post, request, response
@route('/')
def index():
return send_static('index.html')
@post('/export/')
def export():
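    # Fetch the remote spreadsheet URL submitted from the form and stream it
    # back to the browser as a CSV attachment named <expName>.csv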
url = request.forms.get('gURL')
name = request.forms.get('expName')
data = urllib2.urlopen(url)
    response.add_header("Content-Disposition", "attachment;filename=%s.csv" % name)
response.content_type = 'text/csv'
return data
@route('<filename:path>')
def send_static(filename):
return static_file(filename, root='./dashing/')
debug(True)
run(host='0.0.0.0', port=8000, reloader=True)
|
adrianrego/dashing
|
server.py
|
Python
|
bsd-2-clause
| 602
|
def get_resource():
pass
|
tomka/rednotebook
|
rednotebook/gui/keepnote/__init__.py
|
Python
|
gpl-2.0
| 28
|
# Author: Jason Lu
known = {0: 0, 1: 1}
def fibonacci(n):
assert (n >= 0), 'n must be >= 0'
if n in known:
return known[n]
res = fibonacci(n-1) + fibonacci(n-2)
known[n] = res
return res
if __name__ == '__main__':
from timeit import Timer
t = Timer('fibonacci(8)', 'from __main__ import fibonacci')
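    # Timer.timeit() runs the statement 1,000,000 times by default and
    # returns the total elapsed seconds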
print(t.timeit())
|
jinzekid/codehub
|
python/设计模式/结构型模式/修饰起模式/fibonacci.py
|
Python
|
gpl-3.0
| 365
|
'''score releted stuff'''
import os
import pickle
import sys
import math
from config import *
import utils
import pygame
from pygame.locals import *
import music
MAX = 7  # max number of players shown on the high-score screen
CPL = 15  # max characters per line
class HighScores(object):
def __init__(self, screen, father=None, score=-1):
self.screen = screen
self.father = father
self.score = score
self.top_scores = []
self.font1 = pygame.font.Font(FONT2, 40)
self.font2 = pygame.font.Font(FONT2, 30)
def loop(self):
music.stop_music()
if (not os.path.exists(HISCORES)) and (self.score <= 0):
text_list=["HIGH SCORE","I\'m sorry","Nobody has been saved.","Nobody has stopped being zombie"]
music.play_music(PLAYMUSIC)
self.draw_screen(text_list)
return self._waitKey()
else:
self.top_scores = self._load_score()
if self.score > 0:
self._add()
text_list = self._convert()
text_list[0:0]=['HIGH SCORES']
music.play_music(PLAYMUSIC)
self.draw_screen(text_list)
return self._waitKey()
def _add(self):
        # Only insert the score if it beats one of the stored top scores
top = self.top_scores
for i in range(MAX):
if self.score > top[i][1]:
name = InputPanel(self.screen,self.score).loop()
self.top_scores = top[:i] + [(name, self.score)] + top[i:-1]
break
f = file(HISCORES, 'w')
pickle.dump(self.top_scores,f)
def _load_score(self):
top_scores = []
if not os.path.exists(HISCORES):
top_scores = [("", 0),
("", 0),
("", 0),
("", 0),
("", 0),
("", 0),
("", 0)]
f = file(HISCORES, 'w')
pickle.dump(top_scores, f)
else:
f = file(HISCORES)
top_scores = pickle.load(f)
return top_scores
def _convert(self):
top7 = []
for i,element in enumerate(self.top_scores):
if element[1] != 0:
if element[1] >= SCORE_HUMAN:
kind=" (HUMAN)"
elif element[1] >= SCORE_DEAD:
kind=" (HAPPY DEAD)"
else:
kind=""
top7.append(str(i+1) + " " + element[0] + " " + str(element[1]) + kind)
return top7
def draw_screen(self, text_list):
pygame.display.set_caption(WINDOW_TITLE)
background = utils.load_image(SCOREIMAGE)
clock = pygame.time.Clock()
separator = 3
title = text_list[0]
text_list.remove(title)
title_img = self.font1.render(title, True, WHITE)
title_img2 = self.font1.render(title, True, BLACK)
topleft = (background.get_rect().width - title_img.get_rect().width) / 2, 25
topleft2 = (background.get_rect().width - title_img.get_rect().width) / 2-separator, 25-separator
background.blit(title_img2, topleft2)
background.blit(title_img, topleft)
bg = background.copy()
self.screen.blit(background, (0, 0))
pygame.display.flip()
hor_step = self.font2.get_height()
done = False
timeloop = 0
state = 0
while not done:
clock.tick(CLOCK_TICS)
pygame.display.flip()
self.screen.blit(background, (0, 0))
y = hor_step + 80
timeloop += 1
if timeloop == CLOCK_TICS:
state = 1
done = True
for i,text_item in enumerate(text_list):
img = self.font2.render(text_item, True, WHITE)
img2 = self.font2.render(text_item, True, BLACK)
x2 = self.screen.get_width()/2
if (state == 0) and (i%2 == 0):
x1 = x2 - ((WIDTH * 0.86) * (50 - timeloop) / 50)
elif (state == 0) and (i%2 == 1):
x1 = x2 + ((WIDTH * 0.86) * (50 - timeloop) / 50)
else:
x1=x2
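                # Ease x from its start x1 toward the centre x2 with an
                # exponential decay (time constant of roughly 20 frames)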
x = (x1+(x2-x1)*(1-math.exp(-timeloop/20.0)))
x -= img.get_width()/2
self.screen.blit(img2, (x-separator, y-separator))
self.screen.blit(img, (x, y))
if x1 == x2:
bg.blit(img, (x, y))
y += hor_step + 10
return bg
def _waitKey(self):
while True:
pygame.event.clear()
event = pygame.event.wait()
if (event.type == QUIT):
sys.exit(0)
elif (pygame.key.get_pressed()[K_RETURN]) or (pygame.key.get_pressed()[K_ESCAPE]):
music.stop_music()
return self.father
class InputPanel(object):
'''A generic input panel.'''
def __init__(self, screen, score):
music.play_music(MENUMUSIC)
self.screen = screen
self.cursor = '!'
self.text = ""
self.done = False
self.font1 = pygame.font.Font(FONT3, 50)
self.clock = pygame.time.Clock()
score_text="Your score is "+str(score)
text_list=["CONGRATULATION !!!","WELCOME TO THE \"HALL OF FAME\"","Please, introduces your name"," ",score_text]
self.background = HighScores(screen).draw_screen(text_list)
pygame.display.flip()
self._draw_text()
def loop(self):
while not self.done:
self.clock.tick(CLOCK_TICS)
self.screen.blit(self.background, (0,0))
for event in pygame.event.get():
self.control(event)
self._draw_text()
pygame.display.flip()
music.stop_music()
return self.text
def control(self, event):
if event.type == QUIT:
sys.exit(0)
if event.type == KEYDOWN:
if event.key in (K_RETURN, K_KP_ENTER):
self.enter()
else:
char = event.unicode
if (self.valid_char(char)) and (len(self.text) < CPL):
self.text += char
self._draw_text()
if event.key == K_BACKSPACE:
self.text = self.text[:-1]
self._draw_text()
def valid_char(self, char):
if char:
if char in u"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRTSUVWXYZ1234567890":
return True
return False
def enter(self):
self.done = True
def _draw_text(self):
y = 300 # Tune this value as you need
separator = 2
text_img = self.font1.render(self.text + self.cursor, True, WHITE)
text_img2 = self.font1.render(self.text + self.cursor, True, BLUE)
x = (self.screen.get_width() - text_img.get_width()) / 2
self.screen.blit(text_img2, (x-separator,y-separator))
self.screen.blit(text_img, (x,y))
|
jjconti/twistedmold
|
lib/scores.py
|
Python
|
gpl-3.0
| 7,144
|
# -*- coding: utf-8 -*-
import math
from collections import OrderedDict
import flask
import pandas as pd
import netCDF4
import numpy as np
from bokeh.embed import components
from bokeh.resources import INLINE
from bokeh.templates import RESOURCES
from bokeh.util.string import encode_utf8
from bokeh.models import DatetimeTickFormatter, ColumnDataSource, HoverTool, Plot, Range1d
from bokeh.palettes import RdBu11
from bokeh.models.glyphs import Text, Rect
from bokeh.plotting import figure, show, output_notebook, hplot, vplot
import utils.world_countries as wc
from utils.colormap import RGBAColorMapper
from viz2 import climate_map, timeseries, legend, title, get_slice
app = flask.Flask(__name__)
colormap = RGBAColorMapper(-6, 6, RdBu11)
@app.route("/")
def index():
# Create layout
c_map = climate_map()
ts = timeseries()
l = legend()
t = title()
map_legend = hplot(c_map, l)
layout = vplot(t, map_legend, ts)
plot_resources = RESOURCES.render(
js_raw=INLINE.js_raw,
css_raw=INLINE.css_raw,
js_files=INLINE.js_files,
css_files=INLINE.css_files,
)
script, div = components(layout, INLINE)
html = flask.render_template(
'embed.html',
plot_script=script,
plot_div=div,
plot_resources=plot_resources,
)
return encode_utf8(html)
if __name__ == "__main__":
app.run(debug=True)
|
ijstokes/bokeh-blaze-tutorial
|
app/my_bokeh_app.py
|
Python
|
mit
| 1,411
|
import pygame
import time
pygame.mixer.init()
s1 = pygame.mixer.Sound("1.wav")
s2 = pygame.mixer.Sound("2.wav")
s3 = pygame.mixer.Sound("3.wav")
s4 = pygame.mixer.Sound("4.wav")
s5 = pygame.mixer.Sound("5.wav")
squeak = pygame.mixer.Sound("squeak.wav")
explosion = pygame.mixer.Sound("explosion.wav")
final = pygame.mixer.Sound("finalCountdown.wav")
alert = pygame.mixer.Sound("alert2.wav")
def count():
sounds = [s5, s4, s3, s2, s1]
#sounds = [s1, s3, s2, s4, s5]
#sounds = [alert, final, s5, s4, s3, s2, s1]
for s in sounds:
s.play()
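        # Block until the current clip finishes so the sounds play in sequence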
time.sleep(s.get_length())
def bang():
s = squeak
#s = explosion
s.play()
time.sleep(s.get_length())
if __name__ == "__main__":
count()
bang()
|
whaleygeek/school_mc_demo
|
demo3/countdown.py
|
Python
|
mit
| 768
|
# Copyright (c) 2019 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
import testtools
from kmip.pie import objects
from kmip.pie import sqltypes
class TestApplicationSpecificInformation(testtools.TestCase):
"""
Test suite for ApplicationSpecificInformation.
"""
def setUp(self):
super(TestApplicationSpecificInformation, self).setUp()
def tearDown(self):
super(TestApplicationSpecificInformation, self).tearDown()
def test_init(self):
"""
Test that an ApplicationSpecificInformation object can be instantiated.
"""
app_specific_info = objects.ApplicationSpecificInformation()
self.assertIsNone(app_specific_info.application_namespace)
self.assertIsNone(app_specific_info.application_data)
def test_invalid_application_namespace(self):
"""
Test that a TypeError is raised when an invalid application namespace
value is used to construct an ApplicationSpecificInformation attribute.
"""
kwargs = {"application_namespace": []}
self.assertRaisesRegex(
TypeError,
"The application namespace must be a string.",
objects.ApplicationSpecificInformation,
**kwargs
)
args = (
objects.ApplicationSpecificInformation(),
"application_namespace",
[]
)
self.assertRaisesRegex(
TypeError,
"The application namespace must be a string.",
setattr,
*args
)
def test_invalid_application_data(self):
"""
Test that a TypeError is raised when an invalid application data value
is used to construct an ApplicationSpecificInformation attribute.
"""
kwargs = {"application_data": []}
self.assertRaisesRegex(
TypeError,
"The application data must be a string.",
objects.ApplicationSpecificInformation,
**kwargs
)
args = (
objects.ApplicationSpecificInformation(),
"application_data",
[]
)
self.assertRaisesRegex(
TypeError,
"The application data must be a string.",
setattr,
*args
)
def test_repr(self):
"""
Test that repr can be applied to an ApplicationSpecificInformation
attribute.
"""
app_specific_info = objects.ApplicationSpecificInformation(
application_namespace="ssl",
application_data="www.example.com"
)
args = [
"application_namespace='{}'".format("ssl"),
"application_data='{}'".format("www.example.com")
]
expected = "ApplicationSpecificInformation({})".format(", ".join(args))
observed = repr(app_specific_info)
self.assertEqual(expected, observed)
def test_str(self):
"""
Test that str can be applied to an ApplicationSpecificInformation
attribute.
"""
app_specific_info = objects.ApplicationSpecificInformation(
application_namespace="ssl",
application_data="www.example.com"
)
expected = str(
{
"application_namespace": "ssl",
"application_data": "www.example.com"
}
)
observed = str(app_specific_info)
self.assertEqual(expected, observed)
def test_comparison_on_equal(self):
"""
Test that the equality/inequality operators return True/False when
comparing two ApplicationSpecificInformation attributes with the same
data.
"""
a = objects.ApplicationSpecificInformation()
b = objects.ApplicationSpecificInformation()
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
a = objects.ApplicationSpecificInformation(
application_namespace="ssl",
application_data="www.example.com"
)
b = objects.ApplicationSpecificInformation(
application_namespace="ssl",
application_data="www.example.com"
)
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
def test_comparison_on_different_application_namespaces(self):
"""
Test that the equality/inequality operators return False/True when
comparing two ApplicationSpecificInformation attributes with different
application namespaces.
"""
a = objects.ApplicationSpecificInformation(
application_namespace="a"
)
b = objects.ApplicationSpecificInformation(
application_namespace="b"
)
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_comparison_on_different_application_data(self):
"""
Test that the equality/inequality operators return False/True when
comparing two ApplicationSpecificInformation attributes with different
application data.
"""
a = objects.ApplicationSpecificInformation(
application_data="a"
)
b = objects.ApplicationSpecificInformation(
application_data="b"
)
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_comparison_on_type_mismatch(self):
"""
Test that the equality/inequality operators return False/True when
comparing an ApplicationSpecificInformation attribute to a non
ApplicationSpecificInformation attribute.
"""
a = objects.ApplicationSpecificInformation()
b = "invalid"
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_save(self):
"""
Test that an ApplicationSpecificInformation attribute can be saved
using SQLAlchemy. This test will add an attribute instance to the
database, verify that no exceptions are thrown, and check that its
ID was set.
"""
app_specific_info = objects.ApplicationSpecificInformation(
application_namespace="ssl",
application_data="www.example.com"
)
engine = sqlalchemy.create_engine("sqlite:///:memory:", echo=True)
sqltypes.Base.metadata.create_all(engine)
session = sqlalchemy.orm.sessionmaker(bind=engine)()
session.add(app_specific_info)
session.commit()
self.assertIsNotNone(app_specific_info.id)
def test_get(self):
"""
Test that an ApplicationSpecificInformation attribute can be saved
and then retrieved using SQLAlchemy. This test adds the attribute to
the database and then retrieves it by ID and verifies its values.
"""
app_specific_info = objects.ApplicationSpecificInformation(
application_namespace="ssl",
application_data="www.example.com"
)
engine = sqlalchemy.create_engine("sqlite:///:memory:", echo=True)
sqltypes.Base.metadata.create_all(engine)
session = sqlalchemy.orm.sessionmaker(bind=engine)()
session.add(app_specific_info)
session.commit()
# Grab the ID now before making a new session to avoid a Detached error
# See http://sqlalche.me/e/bhk3 for more info.
app_specific_info_id = app_specific_info.id
session = sqlalchemy.orm.sessionmaker(bind=engine)()
retrieved_info = session.query(
objects.ApplicationSpecificInformation
).filter(
objects.ApplicationSpecificInformation.id == app_specific_info_id
).one()
session.commit()
self.assertEqual("ssl", retrieved_info.application_namespace)
self.assertEqual("www.example.com", retrieved_info.application_data)
|
OpenKMIP/PyKMIP
|
kmip/tests/unit/pie/objects/test_application_specific_information.py
|
Python
|
apache-2.0
| 8,739
|
import urllib
import urllib2
import cgi
from decimal import Decimal
from django.conf import settings
from django.core.urlresolvers import reverse
from django import forms
from django.http import Http404
from django.shortcuts import get_object_or_404
from tendenci.apps.payments.paypal.forms import PayPalPaymentForm
from tendenci.apps.payments.models import Payment
from tendenci.apps.payments.utils import payment_processing_object_updates
from tendenci.apps.payments.utils import log_payment, send_payment_notice
from tendenci.apps.site_settings.utils import get_setting
def prepare_paypal_form(request, payment):
amount = "%.2f" % payment.amount
image_url = get_setting("site", "global", "MerchantLogo")
site_url = get_setting('site', 'global', 'siteurl')
notify_url = '%s/%s' % (site_url, reverse('paypal.ipn'))
currency_code = get_setting('site', 'global', 'currency')
if not currency_code:
currency_code = 'USD'
params = {
'business': settings.PAYPAL_MERCHANT_LOGIN,
'image_url': image_url,
'amount': amount,
'notify_url': notify_url,
'currency_code': currency_code,
'invoice': payment.id,
'item_name': payment.description,
'first_name': payment.first_name,
'last_name': payment.last_name,
'email': payment.email,
'address1': payment.address,
'address2': payment.address2,
'city': payment.city,
'state': payment.state,
'country': payment.country,
'zip': payment.zip,
'night_phone_a': payment.phone,
}
form = PayPalPaymentForm(initial=params)
form.fields['return'] = forms.CharField(max_length=100,
widget=forms.HiddenInput,
initial=payment.response_page)
return form
def parse_pdt_validation(data):
result_params = {}
success = False
items_list = data.split('\n')
for i, item in enumerate(items_list):
if i == 0:
success = (item.lower() == 'success')
else:
# the item is url encoded - decode it
result_params.update(cgi.parse_qsl(item))
return success, result_params
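# A small illustration of parse_pdt_validation (the input string is made up):
#     success, params = parse_pdt_validation(
#         'SUCCESS\nfirst_name=Jane+Doe\npayment_status=Completed')
#     # success -> True
#     # params  -> {'first_name': 'Jane Doe', 'payment_status': 'Completed'}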
def validate_with_paypal(request, validate_type):
"""
Validate either PDT or IPN with PayPal.
"""
if validate_type == 'PDT':
# we are on return url
# need to verify if payment is completed
# MERCHANT_TXN_KEY is your PDT identity token
params = {
'cmd': '_notify-synch',
'tx': request.GET.get('tx', ''),
'at': settings.MERCHANT_TXN_KEY
}
data = urllib.urlencode(params)
# Sample response:
# SUCCESS
# first_name=Jane+Doe
# last_name=Smith
# payment_status=Completed payer_email=janedoesmith%40hotmail.com
# payment_gross=3.99
# mc_currency=USD custom=For+the+purchase+of+the+rare+book+Green+Eggs+%26+Ham
# If the response is FAIL, PayPal recommends making sure that:
# The Transaction token is not bad.
# The ID token is not bad.
# The tokens have not expired.
else: # IPN
data = 'cmd=_notify-validate&%s' % request.POST.urlencode()
# The response is one single-word: VERIFIED or INVALID
headers = {"Content-type": "application/x-www-form-urlencoded",
'encoding': 'utf-8',
"Accept": "text/plain"}
request = urllib2.Request(settings.PAYPAL_POST_URL,
data,
headers)
response = urllib2.urlopen(request)
data = response.read()
if validate_type == 'PDT':
return parse_pdt_validation(data)
else:
return data.strip('\n').lower() == 'verified', None
def verify_no_fraud(response_d, payment):
# Has duplicate txn_id?
txn_id = response_d.get('txn_id')
if not txn_id:
return False
txn_id_exists = Payment.objects.filter(trans_id=txn_id).exists()
if txn_id_exists:
return False
    # Does the receiver_email match?
receiver_email = response_d.get('receiver_email')
if receiver_email != settings.PAYPAL_MERCHANT_LOGIN:
return False
# Is the amount correct?
payment_gross = response_d.get('mc_gross', 0)
try:
float(payment_gross)
except ValueError:
payment_gross = 0
if Decimal(payment_gross) != payment.amount:
return False
return True
def paypal_thankyou_processing(request, response_d, **kwargs):
# validate with PayPal
validate_type = kwargs.get('validate_type', 'PDT')
if validate_type == 'PDT':
success, response_d = validate_with_paypal(request, validate_type)
else:
success = validate_with_paypal(request, validate_type)[0]
response_d = dict(map(lambda x: (x[0].lower(), x[1]),
response_d.items()))
if not success:
raise Http404
paymentid = response_d.get('invoice', 0)
    try:
        paymentid = int(paymentid)
    except (TypeError, ValueError):
        paymentid = 0
payment = get_object_or_404(Payment, pk=paymentid)
processed = False
# To prevent the fraud, verify the following:
# 1) txn_id is not a duplicate to prevent someone from reusing an old,
# completed transaction.
# 2) receiver_email is an email address registered in your PayPal
# account, to prevent the payment from being sent to a fraudulent
# account.
# 3) Other transaction details, such as the item number and price,
# to confirm that the price has not been changed.
# if balance==0, it means already processed
if payment.invoice.balance > 0:
# verify before updating database
is_valid = verify_no_fraud(response_d, payment)
if is_valid:
payment_update_paypal(request, response_d, payment)
payment_processing_object_updates(request, payment)
processed = True
# log an event
log_payment(request, payment)
# send payment recipients notification
send_payment_notice(request, payment)
return payment, processed
def payment_update_paypal(request, response_d, payment, **kwargs):
payment.first_name = response_d.get('first_name', '')
payment.last_name = response_d.get('last_name', '')
address = response_d.get('address1', '')
if address:
payment.address = address
address2 = response_d.get('address2', '')
if address2:
payment.address2 = address2
city = response_d.get('city', '')
if city:
payment.city = city
state = response_d.get('state', '')
if state:
payment.state = state
phone = response_d.get('night_phone_a', '')
if phone:
payment.phone = phone
payment.payment_type = response_d.get('payment_type', '')
result = response_d.get('payment_status', '')
if result.lower() == 'completed':
payment.response_code = '1'
payment.response_subcode = '1'
payment.response_reason_code = '1'
payment.status_detail = 'approved'
payment.trans_id = response_d.get('txn_id')
payment.response_reason_text = 'This transaction has been approved.'
else:
payment.response_code = 0
payment.response_reason_code = 0
payment.response_reason_text = 'This transaction is %s.' % (
response_d.get('payment_status')).lower()
if payment.is_approved:
payment.mark_as_paid()
payment.save()
payment.invoice.make_payment(request.user, payment.amount)
else:
if not payment.status_detail:
payment.status_detail = 'not approved'
payment.save()
|
alirizakeles/tendenci
|
tendenci/apps/payments/paypal/utils.py
|
Python
|
gpl-3.0
| 7,883
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from os.path import abspath, dirname, join
#
# Bokeh documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 12 23:43:03 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.4'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'bokeh.sphinxext.bokeh_autodoc',
'bokeh.sphinxext.bokeh_enum',
'bokeh.sphinxext.bokeh_gallery',
'bokeh.sphinxext.bokeh_github',
'bokeh.sphinxext.bokeh_jinja',
'bokeh.sphinxext.bokeh_index_toctree',
'bokeh.sphinxext.bokeh_model',
'bokeh.sphinxext.bokeh_options',
'bokeh.sphinxext.bokeh_palette',
'bokeh.sphinxext.bokeh_palette_group',
'bokeh.sphinxext.bokeh_plot',
'bokeh.sphinxext.bokeh_prop',
'bokeh.sphinxext.bokeh_sitemap',
'bokeh.sphinxext.collapsible_code_block',
]
napoleon_include_init_with_doc = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Bokeh'
copyright = '© Copyright 2015, Anaconda.'
# Get the standard computed Bokeh version string to use for |version|
# and |release|
from bokeh import __version__
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# Check for version override (e.g. when re-deploying a previously released
# docs, or when pushing test docs that do not have a corresponding BokehJS
# available on CDN)
from bokeh.settings import settings
if settings.docs_version():
version = release = settings.docs_version()
# get all the versions that will appear in the version dropdown
with open(join(dirname(abspath(__file__)), "all_versions.txt")) as f:
    all_versions = [x.strip() for x in reversed(f.readlines())]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
#
# NOTE: in these docs all .py script are assumed to be bokeh plot scripts!
# with bokeh_plot_pyfile_include_dirs set desired folder to look for .py files
bokeh_plot_pyfile_include_dirs = ['docs']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Sort members by type
autodoc_member_order = 'groupwise'
# This would more properly be done with rst_epilog but something about
# the combination of this with the bokeh-gallery directive breaks the build
rst_prolog = """
.. |Color| replace:: :py:class:`~bokeh.core.properties.Color`
.. |DataSpec| replace:: :py:class:`~bokeh.core.properties.DataSpec`
.. |Document| replace:: :py:class:`~bokeh.document.Document`
.. |HasProps| replace:: :py:class:`~bokeh.core.has_props.HasProps`
.. |Model| replace:: :py:class:`~bokeh.model.Model`
.. |Property| replace:: :py:class:`~bokeh.core.property.bases.Property`
.. |PropertyContainer| replace:: :py:class:`~bokeh.core.property.containers.PropertyContainer`
.. |PropertyDescriptor| replace:: :py:class:`~bokeh.core.property.descriptor.PropertyDescriptor`
.. |UnitsSpec| replace:: :py:class:`~bokeh.core.properties.UnitsSpec`
.. |field| replace:: :py:func:`~bokeh.core.properties.field`
.. |value| replace:: :py:func:`~bokeh.core.properties.value`
"""
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bokeh_theme'
html_theme_path = ['.']
MAIN_SITE = '//bokehplots.com'
html_context = {
'SITEMAP_BASE_URL': 'https://bokeh.pydata.org/en/', # Trailing slash is needed
'DESCRIPTION': 'Bokeh visualization library, documentation site.',
'AUTHOR': 'Bokeh contributors',
'VERSION': version,
# Nav
'NAV': (
('About', MAIN_SITE + '/pages/about-bokeh.html'),
('Gallery', '/docs/gallery.html'),
('Docs', '//bokeh.pydata.org/en/latest/'),
('Github', '//github.com/bokeh/bokeh'),
),
# Links
'LINKS': (
('FAQs', MAIN_SITE + '/pages/faqs.html'),
('Technical vision', MAIN_SITE + '/pages/technical-vision.html'),
('Roadmap', MAIN_SITE + '/pages/roadmap.html'),
('Citation', MAIN_SITE + '/pages/citation.html'),
),
# About Links
'ABOUT': (
('About', MAIN_SITE + '/pages/about-bokeh.html'),
('Team', MAIN_SITE + '/pages/team.html'),
('Contact', MAIN_SITE + '/pages/contact.html'),
),
# Social links
'SOCIAL': (
('Contribute', MAIN_SITE + '/pages/contribute.html'),
('Mailing list', '//groups.google.com/a/anaconda.com/forum/#!forum/bokeh'),
('Github', '//github.com/bokeh/bokeh'),
('Twitter', '//twitter.com/BokehPlots'),
('YouTube', '//www.youtube.com/channel/UCK0rSk29mmg4UT4bIOvPYhw')
),
# Links for the docs sub navigation
'NAV_DOCS': (
('Installation', 'installation'),
('User Guide', 'user_guide'),
('Gallery', 'gallery'),
('Reference', 'reference'),
('Releases', 'releases/%s' % version),
('Developer Guide', 'dev_guide'),
),
'ALL_VERSIONS': all_versions,
'css_server': os.environ.get('BOKEH_DOCS_CSS_SERVER', 'bokehplots.com'),
}
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bokehdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Bokeh.tex', u'Bokeh Documentation', u'Anaconda', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bokeh', u'Bokeh Documentation',
[u'Anaconda'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Bokeh', u'Bokeh Documentation', u'Anaconda', 'Bokeh', 'Interactive Web Plotting for Python', 'Graphics'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# intersphinx settings
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None)
}
|
DuCorey/bokeh
|
sphinx/source/conf.py
|
Python
|
bsd-3-clause
| 9,859
|
"""Pytest configuration file
"""
import os
os.environ["DJANGO_SETTINGS_MODULE"] = "treebeard.tests.settings"
import django
def pytest_report_header(config):
return "Django: " + django.get_version()
def pytest_configure(config):
django.setup()
|
tabo/django-treebeard
|
treebeard/tests/conftest.py
|
Python
|
apache-2.0
| 257
|
__author__ = 'yusaira-khan'
import unittest
import un_iife_ize.un_iife_ize as un_iife_ize
class CheckVar(unittest.TestCase):
def test_simple(self):
statement = [('var hello,world=5;', 0)]
exp = [('hello=undefined,world=5;', 0)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
self.assertEqual(ret, exp)
def test_multiple(self):
statement = [('var hello,world=5;\nvar bye,nope;', 0)]
exp = [('hello=undefined,world=5;', 0), ('bye=undefined,nope=undefined;', 19)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
self.assertEqual(ret, exp)
def test_sections(self):
statement = [('var hello,world=5;\nvar bye,nope;', 0),
('var hello,world=5;\nvar bye,nope;', 30)]
exp = [('hello=undefined,world=5;', 0),
('bye=undefined,nope=undefined;', 19),
('hello=undefined,world=5;', 30),
('bye=undefined,nope=undefined;', 49)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
self.assertEqual(ret, exp)
def test_deliberate_iife(self):
statement = [('var hello=function(){;}', 0)]
exp = [('hello=function(){;}', 0)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
print(ret)
self.assertEqual(ret, exp)
def test_deliberate_iife_barc(self):
statement = [('var hello = (function(){;}())', 0)]
exp = [(' hello = (function(){;}())', 0)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
print(ret, len(exp[0][0]), len(ret[0][0]))
self.assertEqual(ret, exp)
def test_double_assignment(self):
statement = [('var hello=wow=;', 0)]
exp = [('hello=wow=', 0)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
print(ret)
self.assertEqual(ret, exp)
def test_inside_function(self):
statement = [('function(a){var hello=5;}', 30)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.unmodified
print("woadh", ret, v.unmodified)
self.assertEqual(ret, statement)
def test_sections_unmodified(self):
statement = [('var hello,world=5;\nfunction(){}\nvar bye,nope;', 0),
('var hello,world=5;\nvar bye,nope;', 30)]
exp = [('\nfunction(){}\n', 18), ('', len(statement[0][0]) + statement[0][1]),
('\n', 48), ('', len(statement[1][0]) + statement[1][1])]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.unmodified
print("ret", ret)
print("expt", exp)
self.assertEqual(ret, exp)
if __name__ == '__main__':
unittest.main()
|
yusaira-khan/un-iife-ize
|
tests/vars.py
|
Python
|
mit
| 2,836
|
"""Entitlement Models"""
import logging
import uuid as uuid_tools
from datetime import timedelta
from django.conf import settings
from django.contrib.sites.models import Site
from django.db import IntegrityError, models, transaction
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from model_utils import Choices
from model_utils.models import TimeStampedModel
from simple_history.models import HistoricalRecords
from course_modes.models import CourseMode
from entitlements.utils import is_course_run_entitlement_fulfillable
from lms.djangoapps.certificates.models import GeneratedCertificate
from lms.djangoapps.commerce.utils import refund_entitlement
from openedx.core.djangoapps.catalog.utils import get_course_uuid_for_course
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from student.models import CourseEnrollment, CourseEnrollmentException
from util.date_utils import strftime_localized
log = logging.getLogger("common.entitlements.models")
@python_2_unicode_compatible
class CourseEntitlementPolicy(models.Model):
"""
Represents the Entitlement's policy for expiration, refunds, and regaining a used certificate
.. no_pii:
"""
DEFAULT_EXPIRATION_PERIOD_DAYS = 730
DEFAULT_REFUND_PERIOD_DAYS = 60
DEFAULT_REGAIN_PERIOD_DAYS = 14
MODES = Choices((None, u'---------'), CourseMode.VERIFIED, CourseMode.PROFESSIONAL)
# Use a DurationField to calculate time as it returns a timedelta, useful in performing operations with datetimes
expiration_period = models.DurationField(
default=timedelta(days=DEFAULT_EXPIRATION_PERIOD_DAYS),
help_text=u"Duration in days from when an entitlement is created until when it is expired.",
null=False
)
refund_period = models.DurationField(
default=timedelta(days=DEFAULT_REFUND_PERIOD_DAYS),
help_text=u"Duration in days from when an entitlement is created until when it is no longer refundable",
null=False
)
regain_period = models.DurationField(
default=timedelta(days=DEFAULT_REGAIN_PERIOD_DAYS),
help_text=(u"Duration in days from when an entitlement is redeemed for a course run until "
u"it is no longer able to be regained by a user."),
null=False
)
site = models.ForeignKey(Site, null=True, on_delete=models.CASCADE)
mode = models.CharField(max_length=32, choices=MODES, null=True)
def get_days_until_expiration(self, entitlement):
"""
Returns an integer of number of days until the entitlement expires.
Includes the logic for regaining an entitlement.
"""
now_timestamp = now()
expiry_date = entitlement.created + self.expiration_period
days_until_expiry = (expiry_date - now_timestamp).days
if not entitlement.enrollment_course_run:
return days_until_expiry
course_overview = CourseOverview.get_from_id(entitlement.enrollment_course_run.course_id)
# Compute the days left for the regain
days_since_course_start = (now_timestamp - course_overview.start).days
days_since_enrollment = (now_timestamp - entitlement.enrollment_course_run.created).days
days_since_entitlement_created = (now_timestamp - entitlement.created).days
# We want to return whichever days value is less since it is then the more recent one
days_until_regain_ends = (self.regain_period.days - # pylint: disable=no-member
min(days_since_course_start, days_since_enrollment, days_since_entitlement_created))
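        # Illustrative example: with the default 14-day regain period, a course
        # that started 10 days ago and an enrollment created 3 days ago give
        # min(10, 3, ...) = 3, so the regain window ends in 14 - 3 = 11 days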
# If the base days until expiration is less than the days until the regain period ends, use that instead
if days_until_expiry < days_until_regain_ends:
return days_until_expiry
return days_until_regain_ends # pylint: disable=no-member
def is_entitlement_regainable(self, entitlement):
"""
Determines from the policy if an entitlement can still be regained by the user, if they choose
to by leaving and regaining their entitlement within policy.regain_period days from start date of
the course or their redemption, whichever comes later, and the expiration period hasn't passed yet
"""
if entitlement.expired_at:
return False
if entitlement.enrollment_course_run:
if GeneratedCertificate.certificate_for_student(
entitlement.user_id, entitlement.enrollment_course_run.course_id) is not None:
return False
# This is >= because a days_until_expiration 0 means that the expiration day has not fully passed yet
# and that the entitlement should not be expired as there is still time
return self.get_days_until_expiration(entitlement) >= 0
return False
def is_entitlement_refundable(self, entitlement):
"""
Determines from the policy if an entitlement can still be refunded, if the entitlement has not
yet been redeemed (enrollment_course_run is NULL) and policy.refund_period has not yet passed, or if
the entitlement has been redeemed, but the regain period hasn't passed yet.
"""
# If the Entitlement is expired already it is not refundable
if entitlement.expired_at:
return False
# If there's no order number, it cannot be refunded
if entitlement.order_number is None:
return False
# This is > because a get_days_since_created of refund_period means that that many days have passed,
# which should then make the entitlement no longer refundable
if entitlement.get_days_since_created() > self.refund_period.days: # pylint: disable=no-member
return False
if entitlement.enrollment_course_run:
return self.is_entitlement_regainable(entitlement)
return True
def is_entitlement_redeemable(self, entitlement):
"""
Determines from the policy if an entitlement can be redeemed, if it has not passed the
expiration period of policy.expiration_period, and has not already been redeemed
"""
# This is < because a get_days_since_created of expiration_period means that that many days have passed,
# which should then expire the entitlement
return (entitlement.get_days_since_created() < self.expiration_period.days # pylint: disable=no-member
and not entitlement.enrollment_course_run
and not entitlement.expired_at)
def __str__(self):
return u'Course Entitlement Policy: expiration_period: {}, refund_period: {}, regain_period: {}, mode: {}'\
.format(
self.expiration_period,
self.refund_period,
self.regain_period,
self.mode
)
class CourseEntitlement(TimeStampedModel):
"""
Represents a Student's Entitlement to a Course Run for a given Course.
.. no_pii:
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
uuid = models.UUIDField(default=uuid_tools.uuid4, editable=False, unique=True)
course_uuid = models.UUIDField(help_text=u'UUID for the Course, not the Course Run')
expired_at = models.DateTimeField(
null=True,
help_text=u'The date that an entitlement expired, if NULL the entitlement has not expired.',
blank=True
)
mode = models.CharField(max_length=100, help_text=u'The mode of the Course that will be applied on enroll.')
enrollment_course_run = models.ForeignKey(
'student.CourseEnrollment',
null=True,
help_text=u'The current Course enrollment for this entitlement. If NULL the Learner has not enrolled.',
blank=True,
on_delete=models.CASCADE,
)
order_number = models.CharField(max_length=128, default=None, null=True)
refund_locked = models.BooleanField(default=False)
_policy = models.ForeignKey(CourseEntitlementPolicy, null=True, blank=True, on_delete=models.CASCADE)
history = HistoricalRecords()
class Meta:
unique_together = ('course_uuid', 'order_number')
@property
def expired_at_datetime(self):
"""
Getter to be used instead of expired_at because of the conditional check and update
"""
self.update_expired_at()
return self.expired_at
@expired_at_datetime.setter
def expired_at_datetime(self, value):
"""
Setter to be used instead for expired_at for consistency
"""
self.expired_at = value
@property
def policy(self):
"""
Getter to be used instead of _policy because of the null object pattern
"""
return self._policy or CourseEntitlementPolicy()
@policy.setter
def policy(self, value):
"""
Setter to be used instead of _policy because of the null object pattern
"""
self._policy = value
def get_days_since_created(self):
"""
Returns an integer of number of days since the entitlement has been created
"""
return (now() - self.created).days
def update_expired_at(self):
"""
Updates the expired_at attribute if it is not set AND it is expired according to the entitlement's policy,
OR if the policy can no longer be regained AND the policy has been redeemed
"""
if not self.expired_at:
if (self.policy.get_days_until_expiration(self) < 0 or
(self.enrollment_course_run and not self.is_entitlement_regainable())):
self.expire_entitlement()
def get_days_until_expiration(self):
"""
Returns an integer of number of days until the entitlement expires based on the entitlement's policy
"""
return self.policy.get_days_until_expiration(self)
def is_entitlement_regainable(self):
"""
Returns a boolean as to whether or not the entitlement can be regained based on the entitlement's policy
"""
return self.policy.is_entitlement_regainable(self)
def is_entitlement_refundable(self):
"""
Returns a boolean as to whether or not the entitlement can be refunded based on the entitlement's policy
"""
return not self.refund_locked and self.policy.is_entitlement_refundable(self)
def is_entitlement_redeemable(self):
"""
Returns a boolean as to whether or not the entitlement can be redeemed based on the entitlement's policy
"""
return self.policy.is_entitlement_redeemable(self)
def to_dict(self):
"""
Convert entitlement to dictionary representation including relevant policy information.
Returns:
The entitlement UUID
The associated course's UUID
The date at which the entitlement expired. None if it is still active.
The localized string representing the date at which the entitlement expires.
"""
expiration_date = None
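        # Only surface a human-readable expiration date once the entitlement
        # is inside the configured alert window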
if self.get_days_until_expiration() < settings.ENTITLEMENT_EXPIRED_ALERT_PERIOD:
expiration_date = strftime_localized(
now() + timedelta(days=self.get_days_until_expiration()),
'SHORT_DATE'
)
expired_at = strftime_localized(self.expired_at_datetime, 'SHORT_DATE') if self.expired_at_datetime else None
return {
'uuid': str(self.uuid),
'course_uuid': str(self.course_uuid),
'expired_at': expired_at,
'expiration_date': expiration_date
}
def set_enrollment(self, enrollment):
"""
Fulfills an entitlement by specifying a session.
"""
self.enrollment_course_run = enrollment
self.save()
def expire_entitlement(self):
"""
Expire the entitlement.
"""
self.expired_at = now()
self.save()
@classmethod
def unexpired_entitlements_for_user(cls, user):
return cls.objects.filter(user=user, expired_at=None).select_related('user')
@classmethod
def get_entitlement_if_active(cls, user, course_uuid):
"""
Retrieves the active entitlement for the course_uuid and User.
An active entitlement is defined as an entitlement that has not yet expired or has a currently enrolled session.
If there is more than one entitlement, return the most recently created active entitlement.
Arguments:
user: User that owns the Course Entitlement
course_uuid: The Course UUID for a Course that we are retrieving active entitlements for.
Returns:
CourseEntitlement: Returns the most recently created entitlement for a given course uuid if an
active entitlement exists, otherwise returns None
"""
try:
return cls.objects.filter(
user=user,
course_uuid=course_uuid
).exclude(
expired_at__isnull=False,
enrollment_course_run=None
).latest('created')
except CourseEntitlement.DoesNotExist:
return None
@classmethod
def get_active_entitlements_for_user(cls, user):
"""
Returns a list of active (enrolled or not yet expired) entitlements.
Returns any entitlements that are:
1) Not expired and no session selected
2) Not expired and a session is selected
3) Expired and a session is selected
Does not return any entitlements that are:
1) Expired and no session selected
"""
return cls.objects.filter(user=user).exclude(
expired_at__isnull=False,
enrollment_course_run=None
).select_related('user').select_related('enrollment_course_run')
@classmethod
def get_fulfillable_entitlements(cls, user):
"""
Returns all fulfillable entitlements for a User
Arguments:
user (User): The user we are looking at the entitlements of.
Returns
Queryset: A queryset of course Entitlements ordered descending by creation date that a user can enroll in.
These must not be expired and not have a course run already assigned to it.
"""
return cls.objects.filter(
user=user,
).exclude(
expired_at__isnull=False,
enrollment_course_run__isnull=False
).order_by('-created')
@classmethod
def get_fulfillable_entitlement_for_user_course_run(cls, user, course_run_key):
"""
Retrieves a fulfillable entitlement for the user and the given course run.
Arguments:
user (User): The user that we are inspecting the entitlements for.
course_run_key (CourseKey): The course run Key.
Returns:
CourseEntitlement: The most recent fulfillable CourseEntitlement, None otherwise.
"""
# Check if the User has any fulfillable entitlements.
# Note: Wait to retrieve the Course UUID until we have confirmed the User has fulfillable entitlements.
# This was done to avoid calling the APIs when the User does not have an entitlement.
entitlements = cls.get_fulfillable_entitlements(user)
if entitlements:
course_uuid = get_course_uuid_for_course(course_run_key)
if course_uuid:
entitlement = entitlements.filter(course_uuid=course_uuid).first()
if (entitlement and is_course_run_entitlement_fulfillable(
course_run_key=course_run_key, entitlement=entitlement) and
entitlement.is_entitlement_redeemable()):
return entitlement
return None
@classmethod
@transaction.atomic
def enroll_user_and_fulfill_entitlement(cls, entitlement, course_run_key):
"""
Enrolls the user in the Course Run and updates the entitlement with the new Enrollment.
Returns:
bool: True if successfully fulfills given entitlement by enrolling the user in the given course run.
"""
try:
enrollment = CourseEnrollment.enroll(
user=entitlement.user,
course_key=course_run_key,
mode=entitlement.mode
)
except CourseEnrollmentException:
            log.exception(u'Enrollment for Course Entitlement {uuid} failed'.format(uuid=entitlement.uuid))
return False
entitlement.set_enrollment(enrollment)
return True
@classmethod
def check_for_existing_entitlement_and_enroll(cls, user, course_run_key):
"""
Looks at the User's existing entitlements to see if the user already has a Course Entitlement for the
course run provided in the course_key. If the user does have an Entitlement with no run set, the User is
enrolled in the mode set in the Entitlement.
Arguments:
user (User): The user that we are inspecting the entitlements for.
course_run_key (CourseKey): The course run Key.
Returns:
bool: True if the user had an eligible course entitlement to which an enrollment in the
given course run was applied.
"""
entitlement = cls.get_fulfillable_entitlement_for_user_course_run(user, course_run_key)
if entitlement:
return cls.enroll_user_and_fulfill_entitlement(entitlement, course_run_key)
return False
@classmethod
def unenroll_entitlement(cls, course_enrollment, skip_refund):
"""
Un-enroll the user from entitlement and refund if needed.
"""
course_uuid = get_course_uuid_for_course(course_enrollment.course_id)
course_entitlement = cls.get_entitlement_if_active(course_enrollment.user, course_uuid)
if course_entitlement and course_entitlement.enrollment_course_run == course_enrollment:
course_entitlement.set_enrollment(None)
if not skip_refund and course_entitlement.is_entitlement_refundable():
course_entitlement.expire_entitlement()
course_entitlement.refund()
def refund(self):
"""
Initiate refund process for the entitlement.
"""
refund_successful = refund_entitlement(course_entitlement=self)
if not refund_successful:
# This state is achieved in most cases by a failure in the ecommerce service to process the refund.
log.warning(
u'Entitlement Refund failed for Course Entitlement [%s], alert User',
self.uuid
)
# Force Transaction reset with an Integrity error exception, this will revert all previous transactions
raise IntegrityError
def save(self, *args, **kwargs):
"""
Null out empty strings in order_number
"""
if not self.order_number:
self.order_number = None
super(CourseEntitlement, self).save(*args, **kwargs)
@python_2_unicode_compatible
class CourseEntitlementSupportDetail(TimeStampedModel):
"""
Table recording support interactions with an entitlement
.. no_pii:
"""
# Reasons deprecated
LEAVE_SESSION = u'LEAVE'
CHANGE_SESSION = u'CHANGE'
LEARNER_REQUEST_NEW = u'LEARNER_NEW'
COURSE_TEAM_REQUEST_NEW = u'COURSE_TEAM_NEW'
OTHER = u'OTHER'
ENTITLEMENT_SUPPORT_REASONS = (
(LEAVE_SESSION, u'Learner requested leave session for expired entitlement'),
(CHANGE_SESSION, u'Learner requested session change for expired entitlement'),
(LEARNER_REQUEST_NEW, u'Learner requested new entitlement'),
        (COURSE_TEAM_REQUEST_NEW, u'Course team requested entitlement for learner'),
(OTHER, u'Other'),
)
REISSUE = u'REISSUE'
CREATE = u'CREATE'
ENTITLEMENT_SUPPORT_ACTIONS = (
(REISSUE, u'Re-issue entitlement'),
(CREATE, u'Create new entitlement'),
)
entitlement = models.ForeignKey('entitlements.CourseEntitlement', on_delete=models.CASCADE)
support_user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    # Deprecated: use action instead.
reason = models.CharField(max_length=15, choices=ENTITLEMENT_SUPPORT_REASONS)
action = models.CharField(max_length=15, choices=ENTITLEMENT_SUPPORT_ACTIONS)
comments = models.TextField(null=True)
unenrolled_run = models.ForeignKey(
CourseOverview,
null=True,
blank=True,
db_constraint=False,
on_delete=models.DO_NOTHING,
)
history = HistoricalRecords()
def __str__(self):
"""Unicode representation of an Entitlement"""
return u'Course Entitlement Support Detail: entitlement: {}, support_user: {}, reason: {}'.format(
self.entitlement,
self.support_user,
self.reason,
)
@classmethod
def get_support_actions_list(cls):
"""
Method for retrieving a serializable version of the entitlement support reasons
Returns
list: Containing the possible support actions
"""
return [
action[0] # get just the action code, not the human readable description.
for action
in cls.ENTITLEMENT_SUPPORT_ACTIONS
]
|
cpennington/edx-platform
|
common/djangoapps/entitlements/models.py
|
Python
|
agpl-3.0
| 21,559
|
import subprocess
import time
from oeqa.oetest import oeRuntimeTest
class CommSshTest(oeRuntimeTest):
'''Ssh to logon target device
@class CommSshTest
'''
def test_comm_ssh(self):
'''check device ssh ability
@fn test_comm_ssh
@param self
@return
'''
        # Run a command via target.run; if it succeeds, the ssh connection works
(status, output) = self.target.run('uname -a')
##
# TESTPOINT: #1, test_comm_ssh
#
self.assertEqual(status, 0, msg="Error messages: %s" % output)
|
ipuustin/intel-iot-refkit
|
meta-iotqa/lib/oeqa/runtime/connectivity/services/ssh.py
|
Python
|
mit
| 572
|
"""
Given s1, s2, s3, find whether s3 is formed by the interleaving of s1 and s2.
For example,
Given:
s1 = "aabcc",
s2 = "dbbca",
When s3 = "aadbbcbcac", return true.
When s3 = "aadbbbaccc", return false.
"""
__author__ = 'Danyang'
class Solution(object):
def isInterleave(self, s1, s2, s3):
"""
dfs
dp
dp[i][j], for s3[:i+j] interleaved by s1[:i], s2[:j]
- d b b c a
- T F F F F F
a T F F F F F
a T T T T T F
b F T T F T F
c F F T T T T
c F F F T F T
notice the boundary condition
        Thought:
        dfs is easy to come up with, but its running time is exponential;
        thus, dp
        f[i][j] means s3[:i+j] can be formed from s1[:i] and s2[:j]
        two ways f[i][j] can be true:
        1. s3[i+j-1] == s1[i-1] and f[i-1][j]
        2. s3[i+j-1] == s2[j-1] and f[i][j-1]
        otherwise it is false:
        f[i][j] = (f[i-1][j] and s3[i+j-1] == s1[i-1])
               or (f[i][j-1] and s3[i+j-1] == s2[j-1])
:type s1: str
:type s2: str
:type s3: str
:param s1:
:param s2:
:param s3:
:return: boolean
"""
m = len(s1)
n = len(s2)
if m+n != len(s3):
return False
dp = [[False for _ in xrange(n+1)] for _ in xrange(m+1)]
# initialize boundary conditions
dp[0][0] = True
for i in xrange(1, m+1):
dp[i][0] = dp[i-1][0] and s3[i+0-1] == s1[i-1]
for j in xrange(1, n+1):
dp[0][j] = dp[0][j-1] and s3[0+j-1] == s2[j-1]
# calculating
for i in xrange(1, m+1):
for j in xrange(1, n+1):
if not dp[i][j]:
dp[i][j] = dp[i-1][j] and s3[i+j-1] == s1[i-1]
if not dp[i][j]:
dp[i][j] = dp[i][j-1] and s3[i+j-1] == s2[j-1]
return dp[-1][-1]
def isInterleave_TLE(self, s1, s2, s3):
"""
dfs
Time Limit Exceeded
:param s1:
:param s2:
:param s3:
:return: boolean
"""
if not s3:
return True
letter = s3[0]
        if s1 and s1[0] == letter:
            if self.isInterleave_TLE(s1[1:], s2, s3[1:]):
                return True
        if s2 and s2[0] == letter:
            if self.isInterleave_TLE(s1, s2[1:], s3[1:]):
                return True
return False
if __name__ == "__main__":
assert Solution().isInterleave("aa", "ab", "abaa") == True
assert Solution().isInterleave("aabcc", "dbbca", "aadbbcbcac") == True
assert Solution().isInterleave("aabcc", "dbbca", "aadbbbaccc") == False
|
algorhythms/LeetCode
|
097 Interleaving String.py
|
Python
|
mit
| 2,718
|
# Base class for a BLE device. Each OS supported by the library should
# inherit from this class and implement the abstract methods.
# Author: Tony DiCola
#
# Copyright (c) 2015 Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import abc
class Device(object):
"""Base class for a BLE device."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def connect(self, timeout_sec):
"""Connect to the BLE device."""
raise NotImplementedError
@abc.abstractmethod
def disconnect(self, timeout_sec):
"""Disconnect from the BLE device."""
raise NotImplementedError
@abc.abstractmethod
def list_services(self):
"""Return a list of GattService objects that have been discovered for
this device.
"""
raise NotImplementedError
    @abc.abstractmethod
def discover(self, service_uuids, char_uuids, timeout_sec=30):
"""Wait up to timeout_sec for the specified services and characteristics
to be discovered on the device. If the timeout is exceeded without
discovering the services and characteristics then an exception is thrown.
"""
raise NotImplementedError
@abc.abstractproperty
def advertised(self):
"""Return a list of UUIDs for services that are advertised by this
device.
"""
raise NotImplementedError
@abc.abstractproperty
def id(self):
"""Return a unique identifier for this device. On supported platforms
this will be the MAC address of the device, however on unsupported
platforms (Mac OSX) it will be a unique ID like a UUID.
"""
raise NotImplementedError
@abc.abstractproperty
def name(self):
"""Return the name of this device."""
raise NotImplementedError
@abc.abstractproperty
def is_connected(self):
"""Return True if the device is connected to the system, otherwise False.
"""
raise NotImplementedError
@abc.abstractproperty
def rssi(self):
"""Return the RSSI signal strength in decibels."""
raise NotImplementedError
def find_service(self, uuid):
"""Return the first child service found that has the specified
UUID. Will return None if no service that matches is found.
"""
for service in self.list_services():
if service.uuid == uuid:
return service
return None
def __eq__(self, other):
"""Test if this device is the same as the provided device."""
return self.id == other.id
def __ne__(self, other):
"""Test if this device is not the same as the provided device."""
return self.id != other.id
def __hash__(self):
"""Hash function implementation that allows device instances to be put
inside dictionaries and other containers.
"""
return hash(self.id)
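# Illustrative sketch, not part of the Adafruit module: a minimal in-memory
# backend showing how a platform implementation satisfies the abstract
# interface above. The MockDevice name and its canned return values are
# assumptions for demonstration, not a real backend.
class MockDevice(Device):
    """Trivial Device implementation useful as a reference or in tests."""
    def __init__(self, device_id, name):
        self._id = device_id
        self._name = name
        self._connected = False
    def connect(self, timeout_sec):
        self._connected = True
    def disconnect(self, timeout_sec):
        self._connected = False
    def list_services(self):
        return []
    def discover(self, service_uuids, char_uuids, timeout_sec=30):
        # Nothing to discover for an in-memory device.
        pass
    @property
    def advertised(self):
        return []
    @property
    def id(self):
        return self._id
    @property
    def name(self):
        return self._name
    @property
    def is_connected(self):
        return self._connected
    @property
    def rssi(self):
        return 0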
|
fourtriple/bt-dist
|
Adafruit_Python_BluefruitLE/Adafruit_BluefruitLE/interfaces/device.py
|
Python
|
gpl-3.0
| 3,958
|
"""
Implementation of the GMO notification
"""
import logging
from shoppingcart.processors.GMO import parse_order_id, payment_accepted, AbstractReadParams
from shoppingcart.processors.exceptions import CCProcessorException
from shoppingcart.processors.helpers import get_processor_config
from ga_shoppingcart import order_helpers
NOTIFICATION_STATUS_CAPTURE = 'CAPTURE'
NOTIFICATION_STATUS_VOID = 'VOID'
NOTIFICATION_STATUS_CANCEL = 'CANCEL'
NOTIFICATION_STATUS_REQSUCCESS = 'REQSUCCESS'
NOTIFICATION_STATUS_RETURN = 'RETURN'
NOTIFICATION_STATUS_RETURNX = 'RETURNX'
NOTIFICATION_JOB_CAPTURE = 'CAPTURE'
NOTIFICATION_JOB_VOID = 'VOID'
NOTIFICATION_JOB_CANCEL = 'CANCEL'
NOTIFICATION_JOB_RETURN = 'RETURN'
NOTIFICATION_JOB_RETURNX = 'RETURNX'
JOB_CANCEL_CARD = [
NOTIFICATION_JOB_VOID,
NOTIFICATION_JOB_RETURN,
NOTIFICATION_JOB_RETURNX,
]
JOB_CANCEL_DCM = [
NOTIFICATION_JOB_CANCEL,
]
STATUS_CANCEL_CARD = [
NOTIFICATION_STATUS_VOID,
NOTIFICATION_STATUS_RETURN,
NOTIFICATION_STATUS_RETURNX,
]
STATUS_CANCEL_DCM = [
NOTIFICATION_STATUS_CANCEL,
]
IGNORED_NOTIFICATION_STATUSES = [
NOTIFICATION_STATUS_REQSUCCESS,
]
log = logging.getLogger(__name__)
def process_notification(_params):
"""
Process the notification data that was passed from the GMO.
"""
params = _NotificationParams(_params)
if params.shop_id != get_processor_config().get('ACCESS_KEY'):
raise CCProcessorException("Invalid ACCESS_KEY {}".format(params.shop_id))
if params.has_error():
raise CCProcessorException(
"error has occurred at GMO error_code={error_code}, error_detail_code={error_detail_code}".format(
error_code=params.error_code, error_detail_code=params.error_detail_code
)
)
params.verify_params()
if params.is_capture():
_process_capture(params)
elif params.is_cancel():
_process_cancel(params)
else:
log.warning(
"We received an unknown notification. order_id={order_id}, job={job}, status={status}".format(
order_id=params.order_id,
job=params.job,
status=params.status
)
)
def _process_capture(params):
"""
Do the processing of the CAPTURE.
:param params: parameters wrapped by _NotificationParams
"""
if params.is_capture_success():
log.info('We received the notification of the CAPTURE success.')
order = _payment_accepted(params)
        # set the language used for translations when sending email
order_helpers.set_language_from_order(order)
order_helpers.purchase(order, processor_reply_dump=params.get_dumps(), override=False)
elif params.is_capture_ignore():
# nothing to do
log.info('We received the notification of the CAPTURE, but do nothing.')
else:
raise CCProcessorException('Illegal state has been notified. CAPTURE has failed.')
def _process_cancel(params):
"""
Do the processing of the cancel.
:param params: parameters wrapped by _NotificationParams
"""
if params.is_cancel_success():
log.info('We received the notification of the CANCEL success.')
order = _payment_accepted(params)
order.refund()
else:
raise CCProcessorException('Illegal state has been notified. CANCEL has failed.')
def _payment_accepted(params):
_amount = params.amount
_tax = params.tax
_currency = params.currency
if params.is_docomo() and params.is_cancel():
_amount = params.docomo_cancel_amount
_tax = params.docomo_cancel_tax
if _currency == 'JPN':
            # GMO's notification program returns 'JPN' as the currency code for Japanese yen; normalize it to ISO 4217 'JPY'.
_currency = 'JPY'
return payment_accepted(
parse_order_id(params.order_id),
_amount,
_tax,
_currency
)
class _NotificationParams(AbstractReadParams):
params_config_key = 'NOTIFICATION_PARAMS_MAP'
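    # Generic parameter keys (p001, p005, ...) mapped to attribute names; the
    # concrete GMO POST field names are presumably resolved through the
    # NOTIFICATION_PARAMS_MAP processor config referenced above.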
attr_map = {
'p001': 'shop_id',
'p005': 'order_id',
'p006': 'status',
'p007': 'job',
'p008': 'amount',
'p009': 'tax',
'p010': 'currency',
'p017': 'error_code',
'p018': 'error_detail_code',
'p019': 'payment_type',
'p913': 'docomo_cancel_amount',
'p914': 'docomo_cancel_tax',
}
def has_error(self):
return bool(self.error_code.strip())
def is_card(self):
return self.payment_type == '0'
def is_docomo(self):
return self.payment_type == '9'
def is_capture(self):
return self.job == NOTIFICATION_JOB_CAPTURE
def is_capture_success(self):
return self.is_capture() and self.status == NOTIFICATION_STATUS_CAPTURE
def is_capture_ignore(self):
return self.is_capture() and self.status in IGNORED_NOTIFICATION_STATUSES
def is_cancel(self):
return (
self.is_card() and self.job in JOB_CANCEL_CARD
) or (
self.is_docomo() and self.job in JOB_CANCEL_DCM
)
def is_cancel_success(self):
return self.is_cancel() and (
(self.is_card() and self.status in STATUS_CANCEL_CARD) or
(self.is_docomo() and self.status in STATUS_CANCEL_DCM)
)
def verify_params(self):
"""
        Validate that we have the parameters we expect and can convert them to the appropriate types.
"""
if self.is_docomo() and self.is_cancel():
required_params = [('docomo_cancel_amount', int), ('docomo_cancel_tax', int), ('currency', str), ]
else:
required_params = [('amount', int), ('tax', int), ('currency', str), ]
for key, key_type in required_params:
if not hasattr(self, key) or getattr(self, key) is None:
raise CCProcessorException(
u"The payment processor did not return a required parameter: {}".format(key)
)
try:
setattr(self, key, key_type(getattr(self, key)))
except (ValueError, TypeError):
raise CCProcessorException(
u"The payment processor returned a badly-typed value {value} for parameter {parameter}.".format(
value=getattr(self, key), parameter=key
)
)
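# Illustrative sketch, not part of this module: a hypothetical caller showing
# how a dict of GMO POST parameters would typically be handed to
# process_notification. The real wiring lives elsewhere in edx-platform; the
# function name and parameter shape below are assumptions.
def _example_handle_gmo_callback(post_params):
    try:
        process_notification(post_params)
    except CCProcessorException:
        log.exception('Failed to process a GMO notification')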
|
nttks/edx-platform
|
lms/djangoapps/ga_shoppingcart/notifications.py
|
Python
|
agpl-3.0
| 6,388
|
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
"Test for copy"
from trytond.model import ModelSQL, fields
__all__ = [
'CopyOne2Many', 'CopyOne2ManyTarget',
'CopyOne2ManyReference', 'CopyOne2ManyReferenceTarget',
'CopyMany2Many', 'CopyMany2ManyTarget', 'CopyMany2ManyRelation',
'CopyMany2ManyReference', 'CopyMany2ManyReferenceTarget',
'CopyMany2ManyReferenceRelation',
]
class CopyOne2Many(ModelSQL):
"Copy One2Many"
__name__ = 'test.copy.one2many'
name = fields.Char('Name')
one2many = fields.One2Many('test.copy.one2many.target', 'one2many',
'One2Many')
class CopyOne2ManyTarget(ModelSQL):
"Copy One2Many Target"
__name__ = 'test.copy.one2many.target'
name = fields.Char('Name')
one2many = fields.Many2One('test.copy.one2many', 'One2Many')
class CopyOne2ManyReference(ModelSQL):
"Copy One2Many Reference"
__name__ = 'test.copy.one2many_reference'
name = fields.Char('Name')
one2many = fields.One2Many('test.copy.one2many_reference.target',
'one2many', 'One2Many')
class CopyOne2ManyReferenceTarget(ModelSQL):
"Copy One2Many ReferenceTarget"
__name__ = 'test.copy.one2many_reference.target'
name = fields.Char('Name')
one2many = fields.Reference('One2Many', [
(None, ''),
('test.copy.one2many_reference', 'One2Many'),
])
class CopyMany2Many(ModelSQL):
"Copy Many2Many"
__name__ = 'test.copy.many2many'
name = fields.Char('Name')
many2many = fields.Many2Many('test.copy.many2many.rel', 'many2many',
'many2many_target', 'Many2Many')
class CopyMany2ManyTarget(ModelSQL):
"Copy Many2Many Target"
__name__ = 'test.copy.many2many.target'
name = fields.Char('Name')
class CopyMany2ManyRelation(ModelSQL):
"Copy Many2Many Relation"
__name__ = 'test.copy.many2many.rel'
name = fields.Char('Name')
many2many = fields.Many2One('test.copy.many2many', 'Many2Many')
many2many_target = fields.Many2One('test.copy.many2many.target',
'Many2Many Target')
class CopyMany2ManyReference(ModelSQL):
"Copy Many2ManyReference"
__name__ = 'test.copy.many2many_reference'
name = fields.Char('Name')
many2many = fields.Many2Many('test.copy.many2many_reference.rel',
'many2many', 'many2many_target', 'Many2Many')
class CopyMany2ManyReferenceTarget(ModelSQL):
"Copy Many2ManyReference Target"
__name__ = 'test.copy.many2many_reference.target'
name = fields.Char('Name')
class CopyMany2ManyReferenceRelation(ModelSQL):
"Copy Many2ManyReference Relation"
__name__ = 'test.copy.many2many_reference.rel'
name = fields.Char('Name')
many2many = fields.Reference('Many2Many', [
(None, ''),
('test.copy.many2many_reference', 'Many2Many'),
])
many2many_target = fields.Many2One('test.copy.many2many_reference.target',
'Many2ManyReference Target')
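# Illustrative only, not part of this module: how a copy test would typically
# exercise these models. Pool access below follows general Tryton conventions
# and is an assumption, not this repository's actual test code.
#
#     from trytond.pool import Pool
#     One2Many = Pool().get('test.copy.one2many')
#     record, = One2Many.create([{'name': 'Test'}])
#     copied, = One2Many.copy([record])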
|
kret0s/gnuhealth-live
|
tryton/server/trytond-3.8.3/trytond/tests/copy_.py
|
Python
|
gpl-3.0
| 3,032